[Binary content: tar archive containing var/home/core/zuul-output/ and var/home/core/zuul-output/logs/kubelet.log.gz (gzip-compressed kubelet log, owner core); compressed data not recoverable as text.]
;cNf2 yxrh:oj;rBe7EMjRmRX;g N@%C`UVV )rQvlc8p6 FtQ}^!S"^*Ag"%XA^a8jmy{&Stk 3_ &uwgoޏz,vW-rs ی&R +Ρ:cLh9S,K;Ptw { VKdD}h,P6lU)@Sr}h IY<Ζʢ1!x~?g֤AW1k( m#,QN%,8KzN02R\d$s֡ɄN#xyDZIg'yl HxF`]RC_ :Pr$I;ӊ)p~g^;xgq\p悵s,dG J*LH>""SF9dT1K츾=qwLY}(h80tQEm[v~AdKx !^xzA'0^ֺ,FTF$$u$/\!*@ _WCuZ.ZURwpB9?"n~wY_&Sb _M>KNxZ-6|.!~|y] o+c6{32"M&6 ػ- pzY#Ȗ%HdP!yH~ݛ$%>ӯx~2݋s>}&=ₛ)㯯NĨCp|s}nc aSyHTֹ7z\L 0v?Ujr^%ܱHmpMUnV_ ArLkبT5*H<\)UE֝vm2sV$Dff+!cuf m0* bǍ6F,ǘN?kZ̽܋ZckOxvV;Y^E@*`G>*UD!$Y]:FEi[ ,äΤ4ų|Ɠ_#u]5X'f1nc@SLqUDRC!7[qzB+Ј[vkC\|;3mccYŰv ]0#n?;hیXCwyW\N`QStZRᐐHexyPTJ]Q$\ٛ`vGrh8FҎY(>t$Iȳmra͖t~k$I.| MEyeY; ݲ;r?aCT༦\F8&ދHIG(& vØH:<+%43ڭU^G*#R"%AD2'Tfi%10" {!mcPgW}<=`D ^2#J8]N9G b88RJADG@JK}-M $\{xCr1cSLRmRb@0݁t}OI3abeti83^83K}O;jg*Y'F$9\wyO<]$ǂ=.87U`lVVfN0S1'_7?*N(-*I— ؇lL-QV@~""I}D]njF^\uFc/;BջS  ͊ +IVp _AuiQ;;MZzqӶoҡ -؜;NOqʯNNǗG9ƜJ"u.qOZ-T Bk20{9,P J-ԧJӮ"Z~/߼*^<ϫ/uaRO4_x=;/ەY^d\\Toڰ!To.%]6CѲyEyq^{WuCfev/5Ӹr/dSMu%J%OPT0?t)tZ4e+y=S%d*@'(3יA_L{~_>u^=;hqiAE}[p_yфU4WM۠hJڬu۬rC׆L۳Sٮ.C;r)v*=lŜegJunV2Weq g!j6K_n?i#f4GA-"-hIl{Ub$[zYF<&0bl $ݷb=wo#2  "u AQS#1@yMV6u5Fwm.FJ'0.Z1sY*VNkh\ ۮr+2J=W0Rg0E#y5'a`^{V(E=EA;@Cőp.uX}"%VN )d4&jci-7S;s~1Q+ q0!l`)0-4L2ˆD[Xxxl0hCiccm{%"mMMAhÉuS+[js~6-X4<0smCwmN˻-p뚵g; Cx&&,4PAG`”?\ h ZѪU dP cJ#68ROj@HysQ 8ʶlhM C\)j"i> n|gj.Xd;kG.:z t8LKˁMNScRH0H%(+]S }+zdoGŷgYI۫ ;~ )\d x:+ÃOx[0ev0 @d8fMG*`_[ RsvF|ל`EzJO¹`1X>.tK$'|%gSӄ~tp:?w%I+:6N p#x ?frf7ߟٽJswِӿ)2Ɛ.t{o' \semSח}Gc/d_ujgc:tk*"]W#sᝉ|rx}Psn~s[1@l>~©Mb.\Aخ| yjqhˁc}ʦ#AwhK=̫T9F$㑅H>HԔU0+Cʘak_Ǭ nо!d|)qnZZV~~$3Ek{}-aby ]"تn߃CޮC؝oYb֩&;'z>==D[C)[؏=v9мͫti֢+c!re#r^+k)L@V%a vTQ͍QmQ'(߆I:.mBjzRSمmQQ!(F .;W6f..gYF@=FH>#:i\ ""HP!:C2v?ịUht&FleF́ TۨQ蒠Utȴ-qn+q`ql:?}h77`.n{\) jW1˵,8/a^}L.(!@3Keu<jBB={x'vE!^PHP!o2 JI{"k M=/B8 t\Z:=V)(8l\ýUk/hl&弼O!70W[^'>u!u|sgwmL12bx*]9s s 05"!W,Gr0]9ESQ0ua*5 ^2l2$LD"{RzM!ZR(Va x$ )[) fcƌL"^ˈiDk45[!--prQ#zH=@I(nn=SD\ǘ ?{ 2F;i@Wq̓7k-LHZfoȺZ7Ϻ?yB56inc!׊6J%LBN~T\L9!uQ:/S7!ik׷f!!e^iٮkk9ߜyiRׇYќ_v=yfgʯސqeknХsk2OU~t g4I?/IUćC9GOl|nl]ƙ:v) }Mfi. #LD5*~)icc9J K0zq di'}Z>߼3P+Pb@/DmgL"kyDYMYD~ADv)@XG#D8bd.SPI&Ab69M Zhe@W.4#sUuj'KpŪR[YWm_Nݮ)Ov/GUv߾^rR$J)đA.4AE$LVwI>dej, `/RͧrJ`)D4 TkdlfěqfX3B ¥盏7;ziapNx|q;>?~^[i 1, 91>"5d&MSNabc1 **lj MȖmp(K"u9;^L j7}Q1``0+CI(2i 2Q[N1dھ!Eg$B_(Tdk2Kđ1b. jvflé[/?0  <>`Nsq!YP*2/hXE'e+ٹҶwRژwH_J]3Křcrl\JX1 I2g;"ޞ?ȸx}h٧f^/.bc\4.qr &:'L9# 1Zt0%+򔩀Qj\<.yǾxha w2w) nHn;f?KaOM;-8Yf}doCqHK 6Nڜ \Ukp J\) U5TZkuZWjupb$6J:vY=_6(}z9Н)SgMvz1ףdKyKDln`ÿ?TN{5~IՓpᗎq-̏ylףwJ7nū+?b ]Q~-u\e6wԋ^cGSoi*NG6=/?/Gq}uN|:G|SBxanz֏ 藟ߌy)D4yF||GǗfȱg/J7\MF-r=V6;l( =`:06Xk}2TkĩPj}jJ?(H?!?溓jJ+W/.VZ-ӹ+wף7 ˪%_)kkGW7S33f$kP}V9 Q`lt5OfZk]aZ9WHBw1Fkh_uuW6jEQ9܁Hyic}UJϴkzEUrȖDD֍JrB F-bw98)4@EKP0 PLl7<$] xk`3svzC }*ۺo%Ef-7WWF|}ӛEwOY5BmcRǛ9 }c.rx{EK:K%REf)&B*&O0Q䨤DN^] 0I̞iI_"X\RPU:pP$Zif^ɳ8ʚ"tN6NEĮMuŰ9vXk5&3[#K5Q2ҺL+s9.rY1\)7 \^y+RݱiYtۂfL`u |*ۺu_wDv*7U#{؁^'A7Z'^' E(mq($>e*g\b* |-DB.ߝOC7kie3RT}zo9䱙MXZ(S`,3Nyje8\LP#/Q*dHq73g{a]us7gu} fsAcSu~O"T'V_twO:tDU[$fJtˠ]Hq "V;aYuYO/UME{e$F7g) w׿J6'{pؗGܝƝҚܼgbOmUl ?Ũ +DWQ$f&*&J1a>[j$9qw^L"j-6W7$̓Hk?69Llp*Qq;/62+m0Rb:\H%)63,4UIOj#z2V"ǔĩPE$qNc: R 4ˈXpIO#Hs 1z @RE%u& /#63g9QbKozܮkV=n-۵k7ri5Z]6_^~fW*U@Ϟ=<-r)y< yynޯ|W^8s?ԭwwts=̶;Rx/,λ>ծywwn>_#_.qs9I 6%[T SO3腕 u7m ٝ%g];$:g!D!dLKIv#z!Ig& ڬQ D sp1%ml  QZSv>4H֒} dIɻdy݇? 
no T( mvD.2 $~ 惞?^ lG@LqnLA%"Uh@b49Z+\j.qvWLK#smMm:7 wO꬙ .g;LZrL;K56l7m'o^PJ!t JpA /"$f"cHBG!+d$TSdDgY ~ \T =j9 *ʮ2JAJ0HFflGv\6}P7B=`Api5cek8}u\=0_܎Ϗ_VȈ-E0ϘXb RHY N뜨GdLTXch"vC{QiUaSMlBl-E'Xkَb$f_Pw0`3EE9XJBeFH[u!Š ):#2B ̠&XI$T\B&QsaXT 73g;NEݾ }AfDDgu & ɂRvY>F2-<)6_4|l[I!kcb!}A+uMq,gbɱjr)V`5($.ʜx{F#]R:yɾq 8Ʋ%ΚR 0A.hxSB&p e*qSܐDُ m_xCyݧb$H*!;'u@AwQСwͰoqU>{e3}&qɤ`QhOZjeYL1PsWuO|r_ٮ]/`"'?6V>#>mARD u7E$N-iQYޔ@ΘQ $x{]4n#[x>k0uYb*Eසy"}J9{0}UMc`<1g TM&HD#&1jتjd[*)̈!CڞS{(4$QڤbYImSɘ)MHʢôz_@b&)d\**F_ʢ&FNwPo-m=rZ v_xJU\HR_/<1vup}1Iyv|fs&wo;|ۀ BPPt$t.]4wEvX} = C߷׎OavJ>p}[:Z6)c3@t2qL" ZBډ|{uKKRU_ <|?辦)yFȣ3nwB:h\]gɻd} qV\6tIGhCW&~Κ1ť+mHl>s^,CZbL Z#RH1 KivtWUuU=T6຦ۖZ[{ CG&zW=KɾCRޅCCF*+yzט-iF9%{RznD2fv0ۻHklcJ>eG8MJ5|3&%<0#I2>7:幦8Y@.1]$/ *|~{~=-ޏ[o<^jyY'at$6n$!%6Zg-(T ˨IXFk<&CO1%!DpɁ֊)\aZ%0&#`$XXJ:vk;ց7k [$VxS(ghUor<(K Cfc+&ehbn? m@Qۖ]opUq{![Km(FƈxCKKsReMT9s.] %&D&RVr9H $b,1Rj*Q4F@(<37;AۻAj,5nLʞ<^'#yH oMWUpnLn\0p&f7^W,G|h|ZWt!DāsQhg~=Ac2&9>8"Ch7LoQG|+ԁh7+1 q9[/W^qE0VTW.ePyy1rzmO矚*Mttt8=._*)\3msElnv}(9PZ:8kXcs).0>QoKӔ.ň+WoՍgis20.!х0gcoӺ_%'Q1~Mpq-:U-- ysKgaze35d`XaM8Ϋp?wYo,]pouZ]V'y#7,q|?\u.SU!=Űޗ}RcueA})*:~/7/x}L9~/)},oAkMfy܈-ͧn:|vmk^X,mA7 !q> v~Ŋq/4pI*訤R\(p+bۼ3YؠڀJcUKZ2ؕ/9#)Ret^Q)z|D:K\; ~S<30ye5}=g]%4wh3O mNlΰȝ6IM:D# ݦN+:{ŶjmL܊muy~wl9it3\$ R6,D]Zbb)ɗ*0 1Ğ3/j)sC`8 Le ٲ _D@s&z.҅P C6[}$$uNn]TN[VV0hzр\"- UaOyS|8R$1Uthqk 8ܼ[X`4:fA(3t@X+cQGHAQ<Ĥ5hw H'񁚔h2o'LKC$KAtIF?}JyyxsqT! D rUÕVYL0x٨h7h0-؝7fgP| wR݁2WW/jφRʓijȵ D*3҃qWa4X6m|qyI^od e12Ĩ Rr4E2aldAr@DU^a %u4q-?b"AdTS6ƷhM-\QHքgr{շLYy"A\d;8~8KX 6.:z t";CGN8f5挔J(!Dt<l/V Nbie0!|L8ف]@-lKנ@!,CȰ5ܳds@p}j}m9K=6T|8A1ryy1#TԬmJwzAVsn00Xʲĸމbw?aM[Z~qάŀZr]/jZZF/KiRijW`^Bh_|5ʔ=Osb2y, [ҺhFJq-T~>oT)8=Y 'OT\铟K7y=K"{x'8w@*K1Zӏ{%MR8Jb*s4Kb`B~ƁE?Wj;PԮvƽqßaŶliYv!wl1$?~Bܭu7)Ւ9=au؞p,Reámc,qO?UIiC̳Yoֿ?jxR1Qe)pE*  c/;x=),H'D:グtQ(+ Ґ<*@ 1P <Z!Db:Q"u;˝^iT5q<V/X[UUFi&"I[T!vJnYUg2f޾hZK7"Z&pe 9K;39@1'֧gϥ˺t9K{7e7طծk5_\yߢ楒x8\S͸biN*\*M ;/N>v}v&dsc5cdtj&埦MxL1.CHi#؅JFgݕd{g=2^#pґ+FRDK8KƨCR&T\XBgmPdɛ ݟv%r:1,ʩgښ۸_ˉf(/CuvSMbg_v])\ɐmŕ EEHq,jz0ݍnK 9YTBz-O4'U!K0޴EeqkjU(ɠIO 4($qɍTyĄSEnAIua)L!zk|rlꫨؠua{n; 0iI%]9F 88VjF@Vᶁp/L; vZh% A8ezrd#uLֳ UQ*K֌GzְJk5θ.-B{Յ+9󌌏'i~sOb4OAѧA:{*k*I)АK y 9累Rpi NkXr'ƕl7 $p:3βJ%脶161aAXBNaMam[c0.[jmi>>L^DSiRPhwN"@۶}J-d DPF]ŀ8B.qL -ȀF5hZև[VF}NFcR O(-6&)!)}0H(9ВfBsQmAi)M!:[Eղ^^6ϕ!1ZzchhNRj5ABr- [{ы[Cݲ>ԷӇZ ~*q[\Hя&H<܍L0yI)CyBc҉`UɌpėyV{..CU]9D\BwKgG9sY-'`8ߧORߓb!/y$裉ޠ.uBT#H/D B$~P'-!@ȩ-LLfۚy CC*:c=4e~EN -ʥZJ*ru X<( iHI WQNx[|~rN<~ ])u 1aD+?R/B-'FuS9T"JDGYWB #As:3pa1 zFU2Nȣ,e؄ YlPhzD XLnr@`,l[ 5m8b,ygffI?L:5ă#Og鑂v.:z$t(OGP dQ&A MZԗ+'Z qVeNv y%y : 9}H-ƻPV vo䉁ŶCZQ#8Dd^ϺHEB*diU'- RQHj]v8a:HS?38\*LTN39/ޓs#@d6|䆓`p߮Oi׎#2LJ5dP oO+k8usMAq2k&Q)ML $NЖ eV1'8ÁП!Z׭g]҉wOF/EF>}<[Yn{j"&os_^ڦO%T>@&F3A#0:kA1B`(Px/J$+wk6CA1%!q"@kŔR.H@zDYt,%1va}^w6̇+8E$Nw`tې*l4nY0KAc0Cy9UR%f4Rl Rg"iG&ة_&^g.sOiͫfsY(^go^۔hv::WwMwp`sYp ~0=%j/W~[x$:w.Ǩɞͥ&c _"_\{հ?xļ"Wȩh b 㲇W\W] # OId+6.5DyU5X?ȿ/NJcJqQ#4E~${\zI/rټM׷=f]]c/P:2hK:^/8r1wÈ-.70gw<ϳT.N4O<,>)Gu&~SC93 F1w ~6]yټ:7Dz"nf>ው6X7ʘ_e\^Dxy^nR$$oV7w8nvu.|(G)8E=R~{oo։&"B>ю/7DMh4~Q@klIwuR=mcynz4@N]BQmi̶͆}'m`<yIʵ}T0Au콣3l޺ǼMlb>p[jzN>%@& 7H湭g9C<12{s׶ Z;)IY#J1"-NtH0<!vV //=?fw<~ j $"!n6)1EV0%s^;J/s`H'f;jMƊ__>vl\%. [ą05~UcՏtH 6HEr5QOFjC',Whɯatv6f&` ّןq& VB )AGqY"knW ÏG|_-KrYWsKKC/C q'SzlVvtLNΪkw!rk-Gۆ^{/UsR68=͊\5®4ov\|x(ыk~xG# CD"[SggTqы/s>%V/oVjNjGY'}1LU` e43` m/8/\#03*s#|M. 
6}+^5ǒqmU֮Z[V V9kl/)=tU 8 YW;׃6 AC)lELٳS8r@)ZE>3aBBܱQJ\R4y9rC|Ds ؾrs|m e,9.:.٬U- TFH !Z.u"99J0!YJDF4i8%å_uPqWVEq6q ֒3]~3T82X)(P@q%@ ۋT+B=a l xla݇B<႙6d I"e$| s:^ C4=Ҵ # --QJ5QA+ω Dw)v6CpQQx/@E HPDʕ\Oˤl>ꝴ-̯ӱ 2/qG >ңŷG (k| A(IGyݛA |ݪ,ˎ"zJ2>Oh8p"W*-i҉5h 3(;Ƥw#zn:3?Mgz0e}m'RG; I6p= 7^ïu_3p:bߌ\>dp>UGYھiZ.y|||4;TA.)\3ms6^أ |2l()eǚoNQyUy~_jFQ92KdGΧF*h1{ǍeJ_f~`3w lmdI#c0}~KjRU Yլ"yx%@۷çqfOjmog}u$ߖZMr Θ3gq>,,biJqsK`c/qAnRڳM=ѻGMG#E+x]KK=L>גufvI0*I N>SW5k.=ށlIԉ?SR9)c:D# Ӛ k:Y'v>>}Vv2ϰ^-(oͰ^4fXo0 dXs*sfo>P"1ueFT׾R:8P"<'?'y;*S2l/DQΙJJ"QD-yFF_ NPyi˵r!S2z#p綑.EF6ѷtgIe&0Sh} u$1Uth֘[sq)>֜NpIo_ HgB '1ZR8Fu4Nzm )GyYDͤ9hT zʠ~w"Nj559˼a|."1ڹڲ]-#[9@)Q<4<QG$ 1$xÕVYL0l9ᲳQie4p!.M k#֪`mAS8@@~G><8YnŠk;ϯ  `VL &6QY}Ž8O#U9+< x]">:0 #W8r+NRk nb :/üu 9\(|sk )ctaӝ mtJ1Z֌6hpKL&TMXbɤ:88Q 0KewV%[V.qL28l QAq >0GF;㫋z5bϲDųS4=~YȳLʳl_ȳTUJ25 gW J½AD&OܥɅ|$RdB\H )p!I@{SCi48I:1ex"YbB U aP֎H FGT(|謏U')+IQz`t08 ^qQb+5V}R[8FT5ySyj3C0C~3Xw^=* ۫k `kvE R`nV?`z DW.լ{^W*(0%ɐ}C Y!2Jf* CV)GdJNbZoKɒP)ESN,fNk!|-B.>mCoՠZ6~qTsߛ[.I=L \e%2 nh-Vh%K'ZˬvW;ieu-2rK_M:Dƣ8I-z USY|;T; LxrUٹrA+Sz͇٧ƲX ُlx eKZn.|~!1(;cokWΝ|E_ڒwG4/V^8Z=Q]'2x-L 7J)Jgx3ޝ3ALuqjc1;_oWnraXAʯXp,Ej+rR~+T*v+v~*%5"JVx JeUd<$ O '#}/|KS!HWaKXS+eK@ZVVXRYj:(}K2ا9x,!u)7/CWBۚTs ^m+\pu L]myƺ6h>TuZ˃6*.K9?qVZ"ѰfF-i9RTdL8Ymya8cBJ%"n5^g8<|3#_gf^.;Vx^o+FZN޴W!*Fg8~4ϡTјSs_mwBM~}*ȁEtwo+aȊ3b6Wsw.w8,o]Fi98Ԁx9k{#_g2_ AE 8\{奦缧5s@ͦ.kzt=ăopH~ -ހ7Zٟ7WGY.roqb%AߠfT]h>OfMrlmj_L=i3XNᠮp"J + oyWֱ7ˎ!MG~6ļ5 yb&D7р1`7t7ƅO5{ mm ==yEpv)b (\+7IXVSt-ں$.{֦ЏJZ+]V+xƊ=͑AM60 5{@KHyztHo HP8C 4O//>e*"n8LkXşͻ4r;kjGi仱ev fEʈG.)׶bi 1vh'ڣ)m%MC+wڎl.ja#)([k%Gh #g#ʖ ?e'[_X-]vDIR] 2,\AaZCWPJ]te&ڵ)ڲ?]`I1tp%)$wBNt5BbTB-0gBBWV Q9Sc-)ǺB֖BWZf9]!J1J0FDIt5XW}RIt(jte med lfƎrkvljK bil).+&dh̝V JZ#\Q ]!ZMs+DiDW#+͉f BCW5%]U{*eԔLt5B\1VRCWR 7etn%~^p=pZ~P̶Jte'ڵ` [R]!\FK+D+Lt(jtńEYWXb *^ ]!Z=]!JK&!]q*"<<خ;w9+Z+x]"|i-.(7áMKv][4m/6~~URwMCy$1 GTRyo}T^.ϴ녒L37K=V\>aYj8PgCdR'MɒK+(&lhU%mDWOCW(FJQW/VWVj6R\]FLl2*Q _7U꽦uL{sTӜr55g)Z!jق3Z4'3jN$GۥdB[J!MI,&p5)&hDLƖDW*[ ]!\J+% *wB-Rj'Zr+D)&'xte,״$ r)'J;]!J=-1UkIE@Kȝ8]юMO(!Ol~WD{UvRi]MJ+x1tpM1th dl'V]`X1tp*xgIU6ZB ltfا)tu|j>rq3K*݄bzϟ~ 덃9IsilD&T:U_YiUkSNs^mI:cs_.|!\?tvr!6i?|)>CX}׋ /`w~:߷P;;=šw@ԼL6ף6c7Ci J!ۯWń8ZchIԖ`ko5W(M~bYI) r%yeU?>ߐJo (]*,H/Ǥ:!.kmVeqp^te4MO E&:% p-ٲD 䐜~3;&yZD.pf{c89GR"0i5eGmiDNhy]an7ֵIxSdOH?y\[- - Lo+435`gw7=]syϔC[ 8nL?Y[u-nnM-˲֢ᦳhuu2g%IqC!DU"Z&$bT(&MAp+Wx 0U|/%VB03,oW10i-y ?" N$c hJ5J8P\f B%p/< {Cc a: %ⱅug\G f $ZD'pi9!%ι:>~ؚ>ִ # --QJ5QA+ω DR) 0!A?t-߸Cpy>x DR "HXbT(hNQxf>J^¤ SدN#uL i= k]x-̯LW;Nf0 np:J~KptNOUG[ (Bqn_nq5B=]Y߫#ϳ?ZYyJ.=jIhމVF<\U\)* l|4Y):xR l&Wxo/ԛE0։Ez#!69z ՊQ/_M{&:>>VJJ%Lۣ`wmWEIOie&}Fq1_kijŹ'?.ډ}kg1r} meZKdF:άb2hyuךw.E}jYGQ{^kUkiFmt DC{(H;"w8'e60<tNl|OC46m ĚeX?}icnF|G}%s8hR?śUB]^(jkFX! >(BFw\4 :-!9x7O/zQXsJq;bJLV CӸz MjkH~?]%ǫE5Cx!YqsqHگTlϸ3OILKZ&Qp0a;K7K4x@c8uiF'_DN+#<[ Us'Z@')![6 QHrDυWR D$qFy/df^rrm4D"AӋNO@(tuF͠5ƣA?\iWy!EBQE&ƀkoޜ"Zx ` uv$e$EJ89t4Jm8)GyY?TmB2Im|&8u7L[+IXDm2XާtVS|O-ĵ*lXhş_okj BKq7 u۞|] \o ]D\^͗r:Z X|;*ޝ°bߡPGbiZ᳗ۖ~6 f7@2 w~5#v`m;vkU,tf]/j>_Lmtv+=Xd*~Hry}HY6sK*dZb`B2pFFZ<ªu@Yu#ʺ  &ZlH두6ΑV" c||2RQYZR)ၔZ 50j9@8^8bW]xأUUP"(l' ˀ" с"z$$8[7H .%eKuIHR&AUI !#zj1!15~14u97Gb.4>b#jKB$Nw Z;diN~Z6\3 ˜Z`ҏP} AzR%f4Rl @c3ъʃF&xP=[UA|qA!p dcMw#рEUyJ z$:7MRcTdOdzl+}ߋ6ڲo"1/5r~p-:$zo>.[XYʶW77# O>IrLFJQKtu}x=zJ+T- h:Sz%H U{q粼?K`6!`s0:-{E lu>'ˡW+\EnF_8˳tSE |W?4xXdSCH3SCm97KV?Ch? ſG~4Qd\agۧ@Ij3TwӔT ON]罆n-׳.^:VV7^F2x?of㼩$aaj{5nfEޭ:q7mZ{X5gSMd;|.騵&n;dV. 
7]<̵;g]+޵ ٬ETNpeVA:}JTCTN՞//wl |g/bnk 7kڔ"I+ϯ%s<(n^Nʽ3޸%'Iw ^Wand(sWIcՏt*)TǪ\8E :˫.GgaWV;8hIbX |)-ڤl.A5֏ՇEVO-!2x%8$(JpB9x|~"6H!n6=mkƐNt;hGJ,48Dڠ]bJ)8 wVTY.wX;)$v1= `05ۍI\vv*8Ǐ(TV7*q,J:/!//u4[mMrPH1J1&=p*yb[n܂v17EpP$ >D4<ňՄF"S^Bf12^K(zw杁)|(֐kdT28j(b-O-;f]x]r9~.1\y㫇!/~SM^OƶƊػmم3W&V&Y-J,v|juj&*mL9JDJ9v|pd_ƑemGuΑ덽#,!1#9 9ýIoFGUZ( `<(tof0υ0\nYvVN>}8ׯ7"m :b$Nl!2 24I@p< FGxÌ5Ueu1Þ"dzզ{#-v=vSԮ*ԟvaULGi5Z 'w؅彋sw'??Kodz,ȋ~?fỜz8}_,ˢ]W,9EF#|*Ϊ>k%NjIxmEo~}?GdFmŎxחC_뗼|-ok!VܰH|}Uuj@Q_9rG03g5 o;i{XvԌM-ܾ=-6t yyZ4Ճ`t|=˽: ֈ! eK/#+6Ԕҧ1{ʀk/4&_jx5®wRqoL[<nW luX:k;>.w9WfttkÎqm$LxH9&=ep͍Nmr1H=gա ;~Iznf Fnv:< vq9Ј\0S~nA$%A"j_jA)Jʼ٪0H# jCPkZ͛FieP|Ƃ#%< R ƹ'zبbrL۝FT21Mo3THY"!(Od F:PAӈRt4sw@>\M.`M>;Kn6rf-.!`ݰJפp6K7\ͫk6}$:eFW^m1ANT2E\K!sk܌ez|[,+|[ݜQv|Oii7\.Q/&wlްh|"t&l bE47XO'p_gx:гou>&GV+a&GI""J y1'!l8@ QA!)M*cs͓א}8˴Ay!(Īj;MbDa Wen fvqX]a֬c_kk`Bt&,-MV Z{%ǽ!ƶ}w:/}toHzPG\""qD/ GQm;qkp6a%31v[0q`ħ,ނ7NCVzW1wY092ɀ~R.@D"6 M)!j~5 91eqy"Pţ4c9/N͌"bU;"rKcYɾ-xq''kh%4PN4+)9E4hH`&\Iґr^|^ fj~|bxw/0ǧD?*Ah}] A1aw+5]hy8%P~F 'qhWO~4.'t)F@7 F_1bA* DN9>*< 9>8c]o.7.қG{gW}ԩP.QIIE" {p*raɠI^k! x[i5]4Tք9ydGM?6zwR<@ir˫y#|/qZX^j6w %ӚW:XItfP5Q5TxmU#^MP/TRhQi4D$$h.1c mǨ58,l|ryE'V}_n&lI;*? ρ\"^`6ŠnjFK- i[d"/(#ľĔN:S&&!Co9}M~hXdJnϩ#! 4:z.}Rh!;DئSR$09i7y'{uw)WM/ܯ]v[Gby$|3 !VEql|sU}]7ߏ?'k&3g ~hq% ]L~}+lCx/^9N2.`i+Ql׶:E?zrO;:!`TovtBfG'%%;:!JF尣e'Gtf++ 2Z.NW6;F2ڴOUlto }UF)+eSB{W6w''?BWvCAR@W|}b zDWS ]eL2ZANW]!]1EV7>]!`Cdo*eh:]ejc+. 'U?E{5O>~0H>i5?ՔJe,}qwߏ/юΊ:4q518E2b\([zY)̕>Esƀ.fmS6]gS١~&6:_79d<ΛcdŃ9y{'Mc8!GRV#^/ LrKsx |ƿ/ڎ*kiTrVԊߪҿ4InˈaDlJF+TW.@}S||aN)~Oy^ZА#>v^гߟıN_-VݦvКvd3[^5>X#O  {eBShU2J#O=5> KE2\EBWQ]+D)@WGIWh~+gC}RU{1&_1oI^ Gab2kK{>,.ՙC9󱔲^(/ӄ nGtOvYS^nPJc2oه˫+YxMpc _WmJ/ޝ$ΕeyI(Zj_aɳWx(/0,U+`/bwji<}a)9@ilHe}cS E i^J mhtx>Cۣmcy˖xs0|Q۬ޟ$!n]⹄C* ⛕eaՒb"LJ~Fb:EZYPrF5"Wޞs+cz4 u~ - \ /C VSDiZ-X&n07t ]eR=ҕҊ>yWp9 ]e/m ϧr%a@WCWZIx*D@X*d]!J5Qշ^Kyr* v\˼)>_F67M^fCs,~8~h":oG#ߧhoUTo~{*+-|S?ɿh4vn(>^SU>^]M'S4eA/KmY3A^u/{efw֜/5H~ƏŨ&cuy{,Xmq-e$~~BS\Tu~ݚ7H>>ǀŌiըfhZ_W(:ԨlQhbL~s{KQ>Wg2=6dDyT㊳NL`%ǔjhK3lyr!;ƔH9 Au q_H?˟?_IJ+-ZY*`\KV-srF\FysU,Q'EK+cr]z};aYg _VUK`SQk%-ɞAEEg9h-ķ&]cYq(ϫABuZ .C7K \ǐWo$^PX[[]x(. -a5@1ם͎񬽶D#0\{5y"X#?5 {y Pv RKDet((*<:2km ,^ vb[B{աR- 5 # l@l|?ټ&X(% E'alW(Mj*3} + "Xd*z?r>XXm'fH574Z@6/ې@0vMB(2CeidBsي>R· k<W]`&` ֲ488)YD=rK@4`^*BA&:FŬA0ཀྵ(J892z"XӪ#KAU?Ѿ`W ()URv>Uh&-RWU^UuA4"fߎc'͊H5yWv !1TDEP8Pޭ @z3jB$&me3z|lp&4+, 9OA[wXQ,;f2X@Q`Da9Y #NX9ǀQ;n0};{+.˶澼9P4|T7z|~LNf JcGT^Z2TeW$2$]+T<uq2I <)E^V ]Q1@J$jDOV{J(ty,1h] ra _u5}MANtmX:^UBN ~t}gyKnd*]0PnFeĠv:*}Ɵݟ^nˋ3|}Yw } Slb+tmHmzԥRsXbUoFe'`'x && 8g5r1 j#`QUD-s)p]}+ƈv( BxJ-4jw ,hK9CA:G~h-JHә`DdS:׈7y,CJ35Fi-H?+|K g2r 8oBEc1?}-lG6z>T}lz ynz*~pڠ@2XAyֻ.wXPb=peqr_VhJ7a#E|Uè8 NR>03It oޣp \8 rH)UkRWIҔ"j9aЬ07IL]X~`4| J7];)2昢1# R Q/9p^\\9KL^*S:zHXvpϯ^}0{'ؾ})y/9qho/Nr|/ߎ )?>y;;xv zo(?kxo~7}q ~8Qn7w o w= ٙ?^\._O]{yW{ye޿/J?^_:;?$Û|'G[s%t}vu~n<L> 2pmO7wS\ZϯwwTL͇3w'ΞGW2Tm)pm~;lS?Ah28i$4p+ \iJW4p+ \iJW4p+ \iJW4p+ \iJW4p+ \iJW4p+ \iJW \ "PNm&p%pm%p%h=p%=VW(pE4p+ \iJW4p+ \iJW4p+ \iJW4p+ \iJW4p+ \iJW4p+ \iJW[ \p \nI} (s \A8D+ \iJW4p+ \iJW4p+ \iJW4p+ \iJW4p+ \iJW4p+ \iJW4pUW E[ \VN pmL h} (e*C1kJW4p+ \iJW4p+ \iJW4p+ \iJW4p+ \iJW4p+ \iJW4p+ \m vݽxu.'z)q?p[pm~uo5)GdmR> ʅ@.e>|PS06DWn+Kv+t%h;] 蕮!]yCt٤Еi+t%hv/x<] Jl=3Lݛôyjͅl uc8v8GWaB?-/Ow~}erחoG]WH0Wg5a )T(S| >~ -z ?"wպn=y0o)Fޣ&$Wr$枚jƤ}rGz,ct3˟8w} hSd]ݷ9˩M'New3+8O=(7G_. 
͇{7v{'BP[ݟ_:[Mp{G7^O]^\j̝>[Ć>z_?ӝn-!D6 pfvB z9n-D [C+k7sYR8v7n#]5oƍЕMq+t٘c+Ai#]%7X3(p3m67Tz>tal;tmXͨ+A{JP&ݻסS_'Oxrz`>5]= mLOCWCL]UTҩʺv p9@3+{!y;t +Aӕ< JW߄np] ܘBWՕ̬t <6DW n3t%py3fhi{<] ʨ{Wϑsyc:;K'ޟ1i\x88fh:ȣx+4-ho1{mMG2m7] ܼd==;AUU>Gb"g֢+VJf:v٨t *QyK 78q]`v*(]=G݊U -Ka+t%h;] {Pjtş9҄Ӈ8<7>iMODWCl@WtSo7DWm.ۭЕ}*]=CrGN+q;J~v(r?zrۭJWϐ|ػ6r4W cv.׏`g=`.t?ͤ5Dܒi%GwDv(E B6F]etQu-]] fIcWn ]euhauAKWHW蹷js<5>`=7ǀP! S6䶝KnFՕ!d.z/˟79j y7i6,%ӸNx`G6l0>yIp $ERK",* &0I` djoɫP1fl! \EMޱ\^[ɛ=2t VebӴ!?[MS.i3vFv޿vdy6~S^y?c8 9x5lJ3W*3l?7SFh/1 RFC\?z\xi2u/Siy~7H'|EL3_$͝3<h_:ŧWvDpsMlY_ |~yqo"M=Jeleu+YgعO)O=s;E}_<+R`8Lb x쿵/ےSBe`t&Ey:D[ٍ\oc*ٹ|9=΋R?M WyL:w/??YKVxc1~.5?vwhf"lRf)YHyBb;B~\?ot}uXf lP^G%9^.}OsENijP<]E=F;XR0 !57:E V*r&ixuy m8znvEAW(=jjA|gwY[?rFWb3sU Zm]svdž11\Ldnv?caWXKA]jK*s,(n.a`D2uE1JKbn/]ܢ-ל0eROqtz SX]a騗0+O.Fo+,Xͯo__\Ԁ/7\Q$h(- P I`|@k-a, e{8#:Jhʱ>A#:jA\$.(Q'(\aQu8%I^ys2%bQNdNlq/4*ނ|Ot/c=8ul4/ˡECaF0>؞%tZϲcGyZ#K4Ddb<^YobG9@",cyBU$`oLN;k JKmh$:k QgU]nQ%Ol-28[R A?J+5}ݣSE7\:(Nr(_:9KaXeIH:,o 5 *9Bp'I"RBYHQ tm4k52X[Zd$bZ-Ƽ\')0KBbn7?~!]eewl{?VV!cz(`#"իѫÉҫ+M]E ͂i!LUؘX!X~T&RʱK䭐=.e{ءVTA-,%D2f$'!9g7"IɀJZKRNjR6fi;υV^1rS·̯Z.sY-NSpǏC DRG ) 6DFц&hΝG79ao6bDC*&h E8ؐ\eF_"YZ*nĕ܈g0DXLT]39!cNS·x.{\ @AŰM-*`Q=KiQ-w=wE.}?`g@ZY}G=J5BLtS,b( J0X Sp 16sLȫ*F#4NSn ]M3"R2(OH1XVgKg:Y^_i+E]tR)\gy!z[siP p^'wE:-ynz ou~TƉ2K'0m2f!>G=BϧV·esbz}Hga{nǭ2B Y`bR[v]~s͑Oަ"湐C^h:iަΣg\$ O։={eH˫4Ӡ$Or>bcGsuX'ݼsky[J؛0[v tůt]BABH --,Ǽj-Xjgu,eex$~Ε59i/-')/V>lXݰZJS)+o^JmJ)bKhN VD#p*b< wD*eF Qr@8eH=+THƱ`c) b R*MHRU3cep63cuZ2.f˅b.- fsFYX55i'G,qvO_92cSRjx$2*МvLy#:ixW[XҔq26< ̆l#8YMʃ@A!Vm& D3vep63œŬ kWV;emѲv`ɇ  `i!XEm@RԢ(XJB-:kTqljo$(띖TCR kB$Xd G(`]H =l{Tg3~9c]QF-#[iHJJ08'Ѧ9P'D.rl!h4%$@!&7k-7؜!io%w޾f5ok&W*v%7'7oy03;8s?ʵMn% \MP`ZBX g+HO8aZWW#jı]8^ `hQI1\x4': P@V4V U\Zύ$)Q)cD!gӼ>1c jaT% }ۻLڄC[앰 !mf㈠S~w9A`33>䗜}r vrR}Y {9oxLJYKwzpc-.@0K ?b{wmH4ew^mއ` v&L23AF:o/IZV-' f$TXWY(nG% feQ,@|Vk+4x٧<9*1ԓ肂I/ ESvuZU]]u3WxrayIx֠ !*iUYT8Ə'3zYyѴGAveVjsEYA+j"d|Ґl 'YS/`45x3H*"s-c1AtXbW[G“2^y`5NS)Nj9e.wu5߁4@IOni IUp;oB!gh՞<#,RN1vNi̢ot6L:ߟ&y}{ p@.N(A/@Pd0ȅwzM}97>?DuiVMvuM;w]e]A]s9{ 2|y7NNSʥrZ ۲~LH3UM| C|ɬ \pѹ3e X;>$z}OC֘$հX, ^q$\,D`ƆYEN'@B+k)l<͓uO.Ǔ>}ò^]=b2dlO:H)9֔Z[XeLuyI`0_w`x%k0LֲxW 1AP@iNJ#b"0ݢlI(6‚EIτ{pM+5 vqSlNIPp-:zkAbAF |P8SX˽(Ùi-W˸ `xV0(1g R "0kJ8*w8 AsCb@%s؞+M_S@tn\ijP,y$dU-[= PFQ:p0YH)%E;21(tt^t#xƓ٩EO%W'y#4a8x4jU5֑% Sd?fXԧ׆+m_6[jɵΌ+;-V*b- opb2W|Vhl1˗D&+aʆ+oNF໊_!|ww{:7E&D'>p\J\@Q.վu6jzOU^*1IU:~Un^7o:~;ZLô\&LltܡQSWM|ޱq'"01&]M(qK'7ELksg)SB;m= èOd7 c 8 7tbў\w:: $kF F^ȕܒ>Q2p9l)E\tXbtߜxO4m5-ӲnN6K#QPrQꔋx/"E2b&9- Q( <[1t֡ [Ht41?"%AD2'Tft!H&nV /bA= PGgP`V̺'Tzb"ʈrַL#=)8 6oM[ 56pa({˛sfC*[NlA@pB)?8Зػ (!#>Hvi`e e rpN9L/3gaY(++:G`> v<.*'ar%/iѬp]3M 40BC͠q.Y[^!ggE9ƜJ"u _qඨ-|FB IY9}JE||OU;+'Z~+(}{'ݍ9s@mG'5wyw㶳0U-9(E^)3XnlpEm@}E4A[p17>9'՘(T`/m &( {= ħEʍX'ſϞTٓWPL# ~ g9? 
^8IJ~íKih:raXpiA(K  9٧Z䨴$>yZ֠I i~@.` qhUu՘^<,{,$i/9HQGH(򹖜N)Ifxnp+v)4[~ ʘfi6{W?ku;@X}|Sk47_WP]:oOCгl=S(8 l$Pxd0ȅwmۦGFQkj}/lO0 *llvM7 j^N Xji]SR=RJ jVd?zMV0#)6gF\DcQ_Z/zգQe׃+w8<2еHak<`DH.!=DH*J{Oov#W@0'шD.ÏE\%jwq\6fWTEF\%rJԊGWJ٣oQ\1H\=q豈D`.K7zq+8-ËSdp[>*\V R+_ãxVW儑/޵#ٿb 67aQ$ `OۃEcvKn-aK^)YoЎuUuxY$ɿ s??\ߎfK~8Ϙ}FX >|z `jUk4[=LAFIWxચ뎆UVkٱR \YU5l6UVCj=\Ar,gQ 9 D܄K~sdzCQe{Z7 VֲVx%;rKB Z2>=(?׽{Y@;𛅗ެ'%=FK/L4SF?ÿt|ֵ_w]ʉhʩ_> ^@\P_m?PG)1sRJu4 gkQZ 7I!e%>qo&ȁꪻu  /6t$C@.M%))Oq e K]۽T)c֢ͻǻ3Qϯ=LG/wPjUF%-6ESQbPT^ij+p!}pY㇮~:qhD<KGOjӧgwNo6w1*l@B%(M)rʛ*Hh0G-b$9|#h)צƾEٹ㧋mu~>`{H1V64V#NYҠ<ة}A2C_/iv#i@rJ^<Qݍ`’#ȼH%I`Ձ P *.'1R0yww&^ٜfp~71mnv}^j;)w۳qɜo͒S9V, "K$ dbTy&B<'X$IJ 2 $ږ`Jt1x$tp +Y A,.I ,PvDh]*9=x!xp}}[Ch Ygh˕bM1+tE,_IY*km,GPp{z>|8z6)tv`$6AFDJuȄӚ-zYI%$>p NB $u'!=M͂YIrz1LN M)ԗ }3]Z1Xc19YݚYՠ9F ʨJc̜ Ƹ|Hsop~nv1_:/yy}سr%4իi<o̒_5vWN|j-"N>8i)|*& @z".>W|8q7zmJGӨSգkձ4z8Fl}BӷӈĐǃ#\Һ+N3.e;ItcM֨L_a|0ZatH*2fR А))cc9'K :+E/yn} _al¸im^^~(q^ Ee&UH.#Bg"oz [Rxd{OdS52 $V*Ӏhp63giQ݀U4 /?_2mq2j>c4'|ķ<w9ڭ?"? bO E@ ULVwI}l, `ēEdTς\|9Pv`)# F qfXldl1 wsϷTf$seï4Iv pyy|g !/}SD$R*'jL#2_C&*u *=}ƀ(&lBl-EjOcITD1b73g=b<#1mQ[3}⡖3E E6sJBpֲR#-Dbdl*ŐZkbІ 432d %I#cb. ju*fÑx(l| "{D\5&4LY'L5E1idIADMI\eV6& ZP5"T9&URb%-U:ɘt2Y_Έud\N-i(1.{\\sY Jh `\ .YLE Y:ŝaѱ-xh@X(G ]Ee 냵[kÆяAj|\jw7{lI9T EǬAnU4 Gk6Xi\AJ4@>8L @IȢ#Q 3 rLØ8kvkYOIv7ʋof-ў$W %}T<˛:yvg(zm=.k݌N. smLVo T,ds^߬7;}c}:zX^rРӗW欁ք:t9 bC(w$yR5zߪFM@ҶX J1DtHXk{bj_I+m1Ctm(2 I@YcY,&Ip.i UZf'FQ;FVx7yAj IgG6H5C抒-{Jzgԇ%Mu'a~LԺ$m|w/2t=B\h4uPv=z Zrb{4@|9=;j4IMçY:|wszifz[lZP@2)c>] ]Tvפ2f%/bHѲtPvf}3:yț“e|: /gturDL}''NavG?~6\]\]Zq_mEeɖ! 7TGK| EpmoL[nu}y sj85+?fK̬ws﷉ 3Q㡲MmvYkzDu4zj87xp}ɝ]ݜ6q y^PtKqwN%Y]$'>%z@Sc%k3k#k"S5pPDAYTXQ_?Fn̟܊dî 軏QubGO1_bz!zVl>\|嬾ϲf5 }V=0֣)s5T^N(PZ{IƏI & CYn7BvɏگB0hiFPI:(.jNA,* ]_{Gw0:x3mzyGQW/7;'<P1+dS)>1##+ %)er@1E8ySDcy2{`ւE9˘S(E!ϴD\LR9E<o,%Rc?qy=H(OTz.L|~-qҁJ70(,JHI0 /&MΓי2x,гzz%  g b?$ORA(:X(yRFG&SzHuܟ> ! p||e8zJ9,H4 0hCx'à .)YȤv^;gK|vSт8T2Ψg<7R ~5ٜ%RapgcD<=;֏fiWQEu<129E;f ޚ~UG9/iЋ6FKj]6/3kv\sS8,T.aZAoٟ3u-eT}AqtN?Q7I=ЊLBp6 YoZ 'hM0P^t۸A%hE/'+J uz^ksUxi\%69&J57t3opqeh⺎f|I}s?Q}3aX-8n ~XKW,nPM1rT4֯D˫Q›KB}14t4ç>x߆P'j-'-UOptFߋ~+4ȸ!O,k5llz\=_]t&jcLvKw{*k(\Ko &7yfѸ2Vo*UyeUnfR+2~=jLڤdUSKվ9ڕ[5XTͼӬ}ћh³z7/.o_,Bik7LyTMZ TA2ŎAmX1S8 7r:1(!FM4H^ ovTKrZEB(c$ekFlBX!*f^@aco5/vϵze#cni("_hpׯoN>Z/xvGg̫\Lj0! pp^S.#JArQEHF$QbA1 sCK ppa,a'")9'9̰4GK`@pLo~;GoU}&vG9ƜJ"uqkoF!dޭq{X>haF~&sJgeZ_ ՛ͳC`0LB91soR%H6~MFz#ЪU͐heFyu0,ӰߦU|8=Y;Mƫ;G֝\ꪾGN=>.U`Ø%;8\Tk*FUtgOӿogG=D<98z18?LKV$نQVސKMvMT^4Ul)iՀӮn+ڽ$>Slf{nDa7q pV'ebŽ >'ޛd冂*ABS_EdGVK#7Uvtޒ=]X H %#hz`m@y*M`ƂX\{;j]t8('p>>"/~qCBGkHe >Рt^1E}{șN㉍@Fϧmdga;啰 N+̪s7ry;e~GpBb"7T+VZ tfj#Q%9eݑ0bҚ0l+íT9`b{3X]a^tU< åp%V. )d4&jci Ѿ'U Mm`8͐6IZQaZ&aDx[xxlq(Z kkԺH[|Ԫs⃜+[is~< x4<0smCMY:sBW%75OS͝YsZr]u-3]J[L*w`RMtgI3j:Rjsfȵ1OQM0&[;aG]HY|Z+ng`t=3Һ#KKV^.,*Q̣RO'sK֛cf *mSp@`^ OE',Pt8>ێ`InC)3l#W8ۥ6/4+n# Vi KIΊ59sї8Y:,aA'; ]%U6w@s Rw:WX^8vJaA1ƕ7( JbLvX^KpA;̕"[B_4M߻<𪸇y06/N^ŘJ)m>1#(%+15^ߏtŸ}_⡟a.9WɅo|&6glA {m6ha Z-l+3Jӑ7ND.7RNM"m6WcXmwa;%Dcmjc1Vu 5Ja 01]!7:Wԡ\r kZ xhN7l(&wt]e-#131[I##K, yd*\q0ƆYEEbP1G E$EudXDcZ)"V_"AmHFb4FH 9 KP-R'5(I΋kmnV/f#Pkqrcƹ8CJGC"$e[Ioc%q(ZZ=(̀nhtn7[tMf^VӺBG1 bebn`p6w6ٞMw`=w-]/i ׎#2LJ5UZI&%<0#ۓdN\SPD,eL yTJpB&S#8rT*g8P>Dຳ Y4.e(}ЯǽM9)׎3`"9\Z{-y;ףӮ;5@YtS@#y&hY BC{U"Ikh-`:VRw`dio-6<]a6FɈa TrC)GN3  "bThEHexQ<,U<&0 e'Hu?0#$JUU!*-hy:<0Yn)G_;(XHCᥐ} x" \Q@$~%P/:hmG!:Ikf7ww NvPYB|ko ֫OWm>{~aݹ\Yz8կƿK$r>THw8cYIs>bҩ}~>~xUVݴMb1 WoPڵfʥat }YȟzA#e,u%kVKP!h ^xk+\.LU]֗gmP:O0pgUh.W4;iq1~,~t㘣 f!7/wȣtWŃaQx:8|)dP7E>gg}3ְi?G`(tݨ@Oo W^>m\Ų{x*׶\].7X\ܲG#w*z)pB@W? n _.qg!9yn0z'8? 
*[3?@=NMz7&Q851F2._|'0k.7[o44lQeMĴZ*_ 7 'N\/RjӍ0{dA_pflg:3<֟T).p}O7 aَPSU =hq=ڱx~GI 0 `=߮ Ka<,p\lեPZwTev[טAü&f^V]nhueWQ9ha%pSܧD5DJ)HtbHǐ<:6`P&IDB3mjSbEV0ws^7J/N9,.~SMݝ͍kn{ӭI2ϧPC\jGo|T=?)IiIiv@(DTU,􇞔 4_XTEt)19f{߳;=GW_wy6 iF# (h "FDF&Jn2"s|r%)0%. ">Mj58p{c8=FL-Mђ(:ug˃}Km7|W0d \Rp*9LrѽX73Mu|Z.pYVhKBKH>DLHŨ':QQ8=M*N LTå_x9nf[db10i-y >e>#CX!hJ5J8P\f BM s*‰{ c* xlaf3Vy3p~$ZD'r*&ιL_>Ҵ # --QJ5QA+ωpD 0!R/H }CpQQx/@E ;`RS1ZFYocRnojLv6ʌ>hvGٺ~ Vz0'o\Ps3G~SS| ږsG%S*1‰8> LPd ew;Sps8˃lQk>"ά=xR ﮠ,/q܆μygUnquǗ@qޗ÷p9R%k5|z-gAif-`sMݷlX3 \'p*S }<._9<.6<VlkYqWγ K>tGzYe:TuR6TpNwUp\ P⇗?o/O2s'o^>o8 9B[}p~{Hzz]45Ms#h.mڵ]>rKdvm)@^_|x>? !{\6{GhZ13zJ}Zʬ9B]ŭ#e7W ,Rv7 K=Ҏ ʍ 1iN{{Nw>!H/)#)Pĥ1X+6h 8)ʕGyY=ܳv BiY79--'񁚔ݲ4x ܣIxx0s2.HVv`\^)cH5*$RpVD|lr*Ђ ">"c/;eldlՎ} `rb ZnZ7{́.:TGWZjףɸ3:0ja!*R MO&l #ìZBdJpIHQ\ry8Da2%D}]+ݧpcQnYfbAu#t w8ni81??_@\hGJ,48DAOR R0`` Am`Lxg|: gf85c2(PR sY_&Ej#{_|}r罅YpYn=|$fXYN.˜EpI:y@ U!.^hAD&;b); )Bڀ26fo $F#I.+`yDň3 )11HDq]2@!YC]#>"#U99cYw/]}ܔX<7NLcNևBlؔQsس}al0VlL^>^bAWxFY!mF͂0At B0b y*T0($2@e{^Y!1+9{˃I'Sā:#mP4,~2G|/L:ocҚb;d=eޞϷ"]$zb$^\e%AV@d Ket+n%=||ȷS)*NZvr~1M_-J,a\hW T!U=SS\x?\ .%hzQ !wXx{_܏5^ŴzHn^ єǭRo q,Vv15s &eJ,L nC.,K>(K"JhDAB #(EBY<;{zKlĸS^YNF->DgrAXb:(`|BtN 3,,b zAyo[zlʇܻwSݤ6a +d8m%P"%! HEq@g88s!Db:Q"E,wVz=SR[*b|>JɯI#?.mzi|Tk_}s3,94hJgs>ZCy_bs%!C.u|v~}w3ҦDesSm%P³k -e.h(h"<N)bD)Eށa;9MXlimpFKG@!A1,)K tTBz-Oԃq@J2Dq ƃ?lG ,o8N΀ddxNU +n B>98JElnxxs132lrhB{넾161aAXBOLˆ]̜'b j]S`BE&#X:.E詁HXN x@y.To$!R *pЈ5HP #b|LAtAՈ;~7P8D+"nLiLZ0. z/M2"OKqhLRecJ}c@AmRNg|`B@O q|('e1s#w#. !:}qQEYqƲ9E)'B:&;iM$! B]Gʭ(x(xXlt싇0 `6Q<QM"*~&DՏrpzG7^5b4 HNf o#γ$߫@~V}rp0>yϬ4SRNR+4(oߥ5z5\ '& GꍦPQq.]A$IJYUI$% 0&$VTg1sƛ(6X{JOiqdNwH}ܐ~wt,1ַI[~n.{癨RRG>,a2V|#)ixN !@dQTxm̾5LeCaCJ`[ڧa9RE^ر)C-GJhi .CtR+e< ,mpNDH<asRytxY+&t 6]%ܼu^!.ƣءhW8ƃslʏ&DPOE?{3ܱ?gVGjC~klӐxcO_g3fxUjH/ECe /rVJɕ~4rSnaÃ$\'L#sE SŻSǻؓŻyQZIa8AidpZnSd xws+Ӻ͞; &β\t_qE]fۓ\zXwnsSǬZv|Bu:)KaTd-c X<( iHI"u={{ s6&gm9E ]~7IJ\CGSr ͕K5M9khڗWr3{,U6cl0WJM+\}p%fL qa2t_ecx%/}wHrG>9{@bT 0/.Z+ =tE+vDV3u' Fa,鰃_ط,)3J*)*)*)*){\\*) JʪJʪJʪJʪJʪ7 0իajaZaja/HfKF/.% M7Ƨ'η;׹힟CJ~B_ ^ fyS\"`?#J0{4DZhu;-}4tCkګ6p̥J t @% 7k'Z&úM |,8v%,8c*qc( Dh,@{Ђ୍c!TG`_ *g/@P:,?N&6.J>D) )2OѲPGǛMF8bj b`HA+zQ"ՁtW<ʲ*UfUutGّŶ2gbZM( Fvx4ɍ\!%1[ء'7f+ɍ_`rrD)01)CXr4Z 50jO[s@] }t]yL˖JsMT^D'4'`BtDD)8R'aUiRDiݬ !IMbܬh׋B&/O>:do8ou]xGs63yIY'c^׻{1 ʡZ=i<7 aH;'G &;8Tb1rd4z'Er'׎#3LJ5ɩL2Jx`F('x5:幦8Y@ yTJpB&S#8傩DU p&"'9<亘9u7fޅf:|c4sE2g7xJ<Y_#~gcwzqtSkʹ:kA1|CPIὌ*$\(r?+0T`x`0JB(:DpɁ֊)\iZ%0&#`$XXJj*|T"YOGsCL>>UGݚb\;n5ѦI?ZAg0Cy9sTrC)Ǚ%ftRl ADRg"xdv.VY =y^xB^#(D3<GWu4d"QiA+AL]qzCZ!pyf8Y8zcFbI`T%K!!@5D*, I@fu`N5uZ!mF݋~0_{Y/%!R*LkH =0Ő|yB <sH=3*[c2DD D9M6%H F\:'+<C:mZ8>ֺ u+ns{9V~iWv> ƒǪCΏfo8J9Uvъmdjqwn)n~y2pz$W@Ox3V%z߼V_]\߽p֗Gq2ܢGّ{| ։3Z>prz s e? 
=ϼ`VDwWg o8_ sG|l֯`4t}&xCCoٱ8z[ג^ߒҫ[X׌Xߌk;Pcq[4y6U|<:Ytjy!׵YiN4ie<#=΃2gqlИYe /h8ToC wޜ8Xҷar/~GvMR۷ӧ%ø\n $O׿~7Jx,rb#U( # q^ţ#TF[/RBP6 K=/@xjMoGO\ϑaj7Α7F Gse,}F4N$ hP6&LN=N9U;9ߟhw>;;wYn(uymgv^m;bՇq۹5,`^ɑ9)<&xRS !B<mJVϜuoj)h} "W=vm:nTGGlfn6Pp-L"lpPLMP4oTIP8@UJAD8qTuqS eە L`GRQ^g>q'xAܡ7 |H N!2W)KD#seXMmzKpGdU;T2кP f>7;˟į'_sFfa^U³[:)Z:tj" $7m&}j~5+E^zGWuҥbK5jM玏ÓmHLjf`*@S0V;( 3~s#?EwJnoqwQ4 5x{j'*s[k<ˡxEm,lG!-ow2 7Jf!(# P;FrJhoG8 褌 "ڞsuNPv clF0c S< 1Jm x "*IDT)%&#|9_Q}|2Bnx+8Q:e%21 MO4m ^0oHGr< 4IZ: :k鉊'mR8yhwBjdğQ?p6n6g6ȯCV~!4 i3(3J+;ɠ<$aLTՌY"D9F<T(ѦHb2H\+Ȥjkjl V ]u e  \]d&4=Ww@/3O&d( ZdhrRXR4r8J`*ı䒣Q]UcK'n(d> (jS"RIn3q;|ʣ&ǒmպ8Ok0;+ZUk^k9$ gF#XI`9XrBYbjn!A 6Xi!S\-隄,ECt]21H-׮&%?E1vE#V]ш׈Fܘ#DO4e:UBr4Ѵd6(|J庹#\\LH!: ]Ĥ樊"FrsKjdWh*E^/nLH sVx) <%М;2'A{-Kc¬V;\zŃŮjcW}h+C>< Ul79ޏ/(л>lg؇va;}>lgهv~F:it'n4 r3n kԩQP4+umy8O;(W fjv<3t1Bš!Z, ˸Y1c\۲m/<I`bPHY~{䊙vB[KRY8%QpMB.9yj𫉳%{XjZꕝĝlE!Qq'q")nbCԂrkYV+hr hjXT ɢavNOLtdzO8U9q\p&"u0fLp,5\Ѧ]3}fҥ@+Y hp" )>']*\GiA Ef,,C#"g\V4>-B*h"glb6)gV'JKzW+XCi*]}N^y^w讻,Z&` s/dSZ4ם" A܇_C!אkH5 鿆Cf"L;&YueB1zenoT &HP5J!\* JL!3Ȅ#HPZC"`J!ŤXDPd"%#S)c]7pX'v.r|$kt.$R:j$b=8-&.Za_rxpՂӛc*騦D;} ;4=Ϸo@ckXƮU3'⫆kJgkzoVs5$[rl V y+3,%kBhr.iH}mpDV=l8 " B8-Ydt*k-y dk@E0D崍De|X B-E'>`wVpqn9D4 ͂ad`c%ER (J&q @tUNbJ`f'"NY8#&C"p^(E(*ql7%J1Þ "d+2_% X/ENn{NnEٳ6Z-u]]w-v}"JCʊsmVPdUbҍB8o"ye.aX%-K!&k0>"LThk@đ0.ɬEkƩ֜x؊1Ю?Հ"nL ё:MzCR(!b\dZЉxR@!d)YnH#2.c.8RZD#)i$UQX;}Us:}q*pqōAli$.Ҟ5և`^; +fŸ4ڒt}TC>Ut~&'tW~||hs;B'H4 e`^6A g'tzط>{y3d jVhUP$'#Rcv$or:#sl0*۱OvItwn#qsX-r056y~~;'7ٕyǟx6Ks{.Kk-?tVs3Lhem:f vƏ qڨDWIFwalg >e?@dAac&aԡ&aԡ'a&axT&zK| %ɺJY٬DpJ"RH}cbYkRƹ8dA)$˰6fa0kjٮ'6x3yCh Ifϟ41{ͶApPjO 8xԇkPݮr[lJ娴kk8tFUv|5Unh,qM5Y A;aC([}?UTuX>LA_z;֫~gE j=-z{5GR_YR LJyQTB霹VY&\#]IcS/#E}Ek?\|Nןqti0ƗeH78]S2P1!at2r}'}~n9y=|33*mPZ5zݝxUrw[҉EɽbnpáLuLZ?)3"=,盵unL.4*[x]VgwDTρ:K<,R3:㸠qAF8ȅyXYcɠ. !чG YSXq˹E`%) fRf9G/&ǽ V ~U&5*pϚt詵7ԭĮb> t*[lv&x#NF j"J97I d3 +l`lǝY|,A}r%1̜Ah*@Em(94Ap#n;(}za㋰x(yN;37W'!r$Sf4$]$F!:1&(B 2˪r8Pr׍` RqU٣1 G` <[eADG$i/r8ljRaOU^팘|K?6@{[3EcQpE>*tJcBgˈLȨcV8X}=XXשE?jB5HQmew7p{FVK5@pW*DMS'o5Wem h` lBrF AW=`|ƋC*ֿslܫ{ฑK.->^S. Jy?2?:\~.FrO~~B7C7 >^$@iۓt9VO\!0?]q6gi16+_Ӈ NKoލ iX\sK[U" (@6b 69S+0n} t+]qWPFw6a_6̓詹evO.ZIѕkbٖ:<ޝ.:CvWԥ7YDy@]{KeeT&f]4i=o7\h! U|pߩ;Ͳ"K#jl 37ܬK{y :,mNmLɣ-R"bHKEh(ydșL+!0Se"}vdClVa3XtsG2%4WҞ+_iElhHi?L#.NoOO"+z/v|]f {^ztU2a_":NAZ19Mfe1;dLČ <3jPL_tKej,!Ea1ZHFEʫlYù!@O u`LQ4CPqpsw.dꩍu/I3:L*a#RvYgFM4O H\JVg)% Imk MR)01=MIr A =0r")gj{ȑ_i6-v{ L&&.$+vKmٖԎ`[b7bf>{gbbeE@ )%f @F,Q*w+̏vt=//aw@0Ь@0P& tH.JUp5н/6W'- " SqW\,} DR( &:??[\N?1TSQ[^0)&Uf+'.`7fTja]v=YY</7?_Z\,?xn"si q4pQ+78'㟯/A m551mՐhuEyҙIy#z:x{zU#պ0ћZiw0GdҫӑIe&?T^*@'ޔ/˯37^:~Tz~~/^~zO/߿8A,Kl~9(7p?~Մ6U5Է*VjJlԫlz S8CWM]՗cpR *dؠM73&xdp%̯Y*C-_q[JPuC`Ȼ1xdmPD0}έn[;HiH %#0=`=X[YF <&0bla}cA, .Gڋ +ˋ6.<62'hghHr\E<5Rù4(WLQ4ӪtbӇDwNˣ Qw 7U}q PJ?M2Ku^| (E^)3XnlpEmH<vН)N/*n'a18}+L`0S8.D! 
o,,CRXa禽7#dV[G1() HiHa#,!F:p֜qK R@)GXI ,`:VTIFh#>6 ܏Y(#kZܼ/i9PSxˁ+9W."6HၱkT ZR \ޮi5I/ C%&,4PAG)5NR-c]֊FWNJa^iGj?6*`!6"MO$QL9YR|Izlftz> ||»wCq RaԮ|t|´'H1+4 ]\TW*-*J{:A"qI:DWXPJpyg*Ut(%꭫S+*RbU!gBW m*Ը+&D]XUHW Ъֆ*$+ADNJ//'"$נe/~%d6x3.vaZ)ԵtJ{!\.Dy q Yigތ_g^4͓A E^EJf[C* Bis45AR鄡k>;d$ J XmϏ c?E d[+=D5ЛoQS;7xo(]RƁնK([ldH[1gROٻr6A.VEWRhUqr YoM 獏w@zT(>*@(ݡNzYZF,XMeyECdDJɡQBa!j84kuXne66aǥ.]+5Oj/:˷j |[*{MVŷ'Xts RM , >gL7G7|ddλ 0.@oP>1=e8l!5}5ߦ0W@bx ]1X˿߯un 0l'6@9N.έ03bBQo8ba5lN(aC|L5?]/|(_j5, Hߖ d>|YeFhCH(fAE+7j)RQ)\İZqAĵW[5a(J ]`n#2z<%BAY dնƜPh$hsgXH?⥤΃b΂wfàCVL+M!@CB*C %jahp& e4zl5ͭhk՚]M./`Rmӥ9w*d`}#<<d̩K4P$u7>Х xa0cZ[m3r'4 -Xt 3 JGgABz)g@xD1(3ӌ륷&c/A!l=v2m m"CD `i#PʺQũ1*Ұc  bRRtRyGzp4ۓ/cXLGƅMFJ*0qBKXYk Zn%5\(kv"PW*|"c!t} QDF&7΄(9s(T mV( $5 w5qHtS4>_pavat٘ |0M*Dj:ߗM굷%XV)D %WÚѻ9Fd{r5nj9o3|1 sUڄҙܚ`/DmT/ *Z}ƥ/-@jkT 9>$f 9lCKU+ܜ-rM rSNcfR|WZ/|#:CIٰjD(QTzsPyUoܕe$Xty2BskV6ZDKUـ0#1-R#u˶ E@P]` HJƬG6lm.E>!P6Pc.ٻޠ(*MsA)5'FWg׬jX;~K22)LHp(xNiM +QY϶@i|U TW7GXqvL6B@F=5(!Ȯd܁E7BnTzC{- ȸCLAA؎[A( `-ePBdBE@i&g=FU]c)[K1 z,,xį:M;*q b LN QCFu(nl~ :jojLEw%RI9(XT5@,NRl3:_BRAAuT Bt=*%5k͐Ȳ(n i${jE}{ &23.}! ;^ܣ{DԋqJb6SBr|\1fPTԃ,0F!NHe9;ԙxa5ZIַY!jh9&f3x:pqP\J_7#JhIWosVW 2r i(yG 8X u@ą򠷒!HDNkk*fʇ`)C#`8ӥ`)`^(^b u6B[!qgdm:^XTuaU% 9աk4=!V1dxaQMZ uY QH?$//O7wq7."O֮ T\yS0x6C@FD4=R\;mŪuԆHT*Qw}T 0sQ@R@"pc2πExW@rFEK֬kdž@ 2ߪn$l& -/h5g jxcȵ6vٗ`z'-@׻~?q$;>lKM 5BgekOak0߅PYf1h\ 昴kFq5R֜4zefcڭ:KoHs6k80)QC^"gHZN!F .`Wr5wn+2TJ`]PLMYځ DGCPʳPPvƾ:B_?+AZ96kB $\G 9)E*aX0jH1\U -Nu#<&;A lU?<66TƀumlbEǕ@:Xf=ԵjsQ{&=Lb2Վ *WZ@׶䠏y~58dx5ƛk΁ >7 ŢW>ڡU!:Zc_)a2نf@3 =pe6/čtXh*AنFp*qK>RZG7$Эxx@A4T:6_5fs˥ZpUi\CC,Z*cQ4k&!T&T$bqBeN V3f !KkBE9:WwW,"$R3 R DŽǣ^|Yr{{p,]T8냟oujcSЦ  ٿWO>ؓp )nk}rxHk>^~iФ&"fj`@?'x  eh??XﵺO;wWã_J/nжG؇~gBm0G gz8ld:\}I^& 0 }eyd&yOq@N H $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@:_'/U鑜@I8p ]+82qǤ8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@b#90| qAB;FqH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 tN XoGr%LY?(0Н@'Ow%V:C'h$/N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8~sOXh}0߽z}GMp]{owwכ@Qюd\bi&G1.1Z[7.1J/ƥ0.jy7Λ  \G+i7q,*H aal@mR,*YK Matt9BYR^A52NCWP<(jtЕڡm+oPឺ\iub[+ ]ڪN+uZBWQ:#tutea\QWڭ:K"d@tN0tpM7[+Fp ]]O8q֤uJJBWHWTfwO޾wv8^HDPZo^_߿J 2ͦjS ɮ'Cٽhu02k W(_1>uMDU0|z|(2_1!Oé2BI6es}~p G=,Ҭ&km('?@UC^K8~:Y.\j} 2J@}ԁQ;2--pnL.-b'ÜcneJBWgHW!dGgfCW 7 s,& u4Αb2@twGq p1r|CK:CBa+CW ץQͯ]z]g}@{\8Sep/CNt2[;tUڡ7JG+NCW jڨNW2 ]#]YEv)te CWkȎBWT^rk7]}"I]1+zbz2[{7+6H<~KcݤͤN3K.uMp:0M;MM3\3LhU%Ih ikX8kv 7Q h޼d6]!]`ơ+{ubQ*6p2 CW0ўƃe(IΑ!K#+&5]1\3u?(UzЧ eBKGEn\bZV2ictUڡV 3F+FK+FUtڇ;N1pOLehOE~dJ:CJrہ֮8kWn#( lʩ SH'_j_8qD Ӯ]-DKYj_mNOW^'+0tpw-m# XJ~Xi EMm`e+vE{ϐ,Ǣ^-2.73g9̰5tJtJ(5`"Jj ]\JX[*tPκztET&5tRJh9k:]%Bvt IDZ\"BW m*]=G˵տI cj>bj.@LE6!S"No MSkr ZN %h9Ҵ60Ǭ5tJhD4䬣gHWR6`E[CWǺJhI*gHWJDW(#y" hMr-+Ӟt".JpUk1ZOW %C]}7tzGJkq򽒇S{'e' i+|]Ꭾzn@Jpl ]%t(qӜ_`%ZCWD T[*tP򎮞#]Q_.|`Hk!(k ]Zr؃P2;zt^ТpcrX j>5׍;JG@=];f0匴Pi,GW&FYٍrkޤtEqDhSDZb'Nh9oP6(~cmհI C†P`a*wnmz 5&О-\ w4|hZbUIUBuJbЁΪ|t0fǺJp9j ]ZI񖮃b|"g0M;=@WcFn]`z=Оa(%!R(";PKu[*=PκztE)M LS0R4ʦ=IQZe]`MYk*eS]Rꎮ!]qZe9X뙱JrЀZl.),܈T ',6wĕK3wgKAar5#"!W,C20\9EE/?R~50q>,TUowӷ_.'/kyhƣ߃*RyWb; jC JsljC>x鵪$_=>ͮW>|uzOe2muWlM5 ^2uh#LH$a#:$l#m F+ ৕߮mJٿ&׃̮YeŸ3CLG eZ30{-#chnDIzƥd81kE5[p ZOZu*,z5%swa-:J R./uHޘJ|v{9 ̎&fv lϋx/GmZ7oyX?!xX=2*tqoo[Sw ]vifjI~\ɶY|A JOvН|UjQf6,B Jf m_} n gek%l:"oyZr2KޭB,Jk}2~jïƎ[ReE s<622۵'M<8pMuf:sEbu2 Jgϓz~Iͪ ֤c"(K>-YȾ<[ =ww~wEC-Vؽl/_:B qz'sF3.Aς3q ]{6FS \mUaV"%LR"7 l>xDZ2:LjFy4Үlo׷|{9K=r;'| Z8t6{@ N"\)c"OwX 22Ƙn]=DPDjp[Wh7+'&a|*q01m]{l#'*R,C^zYM#AB 75|Qzc!Sr= <* q ( $rroD'ea pvÚUR>N\[..^~}I6)…^1pVq cbf0G2dX #2IXAZO8D-iEZQ( QR>RLzSNq飶ěU`̙@Ru3cmpv3c}Jm\Xf˅f.>*{5J`3极6wq^ěӛ(猍Qq>AdRGsFN]e "J VƦFݐ=Ph 
^VCJGt`ZD"rRصv:k.fSX68Yڝ|>X# Fx#N`%%xjy0X ` \FK5>}ȅ4*xP!f`A9$p r G(`A:p4f> n>,uF>$bl #֦MaD1bLj4;QgN2Brim-7rv !X(yVF #$$8݁uӔXoA`Ir5<jLkoc*s[\yO-@@j֖ 䀖ƟwPg)7!WrC8`)$TƂd#"ᷠy|S1G%_?j|onY\[1^ \Ba{;#xuL8kCZ=],Yydc^X{)r8%b"XZYp#5{'#&cd&Ϟד [W;$xj1'' Sa+OStsC)gE ^S' i]4bi MZ&.}}+֋ܑAM8F+lop̉1MͿ+Mgrl19Lo&yF am0 ؆th8jK,yԙh}@]~UL{IrZ1Tz-iTvݢZ{w"SOKj7ECZgV|S{ \dɐ1l8a[?9d4&Cv$G=Cvj㻮1;l [XJ{ŕq02Ւ DB7)<<A(ԃ$F:[k "ěՐLB1jve@0Kםk_=F[ic"u+^9YZ [B2r,)j'^Ul9U9/{}<4#WD8r4rU88)ӑ5QEHF$LNĂt>lǜ*_uUp_-ˈj{I;"EH`IE$s2; J3XŒ&B -oCcP$Ϧq&+;dG ^2\֑Gm%o9ͷ4Ne#jj+,q>@wӧpfGnwm$W?]y},k?xhU"iR۠ݑ"E]HQdQ,J5@dTUF9qv口,Gh1 *zOWlr0MhViioXu}ـ-jlPodej wZ~tI<#Ex)rmMP^$sH{]K,6Jk͵,=Gzņyȫj嵎hdL EςPPXΚjY39b тieOg.&]ژتjݫ&mNNj-6WܹvQL'mfj:|zK=;3ɼQ8LjEt)olbE=hϣ P@sj ԻKUV1sX%ρVRtf| [gE_GEnG:sZ_i<1Q[5 yr,&j'Pq`@QD潏uǦz O=Y@[U-Jŵ*e}͇R2(S W謭Dw5LT( 5x[WTrk˂렔LgDj9Q&A|Hveg/rhab,}" H  1e 8ַ*К9#{ń:TJT<̅s{tfU#5J"<գc@)G$BkFX+k꺰J^X hŹ2{#sqF4# s9R4.aQ|EH+4 `ӽ{wۏ^ 7r FB{{@ȱk[!~v4zJ{x 5q}|5"*܁kE@"tu+Bw)cj& cVNsEg2v Ii=< 4\TCQ;W%7kgΈR(E޳K U`ER{`{g|s9(ߥ}czk' fJzõJ( ‚N@,[F+^d "i* Z!eɭ0(Aj@$ge6g9uklxl]&?07䝷[.lqP* g!S.3~=?Xd{gfyx6'CdXtI :,Oޫ3BXnqp`5cx \v-.IysgP9O dC`Rfǒ!P FcPRhn;ύ R Rd܅Z3gQ>><w=[9KU^!puBj-O ӊ}Ji BWv-6b$'6<5ӐN>{ӹgo7o-Wr~߉&*b2,7?뢓 ?~FuR7=Nu77xs:<)f4u/d覲_̯$ڀ}YׂR'\gEa~9ML] n{5*ѠN*+҉4n5ח~MI9]:b<.nE8F_j:w*O鱎OěZ\}9dXMٙƐ]I̿T(•:Ykv9Lް@o=29R~fNNݝv5ΙLQ٬;-o{hs g:~X& ?q_Od9whi0SjM-\+MIMٞm*لz.讱B{f#}n;tM Ayi^z3j7sަf. khN/xiwe#XWn]i|$0nsݨ>{wwEfE>+Dւ(TI򠜖>gnRl͇)+0,:1^^PKf-lmlA1ۀ63ig `HsCV—FR9N+#0䳪Sۭtov]w{-Ǫ.~D(Q*G''tRZ Q 5wV}В:qYe &+a9cRW];Q ;kU[mT,%SS4 ^pewQ` ޳A&'ƈ[cRG^""]w4EI`i88 MXV<uZ#RVDs6 1^,0JKB{9l6ZT0g[vyU,v4c y Aև"+S0G)'pҔIsP'Nϼ)I,p^zEKB B)>yMcYZ7M:vHs_+TDzz2S˔ )oݗ)}53܁`H GHY=EbƮ+R+QP(@p9=&aj(xoy4~aYDcE|W`5TȍGCt5X_i V//:QsdL}b^䆅'i祖,eG ^QK;k-R͖zBrWMMi!{_h9gRijBH6ʊ0M :3D)'Rz:!q$8Pgf6*Zy܃Y/e^лhXS+akxEv0o?Cnk*3UB&HJh `(ByK=|qku^3ؤ-&&3C$ɤ} A  e$%'ћt fav#6XE(\ 55*:حZ)QZhklR2][&\^Lp~n[x=q %`!熱6`,̍x@xՂ\sMnV5:-0^&Y }jztV3KYT V\`1q.iJ=@Vtnde1@%Yޣ YOKl tJ6 ep y{!Bm4ۓQ+'Om(➡PFEXYM 4k)} 6$ ym_PרaYo!zJ꺯 zspW˘Ր>W[Lڥ]T2!*DLՖ*\*Sx/4Ꞙ-ĨQUlîtrd?(|uE218G#A'SHX fڞ(8s`d1ű$k;~nX)s$* (\2 W\%\kz*Vń+᫗o"WO2r8pzB6\=Z` WORn+p%pZ !(\M"sc WZ ]WJp Õvד #BWgXjk)\k>\p% Q"Q W\%%\I#]WJգ(+2b.M*jpUЇYs~u9.YOfn"E%2$F-=lڎG~>ҽyIxv97iת%G\R93nI,)}Ppqhn\A֌AcAQvMS_S۹1B&>NxP>4Zk݂~׋6o7z+2\(GV??no6C+F Aq(LuWF !ƃw?rv-fiAY҂=A9rivZ/p0+˼ZJPltu>t%bԒ3 `]|ׇЕtE]!]1(q1t%py1t%hOsP)vUաJZN~aCOTνG(x:{aqh(WEWt]Ѡ=ѕj1t%pY/6(}t%(8lʢ%;CW ~] ZO%AWgHWjAt.G] \BWA:CrC +bJŬ] ڨNtut9pr5U1 ՓnXG `bhZưZkiZPX;GZEZQbJbT|tVj9k-0/G]nTf)t%h_bC]%]Enry`c.RJо8<.1~=q+eWWG}i:7/CWǡ 'f>rGЕtܡYV +WW}{S+Aܠ3+c88 `~[ЕJP? ա>}`rˡ+y)t%h9:];5䠫BWd)* n9Š Y銬; >tnZծû#$k>'ׂUHmcػH>A`| p(^|O:j8c8򬥧  3T-]fJ ^Ez֤v`U\҂f^KYZK 縴mfIbw\t%pbJ2:] HΐaAtQЕ5'hm(䧋Ah}@QY_ˎ>=t @o[)iU҈ ZR[46]67omXCW~>ͧO7OdR̼;? o_~']LjךUPU w/3ebf5x3]̔ g_gwh!G^C=._C$׾\Իtֵ}|}Sޯ?+ly_&E-%^fM{59]M>GV妳x='N' ww_?m!,G߯޶^!mmOhv}y]OR']q)N',i"]]dp=MN47jA u*֪Trqƅs5Z-zPէϿilE#V_煆ԭ/Rh]Usu! X8U!kG9:LZA%9цRkPÊ3Z(\ #j QcS)]!?mctl w>\CRMRΕKͬU]dlr-PtMSjJ1'Zj{B,=zɌa ]}cs1 EG 8SR>oHDk3E2JM5 mF)hmlcʒ&S$s*0JP4 I{mg͋ŖbT uAQ]BƲ=uj,Zad9Eʧ: VAfYt7H!Q!S({ ٥fݑ婄Q) _f&Hh# L9'X EA)1ؽؠt)CsXRM+vFZa0`(QJ[uPyU糯ҕ0ɠ-]. kBhcnue/y ,&VеYX@Wg05˶q`T" ȎITYlXkY`̡ m "pajGU((JU&ԓ)e , RO-`eWVنp5TM`Ԝ&ED!}AА$x8t, |ĬQAQTߔN iSP^q3j,'8IH&`eH*zbiu%fH57]J@ e :oQ9Z Ȅv9Hc';uOQEgyMFݚWBNŜY' sGql jJPI!01UD5 T r ֑lC@/J 7U.(Sѝ)E4GRFh֞%@Q 2#}Sm ()eX|F*A$u9kh 1ꓸnzFyqJ>5F_ʔ|AI8(l,@3D"A! 
h VNJ;IBu&#aPner3RUeŬSDr|R1&PT؂*"0F!N%C񈝧w&^Xwveuz)Zp `cg{]`XCt`-$ >:%AuPiP\J7#J&r+ɂJ2Ba1%{$;f:#.') >@E&rZ%d^S0P>K LRPp/:YPH#H܎m CKEUfUS YߨiyT#ۆjBZ!BTD4X!ݏO7~7f1di*h&c+tm`4=Rnb:En|JPKB1g]G]I (ʠv0FϚ%\-1 -I)юa<ͲՒ% C$K>O W @u ,|˴w7mϹ^*İX- BtqtsYhli1 l\ F Z= ΝTiVnMȵQ^󤑲z hPAi7Lo=̈=X5j7LJȀ-a]M1WdsC<܈ܡvI,WRsjP댂0dJg'h(!=`yNVv XW TN1֮ ur3pOr)E^Q^1 b0Sr) jr#:JXᦳ^:H9-й ᧀ*FeS*ajl3j hN\36xk fnV4 C5kփ*M3|am\3iA'24T VP-zޣ~]ۜYo5kPk7,&s ;[*C*mP zxq VQV.m5EO(u!~)AH'k|Tg= zVQ!,eJ ]\1d@b~pQi8 68fgTS.\;mK9 DL02\ Y 5)w(8I4M](IWKl$ƮwM$WW,"Ug}`AApK6B|ًon6tdxV5:&#t:Ƙvrw?x:ZrlMuPlHֳ~_`F?]$nꦔ M%]]Ɏo]^}zsj?9rXgzټxZJ៾ͯ, o>lI;9tetxONŋ×[7жہ|jtu6nzYm0~m 'uF׆Å\렗^qI`pE:O't;'p 'p 'p 'p 'p 'p 'p 'p 'p 'p 'p 'p 'p 'p 'p 'p 'p 'p 'p 'p 'p 'p 'pQ %9;*bD%%PR%%PR%%PR%%PR%%PR%%PR%%PR%%PR%%PR%%PR%%PR%%PR%%PR%%PR%%PR%%PR%%PR%%PR%%PR%%PR%%PR%%PR%%PR^وI %G (p:F%%>*[I @I @I @I @I @I @I @I @I @I @I @I @I @I @I @I @I @I @I @I @I @I @I @_JhD|6^7YCMux}h/ހrd1 &&&xKhKV I¥}.]jz ~ѕE(B&%Uv%KtЕaӫ>1NW[=?IWۡUt@v(]-J%mSI46<BBWuB'CbK#+lY<xWVlyD|Dt-j6v~0(Y #] `+I"+li4tp%aЕc̺NWDWHWnnr)յINއUShyo]$uJ|A,hJj"BYe+]zY`JV,+2jJz6X3K>| +.^b]K :Mޏ/)_Iv8-f'?O&gݿjO|RU#hRf Z%jp}"ӘoǛ(.oL[UhF99,I^r#rGixt KjoYGRPnVR\ɥzmb`ےEsTAVStlrp _g '{l:7z#h;4'&# { O^}rȦ06jhm秋 &y{!W4]p1 ]!\ ]!Z4G*b,! h-w(E.G2h#+lU4tp %et(y #]YM-ɻB]!\ ]!ZՌꫡ+}æ}{ 0}n=m;][[ЕNtuۦZ"+l.g}]CD=+f*"BFGCWתX RyQDW{HW0aLDt%<C]:]!JHW+cg l -<]!ʮ &TpTٳY1CmgEu .ƾ0jY&pEdl>t5wܻ|s!x%lTmaϮKHdU-WZ5Zbԕe^rϭ"wưBv1^ڳ塌xobukU6Y m6SFo2xVC8mENHͩK.Z]WcGܑ`z.~RߞI}j0@Zaz:#GZgd5b 'ljvV&xٷ~I(w>٣~VG g]Zbg-{Zӌ>2͢ZFFE3 pcWELv}^Q Up^UV6"B* kYK~;]!J]!]i! "Z@D)S>ҕbƣc At(E}+k1yWXR ]!\`ъ)+sæ7}Vw`sW[ۡ¼ʮ te]ݶ*dDt9S6Bt:'\+]`C+D,thE Q4It?t%8G`;& µ&_|;]!Jf]!]I̫qAud`(xy>vxW^aA>{vhkNry=˧erN-(m6 =]!\cb+ !t()KttDDWt pC۷ZDS>ҕ\K]!F hMu}++j?1w? IY1YpOx\^۩\|GB틟=\-]ϚM6 * ^ F||a U-aӎziUV%AdN]@_u욍 %oodw -FuC$7z8\aOyw]:l)9(|@Auϧa|Ԃ< Zh%|d:yq(Aek3 FG@řukNP q 1,7]7k:]pb*٣_GGGnr+߃b_G0 U-)i^i[z}&MJkmb:)Z}}[ݺhk B/?^Ma}{΋%66k!b43FLQ]]i/ ڌΦ^:eni ؗuBZn:7z#MᠣQY.J7JI46xeP S50i5e[E0>04 l~jk\l"_ o0X^5\Y~7.>;Y\XdY^~2Z_0ﶁ0d]sH.O |8[BMn~8muk[Oz5Y.X,ࣳN2O9QCy˥HampUy*NtEYJS) `J|2o:\h1W|_mŕwLr?J* Qp,5Fl#B!(.hg{`'mkí'9j/#hԭ*ކ*XU.qV0WdSFS餠✫db7>MiIG* HYReWsUI ] NJH`BT#mG}0= 5 L)H-cNHPDQ-8dfe5]/گFj@fX?\^{#zY£AcgG͟|~6Ҍ&Lf@ks 8Sr W?N|2,|GD}F+ y3]ެI_Cޅ쬨~nǫߖL4OڗrJ%ǝQYnbQWZfv.4 |>xF5&Mv<˧}1_)_;?7,' m*CLR9-> hWn7ښƓ/ $C0uŎ 亜9 Nˆ]MZlچ:IOPXG竊m(O*u^WVMӰa2-Y&Uq@4R։jD&ḹ 7@E |r~:~1e?=/7PпB~_w=n J?r0[e͍A֜e~|]|tMYد?uMÓ: \>? ]2FCt>W6tӵ-QpZ_rԇyzߠ^5zul2^2=ߺh㣋Vc$HPtH*, 饥% o [u+$`'V׽.N-\pyjYOSA|8?ﯳΊioC0UdJL GV CyL f:oI}vbk;%[ntp0%ycqINQA4'3ACĖ!KlYe\@c8s!ȋ*s嘨l8#:úCUwyne/y "ƁBT UƗ\JypZ RF;u/%hY-*ږRaC00޵67rc׿`'Ul&vSS.<.$P$MRҌ]sdSė(pHInDs.. ؝P$ 2ZkvЫ _N4LoDZ"!DD[cy嵷sޛSD o}`9cҜ\cΔә)P‰gX+1ڨqk$ࠨWid DbRPH'񁚔2oVaF'e]j,G{^a<9ǐF)jU7H <7\9aNhSY˱s[`m/mͽA;H@ ݁KiL 4泡r xvHuTBYy0J L&5'vpmhb߹[lr/z/OǟAc1rl-sQ̙Q$5 cO ޫ^a %u4q-?b"AdTSy {5g;zhN>3j(HuzO#qx;?c{KFl&(>&խk1p}Zy/ Bhp Kx\u*!£G.㢭c#}A.(;#Dehg]:ZDb:᱙9wM^fA1MEMf ,Ϸx_~S-E_QW_ͷ͟`! .Xt|p sw:4Y4vrm ^oSp_۾*z{}#Pws*\0?⍦^AheAҘŻ^15P ڂ%8q)m!ZH ÿlAAQGHO\XUZբ(kkA~F/ t-.-:98Ҋ}DVM^>ѿ^N @*R +RV5t2( pl"VrJ2(@ߞ$֚)5ItR0|\_0xTa]ŻOyW3\M;䀩k n F2k@sHM3A))TgfM3DžI4݋· ;_#.e"$w[]AρAEuHuO= '\?AmKԁC?K-WǛK*t:|]FA>ҙ7EGf =˱st[Nd0fq^ =ꮼ8<_ẘ-O9#w6rmε~7G>~ۛ_ zHb[GJ G5OIlqVcY^?8x?'Es93Zk7/n_'3>y{6]OSY1Cc~\u/sm j&*dm}Z 7_UtTܮt|^.BYoC&l֕LfmǍc G:.*]_4r㫿S:!bY{Kf#8ә\p&T7ܦ:f0MQ -Qj9ڱX2{f Gۙ5XͷgܤVogY{*2,w3hOUdjs_6|QCg_XWSͲ@s#Jx6Z;Y5+' ϴP}#ww",XD$\)FY䠅iNq)C 9Sđ>>!" 
m[c2DD D9M6%?H NWx.+c :)bcc/N~^ m)s"(Imb#%v@\aTdz㻣u7 ; ՊTX kie@*(㏤O\ډbN'^myP%Y&$3*u.2 {lTv5=ִa=%IpEAap80)fO$ ,ad "`s Ay!4@,7!7҄(X~1 @D:HlȈMH F""٭Z+I!Oh쒃˧9ɛ\`v~alB%]\N9ǣЬ9% z?WZFͤNן /G.¯q"'>!!tj.Aj}z=]jZ9ֵD~0Y|,Clm15#BP"OA>_vC?"Ηʹ|_Wn/)hu Nu3N獚ftVԍYwd4}sysJj印^_Cf0Yo&K_;[Lg_ND/Aw`:viiqcEl%.ߡ-*SZbkқoxgnvza.ҭ"iSܺj2ڞ=[a3_HSMB1H.]MKTMemo>=ڷnG5.ԼPrxȧݮVs'ṃ}-Խ~tK3o} %zCbl)k)w)糧m÷8\z\#y˹^@ {7)$4<AW! [ 2B%P³['FO(9$"G)#W(;p1,y'"T% ...5&N_Ndo޵q$OwLn].n7._6gˤBJrW=|I&d΃5=]_U];C2i!)ŒI6ҁ'3h=d/-t46W׍'4 :[Yy-GTC)lyZQb\&ޘh)P.<9V݆f_<ȭCcSsFtgRp& b ]f>ro$,Dd2(0LTEF #;䂠cHaRvA$欼\+ j#c5qFz\^Z3c_,PXxT,\͹+xCI\?}=p}Q9p " A(+eXjXY_"*dc!dc"2+b!EM &"!YM&ێYr䈙Ec+#v5qF0%3 j͎}Q[3C%e<3Ctd"K U"M"jj,kY1XY\ aMBA8F>Ld$]P8OakS 0v͏ Gĭm :NS^ǐT4JX%8&3-D<))YnK6Fe0\\p&-ID#YBI:vWjU;?&GKW0_t$Q&fxKzJLd[#aug-XgݿȾ7³ofAABK$8 [Hc4tvjs 3 /y"[T-6̓fZ3ȏ-ο`GSkn<1m8|_ԋ/O4+鄏?u|;8ͳz3.h:r}H SJq8.ECF^!6l/yt$Qgz)ZR$!MMٌp"G/,Y_i'9D2&L,+,=W9?FS;U@W!3t]Rhc:3i dd2X&4t^yFFϳL{Δ_Lt9lL+>H\;Z,{uXĺ{ Ս/|oՅY+BXQe p{:W[gW xԴuOV?TPO[LI9Z%BGZ*˳og+qCR'74^y8?*ϲ{[n"}@ԀKњ HNFr$U*:RgHz孢35m<=M[jhC*)sUۍZR| o;Y&]ܖ3i5 u=ԗd/ H#,4ѰL,([5X5깬Vz6Ge1< &jV8+UdgEHo&43ov 䢥SGV!ԇNY~hXQi\wpQ 錪hZ⚠k%\m{6to3'>=4*~x:t - =2YR LJyQTB霹VY&\)l*F/oizDŽ46OӒpMZSף KBք4-ѵ7u7paaú}>q_FDvOݬ\sbCߧ"6Y7+d,Sz)`:Uua[vkTe%O)ӣldzq-?L"] )53}+HZeTjx;Xu̦r:Fo/s '$0A[W7L?`_WI26)f(%֚ ]oy%( %e{[yg$0l%}< Oy8ΰ,R!eA5\KS>7.=+F> rkZ y\ 0_1y (Ms)8i < A2!kl&-"3x0?F.հcrߌgB|ztnv׺Zw{9/hR̗>'-(gDyBuh|,.ZѡL@9z7X>gt\`wspΓ$t$viHqZWZ2C6@h)C$k%8f :*gve u}7)]fK {n,u :Ϸwef 6Q&2nmך+v2.GMSfvn}K9&2R%Č^-#YXF/K#Ti8n/cͧ>./ףhuU;wKsֵ$Ɲͥ yKFXuqyq_׊1!HP A“V:VI+!M5z$9t|cjF?>ٕ֒.mol3aLh; ҡ&.zFu,ѨuU3/ڝeqj{dm+7On.>QH'ddj8)iԊ*47G9bslퟃ_̄f2s<0'/,@Asj2!2 !~ЖĤ*[zك_Z=ژ Ob 'E3VIB&AKIs1RBl8 1=r+%fC{LWmiӈݮ|t nv6(a^"1_ƏXq܅0]m54}[Ӎ[h2 t+wblM8W.(qJnp, k[/)!YȭMJm[Ŭ~rwmxEc.݆nN뫰}4ҙ f6o>+Ý>J͏n~x]<n`}z7y"6%y驷ZLOݵ8Նhج>{u'x۲MXˏiiJ>u@L O쵱pV&&5V{FC:)PAD;^iӹ4Mۡ4M ʄR{$LmQjcbSB$ZpZD5?tuH5>"5d:aN*S.P=}QiUŦFlV[XcITDc9wK8+*jul+=}Hy E 6s2kjdlV#Z/mH !sA%MeM&Pr 8 !dbl:Yog-[~V]HǾHD%b/o-ɳv & ɂR!>FjZ4qRFm"i+mH'@  Z#lr&S@(&k fbTglPq-TGӅ>:;[%EX.^.r֬%ζe"L9# 1Zt0%+j=L8V;";[C۱<ݻ1Wx\Hޏ*mޏs:G3i.22uoԜ"ڦ~D {i), EeL4(bAA12Siaz/_퍟tWkb+[u0q[}oC Pwo#z$Y;WVNʫ.*Xr t PM*4(ț>S*rhحĝݮnݳA&J,D(ТqR"T&Ws3;cF3o';Go_yoTCkBa>piafz^K#7h;oI'guL0qIÑyہV uis bCT(1jCi5Z [@Ny>9oIćA׌7<(`:¨;կޟ3$094d AMѐ^ho2տ\z4t4$wU%qBy/mjukuӥP/5Q&`,u^>C&R_jˤ;^CUK率۬]7an|p\wZCaQ)`,ji)(i顶M)DҙoX AJeƨEԁDHB⻒AJ!AUeRhJ# 6 \ Q^*KR+ʜ=[2D&,Fyކ]һ:= t>R z^!O{yp9yc¦hy 2?<׃ay6`ni^B5 f ^ UmhL^jw8jJ#88sAN4?LY7{U#306 h5 [?񸝤wt|TW+ocqȔGCk:|Gg$X5_+??l\+Ƒh<5P䥷!^Z2[uͨx 53lѸO,Nx4e''tSV\꺾/[V;8-6lxm|."[(և5}u!I/6eNL3Jٚx7>y8ntz:vmZݍΥpy5mp E{ȣ;Qwy0PkrM25H-QG6 !1!e5 l|vVw (>" mL!c^[˔YDU/x8pP$;(+1댜 ի$}S7ogBB3Qm!B1йX-{;!H)) "*lbyo%Ɋc}0:E`Y tWD0$]~蔭Ig\K"RNZxU,mt`G{H*ލg?y(.Hd!Advx6 13S/QcK@m!\ LɅ&"0L4*5x7T}kU(Jb2F(+5RK1'd=(cr$YI$dEGb K3. 
ٛ(-!رU3rFV%ڣUjw%C,SW<`c>9Iӆ7a?vrXݹ9+tJ py fua#O!sXjUw{X?|U]g;{76z Nݺ v*^ vd Wr=<R{!žo}o@ =Ȥy1{ՅhccC.M-JN jWBZ~g2_R+3^z񆎘~'OLG#)FYvNV1R}5lH%C*LԺ7N'X.[R'C]WmY«nE^{^WkE&Z]$,G( r 롺z YŸ*z :뤀K暄'W{xWW[0M5n<x<}WrެAgHsCGV>}~\M.sslbflתE& ;j{s([ޮ@SuЌ FG+ѢQFIrwE蝖Pu 3"h@Y0hb:<$]f/\܉ugl@6!5gO Ojܻx`VwqagNjÅz?GJީ~8J;U{Zt1JWȲbdu}K/`J$fM" KH˸ `x@8%H"T~ "3IU  HS -)gTDzHcT⳵ZfNfFirҪ!um댜 "bX-;Vcꔾ2Oo7+ů[S~Tn_Om ҩ5ojjIW.xXqILtVgZH2Z]$~H$Rr|ZVO?ld 6ac0ⱔ4ՏuUuuUS,f$- B ZWSyy<`4>s y~:Qr P4A;(: (i*GyjA(ۋYAKqȱ\o(HJyF` e3uoQcH.RIw0n1}ά TCpmDI([릷߁Ț:@<2pQh x܏vi,ǖgA ƕ3#J$J Ą:iI<@W0(k{0M2i)Oٚ#;R/}SèMl]>;H>у䳿ͳ*)=_Aܩ=|9^lj^CYU$G9<v3M?xqٛ`2<*."k(K)j3,o DRYӄQ$_vv pW]efһ w+|b3[ĕqaLEኛu/Xt2}7eg\bm8rws\[nVˑfR)aIg 7-U9N^7qXվv ϑI:1K313 ΅~ߦxuYJ\em`tzxzWNŹo/mr ="PM8z;lPeM9E9={vM(n B%l.[]&:\t6КGjt,.&qϧ0pl^*FG0%\OO4\L3yet^[X[ :gMf1vi4H,X-2fQ*#K& n$_ XUӪ摸ܞ|3*̨)iNSpD1R (6"{A3DcⅪ5(a$Nx`>PRMNjʈhA #(rHXNf6r6 3ƶ`#voJM›#~ϻ^vX^V>o,0)>:{}piU҆-K"hZiʝ(0cDxDkX 04Da11Y2 cR[lJ!AsCBH}K@4 5P#NEJbN8I4Ehc 4saO=3\=+Ay%4\ðw3#i2 ~,5#=(JQdAZ'?fh &8|d{7`SxF` 1RθwֽEVPRIM~L_F&?뒗E*E+iYxs˱e fPq /%I12uZ 7ʚ^oͮyZf7fbg΍_KCme1峃=H>a/f{Y%+(:_אu*`yQO>b< v$:L-<{3LG%Ѷ7ɠy)yıj3,4L$Ş5*+Y<ʷ4hw84զefһ H2qH^^Vq{bIUƹuL.o7t[Itݧ 'jgJ$eѻm;sepM/+nw byElD}Ųm(~[־؊}}Z| i֧;aIg 7-Ǝ#1r*Xվv ϑI:Ɲ4=Ӈ2|\m:']uUmFݾM'|0T~dƃ(4o/mϷ á!G*BCUrC7%(UA~U <B6~TUP K&KlV.@.{:|>N# /!idJcsՙ6 WLkK4 ļ&P=.k(*TjN[kb:<\O0u.f}($~v-{*~KePmƼEQhȦDeyQ5 |`^ŝNfòXn#W LxKi<:E@pS!&z4[nMC;!i%|c!=1!y tlV鍇4+u ژ6MKUWVw;EѤj)-2uAYb!aU6zC +ekFxҘ Rʍ zqpx2'Tjmm,ƦeIC}+&PNǪ*7dyX>YͿVޚs{R4!V*/Կ`}o~|{ܔR]{[X%v`V=Cĕu/`j>\5qp^9<-io1mӖdBέ!p-{DnAcފ՗뚳-䯈@Nqe,0gASfuHI R;PTJ6U G"+t%6!Gc4Uڲ(R'bc5 䵑=b+[ ÊHI׶`K^qP.-77WW]VLBfάM,{ :M (^M"0)S[-<[14^YUx\gנ:KDJNdV03,(8fc  0GLܮqw|A=}?v=6n NHeDY[t* q"&Zj0sƁR*(?fn5mH+kč b*RRj21c-8x cazi0s^ &t-qx3vNp c)Q7= 盛=x,Fk64?$oao`M/*tco|8VvҜ`S$'f:G)O ֒o bm !~pN=W};N@-wA@#ڻham@fD0;}*me"]1EtxWVy`e ̇|j<K7˰R3r%[מV|?l&?}_0B_s#ṿL&D>mH/G-9D-m7(͕u/u Bk2wl~t2bBɴx4)Ÿ雷YUU0[&F!E t\k8Wf_BZ[b.V5CV735rLu4zt]|]^tk:Wd{UַZvrU [c_Gl}0>IC`zW?˕*Rkieub3X(:y귷G/NۓN0Q'^:9zvMM!@`~~MO4W6M۠iJu1v`{>2۵6g Jo/nfGey q7 \.qW[S[Ttc&JP50{rQ8@d3Y-Np_umz~(IIGNPT`OCOH[k`PJ1i.}cAR׍ %r]4U0VzX}][or+_sz; ,N 'ybQ$by)\t\e,S*~_.!=XdC!r.FfO <4TpU9WpÃw3pՆp'7]Õ1\4J+ ۹wO7oU wƂ0عL  N ^|h 2efYzď3 05 ,RV1k&nзWJ 7N\(YT1Qxu .ֽ7t›bRIйx-G.s2jp4)JOL!(I&9dm@"WeVLQtxUVD$\WEP&PAC"Re,6K1~roxcLȒ0)TA;4e*XNe>!;9؀tlJ-1@v|kwA[+/zg<;ʇM[/ei:>=Y6_H77P{dLtE3 -1Rc5}tmV;ԁ/malz閭y}CiVqb#lzw_ ^Uu׻#t3}# Bk\K受rY u5Q;ԃeںD rE~׹+ø?(]Ms}7 YX~2<͏|8mGq>\M?$xlUfS:L\e_orFW%8LJiԶ>},YS Rճ~x$ U=xLFxl+s4t11&Mo@*RdJLЁN@dHbTHb1hTUf@UQzaM(Ғ"d9AH>YTd>qU M4C[+HY"B1S16)dRQ1zQyK.!{dq@x 1s>' Аv큰]_"oB1?C*3J.F]: D=Y dP 2冑#PTZc)br)J_"rH3or B/E,0p]/Vk?2uǹ9v,"(+ K3ⴎ(i(H»Th-)ڥ"̵$eEbbQ}IFbjLJB]~3CkDZt|C,o7A;nEDWu.fZuivPǜe,2rqWo]?^Uy¿]LUX|쒭uѡJTQa{-FzjU;q܂zǢͯuSZqqsi{ǩRŚwwmYlyĨC ! erPMr **$3< :Xd.l4H\!!x%yy3d9HaNDa<s4 3M?6,E83Nl;C:0R2e*%^瑲(XXP6k YSQQ41^Kcp؅!eɣnY9[ QFXcƐTE|d `Q.LgV74Y%rL:#If FiYHJTvv]-a3]cIg?EOG\ ^tjY]v6};UkLex˟[%F=z{}>L&Zt^ƚjEbgeAdA?\= )&~:L/gDzp#l`.Woiҗ_~g?JwT{WoN2dVMO,EtsJCRx/}_)8࿫{61a勊{[*Nc(w{koUkp<'Z/O~10KvwV+Η&7O}7 T&Oc+\vuJ\͖6x6ݣϸjlM4ծ.(qJnpL3/@7>Źg UY tXiK׫VRV<|v\t5fG"nyd#l}e=Ϯ|?xƞq#χz˳9s 4ݛGw\xቸߤ?[]o)_}nq fc^҇zgqHfF+V>U\x5}@YZ#V)~}@]0ԑ70'^xv|fop~\r&ߥlBg, #fLTi&*e HgY;%sȌSG$ ThT\ EC!$L~l0n0CJ8IxE݇Pkt`d}쌍"]d$@stm <%$tMLLd"+Wj#]9z/ 5VfVT:42S7*G7'{U~J}@a__8ZݩbMwda4P&^3XN$ G=7x&D%F<EKŦR-Ns Qe t ֖8-c;6B>|xC3ffiQ}wßIza-Z *db R< ހ9Q-g_mc\ vmdcdum@TZUOΰٳ02Lp%QC.ɭm$?}jvkhG(jCY`.X٘ X/ZmD֗kPܾX1E0C/Td)RGQ|c. 
jغx3qvÞ/@T8l~ ""qkGK\m$oJgJ1ZifIi3?4|ŵ-v R scb!Cq^_9&` I Rb&׬&Z7k&3#~Ցlh9$_gY]th/Nf(sF+%`^F yQg*FH-0u:hbb͎}ol~&a6ʫڧ Y^?*@̫n{pAr6a̝Sy R.BРJm<^.ËS#FD;,U[Igؽb ҉dB gTA %2S,BT wmqHWƾmd;}ؗƀW[,i$o=,I岭lJeU+h]'e񯱃S'3MqX{ = }hQ'#ZMCdU,eʺR(ɊJ1jV(G<)${ zr2;y@lN;^:@EދÃ\񏴇6`4x%~iHd r?`+wY^SUױ=T^ǹͻ? >l54d9ܧ$Ք@Y!U߲9 Zw/{޾ؽ?ܵfK^<&G˛U'm?o@ )pWoON/o\ R>;oPgcO * )&3IJ9U TU/]U5:kRIKl|sc\MY>X\6䝮]Uflb-Y Bhqf:ͤq} y\}BSRH;mҟ6|g^>a??vv-dp$HIVW>Hg~Pnkn:mbnmޔwfjȶ}P5$ ɦ04cM/j9jHSw=(Kl*Ӥ9L6hWR0Ǡ+PTtkr9)mLsO C36D2>++Ogrz:'_J]=_~ur+g?;f[ů#;mwLMѰ-Z=|򙯷9Ց#0'gzvڥ7ܺ#6w ʼn>앞>Vs纣5<};JZ>}=)7#| ]uO:J:B A3+vj>tυ:ZUGyBW3+z yȡj+Nt\` 70tJӢ+ڂh]M gDW:24pi6tZ7u(V .t"tewfDWpzs骣$Y銕r6̈k7pIBW骣1UP1۝'񼤋3W2nK~8/Q9V8N"zrq^ýr\DPp޼Y-(`N 1Y H + 첶S~[`0smѮh@ﷻcMJ?2]~;!Oߝ^^ⷻYp7+.v%g_oߩ CICnUjdg n? `|ӹs|sK[ݔYB1biC_[Ɇ=ϲ~]ˎ%CqV甆f:*Cp~uG,#c#Ȼ,7SV q"JlW:; ~%6PQ>̳6͵dOW#߹Z|_6FEܶi`f0kW'_?7`AAůUxm% y%htghߧN腦b ,fZLGtQ2-k3py6EpG+tQ91Uͪ̆:\KsV&?fQeۡ+~օO: 7.xM:ioAWծymfDWZ{el pǮBkO:ʩ^yFtl2ZkitBWGHWd] ]uBW@OW@),tut xԶ/pe6t3uJw+bpcMť:RpS nƍNݢݤS|:\kBtGG HB`&Ϙಷs+aR6v:\xʑY=uxFODW(̅y&?f ]#]yl4fVlå|1Q.(*0J]u͆׫LhvfЕ}+B|bp+] nWxj;tۡ䉭[Е]jWkUl pIUGk骣ډ ]]DUl磮:C7WSrcVBWCWdu4#`7#\G[ NNWdI+[a;+f>tᲙ ]u8g;/tutÉZUIr]c͑Y5̛ffxɪcjfyVVN-VaDY n#O񜻭ih t` ON~m?E&\+Ph -4\:ZHhGvZ8¡mxNnf4~.tguv =FrB,RBWGHW^Ć9EW,>uztQҲF*HAfDWl ]ua6h;JշCWD+7_ҵ\౫} X1([Е,t܌a6t: ]!;u(-/tuteҁgDW_fCWUG BW0]ɚOޮ'~,/ל^ɯgg`۹-i9&履~s񬾇Sڡn/UݳV/wO(~U3v "KV^[<;&PQ^Ep}sqgY_طaW_5m} VlE-սȩ7<ΞL T{ٍ2!v_>wp-"zM'9-m4?~Gd'6YQͧTT (c>;tHNq@y >Cb{; q s|y'_ACp/_o: s$2ַSVB*UHtG-YGbww}Go wګU-oM~ݷ̀.RQL:+tpS)DK8OIQYbmQ ފ0N%`RL}%UBr]c&R(6ӷ) FhnNylm V;{7ЎI.]m!T}34spoĦvI[%͑!f-{%DjCW>yj-a6hF ijEǖblEz x[G8֬%׬Z9. RqUsJp-RKA(b0cˇИf JٴVkΜUWמpflU*4YR:vm5qx2LQ4NOЈ*K]ˡNfYFں{xtpA b ؠHջzsys;6Zڲh ^!E+>%Y*|\fHx:oB=vbJ̱6gϩT.jj" tF}zo~| Ԡ5|'X#bi G1|0H* (-qjH) AFb}9ODJ JPG"t_מɳ*.zчlTSRb#WB #1KNEJb ]{Yt3H!Qc(+٥$ݐ6*:˄wh\ 4[`}e[n@E E+9QZSAR[A۫x(QjPyU+IJJ̃dB'ҹ1cpmVS$1ĕd+tŅeDCL:4c 3=We>0Gp}KQ:$ RR}P6}֕]b-PP]-hEآy ,.H1xO VosUjcj5+@&x0X2ʄWF@Ja`AV#5*(N#Wn4 ~ *S/T Ge;j"(XM~=P Y؁ #7CA5ad\Q ,zP CO!a@YP69ihqf:!{I[S2 : 1  | *dr=c@Y\ET jş(fDi~ҋZ *jo.>LEw&%Ra7 J!ڳ;KPA7/uT* ~ 3dWՕ$b.KF J2H5}/OA:SW}$ˉ8H :JD A Ef`#d, N! `|u9^2V7_" NDž L/ΆG _;^LKUT#fA12T5 !pB@< غ3>\ԫk_k*h$%#R˫ LМ6" G1^T^td飒S r|$Z. p1%$;ANp`C{:/DA-)BJ$RFVdDGZ0=Ӆq`@(^|$/u$P{#fH܆ 6?K}2,Yߨ<{S|qgUepۊj̨jf8x8X ݏ[ mEy\UΧ}X2 ׇXk#td7Hcc.)6hr޹6rci~l/E&0 6ɗĖ܉g)ɲe+bt@w[J/sX|Ƀ9Hbyr 6շ= !X4AG "O A. AcF89Hz-xF(:"%P=hhk:ݣJ߷" A:RM ̭C4ǮnmA!`&'#E'hu]LA?ƒ.#Pq@!#2Ϊd%?" Bhe/@آ9"Z#z!ؐNt1kYDksgEL4Cz;Y(I#k4ZIY(EjM/-=zˢ{mDuS,Bh04- QB `F6Qd`- mmkw܃g7Ŵ)³t il(3I6Cn=nqV$h'=aS DZ u)z֚B$1d#F 1i "/w׆HMfkc鳄V}hE +Pxp* H-lb}BV&EυEL pc8lA3GAsO3RO(d,iJ nqFM*P@zum)`Uۛa4D jD=q7!#iv )>Ia*C:o e MzBޙB!TCz-jrp.РIMDD]z8n/n^~3򸬭hYc&-Haٯ8&/hm>.1j )V &󮻹F r>Y/&"J?4\ftSBo5'_K\ry8?;ʟ A]/p~\]iyؾܛr=o@=bE<~iX9lollor&]LQơdX%69Q[ϕ<7'**(q!Ч9V;Zv v@b'; N v@b'; N v@b'; N v@b'; N v@b'; N v@b'; l4&'z0#3R 0;J'P@g@b'; N v@b'; N v@b'; N v@b'; N v@b'; N v@b';  jYYT\qAswAe:B'+@b'; N v@b'; N v@b'; N v@b'; N v@b'; N v@b'; tN -ڊ@$8j@ DjMT:Nct!0@b'; N v@b'; N v@b'; N v@b'; N v@b'; N v@b'; Nqވ^Zw3ZjJחJɋպI7-`QASM%iu0ZYq jqI9+,Ny_W$ZpEjC; aWG+l.c'jں{yrCτf'_lC*!U)V =3Z|M[XfeHH;|zOƪ(>rkCMcZj0MrMӤ֍Ӥ2pTyvښ+-($ւ+RqN k2 T+W$ZpEj'Q1Wk;u\ǎ+1Wh\E` CjARkG+R8?0NKq\W54վJ/Dž+,~^]᧵sԏE'" OMgjЖdj/LVru>:M%ڶUޚ A%["* O UU5yrz6zi hmcRtMLo舟NTf@5}DԲ틡-"MߘNĦmh#Z_d[9PDtթZi@ݣNJg*Z`t5S $T3JjoӧHV'ZpN[* \\WL( L*c`\!s^״xl5"EǤ֎~1tqzw#p%_ H jPcTqu^Q x$HnWP;H幫?> o~~\%X瞻O~湫^vj?cj{*0[  j[ HUctquB*cM RP V ;v\Aqqu8+{#ɵDW6{Kecĕ ^W B:W Hm}tV16jī3** *{;PUTIK[U I`F);Ip0? 
]4gUOյ'2G/lW,U="Z1v\J'WG"\AhU\Z3z\J˸*>J O'5Ϝ >#TmO޸"U/qU+RT+d-"ڍW8J W$؇jpErWPk;HQ#ĕY Y H 9v\ʱM3> TZה OP"Ԃ+RqFJ1*5,c<dli3{ D-Wh9!>H`^5UO%K!<*}ثwZ->Rm.:T!)_U}].ǮzL}qƆDZU+}㢋os@})tI<=uG}V`~fbrL Z>1@*5L X#Gs-TR7r;q vËgW8ifܗGH?5\Ǐ.y%֍Z5mE`Nhc*6vsTgr}NusJ:>СRCv}>t#킞_л~@!FsvՁ6}foi%Tqm.IKŷ98s0ضm֘:c "ؽ^ڼ?uj3Fu L?7t <>^2n{AcF@o2=-[qgXǵy%ZzKB/N ipD?^Q5˶1%t *S6]/U]gW7ˋr?نΫ700¯y`ٷmb-..,y[ 7|mM\P81>Wo#oxg4q|O{goow{߿C铏:".#[ϩCq6 Ƶv{qsz\IL(׽{2/1_r V}F|\n-=߿5ټjޘSѠ ]Nʺs_J*^ c)5RKmK@zs}oS)VlPz)Cz9|I'׺Xl%rKGP6u'\۷:2hR~9ԯX67u12Y鬛_ݾ|sudrjAW,Eqr3^Kr9՛W ItTY˶mLXIVFB#1Bi7mORr7庝/꼋f&_a(~ C6r:)կOq\ &[{_ozq19)$U;ouoeRgRNrbzyYtX%Z(Gʓ].)PM+^eJFPyg0u;&@8?;/:{,/q? V  t7;r$5^s "f.̸e=s-*cE׳٤FA /ȜĶwSJGQJA3>.w,cXND!`7KZ\p;vK-}'Ϩ]h%_;6ᎃ`JL"hT9e$hoyU!C0EeiQCAe*lZ@d2 I)cD3.1*/ךPP5gU̩HKa لOl2c=ɚ_e%|_ \܆f; pf?q)%@KWфN NDq idX8ºȴaEQ(yOrnDeLL2&cEѤ$R)\B*ŚqbXXg0  fsf_nnCfzßp8ؔ4imT%&@<(\ڜ T҆2dQ'!=}8˰ɕJ`ƶڎ J Fbُ~<+\PX8E}I<L^DKԥHI!QH<E>I]FR /H2A#D A1`&($QxPAՠ+ŚQ?t! bq.(:DqWw*XT1'"hX !k(ӼȓyIc4&h %DiwP<¦3>``B@%3|Xs#5"r|zɩ p]=USh鍡r"1:IqkbM"! B()gsbT<ԅPaû?i/*qW\JяBnVX,хkD6rӼڧ1j]&{A1 J1*a, >4{ƶ@>_`ޥ#Ol3FbĹDؖ᪥Zubؓbٻg!1tYi&y|,iBytjI2D( fyQYrB&SHH?-L%ʬbN0p5@s,Xs Z|gDKf ,ڴ% 0o M^*C$Rh2 !tXkN&T8I|O~rk9o[wNt蒚jS\tm|)VwT$)9}e2+ϙEnMe' g]ǶI|_^%'O(q0F4 ɀCP{ oDH\D"X+ FD™!T{*Y bO9QSH]^9pb%8?`}I__] NQBj@@KIE ^5pa= r0)#Dh**omۗ`pMwǖ"{vФVy6f+-ϦoI,rC|.0p$ HŌpWyVzS5jKҪFA5Ө(Pt)@$t`>Ro4w1Υ :$IM[UI$% 5Ƅļô҄ĨXs(tN ﭷƫ{ vlV_9w7y%꣭G}p8CX8u۵PNUHAH[]uU "[TXb+/9`66}& T&K:Qc>raHݾ4W9PBKk pZ)CTQg)po6"h;rk)nbne<כ&Om#Ay8w fsIm~nwt3ߏLXo{k@ &pC~x@ [r[_Eɓv[xÎj;Ϸ{~oGf2e^w=}lnF3qk2:jke:VpsHHHHԉ'zKJnR4f@{ӫjV@vR4YYt2s~<,힟 .\H<>GfxXs ~r$ S 3A3mZP " h˨Iprۍ;`(!(!SJ -J`MFgeұr)DtAHQ_ƏS ц$ $PB|kZ3OVD(lf5l[rҙFy.>< oz}1N|&,\0/|qM}IM-:˲ű: ӻ }񡆺v7xG3M{hrگf:Z4: ok,ߚ؀{.U4J kǬC~8pa#jӼLnގ24Sy-\^>‡o=.ߛuFK4Cn4v~U,B47붺?{BP@zO<,?)Yi6k5&X'< zqz-e}Bd\ۭrmȵɵŶע喵|\4c5/ n[\e]v}H~߆ĽFn/sC7›Ad&? eߌP7@O.oq={Jp]CB]N\UF0./}eҰ/T-#U" G--@tsr;ّq[;߻2<~4UM+V_wyzڄݵcQ2w0ZS۬eۂܤ6_"ؖԖ߽.[Vea֘zM!ImS0hq D\ȈD)&c!(5:')ךZyE)Q>piiHBw!ؔ,/aGmiDQk΁he/- fiؖxqzuޭ7_"[ϧW[^zR[tU9, TFH !Z.u"yj0!YJDF4i8%=Q=6C1^|なx4/*]ޢiLkS3-NPD2VH(R PY4X{l:v+|;sDcmh!Upd$ZD'r*&ιtC膦1$P#cD%9J&*r9q,E"uT֐Nqi!cD`"_Il,R`K E"9) ϶gREZw&+_ U < bˉ ٻWBK|wrr;mSx{CK̗~coz/IJ2>ߠО wmmJy9g!wb3,f0 pvW[YHrdqu%[ZVwb[}dUb0?Њ8 ף>*Lj4SE{59P;+7r2:SkIY? ?'>G|;yebEπ+a+Ug_qTFIdzU(Uyb#۱Ak kM?h#'H%¿ )PNRBM/ u3h Ec5:0e#JM%8}4Ά+JDZÃ*"[4D%@{ b$g5fVY4IfөeOQAFij2zo,6t0ZNְeqqyf`bP!:\h^X\!=d ӼdAdȳ% IR"lx/DD|THkup;)@A NAyLOS76H SqC}@++Ĩq -l @k}8蛛WU S} E[fIA;7YIwy` I1H݇ 4GR8`GQ# zc0AmS:LQ<)zUs HPySs@[鄗h*Y `TYgD ~rՅ4SЌT[*n@5"Y:5iX^&HXdl-.@ۀ^Πx$wsUۻ ]g|!4 dʊy*պEm '9DѤsjA>qyRn{%[ 6^MȣhP$CK|cc9YW КRO  41_DDZer3{ QD Ρ~TTgqKǔgCr+e5,grFUl9ԛf//5}UġϠ m٣lŰ|7_rŅ '~~.o+׸^姳;n.y]x8)6;姞Nu%T{=|"ۓrc *e S?㝭_{L=e@IL"cYR`Wt,˜v˲TEUnm97j,@H!PBy\d6v&LHp7΄ 2!y#LH079Zb. 

kbͅRϚ٘Z:Ñ 4`LJ:€)9JFf ϛ̻m gvGrI#3lrs9p&&y9K>! kb҇܉ 굛WMGf6w#Zj2<2+-!IF?~pw&^ 䂍 f4󇘙0Ma! #X1A!C:[ *ql ?吉COYN1IcN? t2g%{@ e2_n TkefяG~K% $PbFS[@pωxrPEY3 쵷QDb l. ;ulz|%Pb*`e$I͔{@kԾvQxӓܘ5X"MbpK eNux*Iq٥@8¦z'{fk&̎ONx,ɐ1ƚP>CKdqYR־l _Hc{=į罦MG7#s3uI*C`.!|`o ~r>'’ #DX ʴ C&Q+W7њkZr?dmT$ E (0E Ȕ+26G[,7k0Ol >-r8O8Xw K7c(]xƳ-~=P.P/F[=Oa-l)RyR&lH{ƙ[Izff>Q gl|75!dpB߹^}?^/Mr6{kyHV1 9'(-z2vqꯇO}Hh- 8z<R R{c"C,"Jyt?ɳw=%J1r"f 3狠!UA(Se R) ?=LL%l,| R1XYRi )$P,eyKfn$2+!!I7dΰS&x1,sHh0n #X"A^pe?V^|fU MA|Z&b"l)7#LH?81v}rm]0D,JY W)nsƝK#}i_DM!*w%[My8gYQe洠g[L`e5jC{k?}; ܺB*!"҉: ĊD /HzFa f[V '-wJJmy*.M[ e寲rKR؛0)Pqg{RI@P>R@"L%,B@z^.ig. h3vUȐeY'sLA1mݴ@a8= `^EĪlKG'UaZh2vUQ ]A>v@/oe#l F3y$ »%'&µ4c_7h94Z8Z3(FX㻢C(@qm „n'c,#`8{qիՓ8<)˒dAIf-7xvk?kD)%`Oc4`|qurgI{8+?mP}T_!e#eAaӦHcg}4HGC1=vutWU @ʃs.Xm/t~dݧ+F99*}dCj^l@O$Ֆ՛ӡQIK8T:3ZmCuupTj8TtͶ\3=}wwɰQَ X@P2踣 FEȝ6ޛ, Ym2C5~`OL_CDwEkZЌ>6 l<ľ>+?m%ɋ}Y fՙS=.d_: VFcK *`D(?d U QUR[!֓Y5ZCY«cxqF[#7ʔX1ʒhc^k Qcǻw] 0!P"&A eicNG v^25kOL|%P6~ÈZ+u !7m2kBwO ܔ4m5^%MsNƄN gB&Ru-{^PȄ#9zJg k>Z>$VPfwi.{ĪC ӬĪCi[U  VUk8T%V ىUb0FAty GPWUeL )]ePbV{I'TUwN*ƂZ4uxJsrJXuGAvRX5G!y)ƪdw RU{pRX5NO*6vf'TUw)FIcQ-cՠDl~;뷟b,DSKAIs 5VhM %A2|@d|k`i8rJ [cSKC&`'[wn$Bb6,!- 3p˿ܹ^];-&tWs8yYLL/qGJ@B <9Zc*P [ qu */Ԝ69Tq@E!J9y Bf2XⴵYMI$  >nPՔZP RrUڿZ$q-^Gn{/~'lls7WKVڴ4b gr,y> kXļ7I3T7al {8\ 3)ޭDlXPeIOCI>p,NSxl:M,V?[Á_cSIw8=ˢW9-kͧ 7ܝ>Ov:E߆٭cHF?޾"txfc#WP'ߎ3_x NzVd*&C+3'*~SRFjSL;JŢ]-3"*Vr,*F5)Eɧ5=xޥ1j'.FTdNz??^/khbgYڙ,!oLl~9[DBvŃ9|?d ݷ~0"rDTPͨ5̇|zvpIiLmnΔPΓq0D6]SilIvw?;۠iE{Ó"Vy;"Z H*%?FmuE!^1yr<-7}5t4!d[=Iaã$=>UeS? hhHe_hx* Odx3& }lmoK!y5 Ȩ1h>|}~aBq;Bx?vFi0:Yz=UyFz &t%ipZ)A[#*,W4|)U㠔+ciH.?H$(fҽ߬{kM%1xuH&PvT.Tj)$VDzh! LψQAaxp^QHVp %Qa |KF~A܍k8П`ʕ햛CU~Zu9B\/o~rWc_~k6f!ݎ FhV=;Zo~}F2Ly=ʬMi_nJSFndxc1и z4E$ I5\`cE֏Aͣ#L$"Q*[<蒼kUg%nHR21T6FغhċС7Qg{*^ӯ5Pt2=\kJxʃ*fƾ5MGZ)ƖeCw>O޾l9MZS;j=|4n >>&ʕK+W Z{U,Ws_3oºj/ #/Oӛ'6"g/^$}\뭖j}@г g9A"⥁Մ@++[w\p"H HԊgokWJRM筷GN{Ԣ5#yE+9^6DM#P=1HTJw(h){ KT'ot^˽ ieӶI$ HP?ώ$HmH9y;gXP6Qm7]z]^2?Z l-V}#V{qέ#;Sjp^{p InK&n+.(-m2[$yf3Ƈ jSP?mTf37NS{>Ԛ)f^ܬByt=Dl mk#ߊ{hMO#?c]]֏FpwWM/SX.USSƕÀ[w=RMC%L>D 뺸U%̫xc:Cg/-yyt@-siC-IW.dJ|d[SN9h=z3S_L5ϽLnې\DԳ}8i7DvkJi:G(`/ݚW$jmHW.+2|#WQTw>ok)8U|Cq︑>$9kUrC/h|Ta4ۇ9sؼwսZI ?{~JY\L %(떆%ֶ9l,JMEKN,Tj')G 04u)7DN"%rYf؞\sp$aD30 `M&w94$j¯V5Fz{1兗j+ ▨1]Yo$Gr+ļ#"3`6a :WpfZYk;ɩ"+jsdU_F^_5(GusSWD~=ZqDi~} A9=cwk8UyG#_FC=3إEդnXcئMc2wcJj(4u|E {3s"9;?-O1-5MCN0G;RNƙ_޼jChj<wHSLJX#)=7o]胛7Σ :]z H[?&[_ʣN W8Mǭu2AA9(}yp;?ܕ8Jm_ {N4!WR'CN)zs2F\4$9S$Ј02)guG6R;!4̂g:"wpNSOg:}*!o ɠ5Ӂo'IӈFTp6d6㇎n;ѭq&aʺ8wV!ڀwD)WYe~@6bz:k0<'elQВn% Oc} "=e]Mn#sæ@*lz'#ˉǞF8Cƛ|yn!-&{ƍY!M=j 3=k}(ztb.TXoĻ-0"Xh¦[݋r,ʣ8OY5mIC \(>5HJ7^?N2U-BX<λu hA".YHۨ’t[SXT#OrRg!pCMKj-Z 8tanX>f4Ǝyq`J =1z7A#&ORmN;btSCEd@ft!o)B qF7fRm5wnv[ ܭqncW[<[@$m|mlkZBa&}H5;4܈T^c%B`ϙ>l,|}[S7ϙtTv)Y 9mvwT_m~|KPտ>пKȿ`d!-U4)wHlB! ܯo~ol=/mo%wZcqwFIN]f利r=-)Ь1SXNS&ƺZUZ08mĨ Ǔ0 -yy39FT)[X$X`^ߢ}<$0n΀圃w EZI@~F)QeH|w!·-8P$/Sh_F! _[0L?e"C^8&%H@d&_N u,hHiOWrq٤M'w\W`AݭEwM~W4?h~ЄEMHhS&\eE-Kil]yr[ZkuS(usRE]ꇠw+\?nuj!EWdR(;OCݧAm:,v l}0' Ւ5 *\PֈZcNV"#{eAHW(CwbM@C0PX;u%ٚ$0 ¢4hdڒ!eS\#N:\ gGU yC*Z+h8/:t~4%qghDt+˧^Rey ut;1Ca?L: j,uw&J;)ݜѺv 7,SL,=>Ŵ 8gFS`}&QJŅ>oHD-v PCiڍ|ؽ߳׭G~1za>-A|8j| B,B'AH`4ӑu#h@MvNqO=".aUVE ,َ,iݥ&DbǤiK'I#1<2#>mp#>b7L6㢛iԁ2%ڀwDE|D+x-9z@hysqF7 Й0ڀwD)WLb:g3[y)/%r򶞏qrp0By @.`pL>FX1jcH#4*^?)Ew/\5wPJFڛ ݼkuu.^2^lC(+rZS$]3eYF:0u?purJ!3>"Fo.xn+Oh;.)>HC$JMiOVJ YvKhUo_>Vٮk}8ǏwU͏Vp^PUwq}Cs<CJ?O}3X@8'O&C RK~JM 3:x{+9eS NE-uMKrˬL]kTuX%iS)(IKʩ.@*G)]AKٱXS;l[}+^.'<(Q- G@1ov= J@<yf٨)QzL꛶\Pz(k =T>4ͳbۺ*~/f_}h×{qB"pDq;-7m lU8zH'RՌLcUE]` tWa%QT|j/`Ș)(^ԡKf0T LbJ[=m觫oG/k}IoWCe\/yջ`3EH(Ew^(v?jH ƸFs2o8zԅuO{+ j X7"Mp%.Z}x j[|O,s<= s<&,Td>? 
Xr#Dfu<ח{bD^|.0Ŕ./wiըz6& gCK 0+2=#C@nVd2u%sAD^P&DlmL^K\TAye*kͮȔL/oC2!.I’z:fN<+JTH#]LY((FBTYuVf3ao2_J5$v>(à'ɪRMGavZLjݱbV-J#nH2F^TO/ 4y3 aɍUQ֏r 0s5FȡSyxՆ7ڰd=z1OqGzZN=j;3*0bx~1 [ `T$x5OA5֭Gw~1z1QN5 Q8Nf,Y@^A+(Ò^$m_nW u}#U$im 4;ǥّ͙߰ԁR i(*`x'B \=*L%`Bbτ{H%Pn [@S,*s *3sK}Ӗhw69oVOn@4LRF[JuFX4RЦ\ gu\*/t^UY45KuW on  ZTS](+O۪|1c`߾vB؛>6v{(ɤӾQ؛Xgz,XgruCS:Jw,ޯ:0b1‡ݻ/{(vt}xWd/("+sa#>_HAy^:!ȏ )yG> QZE# \}#j9TKTS}HH>XxftҡH Q>Ss\;n# /AT8z_Uk٩j g-ɢ2Ef\AM(J BecBTTWĚC4fv9.sfd5H!6 Ɇ;,j%?jQg(/;#F\bEU]1.ŌZZ5zؐP?]OU?TBa;۴sOˏU]mo8+ 9v[&d efaH4c{۹,_Qݶl'A-Sŧ*zmu1bRO1iZԋvlƤЂYMuYCQ¸)4 IQYj=m'Fw !E͈ybPQ:FRe1C@X2&lAJM0aEA ͍)y 4A<b`d)bjLݸc7BVDs,ߤmߪp?4u TuFn?wۇ΄2tUͲ>iUY|C_A@ C?3oз,Wb__dsw6}٧?:!Rs-\w[u=H!k@1v77ؖءp\k&qBls[>{Q8>7qL Xv2Yg˳U˳Ez(6}pV^ -TiՉ33&ைRx[34A$`}yyQqôgQ%]f~R~(7j2|aD.ۋ)zU?/g( a k6m=' #ֻ=mկ!F7_BPjbV.o?Ͷ̥ L7ȶזw}qtyS^.g{./V޻MXpv[w!'04yb]Sf}# y&eSM>YnM»bc:n#&SyfOօq))&Lbc:nc.̻ŗ1zz.,䍛hMVۥ)#b1hv~3bQH252Q]6\MXuiO1;o߮Moo7K-\P>E?C8RDzH4 ^~P)@^`J\F2qLebutuUtJ"lZoJl~1y KtSi.fpVdzZgK|v]`>X镳+gWJViq?ˋ7VczRl}x<,UG{;OQ覱_m\φO:unvX}&37ݡk<;.ϯN^| UBKE1%|(?-뻦\pUcOW޿mEυR`wZjWꎥfQjRdҾ *RLf<*Q*QbqELX.'o1-u4ՌĒݶx lq4'E2's^t*SVR咗t~5MP1YmLFC/5-| _5E´Q5j <8gRCF0r5$]P3r6nB~Jܿ6ĥVk+98UϾ{*Buƶ!ٞ;j ʴ#7i啳+gWuo(fn&+Y\YN EiF,DF9ǔVk\Zub mľb-7Ĝ۶\jh8-7z3M$㤶sT4JUiTVPc,#(ML[E*4ǎB:f~t<2S`/=®ؑ"D%RS.XcpkZ& a,e HJ,+8B'MBs!BFX @b4M+DN|cTiRB_5!9y*Tc Y,3yq\qp3IO̫9_;߮5N~ylhFs ǙD JMgޣqTjTBF T9b@<) jʼew}c b0¼: 9~P}3lOq!QJ9 ]_[^|-@M 3SDG8 ZZZfQ댰(Tס F6xua!oDl{ލ= X |L'!mAjD }{3Oօq)6yϻIZѻbc:n#&=ZZ-ӻua!oD6Y} <1oՙO Gθg'H9\iUUW#wlq.Ū#Jv)Qn\2S7ʛ?LSsK '/f^|.\}`H~}wwTB/M=Qlho ~bR.~\er_.c@6=»iz_ua>u!;'T~\6OK)]Z&f ^R˚8x'vj9D:ښJ ¾J*9Ŝ . @wGe9voq ƜќLwfDL|MaKAZ$P*4bS*E!B?AVӰ/uLw֣PUz9#G$}ǯz;^SPCRW-knFӄDz'_@Fh4ķ@R9з4MB HB8Dۿ ɨ"sMawOQؠYJ%S)ɌJJ D*m.)&ʨ\dY%gFPWθa` ** )V gU "7Ðyb%az[m'qNˆPwRmJ\ %ͩb)Քg͍$UcVX?4+S6&P4MRD3j@_. 428W)7­D0AsLVfdȸVyV "C{n4e%RI_*n{!'4C&݈D*.Vl=WGI?ƍ%p=`¤ m@0/F=l6ueﵨe Y.Zu-S.iC6y pEt vϱ~{j={?ߧr3i}8Znv3sq:r+uN7MY,ˊ"-@)$0&Mx~{'(OQ찛_8?oq}N7D1:/b9/q38v91ls94MY˝u@L7$=[b @ U~'B4 i˳L .bhL ,m).skKk¤ysBS Y%;oqLJ.͆oHg|Uk,O W0fVAr\16E ”#@KHa׹rSMX^7S4'3|Q; w&> W^ لOS)ap09zY8}'nB|0N>|_BxIC hӻ0 rg0sb^߶,m* ׇ^7Mm>L[BZ$;{}{^_== {}F57ɾCxCw D`qah5mc@K"!BkzҬxҗ^uYQ Bt PyuvL)h?$|K9d 7KW DvW/ƺ&{RxOc1!}GxQd'i|{څq)䧽}fwtb eNk;}{7ޭ y&eSN>lz7SO8wtbF"$t⏤zz.,䍛hæMDJ5똳ogO 9م>w%;S`=0KmC\]8-컦:j^-|AkZ¾J0]%)JZ4+E1EXv#xl%댈s-OA;K'2LL8PH EST|<*vNu(\FŌ0?>9ZUa<~ (Qԑwu'DvQYQ 'OM)N;k<`KӞ0rItyӜD{\F=p 0&tʩ*mD JTm@DVZkv+rϹkTn}h%ښYRQ"AVZC S3ҝg;\ F,M3DI-rah-$?^Z),,&LpՐ/%I$en-ۃ8-{d{jwon}ޭξ^ˏ!g~ ~{{Wہ˿|v,HJuqYkX⚆}E${뼃Ԍ@>8`{-ilq(TrJ+I׈(=EC)R-}R_6|F鉣#,}'}R_6zAR}!s tԗMjg"JP KK}ٔ:)́ V[ eON15r [\)D\R'=vm dGTJOg(] aIpŴ_q**܅?+?}-"q *OǛ CGUTN?!5~|sNs^/gsZi&D*E?JlR`<8dS&+9Sb92;U K9 jW2m- J [f~XpA)%r90 eNYi)+mRѐۻX{[S̤4"ۺsm¼LhLD{7 `hn&"kgʎ[^Xj)45㺌L ^!j;#dkٗ6!{(ҰHF_JINp_ƥ3.&f֣&>:h&Wj8T-0Aj]O{ިQ썺}Q3jKϑϴО;;ݶn>gLr҇O WOb_Gm~H}DjC'~c=L`t=E0ڣfC l,W/lvu!σӠa6쪊X>Y./M&c_:ɹ[Vd_PJB=wTOg܏ZDQ̭,]ؕ5HJǼa[WiD@$02CIe0i *%g9k6c-UH93Ccl*.MuԁQXX+[+TRRg^+`^=m'Rq(|R)`j(}'R+g;Xʑ3~%5#Oz./I&R_6fYc|8Q*!$0@i .WSꊷ"n?~Y/z>.Z[Q+4L*.-:][? 
Jan 22 05:45:51 crc systemd[1]: Starting Kubernetes Kubelet...
Jan 22 05:45:51 crc restorecon[4760]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 22 05:45:51
crc restorecon[4760]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 05:45:51 crc restorecon[4760]: 
/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:45:51 crc restorecon[4760]: 
/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:45:51 
crc restorecon[4760]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 22 05:45:51 crc restorecon[4760]: 
/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 22 05:45:51 crc restorecon[4760]: 
/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 22 05:45:51 crc restorecon[4760]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 22 05:45:51 crc restorecon[4760]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 22 05:45:51 crc restorecon[4760]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880
not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 22 05:45:51 crc restorecon[4760]: 
/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 05:45:51 crc restorecon[4760]: 
/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:45:51 crc restorecon[4760]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:45:51 crc restorecon[4760]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 22 05:45:51 crc restorecon[4760]: 
/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:45:51 crc restorecon[4760]:
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 
05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:45:51 crc 
restorecon[4760]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 05:45:51 crc restorecon[4760]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Jan 22 05:45:51 crc restorecon[4760]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 22 05:45:51 crc restorecon[4760]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]:
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:51 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin
to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc 
restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 05:45:52 crc restorecon[4760]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 22 05:45:52 crc restorecon[4760]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 22 05:45:52 crc restorecon[4760]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0
Jan 22 05:45:52 crc kubenswrapper[4933]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 22 05:45:52 crc kubenswrapper[4933]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Jan 22 05:45:52 crc kubenswrapper[4933]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 22 05:45:52 crc kubenswrapper[4933]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 22 05:45:52 crc kubenswrapper[4933]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Jan 22 05:45:52 crc kubenswrapper[4933]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.304997 4933 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309497 4933 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309527 4933 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309533 4933 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309538 4933 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309542 4933 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309548 4933 feature_gate.go:330] unrecognized feature gate: Example
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309553 4933 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309558 4933 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309564 4933 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309570 4933 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309578 4933 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
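The deprecation notices above all point at the same remedy: move the flag into the file named by --config, which the FLAG dump further down shows as /etc/kubernetes/kubelet.conf. As a sketch only, assuming the stock kubelet.config.k8s.io/v1beta1 KubeletConfiguration schema and copying the values from the FLAG dump below (this is not the node's actual config file), the equivalent stanza would look like:

apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# Replaces the deprecated --container-runtime-endpoint flag
containerRuntimeEndpoint: /var/run/crio/crio.sock
# Replaces the deprecated --volume-plugin-dir flag
volumePluginDir: /etc/kubernetes/kubelet-plugins/volume/exec
# Replaces the deprecated --register-with-taints flag
# ("node-role.kubernetes.io/master=:NoSchedule" = empty value)
registerWithTaints:
- key: node-role.kubernetes.io/master
  effect: NoSchedule
# Replaces the deprecated --system-reserved flag
systemReserved:
  cpu: 200m
  ephemeral-storage: 350Mi
  memory: 350Mi
# --pod-infra-container-image has no config-file counterpart; per the
# server.go:211 entry above, the sandbox image should also be set in the
# remote runtime (CRI-O here). --minimum-container-ttl-duration would
# instead be expressed as evictionHard / evictionSoft thresholds.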
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309586 4933 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309593 4933 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309598 4933 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309604 4933 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309610 4933 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309615 4933 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309621 4933 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309626 4933 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309631 4933 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309637 4933 feature_gate.go:330] unrecognized feature gate: GatewayAPI Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309642 4933 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309646 4933 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309650 4933 feature_gate.go:330] unrecognized feature gate: SignatureStores Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309655 4933 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309659 4933 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309664 4933 feature_gate.go:330] unrecognized feature gate: InsightsConfig Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309669 4933 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309674 4933 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309678 4933 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309683 4933 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309687 4933 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309692 4933 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309696 4933 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309700 4933 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309705 4933 feature_gate.go:330] unrecognized feature gate: PlatformOperators Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309710 4933 feature_gate.go:330] unrecognized feature 
gate: MachineConfigNodes Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309715 4933 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309720 4933 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309726 4933 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309731 4933 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309736 4933 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309750 4933 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309756 4933 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309761 4933 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309766 4933 feature_gate.go:330] unrecognized feature gate: PinnedImages Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309771 4933 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309775 4933 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309780 4933 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309784 4933 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309788 4933 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309793 4933 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309797 4933 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309802 4933 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309806 4933 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309810 4933 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309815 4933 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309819 4933 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309823 4933 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309827 4933 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309832 4933 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309836 4933 feature_gate.go:330] unrecognized feature gate: NewOLM Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 
05:45:52.309843 4933 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309849 4933 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309854 4933 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309861 4933 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309867 4933 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309872 4933 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309878 4933 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309884 4933 feature_gate.go:330] unrecognized feature gate: OVNObservability Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.309889 4933 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.309983 4933 flags.go:64] FLAG: --address="0.0.0.0" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.309997 4933 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310006 4933 flags.go:64] FLAG: --anonymous-auth="true" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310013 4933 flags.go:64] FLAG: --application-metrics-count-limit="100" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310020 4933 flags.go:64] FLAG: --authentication-token-webhook="false" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310026 4933 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310033 4933 flags.go:64] FLAG: --authorization-mode="AlwaysAllow" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310039 4933 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310044 4933 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310049 4933 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310055 4933 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310061 4933 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310066 4933 flags.go:64] FLAG: --cgroup-driver="cgroupfs" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310089 4933 flags.go:64] FLAG: --cgroup-root="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310095 4933 flags.go:64] FLAG: --cgroups-per-qos="true" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310101 4933 flags.go:64] FLAG: --client-ca-file="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310106 4933 flags.go:64] FLAG: --cloud-config="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310111 4933 flags.go:64] FLAG: --cloud-provider="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310116 4933 flags.go:64] FLAG: 
--cluster-dns="[]" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310122 4933 flags.go:64] FLAG: --cluster-domain="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310127 4933 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310132 4933 flags.go:64] FLAG: --config-dir="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310137 4933 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310143 4933 flags.go:64] FLAG: --container-log-max-files="5" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310150 4933 flags.go:64] FLAG: --container-log-max-size="10Mi" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310155 4933 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310161 4933 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310167 4933 flags.go:64] FLAG: --containerd-namespace="k8s.io" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310172 4933 flags.go:64] FLAG: --contention-profiling="false" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310178 4933 flags.go:64] FLAG: --cpu-cfs-quota="true" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310183 4933 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310189 4933 flags.go:64] FLAG: --cpu-manager-policy="none" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310194 4933 flags.go:64] FLAG: --cpu-manager-policy-options="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310202 4933 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310207 4933 flags.go:64] FLAG: --enable-controller-attach-detach="true" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310212 4933 flags.go:64] FLAG: --enable-debugging-handlers="true" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310218 4933 flags.go:64] FLAG: --enable-load-reader="false" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310225 4933 flags.go:64] FLAG: --enable-server="true" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310230 4933 flags.go:64] FLAG: --enforce-node-allocatable="[pods]" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310238 4933 flags.go:64] FLAG: --event-burst="100" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310243 4933 flags.go:64] FLAG: --event-qps="50" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310248 4933 flags.go:64] FLAG: --event-storage-age-limit="default=0" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310253 4933 flags.go:64] FLAG: --event-storage-event-limit="default=0" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310258 4933 flags.go:64] FLAG: --eviction-hard="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310265 4933 flags.go:64] FLAG: --eviction-max-pod-grace-period="0" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310270 4933 flags.go:64] FLAG: --eviction-minimum-reclaim="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310275 4933 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310280 4933 flags.go:64] FLAG: --eviction-soft="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310285 4933 
flags.go:64] FLAG: --eviction-soft-grace-period="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310290 4933 flags.go:64] FLAG: --exit-on-lock-contention="false" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310295 4933 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310300 4933 flags.go:64] FLAG: --experimental-mounter-path="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310305 4933 flags.go:64] FLAG: --fail-cgroupv1="false" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310310 4933 flags.go:64] FLAG: --fail-swap-on="true" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310315 4933 flags.go:64] FLAG: --feature-gates="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310321 4933 flags.go:64] FLAG: --file-check-frequency="20s" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310327 4933 flags.go:64] FLAG: --global-housekeeping-interval="1m0s" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310332 4933 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310338 4933 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310343 4933 flags.go:64] FLAG: --healthz-port="10248" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310348 4933 flags.go:64] FLAG: --help="false" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310353 4933 flags.go:64] FLAG: --hostname-override="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310358 4933 flags.go:64] FLAG: --housekeeping-interval="10s" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310363 4933 flags.go:64] FLAG: --http-check-frequency="20s" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310371 4933 flags.go:64] FLAG: --image-credential-provider-bin-dir="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310376 4933 flags.go:64] FLAG: --image-credential-provider-config="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310382 4933 flags.go:64] FLAG: --image-gc-high-threshold="85" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310387 4933 flags.go:64] FLAG: --image-gc-low-threshold="80" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310392 4933 flags.go:64] FLAG: --image-service-endpoint="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310398 4933 flags.go:64] FLAG: --kernel-memcg-notification="false" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310403 4933 flags.go:64] FLAG: --kube-api-burst="100" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310408 4933 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310414 4933 flags.go:64] FLAG: --kube-api-qps="50" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310420 4933 flags.go:64] FLAG: --kube-reserved="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310425 4933 flags.go:64] FLAG: --kube-reserved-cgroup="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310431 4933 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310437 4933 flags.go:64] FLAG: --kubelet-cgroups="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310442 4933 flags.go:64] FLAG: --local-storage-capacity-isolation="true" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310447 4933 flags.go:64] 
FLAG: --lock-file="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310452 4933 flags.go:64] FLAG: --log-cadvisor-usage="false" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310457 4933 flags.go:64] FLAG: --log-flush-frequency="5s" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310462 4933 flags.go:64] FLAG: --log-json-info-buffer-size="0" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310470 4933 flags.go:64] FLAG: --log-json-split-stream="false" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310476 4933 flags.go:64] FLAG: --log-text-info-buffer-size="0" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310481 4933 flags.go:64] FLAG: --log-text-split-stream="false" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310487 4933 flags.go:64] FLAG: --logging-format="text" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310492 4933 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310498 4933 flags.go:64] FLAG: --make-iptables-util-chains="true" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310503 4933 flags.go:64] FLAG: --manifest-url="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310508 4933 flags.go:64] FLAG: --manifest-url-header="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310515 4933 flags.go:64] FLAG: --max-housekeeping-interval="15s" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310521 4933 flags.go:64] FLAG: --max-open-files="1000000" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310528 4933 flags.go:64] FLAG: --max-pods="110" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310533 4933 flags.go:64] FLAG: --maximum-dead-containers="-1" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310538 4933 flags.go:64] FLAG: --maximum-dead-containers-per-container="1" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310543 4933 flags.go:64] FLAG: --memory-manager-policy="None" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310549 4933 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310554 4933 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310559 4933 flags.go:64] FLAG: --node-ip="192.168.126.11" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310564 4933 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310575 4933 flags.go:64] FLAG: --node-status-max-images="50" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310581 4933 flags.go:64] FLAG: --node-status-update-frequency="10s" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310586 4933 flags.go:64] FLAG: --oom-score-adj="-999" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310591 4933 flags.go:64] FLAG: --pod-cidr="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310596 4933 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310603 4933 flags.go:64] FLAG: --pod-manifest-path="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310608 4933 flags.go:64] FLAG: --pod-max-pids="-1" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 
05:45:52.310613 4933 flags.go:64] FLAG: --pods-per-core="0" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310618 4933 flags.go:64] FLAG: --port="10250" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310624 4933 flags.go:64] FLAG: --protect-kernel-defaults="false" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310629 4933 flags.go:64] FLAG: --provider-id="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310634 4933 flags.go:64] FLAG: --qos-reserved="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310639 4933 flags.go:64] FLAG: --read-only-port="10255" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310644 4933 flags.go:64] FLAG: --register-node="true" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310649 4933 flags.go:64] FLAG: --register-schedulable="true" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310653 4933 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310661 4933 flags.go:64] FLAG: --registry-burst="10" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310665 4933 flags.go:64] FLAG: --registry-qps="5" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310674 4933 flags.go:64] FLAG: --reserved-cpus="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310678 4933 flags.go:64] FLAG: --reserved-memory="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310684 4933 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310688 4933 flags.go:64] FLAG: --root-dir="/var/lib/kubelet" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310692 4933 flags.go:64] FLAG: --rotate-certificates="false" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310696 4933 flags.go:64] FLAG: --rotate-server-certificates="false" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310700 4933 flags.go:64] FLAG: --runonce="false" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310704 4933 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310708 4933 flags.go:64] FLAG: --runtime-request-timeout="2m0s" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310713 4933 flags.go:64] FLAG: --seccomp-default="false" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310717 4933 flags.go:64] FLAG: --serialize-image-pulls="true" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310721 4933 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310725 4933 flags.go:64] FLAG: --storage-driver-db="cadvisor" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310730 4933 flags.go:64] FLAG: --storage-driver-host="localhost:8086" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310734 4933 flags.go:64] FLAG: --storage-driver-password="root" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310738 4933 flags.go:64] FLAG: --storage-driver-secure="false" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310742 4933 flags.go:64] FLAG: --storage-driver-table="stats" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310746 4933 flags.go:64] FLAG: --storage-driver-user="root" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310750 4933 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310755 4933 flags.go:64] FLAG: 
--sync-frequency="1m0s" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310760 4933 flags.go:64] FLAG: --system-cgroups="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310765 4933 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310773 4933 flags.go:64] FLAG: --system-reserved-cgroup="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310784 4933 flags.go:64] FLAG: --tls-cert-file="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310789 4933 flags.go:64] FLAG: --tls-cipher-suites="[]" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310795 4933 flags.go:64] FLAG: --tls-min-version="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310800 4933 flags.go:64] FLAG: --tls-private-key-file="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310812 4933 flags.go:64] FLAG: --topology-manager-policy="none" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310818 4933 flags.go:64] FLAG: --topology-manager-policy-options="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310823 4933 flags.go:64] FLAG: --topology-manager-scope="container" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310828 4933 flags.go:64] FLAG: --v="2" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310836 4933 flags.go:64] FLAG: --version="false" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310845 4933 flags.go:64] FLAG: --vmodule="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310852 4933 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.310858 4933 flags.go:64] FLAG: --volume-stats-agg-period="1m0s" Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311004 4933 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311010 4933 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311015 4933 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311019 4933 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311024 4933 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311049 4933 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311054 4933 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311058 4933 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311062 4933 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311066 4933 feature_gate.go:330] unrecognized feature gate: PlatformOperators Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311085 4933 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311089 4933 feature_gate.go:330] unrecognized feature gate: Example Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311093 4933 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311097 4933 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311100 4933 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311104 4933 feature_gate.go:330] unrecognized feature gate: PinnedImages Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311107 4933 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311111 4933 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311114 4933 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311118 4933 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311121 4933 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311125 4933 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311128 4933 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311131 4933 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311135 4933 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311138 4933 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311142 4933 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311145 4933 feature_gate.go:330] unrecognized feature gate: SignatureStores Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311158 4933 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Jan 22 05:45:52 crc 
kubenswrapper[4933]: W0122 05:45:52.311164 4933 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311167 4933 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311171 4933 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311174 4933 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311178 4933 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311181 4933 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311184 4933 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311188 4933 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311191 4933 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311195 4933 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311199 4933 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311202 4933 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311206 4933 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311209 4933 feature_gate.go:330] unrecognized feature gate: OVNObservability Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311213 4933 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311216 4933 feature_gate.go:330] unrecognized feature gate: GatewayAPI Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311220 4933 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311223 4933 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311227 4933 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311230 4933 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311235 4933 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311239 4933 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311243 4933 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311247 4933 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311251 4933 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311255 4933 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311259 4933 feature_gate.go:330] unrecognized feature gate: InsightsConfig Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311263 4933 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311267 4933 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311270 4933 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311274 4933 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311278 4933 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311283 4933 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311287 4933 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311290 4933 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311300 4933 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311305 4933 feature_gate.go:330] unrecognized feature gate: NewOLM Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311309 4933 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311314 4933 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311319 4933 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311323 4933 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.311327 4933 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.311346 4933 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.323812 4933 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.323859 4933 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.323992 4933 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324004 4933 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324014 4933 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324023 4933 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324031 4933 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324038 4933 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324046 4933 feature_gate.go:330] unrecognized feature gate: NewOLM
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324054 4933 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324062 4933 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324070 4933 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324111 4933 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
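The feature_gate.go:386 entry above is the effective gate map once all overrides are applied. The FLAG dump shows --feature-gates="", so these overrides evidently arrive via the config file or compiled-in defaults rather than the command line, and the "unrecognized feature gate" names are gates known to other OpenShift components but absent from this kubelet's own registry (logged at W level and skipped; startup continues). As an illustrative sketch only, assuming the upstream v1beta1 featureGates field, the four gates resolved to true would be expressed in the KubeletConfiguration as:

apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# Mirrors the gates resolved to true in the feature_gate.go:386 map above;
# gates left at their defaults need not be listed.
featureGates:
  CloudDualStackNodeIPs: true
  DisableKubeletCloudCredentialProviders: true
  KMSv1: true
  ValidatingAdmissionPolicy: true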
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324125 4933 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324134 4933 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324143 4933 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324152 4933 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324162 4933 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324172 4933 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324181 4933 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324189 4933 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324198 4933 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324206 4933 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324216 4933 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324224 4933 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324233 4933 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324241 4933 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324249 4933 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324257 4933 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324265 4933 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324273 4933 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324280 4933 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324288 4933 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324297 4933 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324316 4933 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324324 4933 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324331 4933 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324339 4933 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324347 4933 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324355 4933 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324362 4933 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324370 4933 feature_gate.go:330] unrecognized feature gate: Example
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324378 4933 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324385 4933 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324393 4933 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324400 4933 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324408 4933 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324416 4933 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324423 4933 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324432 4933 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324439 4933 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324447 4933 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324455 4933 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324462 4933 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324470 4933 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324477 4933 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324485 4933 feature_gate.go:330] unrecognized feature gate: SignatureStores
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324493 4933 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324500 4933 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324511 4933 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
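The feature_gate.go:351/:353 lines show a second kind of warning: the gate is known, but it has already graduated to GA or been deprecated, so setting it explicitly is transient at best. A sketch of stage-aware setting, assuming a per-gate lifecycle stage roughly like the real featuregate package tracks (names here are illustrative):

package main

import "fmt"

type stage string

const (
	alpha      stage = "ALPHA"
	beta       stage = "BETA"
	ga         stage = "GA"
	deprecated stage = "DEPRECATED"
)

// gateStages is a stand-in registry; the real kubelet tracks a
// prerelease stage per gate in k8s.io/component-base/featuregate.
var gateStages = map[string]stage{
	"ValidatingAdmissionPolicy":              ga,
	"DisableKubeletCloudCredentialProviders": ga,
	"KMSv1":                                  deprecated,
}

// set mirrors the log lines above: explicitly setting a GA or
// deprecated gate succeeds but emits a removal warning.
func set(name string, value bool) {
	switch gateStages[name] {
	case ga:
		fmt.Printf("W Setting GA feature gate %s=%v. It will be removed in a future release.\n", name, value)
	case deprecated:
		fmt.Printf("W Setting deprecated feature gate %s=%v. It will be removed in a future release.\n", name, value)
	}
	// the value would be stored in the resolved map here
}

func main() {
	set("ValidatingAdmissionPolicy", true)
	set("KMSv1", true)
}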
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324521 4933 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324529 4933 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324537 4933 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324545 4933 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324552 4933 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324560 4933 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324568 4933 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324576 4933 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324586 4933 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324596 4933 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324605 4933 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324613 4933 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324622 4933 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.324635 4933 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324854 4933 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324868 4933 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324878 4933 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324887 4933 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324897 4933 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324906 4933 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324914 4933 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324922 4933 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324930 4933 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324938 4933 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324946 4933 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324954 4933 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324961 4933 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324969 4933 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324977 4933 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324985 4933 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.324992 4933 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325000 4933 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325012 4933 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325023 4933 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325032 4933 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325040 4933 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325049 4933 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325057 4933 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325066 4933 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325096 4933 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325104 4933 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325112 4933 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325120 4933 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325129 4933 feature_gate.go:330] unrecognized feature gate: Example
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325137 4933 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325145 4933 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325152 4933 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325160 4933 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325168 4933 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325176 4933 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325183 4933 feature_gate.go:330] unrecognized feature gate: NewOLM
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325193 4933 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325201 4933 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325210 4933 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325220 4933 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325230 4933 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325243 4933 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325257 4933 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325279 4933 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325290 4933 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325304 4933 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325316 4933 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325326 4933 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325338 4933 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325349 4933 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325360 4933 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325372 4933 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325382 4933 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325392 4933 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325402 4933 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325413 4933 feature_gate.go:330] unrecognized feature gate: SignatureStores
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325426 4933 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325436 4933 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325445 4933 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325456 4933 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325466 4933 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325475 4933 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325485 4933 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325495 4933 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325506 4933 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325516 4933 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325527 4933 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325538 4933 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325546 4933 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.325555 4933 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.325568 4933 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.325844 4933 server.go:940] "Client rotation is on, will bootstrap in background"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.330279 4933 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.330415 4933 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.331368 4933 server.go:997] "Starting client certificate rotation"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.331406 4933 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.331823 4933 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-12-07 14:58:16.215457343 +0000 UTC
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.331928 4933 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.339670 4933 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Jan 22 05:45:52 crc kubenswrapper[4933]: E0122 05:45:52.341737 4933 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.163:6443: connect: connection refused" logger="UnhandledError"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.342778 4933 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.353632 4933 log.go:25] "Validated CRI v1 runtime API"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.375698 4933 log.go:25] "Validated CRI v1 image API"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.377684 4933 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.380233 4933 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2026-01-22-05-41-20-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
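The two certificate_manager.go:356 lines report the client certificate's expiry (2026-02-24) and a rotation deadline (2025-12-07) roughly 79% of the way through its validity. Upstream kubelet picks that deadline at a uniformly random point in about the 70-90% band of the certificate lifetime so a fleet does not rotate in lockstep; a sketch under that assumption (the exact fractions are from memory, not this log):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// rotationDeadline sketches how the certificate manager picks a rotation
// time: a random point between 70% and 90% of the certificate's lifetime
// (fractions illustrative).
func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := notAfter.Sub(notBefore)
	jitter := 0.7 + 0.2*rand.Float64()
	return notBefore.Add(time.Duration(jitter * float64(total)))
}

func main() {
	notBefore := time.Date(2025, 2, 24, 5, 52, 8, 0, time.UTC)
	notAfter := time.Date(2026, 2, 24, 5, 52, 8, 0, time.UTC)
	fmt.Println("rotation deadline:", rotationDeadline(notBefore, notAfter))
}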
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.380285 4933 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}]
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.411425 4933 manager.go:217] Machine: {Timestamp:2026-01-22 05:45:52.409022339 +0000 UTC m=+0.246147782 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2799998 MemoryCapacity:33654128640 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:d621d350-6f7c-490e-a47d-b396db235280 BootID:c8062a48-506a-465a-9977-93e8530bae49 Filesystems:[{Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:90:7d:c8 Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:90:7d:c8 Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:1e:fd:14 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:75:c8:73 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:11:84:ef Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:f4:83:2f Speed:-1 Mtu:1496} {Name:ens7.23 MacAddress:52:54:00:5a:59:f9 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:ba:fc:f6:0b:40:ed Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:1a:3a:08:fc:fd:be Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654128640 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.411823 4933 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
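The fs.go and manager.go:217 entries above enumerate each mounted filesystem with its capacity and inode count; at a high level cAdvisor gathers those numbers from statfs(2). A minimal Go equivalent for one mount point (Linux-only; mount path illustrative):

package main

import (
	"fmt"
	"syscall"
)

// fsInfo mirrors the Capacity/Inodes fields in the Machine entry above:
// total bytes = blocks * block size, inodes = total file slots.
func fsInfo(mount string) (capacity, inodes uint64, err error) {
	var st syscall.Statfs_t
	if err = syscall.Statfs(mount, &st); err != nil {
		return 0, 0, err
	}
	return st.Blocks * uint64(st.Bsize), st.Files, nil
}

func main() {
	capacity, inodes, err := fsInfo("/var")
	if err != nil {
		panic(err)
	}
	fmt.Printf("capacity=%d inodes=%d\n", capacity, inodes)
}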
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.412034 4933 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.412569 4933 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.412852 4933 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.412910 4933 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.413258 4933 topology_manager.go:138] "Creating topology manager with none policy"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.413275 4933 container_manager_linux.go:303] "Creating device plugin manager"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.413598 4933 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.413647 4933 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.413915 4933 state_mem.go:36] "Initialized new in-memory state store"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.414048 4933 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.415297 4933 kubelet.go:418] "Attempting to sync node with API server"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.415342 4933 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
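The nodeConfig dump above carries the HardEvictionThresholds list: memory.available < 100Mi as an absolute quantity, nodefs.available < 10% and imagefs.available < 15% as percentages of capacity. A sketch of how such a signal is evaluated (simplified; the real eviction manager also handles grace periods and min-reclaim):

package main

import "fmt"

// threshold models one entry of the HardEvictionThresholds list above:
// either an absolute quantity in bytes or a percentage of capacity.
type threshold struct {
	signal     string
	quantity   int64   // bytes; 0 if percentage-based
	percentage float64 // fraction of capacity; 0 if quantity-based
}

// crossed reports whether an observed "available" value is below the
// threshold for the given capacity, i.e. the LessThan operator in the log.
func crossed(t threshold, available, capacity int64) bool {
	limit := t.quantity
	if t.percentage > 0 {
		limit = int64(t.percentage * float64(capacity))
	}
	return available < limit
}

func main() {
	memAvailable := threshold{signal: "memory.available", quantity: 100 << 20} // 100Mi
	nodefsAvailable := threshold{signal: "nodefs.available", percentage: 0.1}
	fmt.Println(crossed(memAvailable, 50<<20, 32<<30))    // true: under 100Mi
	fmt.Println(crossed(nodefsAvailable, 20<<30, 85<<30)) // false: >10% of disk free
}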
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.415389 4933 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.415415 4933 kubelet.go:324] "Adding apiserver pod source"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.415444 4933 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.416691 4933 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.163:6443: connect: connection refused
Jan 22 05:45:52 crc kubenswrapper[4933]: E0122 05:45:52.416767 4933 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.163:6443: connect: connection refused" logger="UnhandledError"
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.416786 4933 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.163:6443: connect: connection refused
Jan 22 05:45:52 crc kubenswrapper[4933]: E0122 05:45:52.416863 4933 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.163:6443: connect: connection refused" logger="UnhandledError"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.418391 4933 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.418917 4933 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
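The reflector.go:561/:158 pairs show the informer's initial List calls failing with connection refused because api-int.crc.testing:6443 is not up yet; client-go simply retries with backoff until the API server answers. A generic sketch of that relist loop (not client-go's actual API; URL and caps illustrative):

package main

import (
	"fmt"
	"net/http"
	"time"
)

// listUntilUp sketches a reflector-style relist loop: try the list
// endpoint, and on connection errors back off exponentially (capped)
// until the API server answers.
func listUntilUp(url string) error {
	backoff := 500 * time.Millisecond
	const maxBackoff = 30 * time.Second
	for {
		resp, err := http.Get(url)
		if err == nil {
			resp.Body.Close()
			return nil
		}
		fmt.Printf("W failed to list: %v (retrying in %s)\n", err, backoff)
		time.Sleep(backoff)
		if backoff *= 2; backoff > maxBackoff {
			backoff = maxBackoff
		}
	}
}

func main() {
	_ = listUntilUp("https://api-int.crc.testing:6443/api/v1/nodes")
}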
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.419597 4933 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.420105 4933 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.420128 4933 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.420135 4933 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.420141 4933 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.420152 4933 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.420160 4933 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.420166 4933 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.420177 4933 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.420185 4933 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.420193 4933 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.420221 4933 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.420228 4933 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.420401 4933 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.420800 4933 server.go:1280] "Started kubelet"
Jan 22 05:45:52 crc systemd[1]: Started Kubernetes Kubelet.
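The plugins.go:603 lines record the in-tree volume plugins being probed once at startup and kept in a registry keyed by canonical plugin name, which later volume setup looks up (secret, configmap, csi, and so on). A compact sketch of such a registry (interface and type names are illustrative, not the kubelet's actual VolumePlugin interface):

package main

import "fmt"

// volumePlugin is a stand-in for the kubelet's volume plugin interface;
// only the canonical name is modeled here.
type volumePlugin interface {
	Name() string
}

type hostPath struct{}

func (hostPath) Name() string { return "kubernetes.io/host-path" }

type configMap struct{}

func (configMap) Name() string { return "kubernetes.io/configmap" }

// registry mirrors the "Loaded volume plugin" lines: plugins are probed
// once at startup and kept in a map keyed by their canonical name.
type registry map[string]volumePlugin

func (r registry) load(p volumePlugin) {
	r[p.Name()] = p
	fmt.Printf("I \"Loaded volume plugin\" pluginName=%q\n", p.Name())
}

func main() {
	r := registry{}
	r.load(hostPath{})
	r.load(configMap{})
	if p, ok := r["kubernetes.io/host-path"]; ok {
		fmt.Println("found:", p.Name())
	}
}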
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.422170 4933 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.423431 4933 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.163:6443: connect: connection refused
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.422219 4933 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Jan 22 05:45:52 crc kubenswrapper[4933]: E0122 05:45:52.424744 4933 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.163:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.188cf761a6201284 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-22 05:45:52.420770436 +0000 UTC m=+0.257895789,LastTimestamp:2026-01-22 05:45:52.420770436 +0000 UTC m=+0.257895789,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.425577 4933 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.426313 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.426354 4933 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.426519 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-30 14:15:13.009890508 +0000 UTC
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.426787 4933 volume_manager.go:287] "The desired_state_of_world populator starts"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.426824 4933 volume_manager.go:289] "Starting Kubelet Volume Manager"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.427022 4933 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.427872 4933 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.163:6443: connect: connection refused
Jan 22 05:45:52 crc kubenswrapper[4933]: E0122 05:45:52.427995 4933 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.163:6443: connect: connection refused" logger="UnhandledError"
Jan 22 05:45:52 crc kubenswrapper[4933]: E0122 05:45:52.427365 4933 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.163:6443: connect: connection refused" interval="200ms"
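The ratelimit.go:55 entry configures the podresources endpoint with qps=100 and burstTokens=10, i.e. a token bucket refilled 100 times per second with a capacity of 10. golang.org/x/time/rate models exactly that shape; a small sketch:

package main

import (
	"fmt"

	"golang.org/x/time/rate"
)

// A limiter with refill rate 100/s and burst capacity 10: at most 10
// requests can be admitted instantly, after which admission is paced.
func main() {
	limiter := rate.NewLimiter(rate.Limit(100), 10)
	granted := 0
	for i := 0; i < 15; i++ {
		if limiter.Allow() { // non-blocking check: only burst tokens succeed instantly
			granted++
		}
	}
	fmt.Printf("granted %d of 15 immediate requests (burst=10)\n", granted)
}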
\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.163:6443: connect: connection refused" interval="200ms" Jan 22 05:45:52 crc kubenswrapper[4933]: E0122 05:45:52.426742 4933 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.429008 4933 factory.go:55] Registering systemd factory Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.429050 4933 factory.go:221] Registration of the systemd container factory successfully Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.433334 4933 factory.go:153] Registering CRI-O factory Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.434014 4933 factory.go:221] Registration of the crio container factory successfully Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.434203 4933 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.434251 4933 factory.go:103] Registering Raw factory Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.434282 4933 manager.go:1196] Started watching for new ooms in manager Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.435811 4933 server.go:460] "Adding debug handlers to kubelet server" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.436053 4933 manager.go:319] Starting recovery of all containers Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.440731 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.440951 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.441011 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.441068 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.441185 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.441254 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" 
volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.441328 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.441393 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.441476 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.441539 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.441594 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.441652 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.441708 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.441798 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.441882 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.441940 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.441994 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" 
volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.442052 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.442132 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.442188 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.442290 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.442399 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.442459 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.442513 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.442567 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.442619 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.442675 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.442735 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" 
volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.442787 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.442882 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.442953 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.443021 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.443131 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.443222 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.443286 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.443364 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.443446 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.443533 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.443617 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" 
volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.443700 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.443779 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.443857 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.443941 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.444026 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.444129 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.444219 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.444313 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.444394 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.444485 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.444563 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.444642 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.444740 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.444861 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.444960 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.445061 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.445215 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.445308 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.445406 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.445507 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.445623 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.445714 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" 
volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.445805 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.445897 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.445998 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.446230 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.446356 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.446459 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.446571 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.446679 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.446782 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.446918 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.447029 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" 
seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.447160 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.447271 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.447397 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.447511 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.447618 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.447734 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.447838 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.447956 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.448096 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.448208 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.448317 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.448422 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.448533 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.448637 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.448760 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.448865 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.448976 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.449162 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.449286 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.449401 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.449502 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.449601 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" 
volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.449697 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.449802 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.449916 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.450017 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.450150 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.450251 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.450345 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.450437 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.450608 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.450709 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.450815 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" 
volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.450952 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.451063 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.451210 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.451325 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.451439 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.451552 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.451676 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.451798 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.451910 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.452022 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.452158 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" 
volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.452268 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.452394 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.452506 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.452613 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.452719 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.452833 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.452940 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.453057 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.453192 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.453304 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.453415 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" 
volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.453536 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.453649 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.453757 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.453863 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.453959 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.454055 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.454211 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.454303 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.454384 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.454462 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.454537 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" 
volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.454615 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.454702 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.454795 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.454624 4933 manager.go:324] Recovery completed Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.454909 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.455019 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.455040 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.455054 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.455065 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.455114 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.455128 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.455141 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.455152 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.455182 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.455193 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.455206 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.455219 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.455235 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.455268 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.455281 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.455293 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.455306 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.455318 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.455344 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.455358 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456224 4933 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456278 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456296 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456309 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456343 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456356 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456368 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456381 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 
05:45:52.456409 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456422 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456434 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456446 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456459 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456471 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456499 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456511 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456524 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456536 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456548 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456577 4933 
reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456591 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456603 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456614 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456628 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456657 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456669 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456681 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456692 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456705 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456737 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456750 4933 reconstruct.go:130] "Volume is marked as uncertain 
and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456762 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456773 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456785 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456814 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456825 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456838 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456849 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456862 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456891 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456902 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456914 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456926 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456941 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456970 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456983 4933 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.456993 4933 reconstruct.go:97] "Volume reconstruction finished" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.457000 4933 reconciler.go:26] "Reconciler: start to sync state" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.466730 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.468480 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.468517 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.468529 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.469410 4933 cpu_manager.go:225] "Starting CPU manager" policy="none" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.469437 4933 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.469464 4933 state_mem.go:36] "Initialized new in-memory state store" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.479313 4933 policy_none.go:49] "None policy: Start" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.480202 4933 memory_manager.go:170] "Starting memorymanager" policy="None" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.480235 4933 state_mem.go:35] "Initializing new in-memory state store" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.486147 4933 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.489370 4933 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.489435 4933 status_manager.go:217] "Starting to sync pod status with apiserver" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.489480 4933 kubelet.go:2335] "Starting kubelet main sync loop" Jan 22 05:45:52 crc kubenswrapper[4933]: E0122 05:45:52.489562 4933 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.492448 4933 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.163:6443: connect: connection refused Jan 22 05:45:52 crc kubenswrapper[4933]: E0122 05:45:52.492569 4933 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.163:6443: connect: connection refused" logger="UnhandledError" Jan 22 05:45:52 crc kubenswrapper[4933]: E0122 05:45:52.529234 4933 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.533970 4933 manager.go:334] "Starting Device Plugin manager" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.534047 4933 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.534101 4933 server.go:79] "Starting device plugin registration server" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.534609 4933 eviction_manager.go:189] "Eviction manager: starting control loop" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.534630 4933 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.534830 4933 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.534933 4933 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.534942 4933 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Jan 22 05:45:52 crc kubenswrapper[4933]: E0122 05:45:52.543738 4933 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.590005 4933 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.590179 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.592152 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.592201 4933 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.592218 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.592422 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.593001 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.593164 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.593493 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.593536 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.593554 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.593684 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.593896 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.593976 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.594891 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.594949 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.594975 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.595133 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.595189 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.595217 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.595264 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.595455 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.595516 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.595457 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.595592 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.595604 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.596625 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.596677 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.596703 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.596741 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.596766 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.596782 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.596947 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.596959 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.596981 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.598251 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.598312 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.598334 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.598275 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.598404 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.598424 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.598675 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.598733 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.600127 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.600184 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.600197 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:45:52 crc kubenswrapper[4933]: E0122 05:45:52.629516 4933 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.163:6443: connect: connection refused" interval="400ms" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.635778 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.637463 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.637517 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.637536 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.637569 4933 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 22 05:45:52 crc kubenswrapper[4933]: E0122 05:45:52.638223 4933 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.163:6443: connect: connection refused" node="crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.659617 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.659654 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.659702 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.659722 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.659748 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.659879 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.659929 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.659966 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.659997 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.660038 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.660068 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.660145 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.660176 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: 
\"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.660249 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.660309 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.761169 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.761264 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.761309 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.761372 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.761385 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.761429 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.761462 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.761488 4933 
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.761494 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.761529 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.761539 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.761608 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.761657 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.761562 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.761489 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.761636 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.761524 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
\"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.761630 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.761812 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.761904 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.761934 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.761948 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.762028 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.762038 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.762150 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.762155 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.762237 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: 
\"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.762329 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.762343 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.762451 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.839153 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.845858 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.845938 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.845957 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.845997 4933 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 22 05:45:52 crc kubenswrapper[4933]: E0122 05:45:52.846771 4933 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.163:6443: connect: connection refused" node="crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.926841 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.957394 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.979764 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 05:45:52 crc kubenswrapper[4933]: W0122 05:45:52.985506 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-d6efd21e5e8f1d40de0be212109b14b0500921a849c15c13176cb20fb68c4975 WatchSource:0}: Error finding container d6efd21e5e8f1d40de0be212109b14b0500921a849c15c13176cb20fb68c4975: Status 404 returned error can't find the container with id d6efd21e5e8f1d40de0be212109b14b0500921a849c15c13176cb20fb68c4975 Jan 22 05:45:52 crc kubenswrapper[4933]: I0122 05:45:52.994286 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 22 05:45:53 crc kubenswrapper[4933]: W0122 05:45:53.002208 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-11ab01564ff3d8b845b7a4d54c1b0ad3655507bc62b2bf5ba152dbf5e9ed815c WatchSource:0}: Error finding container 11ab01564ff3d8b845b7a4d54c1b0ad3655507bc62b2bf5ba152dbf5e9ed815c: Status 404 returned error can't find the container with id 11ab01564ff3d8b845b7a4d54c1b0ad3655507bc62b2bf5ba152dbf5e9ed815c Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.003137 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 22 05:45:53 crc kubenswrapper[4933]: W0122 05:45:53.006782 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-cf9316c7399230e06e436b902d5d98f3b38812722a580084a89d33916709a69a WatchSource:0}: Error finding container cf9316c7399230e06e436b902d5d98f3b38812722a580084a89d33916709a69a: Status 404 returned error can't find the container with id cf9316c7399230e06e436b902d5d98f3b38812722a580084a89d33916709a69a Jan 22 05:45:53 crc kubenswrapper[4933]: W0122 05:45:53.019277 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-afd09104a75766405e447a23569553f0421ca778d723411dde48b4151b389c3f WatchSource:0}: Error finding container afd09104a75766405e447a23569553f0421ca778d723411dde48b4151b389c3f: Status 404 returned error can't find the container with id afd09104a75766405e447a23569553f0421ca778d723411dde48b4151b389c3f Jan 22 05:45:53 crc kubenswrapper[4933]: E0122 05:45:53.031112 4933 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.163:6443: connect: connection refused" interval="800ms" Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.247421 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.248534 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.248577 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.248587 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.248613 4933 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 22 05:45:53 crc kubenswrapper[4933]: E0122 05:45:53.249097 4933 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.163:6443: connect: connection refused" node="crc" Jan 22 05:45:53 crc kubenswrapper[4933]: W0122 05:45:53.270113 4933 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get 
"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.163:6443: connect: connection refused Jan 22 05:45:53 crc kubenswrapper[4933]: E0122 05:45:53.270192 4933 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.163:6443: connect: connection refused" logger="UnhandledError" Jan 22 05:45:53 crc kubenswrapper[4933]: W0122 05:45:53.345332 4933 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.163:6443: connect: connection refused Jan 22 05:45:53 crc kubenswrapper[4933]: E0122 05:45:53.345402 4933 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.163:6443: connect: connection refused" logger="UnhandledError" Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.424167 4933 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.163:6443: connect: connection refused Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.427277 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-23 22:54:02.599818035 +0000 UTC Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.495679 4933 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="08156c9622b99efbd889b625313f0f379f47a773fad5948ac981abe484c7c0f4" exitCode=0 Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.495709 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"08156c9622b99efbd889b625313f0f379f47a773fad5948ac981abe484c7c0f4"} Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.495821 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"e584f34ffc5a9a182e33862dab04f8afff0b9ff425cc602cdf34d7f771e6fecc"} Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.496050 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.496884 4933 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="053a5f03773a50e6add05b0c1d8de76035b560baaa24f51d573ca1f8305a1048" exitCode=0 Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.496947 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"053a5f03773a50e6add05b0c1d8de76035b560baaa24f51d573ca1f8305a1048"} Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.496966 4933 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"afd09104a75766405e447a23569553f0421ca778d723411dde48b4151b389c3f"} Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.497033 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.498278 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.498344 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.498362 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.498985 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.499018 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.499028 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.500593 4933 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="171c23458dabebaa0ddea08c10f662edcab7797ef6dffaab101c75d6a0737dff" exitCode=0 Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.500682 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"171c23458dabebaa0ddea08c10f662edcab7797ef6dffaab101c75d6a0737dff"} Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.500740 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"cf9316c7399230e06e436b902d5d98f3b38812722a580084a89d33916709a69a"} Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.500866 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.502393 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.502421 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.502415 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"9ad10cfd00a2bd6dab0704baa3988eab6b7b97812ef8a6074b9bc2c2a03df731"} Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.502438 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.502456 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" 
event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"11ab01564ff3d8b845b7a4d54c1b0ad3655507bc62b2bf5ba152dbf5e9ed815c"} Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.503937 4933 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d" exitCode=0 Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.503983 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d"} Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.504011 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"d6efd21e5e8f1d40de0be212109b14b0500921a849c15c13176cb20fb68c4975"} Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.504152 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.504898 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.504924 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.504934 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.506633 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.510841 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.510887 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:45:53 crc kubenswrapper[4933]: I0122 05:45:53.510899 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:45:53 crc kubenswrapper[4933]: W0122 05:45:53.746670 4933 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.163:6443: connect: connection refused Jan 22 05:45:53 crc kubenswrapper[4933]: E0122 05:45:53.746757 4933 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.163:6443: connect: connection refused" logger="UnhandledError" Jan 22 05:45:53 crc kubenswrapper[4933]: W0122 05:45:53.759859 4933 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.163:6443: connect: connection refused Jan 22 05:45:53 crc kubenswrapper[4933]: E0122 05:45:53.759939 4933 reflector.go:158] "Unhandled Error" 
err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.163:6443: connect: connection refused" logger="UnhandledError" Jan 22 05:45:53 crc kubenswrapper[4933]: E0122 05:45:53.832683 4933 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.163:6443: connect: connection refused" interval="1.6s" Jan 22 05:45:54 crc kubenswrapper[4933]: I0122 05:45:54.049431 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:45:54 crc kubenswrapper[4933]: I0122 05:45:54.051503 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:45:54 crc kubenswrapper[4933]: I0122 05:45:54.051546 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:45:54 crc kubenswrapper[4933]: I0122 05:45:54.051557 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:45:54 crc kubenswrapper[4933]: I0122 05:45:54.051582 4933 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 22 05:45:54 crc kubenswrapper[4933]: I0122 05:45:54.351462 4933 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 22 05:45:54 crc kubenswrapper[4933]: I0122 05:45:54.428287 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-11 19:28:23.578668752 +0000 UTC Jan 22 05:45:54 crc kubenswrapper[4933]: I0122 05:45:54.507705 4933 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="451b9933a3e62cdb17e1a005e85f7468428be6fe4794e85bf504fe4babba4d76" exitCode=0 Jan 22 05:45:54 crc kubenswrapper[4933]: I0122 05:45:54.507755 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"451b9933a3e62cdb17e1a005e85f7468428be6fe4794e85bf504fe4babba4d76"} Jan 22 05:45:54 crc kubenswrapper[4933]: I0122 05:45:54.507968 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:45:54 crc kubenswrapper[4933]: I0122 05:45:54.508822 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:45:54 crc kubenswrapper[4933]: I0122 05:45:54.508840 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:45:54 crc kubenswrapper[4933]: I0122 05:45:54.508849 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:45:54 crc kubenswrapper[4933]: I0122 05:45:54.509738 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"2a7388e9b8023ce1ca28ede29592438fb4cd2f85800d46b027cb5b4e8c100ebd"} Jan 22 05:45:54 crc kubenswrapper[4933]: I0122 05:45:54.510093 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume 
controller attach/detach" Jan 22 05:45:54 crc kubenswrapper[4933]: I0122 05:45:54.520299 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:45:54 crc kubenswrapper[4933]: I0122 05:45:54.520334 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:45:54 crc kubenswrapper[4933]: I0122 05:45:54.520345 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:45:54 crc kubenswrapper[4933]: I0122 05:45:54.521942 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"aa57c9c8f2cbba4f1282222cf6f3a29fd59efe4a378222e613eaeb32f0b71877"} Jan 22 05:45:54 crc kubenswrapper[4933]: I0122 05:45:54.521978 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"9aa4ed2d41484a7f47e481ae23e10c3a264edfdaddeb4c8f6ffd946694c88481"} Jan 22 05:45:54 crc kubenswrapper[4933]: I0122 05:45:54.521991 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"8018d50ee76b7bed70197f4085cb2157cbad29ecbb0f9c3ea6cb4ed0621877ec"} Jan 22 05:45:54 crc kubenswrapper[4933]: I0122 05:45:54.522089 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:45:54 crc kubenswrapper[4933]: I0122 05:45:54.522918 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:45:54 crc kubenswrapper[4933]: I0122 05:45:54.522944 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:45:54 crc kubenswrapper[4933]: I0122 05:45:54.522953 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:45:54 crc kubenswrapper[4933]: I0122 05:45:54.525039 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"ada618a16d850d1ea940debe486a3836a49697a4742426d29bddb32d496a9d61"} Jan 22 05:45:54 crc kubenswrapper[4933]: I0122 05:45:54.525086 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"401030b22a7ca1989952ec1bf2d9d7d8a5b78e0cba875dcd6b1d2fe4f2548c54"} Jan 22 05:45:54 crc kubenswrapper[4933]: I0122 05:45:54.525099 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"a5e260f75dcd418c5e606ef0ce7c06aa8db34b655acdc7ed4d7540f3ed59d873"} Jan 22 05:45:54 crc kubenswrapper[4933]: I0122 05:45:54.525431 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:45:54 crc kubenswrapper[4933]: I0122 05:45:54.526262 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 
Jan 22 05:45:54 crc kubenswrapper[4933]: I0122 05:45:54.526290 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:45:54 crc kubenswrapper[4933]: I0122 05:45:54.526301 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:45:54 crc kubenswrapper[4933]: I0122 05:45:54.527269 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28"}
Jan 22 05:45:54 crc kubenswrapper[4933]: I0122 05:45:54.527288 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9"}
Jan 22 05:45:54 crc kubenswrapper[4933]: I0122 05:45:54.527297 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164"}
Jan 22 05:45:54 crc kubenswrapper[4933]: I0122 05:45:54.527304 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336"}
Jan 22 05:45:54 crc kubenswrapper[4933]: I0122 05:45:54.527313 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb"}
Jan 22 05:45:54 crc kubenswrapper[4933]: I0122 05:45:54.527375 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 22 05:45:54 crc kubenswrapper[4933]: I0122 05:45:54.527941 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:45:54 crc kubenswrapper[4933]: I0122 05:45:54.527995 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:45:54 crc kubenswrapper[4933]: I0122 05:45:54.528012 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:45:55 crc kubenswrapper[4933]: I0122 05:45:55.428435 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-17 12:50:33.864044806 +0000 UTC
Jan 22 05:45:55 crc kubenswrapper[4933]: I0122 05:45:55.532556 4933 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="26018c608fced436269a1857fe893c9210881ae39cdcc48c6534e24079fa6507" exitCode=0
Jan 22 05:45:55 crc kubenswrapper[4933]: I0122 05:45:55.532665 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 22 05:45:55 crc kubenswrapper[4933]: I0122 05:45:55.532685 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"26018c608fced436269a1857fe893c9210881ae39cdcc48c6534e24079fa6507"}
Jan 22 05:45:55 crc kubenswrapper[4933]: I0122 05:45:55.532965 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 22 05:45:55 crc kubenswrapper[4933]: I0122 05:45:55.533889 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:45:55 crc kubenswrapper[4933]: I0122 05:45:55.533919 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:45:55 crc kubenswrapper[4933]: I0122 05:45:55.533928 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:45:55 crc kubenswrapper[4933]: I0122 05:45:55.534780 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:45:55 crc kubenswrapper[4933]: I0122 05:45:55.534808 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:45:55 crc kubenswrapper[4933]: I0122 05:45:55.534819 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:45:56 crc kubenswrapper[4933]: I0122 05:45:56.429530 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-11 00:01:26.843914395 +0000 UTC
Jan 22 05:45:56 crc kubenswrapper[4933]: I0122 05:45:56.538460 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"db47dea8321ccb957b59de56172e2d7e36d4f3795f30adceaa5605918abf6c02"}
Jan 22 05:45:56 crc kubenswrapper[4933]: I0122 05:45:56.538510 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"f5e7e3c7ae4efd23a9e3d50b71099bfb496356e403987b9025b04ae54f860f76"}
Jan 22 05:45:56 crc kubenswrapper[4933]: I0122 05:45:56.538523 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"91d6c19be8c8d868bd689e496bd60b78576f74f2069a629c04d40002892f931e"}
Jan 22 05:45:56 crc kubenswrapper[4933]: I0122 05:45:56.538534 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"42c1ecae3ef4a37dc0fcfeefe8dd047e5a11b8e24bd18b3fa3f2a5e66a3a1043"}
Jan 22 05:45:56 crc kubenswrapper[4933]: I0122 05:45:56.895117 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 22 05:45:56 crc kubenswrapper[4933]: I0122 05:45:56.895352 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 22 05:45:56 crc kubenswrapper[4933]: I0122 05:45:56.897369 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:45:56 crc kubenswrapper[4933]: I0122 05:45:56.897441 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:45:56 crc kubenswrapper[4933]: I0122 05:45:56.897466 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
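The kubelet-serving certificate lines keep the same expiration (2026-02-24 05:53:03) but report a different rotation deadline on every pass. That is the client-go certificate manager recomputing a jittered deadline in the tail of the certificate's lifetime, so a fleet of kubelets does not rotate in lockstep. A sketch of that computation; the 70 to 90 percent window matches client-go's documented behavior but should be treated as an assumption here:

    package main

    import (
        "fmt"
        "math/rand"
        "time"
    )

    // jitteredDeadline picks a rotation time at 70-90% of the certificate's
    // total lifetime, which is consistent with the shifting
    // "rotation deadline is ..." values in the log above.
    func jitteredDeadline(notBefore, notAfter time.Time) time.Time {
        total := notAfter.Sub(notBefore)
        frac := 0.7 + 0.2*rand.Float64()
        return notBefore.Add(time.Duration(frac * float64(total)))
    }

    func main() {
        notAfter := time.Date(2026, 2, 24, 5, 53, 3, 0, time.UTC)
        notBefore := notAfter.AddDate(-1, 0, 0) // assumed 1-year lifetime
        for i := 0; i < 3; i++ {
            fmt.Println("rotation deadline is", jitteredDeadline(notBefore, notAfter))
        }
    }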
Jan 22 05:45:57 crc kubenswrapper[4933]: I0122 05:45:57.060396 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 22 05:45:57 crc kubenswrapper[4933]: I0122 05:45:57.060642 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 22 05:45:57 crc kubenswrapper[4933]: I0122 05:45:57.062069 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:45:57 crc kubenswrapper[4933]: I0122 05:45:57.062174 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:45:57 crc kubenswrapper[4933]: I0122 05:45:57.062190 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:45:57 crc kubenswrapper[4933]: I0122 05:45:57.063249 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 22 05:45:57 crc kubenswrapper[4933]: I0122 05:45:57.430360 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-08 16:24:09.363581102 +0000 UTC
Jan 22 05:45:57 crc kubenswrapper[4933]: I0122 05:45:57.545458 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"804b3614ee1b337e4921aef7fd70a0322531f83f0ca48fa80a2af954a862c050"}
Jan 22 05:45:57 crc kubenswrapper[4933]: I0122 05:45:57.545484 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 22 05:45:57 crc kubenswrapper[4933]: I0122 05:45:57.545484 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 22 05:45:57 crc kubenswrapper[4933]: I0122 05:45:57.547011 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:45:57 crc kubenswrapper[4933]: I0122 05:45:57.547048 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:45:57 crc kubenswrapper[4933]: I0122 05:45:57.547059 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:45:57 crc kubenswrapper[4933]: I0122 05:45:57.547122 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:45:57 crc kubenswrapper[4933]: I0122 05:45:57.547198 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:45:57 crc kubenswrapper[4933]: I0122 05:45:57.547218 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:45:57 crc kubenswrapper[4933]: I0122 05:45:57.631385 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 22 05:45:57 crc kubenswrapper[4933]: I0122 05:45:57.642366 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 22 05:45:58 crc kubenswrapper[4933]: I0122 05:45:58.386343 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 22 05:45:58 crc kubenswrapper[4933]: I0122 05:45:58.386655 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 22 05:45:58 crc kubenswrapper[4933]: I0122 05:45:58.387684 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:45:58 crc kubenswrapper[4933]: I0122 05:45:58.387737 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:45:58 crc kubenswrapper[4933]: I0122 05:45:58.387753 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:45:58 crc kubenswrapper[4933]: I0122 05:45:58.393941 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 22 05:45:58 crc kubenswrapper[4933]: I0122 05:45:58.431039 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-14 09:27:17.503885917 +0000 UTC
Jan 22 05:45:58 crc kubenswrapper[4933]: I0122 05:45:58.548044 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 22 05:45:58 crc kubenswrapper[4933]: I0122 05:45:58.548158 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 22 05:45:58 crc kubenswrapper[4933]: I0122 05:45:58.548223 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 22 05:45:58 crc kubenswrapper[4933]: I0122 05:45:58.549640 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:45:58 crc kubenswrapper[4933]: I0122 05:45:58.549678 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:45:58 crc kubenswrapper[4933]: I0122 05:45:58.549688 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:45:58 crc kubenswrapper[4933]: I0122 05:45:58.549724 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:45:58 crc kubenswrapper[4933]: I0122 05:45:58.549759 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:45:58 crc kubenswrapper[4933]: I0122 05:45:58.549775 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:45:58 crc kubenswrapper[4933]: I0122 05:45:58.550766 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:45:58 crc kubenswrapper[4933]: I0122 05:45:58.550823 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:45:58 crc kubenswrapper[4933]: I0122 05:45:58.550850 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:45:59 crc kubenswrapper[4933]: I0122 05:45:59.431799 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-04 12:21:08.459785894 +0000 UTC
Jan 22 05:45:59 crc kubenswrapper[4933]: I0122 05:45:59.550240 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 22 05:45:59 crc kubenswrapper[4933]: I0122 05:45:59.551764 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:45:59 crc kubenswrapper[4933]: I0122 05:45:59.551839 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:45:59 crc kubenswrapper[4933]: I0122 05:45:59.551863 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:45:59 crc kubenswrapper[4933]: I0122 05:45:59.895174 4933 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 22 05:45:59 crc kubenswrapper[4933]: I0122 05:45:59.895296 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 22 05:46:00 crc kubenswrapper[4933]: I0122 05:46:00.096500 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 22 05:46:00 crc kubenswrapper[4933]: I0122 05:46:00.432674 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-11 02:33:20.216423718 +0000 UTC
Jan 22 05:46:00 crc kubenswrapper[4933]: I0122 05:46:00.553105 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 22 05:46:00 crc kubenswrapper[4933]: I0122 05:46:00.554030 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:00 crc kubenswrapper[4933]: I0122 05:46:00.554107 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:00 crc kubenswrapper[4933]: I0122 05:46:00.554121 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:01 crc kubenswrapper[4933]: I0122 05:46:01.433959 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-08 14:35:48.603820501 +0000 UTC
Jan 22 05:46:02 crc kubenswrapper[4933]: I0122 05:46:02.101870 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc"
Jan 22 05:46:02 crc kubenswrapper[4933]: I0122 05:46:02.102284 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 22 05:46:02 crc kubenswrapper[4933]: I0122 05:46:02.104106 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:02 crc kubenswrapper[4933]: I0122 05:46:02.104161 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:02 crc kubenswrapper[4933]: I0122 05:46:02.104184 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
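The patch_prober.go:28 / prober.go:107 pair above is a single startup-probe failure: the HTTPS GET to 192.168.126.11:10357/healthz timed out client-side, so cluster-policy-controller stays in startup (probe="startup" status="unhealthy") until a probe succeeds (the earlier status="started" line), and only then do the readiness probes begin to matter. Declared against the core/v1 API, such a probe looks roughly like this; it requires the k8s.io/api and k8s.io/apimachinery modules, and the numeric values are illustrative, not read from the CRC manifests:

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/util/intstr"
    )

    func main() {
        startup := &corev1.Probe{
            ProbeHandler: corev1.ProbeHandler{
                HTTPGet: &corev1.HTTPGetAction{
                    Path:   "/healthz",
                    Port:   intstr.FromInt(10357),
                    Scheme: corev1.URISchemeHTTPS,
                },
            },
            TimeoutSeconds:   3,  // the log shows a client-side timeout being hit
            PeriodSeconds:    5,  // illustrative
            FailureThreshold: 30, // illustrative: long grace for a slow control plane
        }
        fmt.Printf("%+v\n", startup)
    }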
node="crc" event="NodeHasSufficientPID" Jan 22 05:46:02 crc kubenswrapper[4933]: I0122 05:46:02.417525 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 22 05:46:02 crc kubenswrapper[4933]: I0122 05:46:02.417734 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:46:02 crc kubenswrapper[4933]: I0122 05:46:02.420872 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:02 crc kubenswrapper[4933]: I0122 05:46:02.421004 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:02 crc kubenswrapper[4933]: I0122 05:46:02.421119 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:02 crc kubenswrapper[4933]: I0122 05:46:02.434463 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-28 02:46:00.385977386 +0000 UTC Jan 22 05:46:02 crc kubenswrapper[4933]: E0122 05:46:02.544001 4933 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 22 05:46:03 crc kubenswrapper[4933]: I0122 05:46:03.434786 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-29 09:58:24.406834696 +0000 UTC Jan 22 05:46:04 crc kubenswrapper[4933]: E0122 05:46:04.053531 4933 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": net/http: TLS handshake timeout" node="crc" Jan 22 05:46:04 crc kubenswrapper[4933]: E0122 05:46:04.353263 4933 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": net/http: TLS handshake timeout" logger="UnhandledError" Jan 22 05:46:04 crc kubenswrapper[4933]: I0122 05:46:04.424359 4933 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Jan 22 05:46:04 crc kubenswrapper[4933]: I0122 05:46:04.434987 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 22:30:01.256197289 +0000 UTC Jan 22 05:46:05 crc kubenswrapper[4933]: E0122 05:46:05.434454 4933 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" interval="3.2s" Jan 22 05:46:05 crc kubenswrapper[4933]: I0122 05:46:05.435572 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-14 19:54:11.060437317 +0000 UTC Jan 22 05:46:05 crc kubenswrapper[4933]: I0122 05:46:05.653924 4933 kubelet_node_status.go:401] "Setting node annotation to enable 
volume controller attach/detach" Jan 22 05:46:05 crc kubenswrapper[4933]: I0122 05:46:05.655545 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:05 crc kubenswrapper[4933]: I0122 05:46:05.655585 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:05 crc kubenswrapper[4933]: I0122 05:46:05.655603 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:05 crc kubenswrapper[4933]: I0122 05:46:05.655635 4933 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 22 05:46:05 crc kubenswrapper[4933]: I0122 05:46:05.673011 4933 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Jan 22 05:46:05 crc kubenswrapper[4933]: I0122 05:46:05.673090 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Jan 22 05:46:05 crc kubenswrapper[4933]: I0122 05:46:05.687635 4933 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Jan 22 05:46:05 crc kubenswrapper[4933]: [+]log ok Jan 22 05:46:05 crc kubenswrapper[4933]: [+]etcd ok Jan 22 05:46:05 crc kubenswrapper[4933]: [+]poststarthook/start-apiserver-admission-initializer ok Jan 22 05:46:05 crc kubenswrapper[4933]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Jan 22 05:46:05 crc kubenswrapper[4933]: [+]poststarthook/openshift.io-api-request-count-filter ok Jan 22 05:46:05 crc kubenswrapper[4933]: [+]poststarthook/openshift.io-startkubeinformers ok Jan 22 05:46:05 crc kubenswrapper[4933]: [+]poststarthook/openshift.io-openshift-apiserver-reachable ok Jan 22 05:46:05 crc kubenswrapper[4933]: [+]poststarthook/openshift.io-oauth-apiserver-reachable ok Jan 22 05:46:05 crc kubenswrapper[4933]: [+]poststarthook/generic-apiserver-start-informers ok Jan 22 05:46:05 crc kubenswrapper[4933]: [+]poststarthook/priority-and-fairness-config-consumer ok Jan 22 05:46:05 crc kubenswrapper[4933]: [+]poststarthook/priority-and-fairness-filter ok Jan 22 05:46:05 crc kubenswrapper[4933]: [+]poststarthook/storage-object-count-tracker-hook ok Jan 22 05:46:05 crc kubenswrapper[4933]: [+]poststarthook/start-apiextensions-informers ok Jan 22 05:46:05 crc kubenswrapper[4933]: [-]poststarthook/start-apiextensions-controllers failed: reason withheld Jan 22 05:46:05 crc kubenswrapper[4933]: [-]poststarthook/crd-informer-synced failed: reason withheld Jan 22 05:46:05 crc kubenswrapper[4933]: [+]poststarthook/start-system-namespaces-controller ok Jan 22 05:46:05 crc kubenswrapper[4933]: [+]poststarthook/start-cluster-authentication-info-controller ok Jan 22 05:46:05 crc kubenswrapper[4933]: [+]poststarthook/start-kube-apiserver-identity-lease-controller ok Jan 22 05:46:05 crc kubenswrapper[4933]: [+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector 
ok Jan 22 05:46:05 crc kubenswrapper[4933]: [+]poststarthook/start-legacy-token-tracking-controller ok Jan 22 05:46:05 crc kubenswrapper[4933]: [-]poststarthook/start-service-ip-repair-controllers failed: reason withheld Jan 22 05:46:05 crc kubenswrapper[4933]: [-]poststarthook/rbac/bootstrap-roles failed: reason withheld Jan 22 05:46:05 crc kubenswrapper[4933]: [-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld Jan 22 05:46:05 crc kubenswrapper[4933]: [-]poststarthook/priority-and-fairness-config-producer failed: reason withheld Jan 22 05:46:05 crc kubenswrapper[4933]: [-]poststarthook/bootstrap-controller failed: reason withheld Jan 22 05:46:05 crc kubenswrapper[4933]: [+]poststarthook/aggregator-reload-proxy-client-cert ok Jan 22 05:46:05 crc kubenswrapper[4933]: [+]poststarthook/start-kube-aggregator-informers ok Jan 22 05:46:05 crc kubenswrapper[4933]: [+]poststarthook/apiservice-status-local-available-controller ok Jan 22 05:46:05 crc kubenswrapper[4933]: [+]poststarthook/apiservice-status-remote-available-controller ok Jan 22 05:46:05 crc kubenswrapper[4933]: [-]poststarthook/apiservice-registration-controller failed: reason withheld Jan 22 05:46:05 crc kubenswrapper[4933]: [+]poststarthook/apiservice-wait-for-first-sync ok Jan 22 05:46:05 crc kubenswrapper[4933]: [-]poststarthook/apiservice-discovery-controller failed: reason withheld Jan 22 05:46:05 crc kubenswrapper[4933]: [+]poststarthook/kube-apiserver-autoregistration ok Jan 22 05:46:05 crc kubenswrapper[4933]: [+]autoregister-completion ok Jan 22 05:46:05 crc kubenswrapper[4933]: [+]poststarthook/apiservice-openapi-controller ok Jan 22 05:46:05 crc kubenswrapper[4933]: [+]poststarthook/apiservice-openapiv3-controller ok Jan 22 05:46:05 crc kubenswrapper[4933]: livez check failed Jan 22 05:46:05 crc kubenswrapper[4933]: I0122 05:46:05.687701 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 22 05:46:06 crc kubenswrapper[4933]: I0122 05:46:06.436064 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-24 21:58:57.901270265 +0000 UTC Jan 22 05:46:07 crc kubenswrapper[4933]: I0122 05:46:07.068305 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 05:46:07 crc kubenswrapper[4933]: I0122 05:46:07.068474 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:46:07 crc kubenswrapper[4933]: I0122 05:46:07.069896 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:07 crc kubenswrapper[4933]: I0122 05:46:07.069953 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:07 crc kubenswrapper[4933]: I0122 05:46:07.069975 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:07 crc kubenswrapper[4933]: I0122 05:46:07.236935 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Jan 22 05:46:07 crc kubenswrapper[4933]: I0122 05:46:07.237289 4933 kubelet_node_status.go:401] "Setting node annotation to enable 
volume controller attach/detach" Jan 22 05:46:07 crc kubenswrapper[4933]: I0122 05:46:07.238997 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:07 crc kubenswrapper[4933]: I0122 05:46:07.239062 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:07 crc kubenswrapper[4933]: I0122 05:46:07.239118 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:07 crc kubenswrapper[4933]: I0122 05:46:07.271780 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Jan 22 05:46:07 crc kubenswrapper[4933]: I0122 05:46:07.436507 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-21 06:07:25.558960858 +0000 UTC Jan 22 05:46:07 crc kubenswrapper[4933]: I0122 05:46:07.570935 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:46:07 crc kubenswrapper[4933]: I0122 05:46:07.572024 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:07 crc kubenswrapper[4933]: I0122 05:46:07.572056 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:07 crc kubenswrapper[4933]: I0122 05:46:07.572064 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:07 crc kubenswrapper[4933]: I0122 05:46:07.592584 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Jan 22 05:46:08 crc kubenswrapper[4933]: I0122 05:46:08.390563 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:46:08 crc kubenswrapper[4933]: I0122 05:46:08.390908 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:46:08 crc kubenswrapper[4933]: I0122 05:46:08.392658 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:08 crc kubenswrapper[4933]: I0122 05:46:08.392754 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:08 crc kubenswrapper[4933]: I0122 05:46:08.392783 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:08 crc kubenswrapper[4933]: I0122 05:46:08.395194 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:46:08 crc kubenswrapper[4933]: I0122 05:46:08.437634 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 16:11:00.2385572 +0000 UTC Jan 22 05:46:08 crc kubenswrapper[4933]: I0122 05:46:08.459219 4933 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 22 05:46:08 crc kubenswrapper[4933]: I0122 05:46:08.477185 4933 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Jan 22 05:46:08 crc kubenswrapper[4933]: I0122 05:46:08.572841 4933 
prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 22 05:46:08 crc kubenswrapper[4933]: I0122 05:46:08.572910 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:46:08 crc kubenswrapper[4933]: I0122 05:46:08.572956 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:46:08 crc kubenswrapper[4933]: I0122 05:46:08.574458 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:08 crc kubenswrapper[4933]: I0122 05:46:08.574517 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:08 crc kubenswrapper[4933]: I0122 05:46:08.574536 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:08 crc kubenswrapper[4933]: I0122 05:46:08.574744 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:08 crc kubenswrapper[4933]: I0122 05:46:08.574785 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:08 crc kubenswrapper[4933]: I0122 05:46:08.574796 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:09 crc kubenswrapper[4933]: I0122 05:46:09.438377 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-11 17:05:17.854411758 +0000 UTC Jan 22 05:46:09 crc kubenswrapper[4933]: I0122 05:46:09.896605 4933 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 22 05:46:09 crc kubenswrapper[4933]: I0122 05:46:09.896720 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 22 05:46:10 crc kubenswrapper[4933]: I0122 05:46:10.438917 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 22:14:44.208252142 +0000 UTC Jan 22 05:46:10 crc kubenswrapper[4933]: I0122 05:46:10.661405 4933 trace.go:236] Trace[1040930271]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (22-Jan-2026 05:45:55.705) (total time: 14956ms): Jan 22 05:46:10 crc kubenswrapper[4933]: Trace[1040930271]: ---"Objects listed" error: 14955ms (05:46:10.661) Jan 22 05:46:10 crc kubenswrapper[4933]: Trace[1040930271]: [14.95600265s] [14.95600265s] END Jan 22 05:46:10 crc kubenswrapper[4933]: I0122 05:46:10.661456 4933 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Jan 22 05:46:10 crc kubenswrapper[4933]: I0122 05:46:10.662780 4933 trace.go:236] Trace[1354592339]: "Reflector ListAndWatch" 
name:k8s.io/client-go/informers/factory.go:160 (22-Jan-2026 05:45:56.841) (total time: 13820ms): Jan 22 05:46:10 crc kubenswrapper[4933]: Trace[1354592339]: ---"Objects listed" error: 13820ms (05:46:10.662) Jan 22 05:46:10 crc kubenswrapper[4933]: Trace[1354592339]: [13.82084716s] [13.82084716s] END Jan 22 05:46:10 crc kubenswrapper[4933]: I0122 05:46:10.662829 4933 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Jan 22 05:46:10 crc kubenswrapper[4933]: I0122 05:46:10.665539 4933 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Jan 22 05:46:10 crc kubenswrapper[4933]: I0122 05:46:10.666559 4933 trace.go:236] Trace[1991426121]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (22-Jan-2026 05:45:56.127) (total time: 14539ms): Jan 22 05:46:10 crc kubenswrapper[4933]: Trace[1991426121]: ---"Objects listed" error: 14538ms (05:46:10.665) Jan 22 05:46:10 crc kubenswrapper[4933]: Trace[1991426121]: [14.539256012s] [14.539256012s] END Jan 22 05:46:10 crc kubenswrapper[4933]: I0122 05:46:10.666875 4933 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Jan 22 05:46:10 crc kubenswrapper[4933]: E0122 05:46:10.668744 4933 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Jan 22 05:46:10 crc kubenswrapper[4933]: I0122 05:46:10.672047 4933 trace.go:236] Trace[71943888]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (22-Jan-2026 05:45:56.171) (total time: 14500ms): Jan 22 05:46:10 crc kubenswrapper[4933]: Trace[71943888]: ---"Objects listed" error: 14500ms (05:46:10.671) Jan 22 05:46:10 crc kubenswrapper[4933]: Trace[71943888]: [14.500736538s] [14.500736538s] END Jan 22 05:46:10 crc kubenswrapper[4933]: I0122 05:46:10.672117 4933 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Jan 22 05:46:10 crc kubenswrapper[4933]: I0122 05:46:10.715802 4933 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:45104->192.168.126.11:17697: read: connection reset by peer" start-of-body= Jan 22 05:46:10 crc kubenswrapper[4933]: I0122 05:46:10.715855 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:45104->192.168.126.11:17697: read: connection reset by peer" Jan 22 05:46:10 crc kubenswrapper[4933]: I0122 05:46:10.716147 4933 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Jan 22 05:46:10 crc kubenswrapper[4933]: I0122 05:46:10.716169 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 
192.168.126.11:17697: connect: connection refused" Jan 22 05:46:10 crc kubenswrapper[4933]: I0122 05:46:10.850924 4933 csr.go:261] certificate signing request csr-d67xv is approved, waiting to be issued Jan 22 05:46:10 crc kubenswrapper[4933]: I0122 05:46:10.910377 4933 csr.go:257] certificate signing request csr-d67xv is issued Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.159419 4933 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.159483 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.427917 4933 apiserver.go:52] "Watching apiserver" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.430424 4933 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.430614 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb"] Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.430884 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.430998 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.431046 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.431101 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:46:11 crc kubenswrapper[4933]: E0122 05:46:11.431251 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.431388 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 22 05:46:11 crc kubenswrapper[4933]: E0122 05:46:11.431390 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.431911 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:46:11 crc kubenswrapper[4933]: E0122 05:46:11.431976 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.435063 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.435427 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.435638 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.435636 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.438565 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.438569 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.438611 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.438883 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.439012 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 15:12:29.415559997 +0000 UTC Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.440530 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.459179 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.479551 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.490437 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.499308 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.507439 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.520161 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.528610 4933 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.540269 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.570743 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.570809 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.570840 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.570868 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.570889 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.570912 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.570941 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.570968 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.570998 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.571021 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.571041 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.571061 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.571103 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.571168 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.571191 4933 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.571243 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.571273 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.571297 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.571319 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.571340 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.571361 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.571382 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.571403 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.571423 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.571446 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.571507 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.571528 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.571544 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.571559 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.571574 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.571588 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.571631 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.571651 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.571671 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.571223 
4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.571289 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.571406 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.571761 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.571456 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.571678 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.571796 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.571868 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.571910 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.572028 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.572111 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.571697 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.572144 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.572194 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.572211 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.572234 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.572237 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.572268 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.572277 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.572287 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.572308 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.572348 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.572415 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.572429 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.572450 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.572674 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.572696 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.572728 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.572735 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.572839 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.572905 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.572925 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.572924 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.573046 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.573301 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.573309 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.573415 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.573486 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.573560 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.574203 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.574251 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.574290 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.574325 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.574363 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.574394 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.575249 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.575305 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.575342 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.575376 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.575412 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.575447 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.575490 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.575523 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.575590 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.575633 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.575663 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.573586 4933 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.573638 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.573827 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.573905 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.574136 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.574245 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.574256 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.574442 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.575757 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.575797 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.575838 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.575872 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.575907 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.575943 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.575976 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.576009 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.574637 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.574797 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.574967 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.575416 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.575688 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.577105 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.575900 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.575944 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.576208 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.576400 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.576516 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.576602 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.576725 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.576997 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.577012 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.576805 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.577308 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.577420 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.577456 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.577489 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.577523 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.577556 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.577590 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.577623 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.577657 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") 
pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.577691 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.577724 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.577759 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.577799 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.577831 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.577867 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.577901 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.577936 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.577972 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.578008 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: 
\"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.578042 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.578080 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.578149 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.578192 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.578243 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.578288 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.578325 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.578363 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.578405 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.578437 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.578473 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.578508 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.578551 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.578585 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.578625 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.578669 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.578710 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.578744 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.578779 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.578810 4933 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.578842 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.578874 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.578906 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.578941 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.578974 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.579007 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.579041 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.579079 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.579147 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.579181 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.579216 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.579254 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.579295 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.579331 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.579365 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.579405 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.579439 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.579476 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.579511 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.579545 4933 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.579579 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.579619 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.579653 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.579686 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.579717 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.579750 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.579784 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.579826 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.579860 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.579892 4933 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.579926 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.579964 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.579998 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.580030 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.580186 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.580229 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.580265 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.580301 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.580339 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: 
\"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.580374 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.580410 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.580457 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.580496 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.580535 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.580569 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.580600 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.580634 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.580670 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.580704 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod 
\"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.580739 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.580771 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.580806 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.580841 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.580874 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.580908 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.580942 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.580975 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.581006 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.581040 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod 
\"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.581166 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.581212 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.581270 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.581308 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.581339 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.581376 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.581411 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.581445 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.581479 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.581514 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: 
\"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.581548 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.581582 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.581615 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.581648 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.581681 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.581713 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.581745 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.581777 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.581814 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.581848 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.581884 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.581918 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.581959 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.581994 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.582033 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.582067 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.582139 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.582173 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.582206 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.582240 4933 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.582306 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.582355 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.582401 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.582438 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.582479 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.582514 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.582551 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.582594 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod 
\"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.582635 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.582671 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.582709 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.582743 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.582779 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.582812 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.582913 4933 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.582940 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.582961 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.582980 4933 
reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583006 4933 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583023 4933 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583040 4933 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583058 4933 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583081 4933 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583123 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583141 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583158 4933 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583177 4933 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583197 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583217 4933 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583235 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583254 4933 reconciler_common.go:293] "Volume detached for volume 
\"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583305 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583319 4933 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583334 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583348 4933 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583361 4933 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583374 4933 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583387 4933 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583400 4933 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583414 4933 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583427 4933 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583442 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583457 4933 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583470 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: 
\"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583484 4933 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583497 4933 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583510 4933 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583524 4933 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583537 4933 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583551 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583565 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583579 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583592 4933 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583605 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583619 4933 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583632 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583645 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" 
(UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583657 4933 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583671 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583684 4933 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583698 4933 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583711 4933 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583725 4933 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583741 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583755 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583770 4933 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583788 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583806 4933 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583824 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.591033 4933 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.591836 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.595298 4933 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.581337 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.581492 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.582269 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583256 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583412 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583533 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.583886 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.584022 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.584029 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.584053 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.584157 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.584306 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.584497 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.584843 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.584903 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.584999 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.585178 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.585199 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.585396 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.585552 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.585651 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.585708 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.585840 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.585896 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.586318 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.586499 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.586668 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.586740 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.586923 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.587057 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.588502 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.588974 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.589218 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.589672 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.589838 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). 
InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.590169 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.590371 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.590461 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.590522 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.590534 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.590741 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.590877 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.603146 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.590935 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.591135 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.591202 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.591264 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: E0122 05:46:11.591291 4933 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.603265 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.603480 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.604139 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.604200 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.604596 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.604732 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.604999 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.605181 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.606869 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.607359 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.607713 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.608161 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.608470 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.609821 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.610108 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.610196 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.610239 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.591591 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.592206 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.592695 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.592763 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.593024 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.593233 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.593241 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.593423 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.593603 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.610390 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.595598 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.596148 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.610417 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.596252 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.596331 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.596709 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.596897 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.596904 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.597079 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.597182 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.597481 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.597604 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.597654 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). 
InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.597755 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.597837 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.610541 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.593354 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.598211 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.598264 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.598311 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.598499 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.598643 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.598917 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.599118 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: E0122 05:46:11.610599 4933 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.599572 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.599845 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.600010 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.600137 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.600405 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.600578 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: E0122 05:46:11.600752 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:46:12.100671211 +0000 UTC m=+19.937796664 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.600763 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.600977 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.601490 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.601587 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.601683 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.601830 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.601947 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.602031 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.602215 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.602282 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.602339 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.602339 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). 
InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.602566 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.602722 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.602762 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.602786 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.602887 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.602951 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: E0122 05:46:11.610977 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 05:46:12.110935393 +0000 UTC m=+19.948060746 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 22 05:46:11 crc kubenswrapper[4933]: E0122 05:46:11.611129 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 05:46:12.111120387 +0000 UTC m=+19.948245740 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.611398 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.611523 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.611582 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.611639 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.612055 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.612491 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.612601 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.612774 4933 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28" exitCode=255 Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.612801 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.612942 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28"} Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.613145 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.613166 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: E0122 05:46:11.613199 4933 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 22 05:46:11 crc kubenswrapper[4933]: E0122 05:46:11.613244 4933 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 22 05:46:11 crc kubenswrapper[4933]: E0122 05:46:11.613260 4933 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:46:11 crc kubenswrapper[4933]: E0122 05:46:11.613355 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-22 05:46:12.113324175 +0000 UTC m=+19.950449728 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.613500 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.613504 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.613752 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.614944 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.616264 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.618307 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.620795 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 22 05:46:11 crc kubenswrapper[4933]: E0122 05:46:11.625548 4933 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 22 05:46:11 crc kubenswrapper[4933]: E0122 05:46:11.625602 4933 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 22 05:46:11 crc kubenswrapper[4933]: E0122 05:46:11.625632 4933 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:46:11 crc kubenswrapper[4933]: E0122 05:46:11.625749 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-22 05:46:12.125713494 +0000 UTC m=+19.962838847 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.626855 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.631227 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.632344 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.633906 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.636060 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.637446 4933 scope.go:117] "RemoveContainer" containerID="73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.637706 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.638145 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.650658 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.652029 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.656404 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.664554 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.682720 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.686471 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.686528 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.686636 4933 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") 
on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.686652 4933 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.686664 4933 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.686676 4933 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.686687 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.686699 4933 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.686710 4933 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.686721 4933 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.686732 4933 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.686743 4933 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.686754 4933 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.686774 4933 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.686788 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.686801 4933 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 
05:46:11.686812 4933 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.686824 4933 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.686836 4933 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.686847 4933 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.686865 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.686876 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.686887 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.686899 4933 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.686910 4933 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.686921 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.686931 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.686942 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.686953 4933 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc 
kubenswrapper[4933]: I0122 05:46:11.686964 4933 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.686976 4933 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.686986 4933 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.686996 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687007 4933 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687018 4933 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687029 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687041 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687030 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687054 4933 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687171 4933 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687186 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687199 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: 
\"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687211 4933 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687222 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687223 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687235 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687247 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687257 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687269 4933 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687278 4933 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687288 4933 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687298 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687309 4933 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687328 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 
05:46:11.687356 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687372 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687390 4933 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687404 4933 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687422 4933 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687433 4933 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687442 4933 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687453 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687462 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687473 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687483 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687494 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687506 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687516 4933 reconciler_common.go:293] 
"Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687526 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687535 4933 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687545 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687554 4933 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687564 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687574 4933 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687583 4933 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687593 4933 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687602 4933 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687612 4933 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687621 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687630 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687640 4933 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: 
\"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687649 4933 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687659 4933 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687671 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687682 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687693 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687706 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687715 4933 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687724 4933 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687735 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687745 4933 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687755 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687768 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687779 4933 reconciler_common.go:293] 
"Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687790 4933 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687800 4933 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687811 4933 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687821 4933 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687831 4933 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687853 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687866 4933 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687875 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687884 4933 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687893 4933 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687903 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687912 4933 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687921 4933 reconciler_common.go:293] "Volume 
detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687930 4933 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687939 4933 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687948 4933 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687958 4933 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687969 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687980 4933 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.687989 4933 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.688000 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.688010 4933 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.688020 4933 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.688030 4933 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.688040 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.688049 4933 reconciler_common.go:293] 
"Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.688058 4933 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.688072 4933 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.688098 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.688108 4933 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.688119 4933 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.688130 4933 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.688139 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.688154 4933 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.688164 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.688175 4933 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.688183 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.688193 4933 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.688202 4933 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.688213 4933 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.688222 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.688231 4933 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.688240 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.688250 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.688259 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.688268 4933 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.688277 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.688289 4933 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.688309 4933 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.695359 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.704429 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.719769 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-zfnsx"] Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.720392 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.721266 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-q8l78"] Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.721822 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-nvpgt"] Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.721940 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-jr6rw"] Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.722138 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.722378 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-q8l78" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.722408 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-nvpgt" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.722924 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.723125 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.723358 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.725045 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.725747 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.726187 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 22 05:46:11 crc kubenswrapper[4933]: W0122 05:46:11.726818 4933 reflector.go:561] object-"openshift-multus"/"multus-daemon-config": failed to list *v1.ConfigMap: configmaps "multus-daemon-config" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-multus": no relationship found between node 'crc' and this object Jan 22 05:46:11 crc kubenswrapper[4933]: E0122 05:46:11.726849 4933 reflector.go:158] "Unhandled Error" err="object-\"openshift-multus\"/\"multus-daemon-config\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"multus-daemon-config\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-multus\": no relationship found between node 'crc' and this object" logger="UnhandledError" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.726980 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.727169 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 22 05:46:11 crc kubenswrapper[4933]: W0122 05:46:11.727319 4933 reflector.go:561] object-"openshift-multus"/"default-dockercfg-2q5b6": failed to list *v1.Secret: secrets "default-dockercfg-2q5b6" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-multus": no relationship found between node 'crc' and this object Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.727360 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.727471 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 22 05:46:11 crc kubenswrapper[4933]: E0122 05:46:11.727347 4933 reflector.go:158] "Unhandled Error" err="object-\"openshift-multus\"/\"default-dockercfg-2q5b6\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"default-dockercfg-2q5b6\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-multus\": no relationship found 
between node 'crc' and this object" logger="UnhandledError" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.727488 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.727667 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 22 05:46:11 crc kubenswrapper[4933]: W0122 05:46:11.728467 4933 reflector.go:561] object-"openshift-dns"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-dns": no relationship found between node 'crc' and this object Jan 22 05:46:11 crc kubenswrapper[4933]: E0122 05:46:11.728518 4933 reflector.go:158] "Unhandled Error" err="object-\"openshift-dns\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-dns\": no relationship found between node 'crc' and this object" logger="UnhandledError" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.735889 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.747526 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.757209 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.764523 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.766055 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 22 05:46:11 crc kubenswrapper[4933]: W0122 05:46:11.777991 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-1fc76e1b57914e2fb04e7c51bcf608859b1b0977eeafb88bc20d59c6a330a443 WatchSource:0}: Error finding container 1fc76e1b57914e2fb04e7c51bcf608859b1b0977eeafb88bc20d59c6a330a443: Status 404 returned error can't find the container with id 1fc76e1b57914e2fb04e7c51bcf608859b1b0977eeafb88bc20d59c6a330a443 Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.789444 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/70f2db1d-40cb-4864-917b-3b99f69cdafb-proxy-tls\") pod \"machine-config-daemon-zfnsx\" (UID: \"70f2db1d-40cb-4864-917b-3b99f69cdafb\") " pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.789491 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-hostroot\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.789515 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/83dfdde7-cd49-49e0-85a0-0165d464b2c7-cni-binary-copy\") pod \"multus-additional-cni-plugins-q8l78\" (UID: \"83dfdde7-cd49-49e0-85a0-0165d464b2c7\") " pod="openshift-multus/multus-additional-cni-plugins-q8l78" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.789541 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-multus-socket-dir-parent\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.789564 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-host-var-lib-cni-bin\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " 
pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.789587 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-etc-kubernetes\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.789609 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/70f2db1d-40cb-4864-917b-3b99f69cdafb-mcd-auth-proxy-config\") pod \"machine-config-daemon-zfnsx\" (UID: \"70f2db1d-40cb-4864-917b-3b99f69cdafb\") " pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.789629 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/126473c5-96ff-4ca9-83c0-7548d7e219c0-hosts-file\") pod \"node-resolver-nvpgt\" (UID: \"126473c5-96ff-4ca9-83c0-7548d7e219c0\") " pod="openshift-dns/node-resolver-nvpgt" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.789652 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/83dfdde7-cd49-49e0-85a0-0165d464b2c7-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-q8l78\" (UID: \"83dfdde7-cd49-49e0-85a0-0165d464b2c7\") " pod="openshift-multus/multus-additional-cni-plugins-q8l78" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.789673 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/83dfdde7-cd49-49e0-85a0-0165d464b2c7-cnibin\") pod \"multus-additional-cni-plugins-q8l78\" (UID: \"83dfdde7-cd49-49e0-85a0-0165d464b2c7\") " pod="openshift-multus/multus-additional-cni-plugins-q8l78" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.789694 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-system-cni-dir\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.789731 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/83dfdde7-cd49-49e0-85a0-0165d464b2c7-os-release\") pod \"multus-additional-cni-plugins-q8l78\" (UID: \"83dfdde7-cd49-49e0-85a0-0165d464b2c7\") " pod="openshift-multus/multus-additional-cni-plugins-q8l78" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.789753 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-multus-cni-dir\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.789794 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: 
\"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-host-run-netns\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.789827 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v4gxn\" (UniqueName: \"kubernetes.io/projected/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-kube-api-access-v4gxn\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.789848 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dcfhz\" (UniqueName: \"kubernetes.io/projected/70f2db1d-40cb-4864-917b-3b99f69cdafb-kube-api-access-dcfhz\") pod \"machine-config-daemon-zfnsx\" (UID: \"70f2db1d-40cb-4864-917b-3b99f69cdafb\") " pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.789888 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/83dfdde7-cd49-49e0-85a0-0165d464b2c7-system-cni-dir\") pod \"multus-additional-cni-plugins-q8l78\" (UID: \"83dfdde7-cd49-49e0-85a0-0165d464b2c7\") " pod="openshift-multus/multus-additional-cni-plugins-q8l78" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.789915 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/83dfdde7-cd49-49e0-85a0-0165d464b2c7-tuning-conf-dir\") pod \"multus-additional-cni-plugins-q8l78\" (UID: \"83dfdde7-cd49-49e0-85a0-0165d464b2c7\") " pod="openshift-multus/multus-additional-cni-plugins-q8l78" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.789938 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-cnibin\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.789964 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-os-release\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.789990 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-cni-binary-copy\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.790014 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-host-var-lib-cni-multus\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.790034 4933 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-host-var-lib-kubelet\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.790057 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-multus-conf-dir\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.790080 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/70f2db1d-40cb-4864-917b-3b99f69cdafb-rootfs\") pod \"machine-config-daemon-zfnsx\" (UID: \"70f2db1d-40cb-4864-917b-3b99f69cdafb\") " pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.790128 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kqn9c\" (UniqueName: \"kubernetes.io/projected/83dfdde7-cd49-49e0-85a0-0165d464b2c7-kube-api-access-kqn9c\") pod \"multus-additional-cni-plugins-q8l78\" (UID: \"83dfdde7-cd49-49e0-85a0-0165d464b2c7\") " pod="openshift-multus/multus-additional-cni-plugins-q8l78" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.790153 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6xgn\" (UniqueName: \"kubernetes.io/projected/126473c5-96ff-4ca9-83c0-7548d7e219c0-kube-api-access-w6xgn\") pod \"node-resolver-nvpgt\" (UID: \"126473c5-96ff-4ca9-83c0-7548d7e219c0\") " pod="openshift-dns/node-resolver-nvpgt" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.790213 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-multus-daemon-config\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.790435 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-host-run-multus-certs\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.790468 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-host-run-k8s-cni-cncf-io\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.790486 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.802611 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22
T05:46:10Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 05:46:04.652334 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:46:04.654188 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-11537279/tls.crt::/tmp/serving-cert-11537279/tls.key\\\\\\\"\\\\nI0122 05:46:10.676992 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:46:10.680821 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:46:10.680851 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:46:10.680897 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:46:10.680911 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:46:10.690531 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 05:46:10.690586 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690615 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 05:46:10.690623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 05:46:10.690632 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 05:46:10.690639 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 05:46:10.691151 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:46:10.696387 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.814589 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.827373 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.842839 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.854628 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f2db1d-40cb-4864-917b-3b99f69cdafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zfnsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.875587 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f2db1d-40cb-4864-917b-3b99f69cdafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zfnsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.891113 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-host-var-lib-cni-bin\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.891156 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-etc-kubernetes\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.891180 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/70f2db1d-40cb-4864-917b-3b99f69cdafb-mcd-auth-proxy-config\") pod \"machine-config-daemon-zfnsx\" (UID: \"70f2db1d-40cb-4864-917b-3b99f69cdafb\") " pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.891205 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/126473c5-96ff-4ca9-83c0-7548d7e219c0-hosts-file\") pod \"node-resolver-nvpgt\" (UID: 
\"126473c5-96ff-4ca9-83c0-7548d7e219c0\") " pod="openshift-dns/node-resolver-nvpgt" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.891257 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/83dfdde7-cd49-49e0-85a0-0165d464b2c7-cnibin\") pod \"multus-additional-cni-plugins-q8l78\" (UID: \"83dfdde7-cd49-49e0-85a0-0165d464b2c7\") " pod="openshift-multus/multus-additional-cni-plugins-q8l78" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.891257 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-host-var-lib-cni-bin\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.891280 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/83dfdde7-cd49-49e0-85a0-0165d464b2c7-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-q8l78\" (UID: \"83dfdde7-cd49-49e0-85a0-0165d464b2c7\") " pod="openshift-multus/multus-additional-cni-plugins-q8l78" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.891303 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-system-cni-dir\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.891326 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-host-run-netns\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.891319 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-etc-kubernetes\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.891363 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/126473c5-96ff-4ca9-83c0-7548d7e219c0-hosts-file\") pod \"node-resolver-nvpgt\" (UID: \"126473c5-96ff-4ca9-83c0-7548d7e219c0\") " pod="openshift-dns/node-resolver-nvpgt" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.891349 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v4gxn\" (UniqueName: \"kubernetes.io/projected/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-kube-api-access-v4gxn\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.891523 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-host-run-netns\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.891605 4933 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-dcfhz\" (UniqueName: \"kubernetes.io/projected/70f2db1d-40cb-4864-917b-3b99f69cdafb-kube-api-access-dcfhz\") pod \"machine-config-daemon-zfnsx\" (UID: \"70f2db1d-40cb-4864-917b-3b99f69cdafb\") " pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.891673 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/83dfdde7-cd49-49e0-85a0-0165d464b2c7-os-release\") pod \"multus-additional-cni-plugins-q8l78\" (UID: \"83dfdde7-cd49-49e0-85a0-0165d464b2c7\") " pod="openshift-multus/multus-additional-cni-plugins-q8l78" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.891694 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-multus-cni-dir\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.891719 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/83dfdde7-cd49-49e0-85a0-0165d464b2c7-system-cni-dir\") pod \"multus-additional-cni-plugins-q8l78\" (UID: \"83dfdde7-cd49-49e0-85a0-0165d464b2c7\") " pod="openshift-multus/multus-additional-cni-plugins-q8l78" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.891741 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/83dfdde7-cd49-49e0-85a0-0165d464b2c7-tuning-conf-dir\") pod \"multus-additional-cni-plugins-q8l78\" (UID: \"83dfdde7-cd49-49e0-85a0-0165d464b2c7\") " pod="openshift-multus/multus-additional-cni-plugins-q8l78" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.891760 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-cnibin\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.891783 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-host-var-lib-kubelet\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.891806 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-multus-conf-dir\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.891824 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/70f2db1d-40cb-4864-917b-3b99f69cdafb-rootfs\") pod \"machine-config-daemon-zfnsx\" (UID: \"70f2db1d-40cb-4864-917b-3b99f69cdafb\") " pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.891862 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-kqn9c\" (UniqueName: \"kubernetes.io/projected/83dfdde7-cd49-49e0-85a0-0165d464b2c7-kube-api-access-kqn9c\") pod \"multus-additional-cni-plugins-q8l78\" (UID: \"83dfdde7-cd49-49e0-85a0-0165d464b2c7\") " pod="openshift-multus/multus-additional-cni-plugins-q8l78" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.891887 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-os-release\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.891906 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-cni-binary-copy\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.891921 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-host-var-lib-cni-multus\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.891940 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6xgn\" (UniqueName: \"kubernetes.io/projected/126473c5-96ff-4ca9-83c0-7548d7e219c0-kube-api-access-w6xgn\") pod \"node-resolver-nvpgt\" (UID: \"126473c5-96ff-4ca9-83c0-7548d7e219c0\") " pod="openshift-dns/node-resolver-nvpgt" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.891956 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-multus-daemon-config\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.891972 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-host-run-multus-certs\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.891980 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/70f2db1d-40cb-4864-917b-3b99f69cdafb-mcd-auth-proxy-config\") pod \"machine-config-daemon-zfnsx\" (UID: \"70f2db1d-40cb-4864-917b-3b99f69cdafb\") " pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.892002 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-host-run-k8s-cni-cncf-io\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.892020 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: 
\"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-host-var-lib-kubelet\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.892039 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/70f2db1d-40cb-4864-917b-3b99f69cdafb-proxy-tls\") pod \"machine-config-daemon-zfnsx\" (UID: \"70f2db1d-40cb-4864-917b-3b99f69cdafb\") " pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.892071 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-hostroot\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.892118 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/83dfdde7-cd49-49e0-85a0-0165d464b2c7-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-q8l78\" (UID: \"83dfdde7-cd49-49e0-85a0-0165d464b2c7\") " pod="openshift-multus/multus-additional-cni-plugins-q8l78" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.892125 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/83dfdde7-cd49-49e0-85a0-0165d464b2c7-cni-binary-copy\") pod \"multus-additional-cni-plugins-q8l78\" (UID: \"83dfdde7-cd49-49e0-85a0-0165d464b2c7\") " pod="openshift-multus/multus-additional-cni-plugins-q8l78" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.892182 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-multus-socket-dir-parent\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.892270 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jr6rw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v4gxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jr6rw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.892332 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-multus-socket-dir-parent\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.892377 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-multus-cni-dir\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.892362 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: 
\"kubernetes.io/host-path/70f2db1d-40cb-4864-917b-3b99f69cdafb-rootfs\") pod \"machine-config-daemon-zfnsx\" (UID: \"70f2db1d-40cb-4864-917b-3b99f69cdafb\") " pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.892406 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/83dfdde7-cd49-49e0-85a0-0165d464b2c7-cnibin\") pod \"multus-additional-cni-plugins-q8l78\" (UID: \"83dfdde7-cd49-49e0-85a0-0165d464b2c7\") " pod="openshift-multus/multus-additional-cni-plugins-q8l78" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.892417 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/83dfdde7-cd49-49e0-85a0-0165d464b2c7-system-cni-dir\") pod \"multus-additional-cni-plugins-q8l78\" (UID: \"83dfdde7-cd49-49e0-85a0-0165d464b2c7\") " pod="openshift-multus/multus-additional-cni-plugins-q8l78" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.892479 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-system-cni-dir\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.892450 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-multus-conf-dir\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.892533 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-host-var-lib-cni-multus\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.892567 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-host-run-multus-certs\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.892601 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/83dfdde7-cd49-49e0-85a0-0165d464b2c7-os-release\") pod \"multus-additional-cni-plugins-q8l78\" (UID: \"83dfdde7-cd49-49e0-85a0-0165d464b2c7\") " pod="openshift-multus/multus-additional-cni-plugins-q8l78" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.892631 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-cnibin\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.892683 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/83dfdde7-cd49-49e0-85a0-0165d464b2c7-tuning-conf-dir\") pod \"multus-additional-cni-plugins-q8l78\" (UID: \"83dfdde7-cd49-49e0-85a0-0165d464b2c7\") " 
pod="openshift-multus/multus-additional-cni-plugins-q8l78" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.892719 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-host-run-k8s-cni-cncf-io\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.892747 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-os-release\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.892761 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-hostroot\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.892862 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/83dfdde7-cd49-49e0-85a0-0165d464b2c7-cni-binary-copy\") pod \"multus-additional-cni-plugins-q8l78\" (UID: \"83dfdde7-cd49-49e0-85a0-0165d464b2c7\") " pod="openshift-multus/multus-additional-cni-plugins-q8l78" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.893431 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-cni-binary-copy\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.897339 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/70f2db1d-40cb-4864-917b-3b99f69cdafb-proxy-tls\") pod \"machine-config-daemon-zfnsx\" (UID: \"70f2db1d-40cb-4864-917b-3b99f69cdafb\") " pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.912321 4933 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2027-01-22 05:41:10 +0000 UTC, rotation deadline is 2026-10-31 18:19:28.611583746 +0000 UTC Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.912384 4933 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 6780h33m16.699202333s for next certificate rotation Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.912531 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83dfdde7-cd49-49e0-85a0-0165d464b2c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\
\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q8l78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.915215 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v4gxn\" (UniqueName: \"kubernetes.io/projected/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-kube-api-access-v4gxn\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.915302 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kqn9c\" (UniqueName: \"kubernetes.io/projected/83dfdde7-cd49-49e0-85a0-0165d464b2c7-kube-api-access-kqn9c\") pod \"multus-additional-cni-plugins-q8l78\" (UID: \"83dfdde7-cd49-49e0-85a0-0165d464b2c7\") " pod="openshift-multus/multus-additional-cni-plugins-q8l78" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.917403 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dcfhz\" (UniqueName: \"kubernetes.io/projected/70f2db1d-40cb-4864-917b-3b99f69cdafb-kube-api-access-dcfhz\") pod \"machine-config-daemon-zfnsx\" (UID: \"70f2db1d-40cb-4864-917b-3b99f69cdafb\") " pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.933431 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.957305 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:46:11 crc kubenswrapper[4933]: I0122 05:46:11.987117 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.001900 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22
T05:46:10Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 05:46:04.652334 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:46:04.654188 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-11537279/tls.crt::/tmp/serving-cert-11537279/tls.key\\\\\\\"\\\\nI0122 05:46:10.676992 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:46:10.680821 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:46:10.680851 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:46:10.680897 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:46:10.680911 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:46:10.690531 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 05:46:10.690586 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690615 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 05:46:10.690623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 05:46:10.690632 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 05:46:10.690639 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 05:46:10.691151 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:46:10.696387 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.017527 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.029393 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.038812 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.042231 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.055715 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nvpgt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126473c5-96ff-4ca9-83c0-7548d7e219c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w6xgn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nvpgt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:46:12 crc kubenswrapper[4933]: W0122 05:46:12.058023 4933 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod70f2db1d_40cb_4864_917b_3b99f69cdafb.slice/crio-f8e6c4be9e8d59749385216040abab12d72aea48e0d52e365c7725a6da9d0944 WatchSource:0}: Error finding container f8e6c4be9e8d59749385216040abab12d72aea48e0d52e365c7725a6da9d0944: Status 404 returned error can't find the container with id f8e6c4be9e8d59749385216040abab12d72aea48e0d52e365c7725a6da9d0944 Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.060770 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-q8l78" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.085278 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-z88sj"] Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.088443 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.091495 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.091517 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.093510 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.093515 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.093632 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.094400 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.097292 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.103729 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22
T05:46:10Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 05:46:04.652334 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:46:04.654188 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-11537279/tls.crt::/tmp/serving-cert-11537279/tls.key\\\\\\\"\\\\nI0122 05:46:10.676992 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:46:10.680821 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:46:10.680851 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:46:10.680897 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:46:10.680911 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:46:10.690531 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 05:46:10.690586 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690615 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 05:46:10.690623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 05:46:10.690632 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 05:46:10.690639 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 05:46:10.691151 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:46:10.696387 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.125393 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.146207 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.156431 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.167552 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nvpgt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126473c5-96ff-4ca9-83c0-7548d7e219c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w6xgn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nvpgt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.189282 4933 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a721333-1932-4bb0-b384-c034492e59c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\
\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b1
7b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z88sj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.195074 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.195192 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-run-systemd\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: E0122 05:46:12.195282 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:46:13.1952425 +0000 UTC m=+21.032367853 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.195333 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-run-ovn\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.195379 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-var-lib-openvswitch\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.195400 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6a721333-1932-4bb0-b384-c034492e59c4-ovnkube-config\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.195418 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s7lwp\" (UniqueName: \"kubernetes.io/projected/6a721333-1932-4bb0-b384-c034492e59c4-kube-api-access-s7lwp\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.195435 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-node-log\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.195451 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-cni-netd\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.195469 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-run-netns\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.195508 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-cni-bin\") pod 
\"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.195539 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-run-openvswitch\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.195569 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.195594 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.195616 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-etc-openvswitch\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.195637 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6a721333-1932-4bb0-b384-c034492e59c4-ovn-node-metrics-cert\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.195662 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.195687 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6a721333-1932-4bb0-b384-c034492e59c4-ovnkube-script-lib\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.195709 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-log-socket\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: E0122 05:46:12.195712 4933 projected.go:288] Couldn't get configMap 
openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.195725 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-run-ovn-kubernetes\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: E0122 05:46:12.195733 4933 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.195744 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-kubelet\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.195763 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-systemd-units\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.195781 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.195801 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6a721333-1932-4bb0-b384-c034492e59c4-env-overrides\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.195823 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.195844 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-slash\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: E0122 05:46:12.195749 4933 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object 
"openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:46:12 crc kubenswrapper[4933]: E0122 05:46:12.195806 4933 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 22 05:46:12 crc kubenswrapper[4933]: E0122 05:46:12.195880 4933 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 22 05:46:12 crc kubenswrapper[4933]: E0122 05:46:12.195889 4933 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:46:12 crc kubenswrapper[4933]: E0122 05:46:12.195841 4933 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 22 05:46:12 crc kubenswrapper[4933]: E0122 05:46:12.195927 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-22 05:46:13.195911665 +0000 UTC m=+21.033037018 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:46:12 crc kubenswrapper[4933]: E0122 05:46:12.195954 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 05:46:13.195937046 +0000 UTC m=+21.033062589 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 22 05:46:12 crc kubenswrapper[4933]: E0122 05:46:12.195974 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-22 05:46:13.195966017 +0000 UTC m=+21.033091520 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:46:12 crc kubenswrapper[4933]: E0122 05:46:12.196027 4933 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 22 05:46:12 crc kubenswrapper[4933]: E0122 05:46:12.196055 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 05:46:13.196047618 +0000 UTC m=+21.033172971 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.204675 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f2db1d-40cb-4864-917b-3b99f69cdafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zfnsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.219494 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jr6rw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v4gxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jr6rw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.242994 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83dfdde7-cd49-49e0-85a0-0165d464b2c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\
\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q8l78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.257577 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.272032 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.287931 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.296600 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-slash\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.296717 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-slash\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.296866 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-run-ovn\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.296926 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-run-systemd\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.296948 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-var-lib-openvswitch\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.296966 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6a721333-1932-4bb0-b384-c034492e59c4-ovnkube-config\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.297023 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: 
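Every "Failed to update status" entry above fails for the same reason: the pod admission webhook at https://127.0.0.1:9743/pod presents a serving certificate that expired on 2025-08-24T17:21:41Z while the node clock reads 2026-01-22. The rejection is the ordinary x509 validity-window test. Below is a minimal Go sketch of that same check against a local PEM file; the file name is a hypothetical stand-in for whatever the webhook pod mounts under /etc/webhook-cert/.

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	// Hypothetical path; substitute the actual serving certificate.
	data, err := os.ReadFile("webhook-cert.pem")
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		panic("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}
	now := time.Now().UTC()
	// Mirrors the validity-window check behind the log's
	// "certificate has expired or is not yet valid" message.
	switch {
	case now.Before(cert.NotBefore):
		fmt.Printf("not yet valid: current time %s is before %s\n",
			now.Format(time.RFC3339), cert.NotBefore.Format(time.RFC3339))
	case now.After(cert.NotAfter):
		fmt.Printf("expired: current time %s is after %s\n",
			now.Format(time.RFC3339), cert.NotAfter.Format(time.RFC3339))
	default:
		fmt.Println("within validity window, expires", cert.NotAfter.Format(time.RFC3339))
	}
}

The "current time ... is after ..." wording in the output deliberately matches the phrasing seen in the log entries, which is how Go's crypto/x509 reports a window violation.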
\"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-node-log\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.297045 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-cni-netd\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.297063 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-run-systemd\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.297123 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-node-log\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.297127 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-var-lib-openvswitch\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.297176 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-cni-netd\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.297067 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s7lwp\" (UniqueName: \"kubernetes.io/projected/6a721333-1932-4bb0-b384-c034492e59c4-kube-api-access-s7lwp\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.297320 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-run-netns\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.297349 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-cni-bin\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.297377 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-run-openvswitch\") pod \"ovnkube-node-z88sj\" (UID: 
\"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.297434 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-etc-openvswitch\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.297432 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-run-netns\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.297465 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-cni-bin\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.297464 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6a721333-1932-4bb0-b384-c034492e59c4-ovn-node-metrics-cert\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.297513 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-run-openvswitch\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.297580 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-etc-openvswitch\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.297630 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-log-socket\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.297649 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-log-socket\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.297660 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-run-ovn-kubernetes\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc 
kubenswrapper[4933]: I0122 05:46:12.297687 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6a721333-1932-4bb0-b384-c034492e59c4-ovnkube-script-lib\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.297744 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-kubelet\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.297766 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-run-ovn-kubernetes\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.297772 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-systemd-units\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.297807 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-systemd-units\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.297826 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.297852 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6a721333-1932-4bb0-b384-c034492e59c4-env-overrides\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.297866 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-kubelet\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.297952 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc 
kubenswrapper[4933]: I0122 05:46:12.298352 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6a721333-1932-4bb0-b384-c034492e59c4-ovnkube-script-lib\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.298379 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6a721333-1932-4bb0-b384-c034492e59c4-ovnkube-config\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.298532 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6a721333-1932-4bb0-b384-c034492e59c4-env-overrides\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.298592 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-run-ovn\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.303530 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6a721333-1932-4bb0-b384-c034492e59c4-ovn-node-metrics-cert\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.315878 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s7lwp\" (UniqueName: \"kubernetes.io/projected/6a721333-1932-4bb0-b384-c034492e59c4-kube-api-access-s7lwp\") pod \"ovnkube-node-z88sj\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.332122 4933 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials" Jan 22 05:46:12 crc kubenswrapper[4933]: W0122 05:46:12.332355 4933 reflector.go:484] object-"openshift-network-node-identity"/"env-overrides": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-network-node-identity"/"env-overrides": Unexpected watch close - watch lasted less than a second and no items received Jan 22 05:46:12 crc kubenswrapper[4933]: W0122 05:46:12.332383 4933 reflector.go:484] object-"openshift-multus"/"cni-copy-resources": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-multus"/"cni-copy-resources": Unexpected watch close - watch lasted less than a second and no items received Jan 22 05:46:12 crc kubenswrapper[4933]: W0122 05:46:12.332394 4933 reflector.go:484] object-"openshift-machine-config-operator"/"kube-rbac-proxy": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-machine-config-operator"/"kube-rbac-proxy": Unexpected watch close - watch lasted less than a second and no items received Jan 22 05:46:12 crc kubenswrapper[4933]: W0122 05:46:12.332418 4933 reflector.go:484] 
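The block above is the kubelet's volume reconciler at work: reconciler_common.go:218 announces "operationExecutor.MountVolume started" for each volume in the ovnkube-node-z88sj spec, and operation_generator.go:637 later confirms "MountVolume.SetUp succeeded" for the same volume, in whatever order the operations finish. A quick way to verify that every start eventually got a matching success is to diff the two sets; a rough Go sketch, assuming the log is piped in on stdin:

package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

func main() {
	// The log quotes volume names as \"name\", so the patterns match a
	// literal backslash-quote pair around the captured name.
	started := regexp.MustCompile(`operationExecutor\.MountVolume started for volume \\"(.+?)\\"`)
	succeeded := regexp.MustCompile(`MountVolume\.SetUp succeeded for volume \\"(.+?)\\"`)
	pending := map[string]bool{}
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 1024*1024), 16*1024*1024) // entries can be very long
	for sc.Scan() {
		line := sc.Text()
		if m := started.FindStringSubmatch(line); m != nil {
			pending[m[1]] = true
		}
		if m := succeeded.FindStringSubmatch(line); m != nil {
			delete(pending, m[1])
		}
	}
	for v := range pending {
		fmt.Println("started but no SetUp success logged:", v)
	}
}

Against the entries above this prints nothing: all sixteen volumes, from host-slash through kube-api-access-s7lwp, report success within roughly 20 ms.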
object-"openshift-network-node-identity"/"ovnkube-identity-cm": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-network-node-identity"/"ovnkube-identity-cm": Unexpected watch close - watch lasted less than a second and no items received Jan 22 05:46:12 crc kubenswrapper[4933]: W0122 05:46:12.332421 4933 reflector.go:484] object-"openshift-ovn-kubernetes"/"env-overrides": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-ovn-kubernetes"/"env-overrides": Unexpected watch close - watch lasted less than a second and no items received Jan 22 05:46:12 crc kubenswrapper[4933]: W0122 05:46:12.332445 4933 reflector.go:484] object-"openshift-machine-config-operator"/"kube-root-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-machine-config-operator"/"kube-root-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Jan 22 05:46:12 crc kubenswrapper[4933]: W0122 05:46:12.332420 4933 reflector.go:484] object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq": watch of *v1.Secret ended with: very short watch: object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq": Unexpected watch close - watch lasted less than a second and no items received Jan 22 05:46:12 crc kubenswrapper[4933]: W0122 05:46:12.332468 4933 reflector.go:484] object-"openshift-network-node-identity"/"kube-root-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-network-node-identity"/"kube-root-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Jan 22 05:46:12 crc kubenswrapper[4933]: W0122 05:46:12.332436 4933 reflector.go:484] object-"openshift-dns"/"kube-root-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-dns"/"kube-root-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Jan 22 05:46:12 crc kubenswrapper[4933]: W0122 05:46:12.332462 4933 reflector.go:484] object-"openshift-multus"/"openshift-service-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-multus"/"openshift-service-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Jan 22 05:46:12 crc kubenswrapper[4933]: W0122 05:46:12.332482 4933 reflector.go:484] object-"openshift-machine-config-operator"/"openshift-service-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-machine-config-operator"/"openshift-service-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Jan 22 05:46:12 crc kubenswrapper[4933]: W0122 05:46:12.332520 4933 reflector.go:484] object-"openshift-network-node-identity"/"openshift-service-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-network-node-identity"/"openshift-service-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Jan 22 05:46:12 crc kubenswrapper[4933]: W0122 05:46:12.332545 4933 reflector.go:484] object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz": watch of *v1.Secret ended with: very short watch: object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz": Unexpected watch close - watch lasted less than a second and no items received Jan 22 05:46:12 crc kubenswrapper[4933]: W0122 05:46:12.332569 4933 reflector.go:484] object-"openshift-ovn-kubernetes"/"ovnkube-config": watch of *v1.ConfigMap ended with: very short watch: 
object-"openshift-ovn-kubernetes"/"ovnkube-config": Unexpected watch close - watch lasted less than a second and no items received Jan 22 05:46:12 crc kubenswrapper[4933]: W0122 05:46:12.332593 4933 reflector.go:484] object-"openshift-network-node-identity"/"network-node-identity-cert": watch of *v1.Secret ended with: very short watch: object-"openshift-network-node-identity"/"network-node-identity-cert": Unexpected watch close - watch lasted less than a second and no items received Jan 22 05:46:12 crc kubenswrapper[4933]: W0122 05:46:12.332598 4933 reflector.go:484] object-"openshift-network-operator"/"openshift-service-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-network-operator"/"openshift-service-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Jan 22 05:46:12 crc kubenswrapper[4933]: W0122 05:46:12.332421 4933 reflector.go:484] object-"openshift-machine-config-operator"/"proxy-tls": watch of *v1.Secret ended with: very short watch: object-"openshift-machine-config-operator"/"proxy-tls": Unexpected watch close - watch lasted less than a second and no items received Jan 22 05:46:12 crc kubenswrapper[4933]: W0122 05:46:12.332613 4933 reflector.go:484] object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl": watch of *v1.Secret ended with: very short watch: object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl": Unexpected watch close - watch lasted less than a second and no items received Jan 22 05:46:12 crc kubenswrapper[4933]: W0122 05:46:12.332613 4933 reflector.go:484] object-"openshift-network-operator"/"kube-root-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-network-operator"/"kube-root-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Jan 22 05:46:12 crc kubenswrapper[4933]: W0122 05:46:12.332597 4933 reflector.go:484] object-"openshift-ovn-kubernetes"/"kube-root-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-ovn-kubernetes"/"kube-root-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Jan 22 05:46:12 crc kubenswrapper[4933]: W0122 05:46:12.332640 4933 reflector.go:484] object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert": watch of *v1.Secret ended with: very short watch: object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert": Unexpected watch close - watch lasted less than a second and no items received Jan 22 05:46:12 crc kubenswrapper[4933]: W0122 05:46:12.332658 4933 reflector.go:484] object-"openshift-dns"/"node-resolver-dockercfg-kz9s7": watch of *v1.Secret ended with: very short watch: object-"openshift-dns"/"node-resolver-dockercfg-kz9s7": Unexpected watch close - watch lasted less than a second and no items received Jan 22 05:46:12 crc kubenswrapper[4933]: W0122 05:46:12.332670 4933 reflector.go:484] object-"openshift-network-operator"/"iptables-alerter-script": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-network-operator"/"iptables-alerter-script": Unexpected watch close - watch lasted less than a second and no items received Jan 22 05:46:12 crc kubenswrapper[4933]: W0122 05:46:12.332697 4933 reflector.go:484] object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Jan 22 05:46:12 crc 
kubenswrapper[4933]: W0122 05:46:12.332726 4933 reflector.go:484] object-"openshift-network-operator"/"metrics-tls": watch of *v1.Secret ended with: very short watch: object-"openshift-network-operator"/"metrics-tls": Unexpected watch close - watch lasted less than a second and no items received Jan 22 05:46:12 crc kubenswrapper[4933]: W0122 05:46:12.332356 4933 reflector.go:484] object-"openshift-multus"/"default-cni-sysctl-allowlist": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-multus"/"default-cni-sysctl-allowlist": Unexpected watch close - watch lasted less than a second and no items received Jan 22 05:46:12 crc kubenswrapper[4933]: W0122 05:46:12.332847 4933 reflector.go:484] object-"openshift-multus"/"kube-root-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-multus"/"kube-root-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Jan 22 05:46:12 crc kubenswrapper[4933]: W0122 05:46:12.332455 4933 reflector.go:484] object-"openshift-ovn-kubernetes"/"ovnkube-script-lib": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-ovn-kubernetes"/"ovnkube-script-lib": Unexpected watch close - watch lasted less than a second and no items received Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.441025 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-19 02:19:33.282318231 +0000 UTC Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.444742 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:12 crc kubenswrapper[4933]: W0122 05:46:12.478462 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6a721333_1932_4bb0_b384_c034492e59c4.slice/crio-36713c23b47eada3b139ae8f72ead2f7d6fb6f847054c31344d8b18e868400f9 WatchSource:0}: Error finding container 36713c23b47eada3b139ae8f72ead2f7d6fb6f847054c31344d8b18e868400f9: Status 404 returned error can't find the container with id 36713c23b47eada3b139ae8f72ead2f7d6fb6f847054c31344d8b18e868400f9 Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.492844 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.493451 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.494521 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.495116 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.496927 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 
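The burst of reflector.go:484 warnings is one event, not many independent failures: transport.go:147 closed every API client connection to pick up the rotated credentials, so each ConfigMap and Secret watch died in under a second and its reflector logged a single "very short watch" warning before re-listing. Counting the warnings per namespace makes that pattern obvious; a small Go sketch under the same stdin assumption as the earlier example:

package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

func main() {
	// Matches: object-"<namespace>"/"<name>": watch of *v1.<Kind> ended
	// with: very short watch
	re := regexp.MustCompile(`object-"([^"]+)"/"([^"]+)": watch of \*v1\.(\w+) ended with: very short watch`)
	counts := map[string]int{}
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 1024*1024), 16*1024*1024)
	for sc.Scan() {
		for _, m := range re.FindAllStringSubmatch(sc.Text(), -1) {
			counts[m[1]]++ // keyed by namespace
		}
	}
	for ns, n := range counts {
		fmt.Printf("%3d very-short-watch warnings in %s\n", n, ns)
	}
}

All the warnings above carry timestamps within about half a millisecond of the transport.go rotation message, consistent with a single connection teardown.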
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.441025 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-19 02:19:33.282318231 +0000 UTC
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.444742 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj"
Jan 22 05:46:12 crc kubenswrapper[4933]: W0122 05:46:12.478462 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6a721333_1932_4bb0_b384_c034492e59c4.slice/crio-36713c23b47eada3b139ae8f72ead2f7d6fb6f847054c31344d8b18e868400f9 WatchSource:0}: Error finding container 36713c23b47eada3b139ae8f72ead2f7d6fb6f847054c31344d8b18e868400f9: Status 404 returned error can't find the container with id 36713c23b47eada3b139ae8f72ead2f7d6fb6f847054c31344d8b18e868400f9
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.492844 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.493451 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.494521 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.495116 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.496927 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.497830 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.498472 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.502731 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.503531 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.504561 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.505095 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.507426 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.507922 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.508437 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.509854 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.510444 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.511748 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.512713 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.513896 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.514542 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.515501 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.516229 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.516721 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.518154 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.518704 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.519380 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jr6rw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v4gxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jr6rw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:12Z is after 2025-08-24T17:21:41Z"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.519862 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.520611 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.521772 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.522563 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.523616 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.524332 4933 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.524467 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.526692 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.527902 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.528426 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.530636 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.531605 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.532788 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.533737 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.536226 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.536682 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes"
Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.537385 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes"
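Each kubelet_volumes.go:163 line above records the housekeeping pass removing /var/lib/kubelet/pods/<uid>/volumes for a pod that no longer exists; the kubelet_volumes.go:152 line shows a leftover volume subpath being cleaned first when one is present. To see which pod directories remain live on the node after such a pass, listing the pods directory is enough. A sketch, assuming it runs on the node with read access to /var/lib/kubelet:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	const podsDir = "/var/lib/kubelet/pods"
	entries, err := os.ReadDir(podsDir)
	if err != nil {
		panic(err)
	}
	for _, e := range entries {
		if !e.IsDir() {
			continue
		}
		// A per-pod volumes/ subdirectory is exactly what the cleanup
		// pass in the log deletes once the pod is gone.
		volumes := filepath.Join(podsDir, e.Name(), "volumes")
		if _, err := os.Stat(volumes); err == nil {
			fmt.Println(e.Name(), "->", volumes)
		}
	}
}

Comparing that listing against the podUID values logged above shows which directories the pass removed and which belong to pods that are still scheduled.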
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.541455 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83dfdde7-cd49-49e0-85a0-0165d464b2c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2dae
d8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/opens
hift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q8l78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.541724 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.542709 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.543323 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.544020 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.544767 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.546804 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.547396 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.550672 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.551757 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.552494 4933 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.553824 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.561259 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.578276 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.593119 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.617542 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22
T05:46:10Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 05:46:04.652334 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:46:04.654188 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-11537279/tls.crt::/tmp/serving-cert-11537279/tls.key\\\\\\\"\\\\nI0122 05:46:10.676992 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:46:10.680821 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:46:10.680851 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:46:10.680897 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:46:10.680911 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:46:10.690531 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 05:46:10.690586 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690615 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 05:46:10.690623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 05:46:10.690632 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 05:46:10.690639 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 05:46:10.691151 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:46:10.696387 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.619218 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"27ea089ef37f2af216c553e31f4852fe12489f919f95c9c6ec1c579babcedbb6"} Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.619258 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"ffa3e2aae6ed4524ee3a742ef063a965359fdea1188322b53882b33cf26620bc"} Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.621911 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerStarted","Data":"b19005c1dbe0b643170cfc21cda4a46a6ab3630efbf5198dc7c19882bb5ff6a4"} Jan 22 05:46:12 crc kubenswrapper[4933]: 
I0122 05:46:12.621954 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerStarted","Data":"99fe3d10aa7ee2f1de1f5f23ed8c4c7ac9efe69ce9e366f02e2e8efca2c017e0"} Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.621965 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerStarted","Data":"f8e6c4be9e8d59749385216040abab12d72aea48e0d52e365c7725a6da9d0944"} Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.634931 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.640284 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.641307 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f"} Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.641481 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.643023 4933 generic.go:334] "Generic (PLEG): container finished" podID="6a721333-1932-4bb0-b384-c034492e59c4" containerID="e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d" exitCode=0 Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.643089 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" event={"ID":"6a721333-1932-4bb0-b384-c034492e59c4","Type":"ContainerDied","Data":"e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d"} Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.643109 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" event={"ID":"6a721333-1932-4bb0-b384-c034492e59c4","Type":"ContainerStarted","Data":"36713c23b47eada3b139ae8f72ead2f7d6fb6f847054c31344d8b18e868400f9"} Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.648406 4933 generic.go:334] "Generic (PLEG): container finished" podID="83dfdde7-cd49-49e0-85a0-0165d464b2c7" containerID="08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691" exitCode=0 Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.648480 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" event={"ID":"83dfdde7-cd49-49e0-85a0-0165d464b2c7","Type":"ContainerDied","Data":"08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691"} Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.648503 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" event={"ID":"83dfdde7-cd49-49e0-85a0-0165d464b2c7","Type":"ContainerStarted","Data":"b2edd1e08b5bea73eadb4c10cfcf4a9b78d11882e53eb48ac6001764132774bd"} Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.649758 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"c184a2cb2d58683d2f48cc3dd7c8ce4d399883d9e0649b79951b95b087cdbbcb"} Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.652053 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"59b05e4bfee6067658a60220a411d49a83c5ca76039e31325ce7646cbbeeba3f"} Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.652104 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"71b7f697413549ee75514e57fcb6e2d93c6b7e349b1cf93e8fc8844c96036d60"} Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.652115 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"1fc76e1b57914e2fb04e7c51bcf608859b1b0977eeafb88bc20d59c6a330a443"} Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.662706 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.684652 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.694397 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nvpgt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126473c5-96ff-4ca9-83c0-7548d7e219c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w6xgn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nvpgt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:12Z is 
after 2025-08-24T17:21:41Z" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.717192 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a721333-1932-4bb0-b384-c034492e59c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\
\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath
\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}
],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z88sj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.731959 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f2db1d-40cb-4864-917b-3b99f69cdafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zfnsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.758897 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jr6rw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v4gxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jr6rw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.791061 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83dfdde7-cd49-49e0-85a0-0165d464b2c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c85
7df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay
.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q8l78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.817383 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59b05e4bfee6067658a60220a411d49a83c5ca76039e31325ce7646cbbeeba3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71b7f697413549ee75514e57fcb6e2d93c6b7e349b1cf93e8fc8844c96036d60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restar
tCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.837261 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.848495 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.861000 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:46:10Z\\\",\\\"message\\\":\\\"falling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 05:46:04.652334 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:46:04.654188 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-11537279/tls.crt::/tmp/serving-cert-11537279/tls.key\\\\\\\"\\\\nI0122 05:46:10.676992 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:46:10.680821 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:46:10.680851 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:46:10.680897 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:46:10.680911 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:46:10.690531 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 05:46:10.690586 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690615 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 05:46:10.690623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 05:46:10.690632 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 05:46:10.690639 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 05:46:10.691151 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:46:10.696387 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.874263 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27ea089ef37f2af216c553e31f4852fe12489f919f95c9c6ec1c579babcedbb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.886630 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:12 crc kubenswrapper[4933]: E0122 05:46:12.893871 4933 configmap.go:193] Couldn't get configMap openshift-multus/multus-daemon-config: failed to sync configmap cache: timed out waiting for the condition Jan 22 05:46:12 crc kubenswrapper[4933]: E0122 05:46:12.894140 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-multus-daemon-config podName:f066dd84-0cd5-4e8c-8411-cf12cc83ea7d nodeName:}" failed. No retries permitted until 2026-01-22 05:46:13.394110299 +0000 UTC m=+21.231235652 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "multus-daemon-config" (UniqueName: "kubernetes.io/configmap/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-multus-daemon-config") pod "multus-jr6rw" (UID: "f066dd84-0cd5-4e8c-8411-cf12cc83ea7d") : failed to sync configmap cache: timed out waiting for the condition Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.901003 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:12 crc kubenswrapper[4933]: E0122 05:46:12.908236 4933 projected.go:288] Couldn't get configMap openshift-dns/openshift-service-ca.crt: failed to sync configmap cache: timed out waiting for the condition Jan 22 05:46:12 crc kubenswrapper[4933]: E0122 05:46:12.908292 4933 projected.go:194] Error preparing data for projected volume kube-api-access-w6xgn for pod openshift-dns/node-resolver-nvpgt: failed to sync configmap cache: timed out waiting for the condition Jan 22 05:46:12 crc kubenswrapper[4933]: E0122 05:46:12.908401 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/126473c5-96ff-4ca9-83c0-7548d7e219c0-kube-api-access-w6xgn podName:126473c5-96ff-4ca9-83c0-7548d7e219c0 nodeName:}" failed. No retries permitted until 2026-01-22 05:46:13.408369899 +0000 UTC m=+21.245495252 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-w6xgn" (UniqueName: "kubernetes.io/projected/126473c5-96ff-4ca9-83c0-7548d7e219c0-kube-api-access-w6xgn") pod "node-resolver-nvpgt" (UID: "126473c5-96ff-4ca9-83c0-7548d7e219c0") : failed to sync configmap cache: timed out waiting for the condition Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.967873 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nvpgt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126473c5-96ff-4ca9-83c0-7548d7e219c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w6xgn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nvpgt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.989778 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 22 05:46:12 crc kubenswrapper[4933]: I0122 05:46:12.998542 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a721333-1932-4bb0-b384-c034492e59c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z88sj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.039271 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f2db1d-40cb-4864-917b-3b99f69cdafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b19005c1dbe0b643170cfc21cda4a46a6ab3630efbf5198dc7c19882bb5ff6a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99fe3d10aa7ee2f1de1f5f23ed8c4c7ac9efe69ce9e366f02e2e8efca2c017e0\\\",\\\"image\\\":\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zfnsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:13Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.076437 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.207518 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.207660 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:46:13 crc kubenswrapper[4933]: E0122 05:46:13.207739 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:46:15.207708703 +0000 UTC m=+23.044834066 (durationBeforeRetry 2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:46:13 crc kubenswrapper[4933]: E0122 05:46:13.207748 4933 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.207775 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.207803 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:46:13 crc kubenswrapper[4933]: E0122 05:46:13.207810 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 05:46:15.207799186 +0000 UTC m=+23.044924669 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 22 05:46:13 crc kubenswrapper[4933]: E0122 05:46:13.207872 4933 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 22 05:46:13 crc kubenswrapper[4933]: E0122 05:46:13.207886 4933 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.207885 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:46:13 crc kubenswrapper[4933]: E0122 05:46:13.207897 4933 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:46:13 crc kubenswrapper[4933]: E0122 05:46:13.207940 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-22 05:46:15.207927488 +0000 UTC m=+23.045052841 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:46:13 crc kubenswrapper[4933]: E0122 05:46:13.207979 4933 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 22 05:46:13 crc kubenswrapper[4933]: E0122 05:46:13.207989 4933 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 22 05:46:13 crc kubenswrapper[4933]: E0122 05:46:13.207995 4933 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:46:13 crc kubenswrapper[4933]: E0122 05:46:13.208014 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. 
No retries permitted until 2026-01-22 05:46:15.20800826 +0000 UTC m=+23.045133613 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:46:13 crc kubenswrapper[4933]: E0122 05:46:13.208054 4933 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 22 05:46:13 crc kubenswrapper[4933]: E0122 05:46:13.208107 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 05:46:15.208097542 +0000 UTC m=+23.045222995 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.211808 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.230811 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.255293 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.280361 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.293835 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.329586 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.329846 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.379710 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.395687 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.408349 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.414230 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Jan 22 05:46:13 crc kubenswrapper[4933]: 
I0122 05:46:13.426900 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6xgn\" (UniqueName: \"kubernetes.io/projected/126473c5-96ff-4ca9-83c0-7548d7e219c0-kube-api-access-w6xgn\") pod \"node-resolver-nvpgt\" (UID: \"126473c5-96ff-4ca9-83c0-7548d7e219c0\") " pod="openshift-dns/node-resolver-nvpgt" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.426935 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-multus-daemon-config\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.427538 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/f066dd84-0cd5-4e8c-8411-cf12cc83ea7d-multus-daemon-config\") pod \"multus-jr6rw\" (UID: \"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\") " pod="openshift-multus/multus-jr6rw" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.432589 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w6xgn\" (UniqueName: \"kubernetes.io/projected/126473c5-96ff-4ca9-83c0-7548d7e219c0-kube-api-access-w6xgn\") pod \"node-resolver-nvpgt\" (UID: \"126473c5-96ff-4ca9-83c0-7548d7e219c0\") " pod="openshift-dns/node-resolver-nvpgt" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.441212 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-11 18:20:27.220898119 +0000 UTC Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.454285 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.489929 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.489929 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:46:13 crc kubenswrapper[4933]: E0122 05:46:13.490075 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:46:13 crc kubenswrapper[4933]: E0122 05:46:13.490167 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.490377 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:46:13 crc kubenswrapper[4933]: E0122 05:46:13.490549 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.526969 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.545931 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-jr6rw" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.556222 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-nvpgt" Jan 22 05:46:13 crc kubenswrapper[4933]: W0122 05:46:13.564308 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf066dd84_0cd5_4e8c_8411_cf12cc83ea7d.slice/crio-edbc713af90031912c4625687ec0888edcb97249e36274a7ba3e112f23bae3f2 WatchSource:0}: Error finding container edbc713af90031912c4625687ec0888edcb97249e36274a7ba3e112f23bae3f2: Status 404 returned error can't find the container with id edbc713af90031912c4625687ec0888edcb97249e36274a7ba3e112f23bae3f2 Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.571008 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 22 05:46:13 crc kubenswrapper[4933]: W0122 05:46:13.571440 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod126473c5_96ff_4ca9_83c0_7548d7e219c0.slice/crio-fa1dbcf615468191437f764ee2dfa39d7216c52d2b7682f9bb0bbbd1fcb51263 WatchSource:0}: Error finding container fa1dbcf615468191437f764ee2dfa39d7216c52d2b7682f9bb0bbbd1fcb51263: Status 404 returned error can't find the container with id fa1dbcf615468191437f764ee2dfa39d7216c52d2b7682f9bb0bbbd1fcb51263 Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.622489 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.626964 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.633634 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.635568 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.637780 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.659327 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" 
event={"ID":"6a721333-1932-4bb0-b384-c034492e59c4","Type":"ContainerStarted","Data":"39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac"} Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.659375 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" event={"ID":"6a721333-1932-4bb0-b384-c034492e59c4","Type":"ContainerStarted","Data":"a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68"} Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.659391 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" event={"ID":"6a721333-1932-4bb0-b384-c034492e59c4","Type":"ContainerStarted","Data":"c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741"} Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.659416 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" event={"ID":"6a721333-1932-4bb0-b384-c034492e59c4","Type":"ContainerStarted","Data":"a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1"} Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.661550 4933 generic.go:334] "Generic (PLEG): container finished" podID="83dfdde7-cd49-49e0-85a0-0165d464b2c7" containerID="eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02" exitCode=0 Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.661603 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" event={"ID":"83dfdde7-cd49-49e0-85a0-0165d464b2c7","Type":"ContainerDied","Data":"eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02"} Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.662613 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-jr6rw" event={"ID":"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d","Type":"ContainerStarted","Data":"edbc713af90031912c4625687ec0888edcb97249e36274a7ba3e112f23bae3f2"} Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.663807 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-nvpgt" event={"ID":"126473c5-96ff-4ca9-83c0-7548d7e219c0","Type":"ContainerStarted","Data":"fa1dbcf615468191437f764ee2dfa39d7216c52d2b7682f9bb0bbbd1fcb51263"} Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.675012 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59b05e4bfee6067658a60220a411d49a83c5ca76039e31325ce7646cbbeeba3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71b7f697413549ee75514e57fcb6e2d93c6b7e349b1cf93e8fc8844c96036d60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:13Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.686167 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:13Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.690576 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.698190 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:13Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.710766 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nvpgt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126473c5-96ff-4ca9-83c0-7548d7e219c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w6xgn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nvpgt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:13Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.731641 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a721333-1932-4bb0-b384-c034492e59c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z88sj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:13Z 
is after 2025-08-24T17:21:41Z" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.738421 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.747318 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-ope
rator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:46:10Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 05:46:04.652334 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:46:04.654188 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-11537279/tls.crt::/tmp/serving-cert-11537279/tls.key\\\\\\\"\\\\nI0122 05:46:10.676992 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:46:10.680821 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:46:10.680851 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:46:10.680897 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:46:10.680911 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:46:10.690531 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 05:46:10.690586 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690615 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 05:46:10.690623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 05:46:10.690632 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 05:46:10.690639 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 05:46:10.691151 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:46:10.696387 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:13Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.765994 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27ea089ef37f2af216c553e31f4852fe12489f919f95c9c6ec1c579babcedbb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:13Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.797267 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:13Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.810007 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.829726 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.868810 4933 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.870339 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.870580 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.870609 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.870617 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.870724 4933 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.894904 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.910720 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.946473 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:13Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.949464 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.970849 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 22 05:46:13 crc kubenswrapper[4933]: I0122 05:46:13.990697 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.001504 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-7r526"] Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.002135 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-7r526" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.050569 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.070746 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.090671 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.110765 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.131943 4933 kubelet_node_status.go:115] "Node was previously registered" node="crc" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.132462 4933 kubelet_node_status.go:79] "Successfully registered node" node="crc" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.134186 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.134224 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.134234 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.134250 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.134261 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:14Z","lastTransitionTime":"2026-01-22T05:46:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.134775 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a2b23423-6793-44cd-b47e-dc4d25bbe3ae-host\") pod \"node-ca-7r526\" (UID: \"a2b23423-6793-44cd-b47e-dc4d25bbe3ae\") " pod="openshift-image-registry/node-ca-7r526" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.134826 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/a2b23423-6793-44cd-b47e-dc4d25bbe3ae-serviceca\") pod \"node-ca-7r526\" (UID: \"a2b23423-6793-44cd-b47e-dc4d25bbe3ae\") " pod="openshift-image-registry/node-ca-7r526" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.134861 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hfvwc\" (UniqueName: \"kubernetes.io/projected/a2b23423-6793-44cd-b47e-dc4d25bbe3ae-kube-api-access-hfvwc\") pod \"node-ca-7r526\" (UID: \"a2b23423-6793-44cd-b47e-dc4d25bbe3ae\") " pod="openshift-image-registry/node-ca-7r526" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.160488 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f2db1d-40cb-4864-917b-3b99f69cdafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b19005c1dbe0b643170cfc21cda4a46a6ab3630efbf5198dc7c19882bb5ff6a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99fe3d10aa7ee2f1de1f5f23ed8c4c7ac9efe69ce9e366f02e2e8efca2c017e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a
9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zfnsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:14Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:14 crc kubenswrapper[4933]: E0122 05:46:14.182425 4933 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c8062a48-506a-465a-9977-93e8530bae49\\\",\\\"systemUUID\\\":\\\"d621d350-6f7c-490e-a47d-b396db235280\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:14Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.187037 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.187120 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.187130 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.187150 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.187162 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:14Z","lastTransitionTime":"2026-01-22T05:46:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.207575 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jr6rw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v4gxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jr6rw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:14Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:14 crc kubenswrapper[4933]: E0122 05:46:14.207916 4933 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c8062a48-506a-465a-9977-93e8530bae49\\\",\\\"systemUUID\\\":\\\"d621d350-6f7c-490e-a47d-b396db235280\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:14Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.213727 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.213803 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.213817 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.213835 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.213846 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:14Z","lastTransitionTime":"2026-01-22T05:46:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.236561 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hfvwc\" (UniqueName: \"kubernetes.io/projected/a2b23423-6793-44cd-b47e-dc4d25bbe3ae-kube-api-access-hfvwc\") pod \"node-ca-7r526\" (UID: \"a2b23423-6793-44cd-b47e-dc4d25bbe3ae\") " pod="openshift-image-registry/node-ca-7r526" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.237386 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a2b23423-6793-44cd-b47e-dc4d25bbe3ae-host\") pod \"node-ca-7r526\" (UID: \"a2b23423-6793-44cd-b47e-dc4d25bbe3ae\") " pod="openshift-image-registry/node-ca-7r526" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.237581 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/a2b23423-6793-44cd-b47e-dc4d25bbe3ae-serviceca\") pod \"node-ca-7r526\" (UID: \"a2b23423-6793-44cd-b47e-dc4d25bbe3ae\") " pod="openshift-image-registry/node-ca-7r526" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.237529 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a2b23423-6793-44cd-b47e-dc4d25bbe3ae-host\") pod \"node-ca-7r526\" (UID: \"a2b23423-6793-44cd-b47e-dc4d25bbe3ae\") " pod="openshift-image-registry/node-ca-7r526" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.239320 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/a2b23423-6793-44cd-b47e-dc4d25bbe3ae-serviceca\") pod \"node-ca-7r526\" (UID: \"a2b23423-6793-44cd-b47e-dc4d25bbe3ae\") " pod="openshift-image-registry/node-ca-7r526" Jan 22 05:46:14 crc kubenswrapper[4933]: E0122 05:46:14.243552 4933 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady 
message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1
688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":4977
42284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c8062a48-506a-465a-9977-93e8530bae49\\\",\\\"systemUUID\\\":\\\"d621d350-6f7c-490e-a47d-b396db235280\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:14Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.247871 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83dfdde7-cd49-49e0-85a0-0165d464b2c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"image\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"moun
tPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q8l78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:14Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.249217 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.249387 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.249494 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.249625 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.249745 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:14Z","lastTransitionTime":"2026-01-22T05:46:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:14 crc kubenswrapper[4933]: E0122 05:46:14.270021 4933 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c8062a48-506a-465a-9977-93e8530bae49\\\",\\\"systemUUID\\\":\\\"d621d350-6f7c-490e-a47d-b396db235280\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:14Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.274825 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.274975 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.275061 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.275165 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.275245 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:14Z","lastTransitionTime":"2026-01-22T05:46:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.277672 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hfvwc\" (UniqueName: \"kubernetes.io/projected/a2b23423-6793-44cd-b47e-dc4d25bbe3ae-kube-api-access-hfvwc\") pod \"node-ca-7r526\" (UID: \"a2b23423-6793-44cd-b47e-dc4d25bbe3ae\") " pod="openshift-image-registry/node-ca-7r526" Jan 22 05:46:14 crc kubenswrapper[4933]: E0122 05:46:14.295635 4933 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c8062a48-506a-465a-9977-93e8530bae49\\\",\\\"systemUUID\\\":\\\"d621d350-6f7c-490e-a47d-b396db235280\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:14Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:14 crc kubenswrapper[4933]: E0122 05:46:14.295795 4933 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.297747 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.297799 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.297814 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.297834 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.297846 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:14Z","lastTransitionTime":"2026-01-22T05:46:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.301572 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:14Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.344409 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:14Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.348844 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-7r526" Jan 22 05:46:14 crc kubenswrapper[4933]: W0122 05:46:14.367153 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda2b23423_6793_44cd_b47e_dc4d25bbe3ae.slice/crio-088bdc5ca510cdd400a6e6bfdf8f3ac2a7d100c061dbacaba03a0516fc002220 WatchSource:0}: Error finding container 088bdc5ca510cdd400a6e6bfdf8f3ac2a7d100c061dbacaba03a0516fc002220: Status 404 returned error can't find the container with id 088bdc5ca510cdd400a6e6bfdf8f3ac2a7d100c061dbacaba03a0516fc002220 Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.383103 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nvpgt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126473c5-96ff-4ca9-83c0-7548d7e219c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w6xgn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nvpgt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:14Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.400627 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.401004 4933 kubelet_node_status.go:724] "Recording event message for 
node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.401017 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.401039 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.401062 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:14Z","lastTransitionTime":"2026-01-22T05:46:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.427878 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a721333-1932-4bb0-b384-c034492e59c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z88sj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:14Z 
is after 2025-08-24T17:21:41Z" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.441563 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 14:21:00.796766064 +0000 UTC Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.482018 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\
\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:46:10Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 05:46:04.652334 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:46:04.654188 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-11537279/tls.crt::/tmp/serving-cert-11537279/tls.key\\\\\\\"\\\\nI0122 05:46:10.676992 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:46:10.680821 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:46:10.680851 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:46:10.680897 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:46:10.680911 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:46:10.690531 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 05:46:10.690586 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690615 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 05:46:10.690623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 05:46:10.690632 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 05:46:10.690639 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 05:46:10.691151 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:46:10.696387 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:14Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.504085 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.504129 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.504139 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.504158 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.504170 4933 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:14Z","lastTransitionTime":"2026-01-22T05:46:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.518787 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27ea089ef37f2af216c553e31f4852fe12489f919f95c9c6ec1c579babcedbb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:14Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.535484 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7r526" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2b23423-6793-44cd-b47e-dc4d25bbe3ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hfvwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7r526\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:14Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.576407 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f2db1d-40cb-4864-917b-3b99f69cdafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b19005c1dbe0b643170cfc21cda4a46a6ab3630efbf5198dc7c19882bb5ff6a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99fe3d10aa7ee2f1de1f5f23ed8c4c7ac9efe69ce9e366f02e2e8efca2c017e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zfnsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:14Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.605993 4933 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.606029 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.606040 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.606056 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.606070 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:14Z","lastTransitionTime":"2026-01-22T05:46:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.618911 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jr6rw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v4gxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jr6rw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:14Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.663405 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83dfdde7-cd49-49e0-85a0-0165d464b2c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"image\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"moun
tPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q8l78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:14Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.670801 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-7r526" event={"ID":"a2b23423-6793-44cd-b47e-dc4d25bbe3ae","Type":"ContainerStarted","Data":"78e689c029c4c4b19873187eddbdcd40bbc62b4e93d9a120a801e04db7c56ecc"} Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.670851 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-7r526" event={"ID":"a2b23423-6793-44cd-b47e-dc4d25bbe3ae","Type":"ContainerStarted","Data":"088bdc5ca510cdd400a6e6bfdf8f3ac2a7d100c061dbacaba03a0516fc002220"} Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.675023 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" event={"ID":"6a721333-1932-4bb0-b384-c034492e59c4","Type":"ContainerStarted","Data":"8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0"} Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.675072 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" event={"ID":"6a721333-1932-4bb0-b384-c034492e59c4","Type":"ContainerStarted","Data":"c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15"} Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.677847 4933 generic.go:334] "Generic (PLEG): container finished" podID="83dfdde7-cd49-49e0-85a0-0165d464b2c7" containerID="10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365" exitCode=0 Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.677915 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" event={"ID":"83dfdde7-cd49-49e0-85a0-0165d464b2c7","Type":"ContainerDied","Data":"10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365"} Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 
05:46:14.679695 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"1c47f771dea621131d66ab686e6c28f7f5aeb129834c0f8308f467c2bd1b871e"} Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.681353 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-nvpgt" event={"ID":"126473c5-96ff-4ca9-83c0-7548d7e219c0","Type":"ContainerStarted","Data":"64f3d49f15e75b73f18118e60b0e8cdae794506cff25ef11ea3a217db91a4b82"} Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.683505 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-jr6rw" event={"ID":"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d","Type":"ContainerStarted","Data":"3a6b69e5a5d7420dbcda9a6a023402f27c1d584233c06bfb3d09ae7efb243e31"} Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.702165 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:14Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.710246 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.710326 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.710379 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.710396 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.710409 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:14Z","lastTransitionTime":"2026-01-22T05:46:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.737446 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59b05e4bfee6067658a60220a411d49a83c5ca76039e31325ce7646cbbeeba3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71b7f697413549ee75514e57fcb6e2d93c6b7e349b1cf93e8fc8844c96036d60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:14Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.778103 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:14Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.812705 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.812740 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.812749 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.812762 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.812771 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:14Z","lastTransitionTime":"2026-01-22T05:46:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.821249 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f2db1d-40cb-4864-917b-3b99f69cdafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b19005c1dbe0b643170cfc21cda4a46a6ab3630efbf5198dc7c19882bb5ff6a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99fe3d10aa7ee2f1de1f5f23ed8c4c7ac9efe69ce9e366f02e2e8efca2c017e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zfnsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:14Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.856053 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7r526" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2b23423-6793-44cd-b47e-dc4d25bbe3ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78e689c029c4c4b19873187eddbdcd40bbc62b4e93d9a120a801e04db7c56ecc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hfvwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7r526\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:14Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.900154 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83dfdde7-cd49-49e0-85a0-0165d464b2c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"image\\\":\\\"quay.io/open
shift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev
@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q8l78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:14Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.915539 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.915568 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.915576 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.915589 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.915597 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:14Z","lastTransitionTime":"2026-01-22T05:46:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.939153 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jr6rw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a6b69e5a5d7420dbcda9a6a023402f27c1d584233c06bfb3d09ae7efb243e31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v4gxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jr6rw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:14Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:14 crc kubenswrapper[4933]: I0122 05:46:14.979638 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59b05e4bfee6067658a60220a411d49a83c5ca76039e31325ce7646cbbeeba3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71b7f697413549ee75514e57fcb6e2d93c6b7e349b1cf93e8fc8844c96036d60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:14Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.018339 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.018401 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.018418 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.018439 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.018454 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:15Z","lastTransitionTime":"2026-01-22T05:46:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.021762 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c47f771dea621131d66ab686e6c28f7f5aeb129834c0f8308f467c2bd1b871e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed 
to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:15Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.067019 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:15Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.102580 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27ea089ef37f2af216c553e31f4852fe12489f919f95c9c6ec1c579babcedbb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:15Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.120762 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.120809 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.120824 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.120842 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.120855 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:15Z","lastTransitionTime":"2026-01-22T05:46:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.139236 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:15Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.179657 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:15Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.217570 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nvpgt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126473c5-96ff-4ca9-83c0-7548d7e219c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64f3d49f15e75b73f18118e60b0e8cdae794506cff25ef11ea3a217db91a4b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w6xgn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.
11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nvpgt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:15Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.223235 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.223266 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.223276 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.223290 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.223300 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:15Z","lastTransitionTime":"2026-01-22T05:46:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.249292 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.249407 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.249440 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:46:15 crc kubenswrapper[4933]: E0122 05:46:15.249483 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:46:19.249448492 +0000 UTC m=+27.086573885 (durationBeforeRetry 4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:46:15 crc kubenswrapper[4933]: E0122 05:46:15.249552 4933 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.249564 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:46:15 crc kubenswrapper[4933]: E0122 05:46:15.249574 4933 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.249629 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:46:15 crc kubenswrapper[4933]: E0122 05:46:15.249647 4933 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:46:15 crc kubenswrapper[4933]: E0122 05:46:15.249583 4933 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 22 05:46:15 crc kubenswrapper[4933]: E0122 05:46:15.249727 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-22 05:46:19.249710738 +0000 UTC m=+27.086836111 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:46:15 crc kubenswrapper[4933]: E0122 05:46:15.249752 4933 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 22 05:46:15 crc kubenswrapper[4933]: E0122 05:46:15.249772 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 05:46:19.249757379 +0000 UTC m=+27.086882772 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 22 05:46:15 crc kubenswrapper[4933]: E0122 05:46:15.249622 4933 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 22 05:46:15 crc kubenswrapper[4933]: E0122 05:46:15.249793 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 05:46:19.249782429 +0000 UTC m=+27.086907822 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 22 05:46:15 crc kubenswrapper[4933]: E0122 05:46:15.249798 4933 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 22 05:46:15 crc kubenswrapper[4933]: E0122 05:46:15.249812 4933 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:46:15 crc kubenswrapper[4933]: E0122 05:46:15.249866 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-22 05:46:19.249855241 +0000 UTC m=+27.086980674 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.266874 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a721333-1932-4bb0-b384-c034492e59c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z88sj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:15Z 
is after 2025-08-24T17:21:41Z" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.301646 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\
\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:46:10Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 05:46:04.652334 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:46:04.654188 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-11537279/tls.crt::/tmp/serving-cert-11537279/tls.key\\\\\\\"\\\\nI0122 05:46:10.676992 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:46:10.680821 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:46:10.680851 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:46:10.680897 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:46:10.680911 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:46:10.690531 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 05:46:10.690586 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690615 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 05:46:10.690623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 05:46:10.690632 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 05:46:10.690639 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 05:46:10.691151 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:46:10.696387 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:15Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.325420 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.325483 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.325500 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.325522 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.325538 4933 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:15Z","lastTransitionTime":"2026-01-22T05:46:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.428196 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.428244 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.428254 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.428270 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.428284 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:15Z","lastTransitionTime":"2026-01-22T05:46:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.441736 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-26 18:21:13.05174383 +0000 UTC Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.490515 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.490546 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.490647 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:46:15 crc kubenswrapper[4933]: E0122 05:46:15.490757 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:46:15 crc kubenswrapper[4933]: E0122 05:46:15.490893 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:46:15 crc kubenswrapper[4933]: E0122 05:46:15.491023 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.530697 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.530732 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.530741 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.530756 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.530766 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:15Z","lastTransitionTime":"2026-01-22T05:46:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.633280 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.633323 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.633335 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.633352 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.633363 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:15Z","lastTransitionTime":"2026-01-22T05:46:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.688806 4933 generic.go:334] "Generic (PLEG): container finished" podID="83dfdde7-cd49-49e0-85a0-0165d464b2c7" containerID="4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691" exitCode=0 Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.688962 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" event={"ID":"83dfdde7-cd49-49e0-85a0-0165d464b2c7","Type":"ContainerDied","Data":"4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691"} Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.702201 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59b05e4bfee6067658a60220a411d49a83c5ca76039e31325ce7646cbbeeba3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71b7f697413549ee75514e57fcb6e2d93c6b7e349b1cf93e8fc8844c96036d60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:15Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.714950 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c47f771dea621131d66ab686e6c28f7f5aeb129834c0f8308f467c2bd1b871e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:15Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.727145 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:15Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.740453 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.740512 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.740525 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.740562 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.740578 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:15Z","lastTransitionTime":"2026-01-22T05:46:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.743168 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:46:10Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 05:46:04.652334 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:46:04.654188 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-11537279/tls.crt::/tmp/serving-cert-11537279/tls.key\\\\\\\"\\\\nI0122 05:46:10.676992 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:46:10.680821 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:46:10.680851 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:46:10.680897 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:46:10.680911 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:46:10.690531 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 05:46:10.690586 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690615 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 05:46:10.690623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 05:46:10.690632 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 05:46:10.690639 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 05:46:10.691151 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:46:10.696387 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:15Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.757141 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27ea089ef37f2af216c553e31f4852fe12489f919f95c9c6ec1c579babcedbb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:15Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.768367 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:15Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.782183 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:15Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.792987 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nvpgt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126473c5-96ff-4ca9-83c0-7548d7e219c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64f3d49f15e75b73f18118e60b0e8cdae794506cff25ef11ea3a217db91a4b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w6xgn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nvpgt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-22T05:46:15Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.811788 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a721333-1932-4bb0-b384-c034492e59c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\
"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\"
,\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19
ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z88sj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:15Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.821728 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f2db1d-40cb-4864-917b-3b99f69cdafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b19005c1dbe0b643170cfc21cda4a46a6ab3630efbf5198dc7c19882bb5ff6a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount
\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99fe3d10aa7ee2f1de1f5f23ed8c4c7ac9efe69ce9e366f02e2e8efca2c017e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zfnsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:15Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.838823 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7r526" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2b23423-6793-44cd-b47e-dc4d25bbe3ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78e689c029c4c4b19873187eddbdcd40bbc62b4e93d9a120a801e04db7c56ecc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hfvwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7r526\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:15Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.844010 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.844042 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.844050 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.844064 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.844091 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:15Z","lastTransitionTime":"2026-01-22T05:46:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.854187 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jr6rw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a6b69e5a5d7420dbcda9a6a023402f27c1d584233c06bfb3d09ae7efb243e31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v4gxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"host
IP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jr6rw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:15Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.869510 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83dfdde7-cd49-49e0-85a0-0165d464b2c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q8l78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:15Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.946717 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.946776 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.946793 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.946814 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:15 crc kubenswrapper[4933]: I0122 05:46:15.946830 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:15Z","lastTransitionTime":"2026-01-22T05:46:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.048648 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.048692 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.048703 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.048720 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.048738 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:16Z","lastTransitionTime":"2026-01-22T05:46:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.151167 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.151211 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.151221 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.151236 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.151246 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:16Z","lastTransitionTime":"2026-01-22T05:46:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.253713 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.253748 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.253759 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.253774 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.253785 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:16Z","lastTransitionTime":"2026-01-22T05:46:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.357359 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.357421 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.357444 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.357474 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.357499 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:16Z","lastTransitionTime":"2026-01-22T05:46:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.442705 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-17 19:34:04.996681588 +0000 UTC
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.459320 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.459347 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.459355 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.459367 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.459376 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:16Z","lastTransitionTime":"2026-01-22T05:46:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.561455 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.561517 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.561530 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.561549 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.561561 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:16Z","lastTransitionTime":"2026-01-22T05:46:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.663980 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.664046 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.664066 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.664161 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.664178 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:16Z","lastTransitionTime":"2026-01-22T05:46:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.701180 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" event={"ID":"6a721333-1932-4bb0-b384-c034492e59c4","Type":"ContainerStarted","Data":"e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed"}
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.707683 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" event={"ID":"83dfdde7-cd49-49e0-85a0-0165d464b2c7","Type":"ContainerStarted","Data":"a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6"}
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.721804 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f2db1d-40cb-4864-917b-3b99f69cdafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b19005c1dbe0b643170cfc21cda4a46a6ab3630efbf5198dc7c19882bb5ff6a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99fe3d10aa7ee2f1de1f5f23ed8c4c7ac9efe69ce9e366f02e2e8efca2c017e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/k
ubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zfnsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:16Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.735863 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7r526" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2b23423-6793-44cd-b47e-dc4d25bbe3ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78e689c029c4c4b19873187eddbdcd40bbc62b4e93d9a120a801e04db7c56ecc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hfvwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7r526\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:16Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:16 crc 
kubenswrapper[4933]: I0122 05:46:16.749824 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jr6rw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a6b69e5a5d7420dbcda9a6a023402f27c1d584233c06bfb3d09ae7efb243e31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v4gxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\
"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jr6rw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:16Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.782287 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.782376 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.782400 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.782429 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.782452 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:16Z","lastTransitionTime":"2026-01-22T05:46:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.791415 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83dfdde7-cd49-49e0-85a0-0165d464b2c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q8l78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:16Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.820421 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59b05e4bfee6067658a60220a411d49a83c5ca76039e31325ce7646cbbeeba3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71b7f697413549ee75514e57fcb6e2d93c6b7e349b1cf93e8fc8844c96036d60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:16Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.838810 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c47f771dea621131d66ab686e6c28f7f5aeb129834c0f8308f467c2bd1b871e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:16Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.852784 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:16Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.866942 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:16Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.878199 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nvpgt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126473c5-96ff-4ca9-83c0-7548d7e219c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64f3d49f15e75b73f18118e60b0e8cdae794506cff25ef11ea3a217db91a4b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w6xgn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nvpgt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-22T05:46:16Z is after 2025-08-24T17:21:41Z"
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.890891 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.891021 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.891041 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.891303 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.891336 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:16Z","lastTransitionTime":"2026-01-22T05:46:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.898645 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.901745 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.904745 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a721333-1932-4bb0-b384-c034492e59c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z88sj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:16Z 
is after 2025-08-24T17:21:41Z" Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.906488 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.924051 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operato
r@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:46:10Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 05:46:04.652334 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:46:04.654188 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-11537279/tls.crt::/tmp/serving-cert-11537279/tls.key\\\\\\\"\\\\nI0122 05:46:10.676992 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:46:10.680821 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:46:10.680851 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:46:10.680897 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:46:10.680911 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:46:10.690531 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 05:46:10.690586 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690615 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 05:46:10.690623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 05:46:10.690632 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 05:46:10.690639 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 05:46:10.691151 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:46:10.696387 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:16Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.936681 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27ea089ef37f2af216c553e31f4852fe12489f919f95c9c6ec1c579babcedbb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:16Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.952750 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:16Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.969886 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59b05e4bfee6067658a60220a411d49a83c5ca76039e31325ce7646cbbeeba3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71b7f697413549ee75514e57fcb6e2d93c6b7e349b1cf93e8fc8844c96036d60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{
\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:16Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.984269 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c47f771dea621131d66ab686e6c28f7f5aeb129834c0f8308f467c2bd1b871e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:16Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.994128 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.994159 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.994168 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 
05:46:16.994182 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.994191 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:16Z","lastTransitionTime":"2026-01-22T05:46:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:16 crc kubenswrapper[4933]: I0122 05:46:16.998832 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:16Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.009267 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88329ed7-908d-4584-abfb-6c24f9a764f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5e260f75dcd418c5e606ef0ce7c06aa8db34b655acdc7ed4d7540f3ed59d873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ad10cfd00a2bd6dab0704baa3988eab6b7b97812ef8a6074b9bc2c2a03df731\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"
2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://401030b22a7ca1989952ec1bf2d9d7d8a5b78e0cba875dcd6b1d2fe4f2548c54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ada618a16d850d1ea940debe486a3836a49697a4742426d29bddb32d496a9d61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:17Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.024129 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27ea089ef37f2af216c553e31f4852fe12489f919f95c9c6ec1c579babcedbb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:17Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.035893 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:17Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.047425 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:17Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.056973 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nvpgt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126473c5-96ff-4ca9-83c0-7548d7e219c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64f3d49f15e75b73f18118e60b0e8cdae794506cff25ef11ea3a217db91a4b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w6xgn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nvpgt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-22T05:46:17Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.075591 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a721333-1932-4bb0-b384-c034492e59c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\
"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\"
,\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19
ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z88sj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:17Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.091114 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:46:10Z\\\",\\\"message\\\":\\\"falling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 05:46:04.652334 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:46:04.654188 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-11537279/tls.crt::/tmp/serving-cert-11537279/tls.key\\\\\\\"\\\\nI0122 05:46:10.676992 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:46:10.680821 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:46:10.680851 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:46:10.680897 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:46:10.680911 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:46:10.690531 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 05:46:10.690586 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690615 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 05:46:10.690623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 05:46:10.690632 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 05:46:10.690639 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 05:46:10.691151 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:46:10.696387 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:17Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.097601 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.097637 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.097648 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.097664 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.097679 4933 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:17Z","lastTransitionTime":"2026-01-22T05:46:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.106356 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f2db1d-40cb-4864-917b-3b99f69cdafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b19005c1dbe0b643170cfc21cda4a46a6ab3630efbf5198dc7c19882bb5ff6a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99fe3d10aa7ee2f1de1f5f23ed8c4c7ac9efe69ce9e366f02e2e8efca2c017e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\
",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zfnsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:17Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.118152 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7r526" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2b23423-6793-44cd-b47e-dc4d25bbe3ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78e689c029c4c4b19873187eddbdcd40bbc62b4e93d9a120a801e04db7c56ecc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hfvwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7r526\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:17Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.133118 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83dfdde7-cd49-49e0-85a0-0165d464b2c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mo
untPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q8l78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:17Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.146576 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jr6rw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a6b69e5a5d7420dbcda9a6a023402f27c1d584233c06bfb3d09ae7efb243e31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v4gxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jr6rw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:17Z is after 2025-08-24T17:21:41Z"
Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.200138 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.200267 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.200279 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.200292 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.200302 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:17Z","lastTransitionTime":"2026-01-22T05:46:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.302713 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.302771 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.302782 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.302800 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.302811 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:17Z","lastTransitionTime":"2026-01-22T05:46:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.405759 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.406135 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.406147 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.406165 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.406176 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:17Z","lastTransitionTime":"2026-01-22T05:46:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.443263 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-14 04:28:55.928946477 +0000 UTC
Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.490131 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.490144 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 22 05:46:17 crc kubenswrapper[4933]: E0122 05:46:17.490308 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 22 05:46:17 crc kubenswrapper[4933]: E0122 05:46:17.490398 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.490633 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 22 05:46:17 crc kubenswrapper[4933]: E0122 05:46:17.491720 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.508716 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.508751 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.508760 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.508773 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.508782 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:17Z","lastTransitionTime":"2026-01-22T05:46:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.611221 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.611270 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.611288 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.611314 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.611334 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:17Z","lastTransitionTime":"2026-01-22T05:46:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.712940 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.713006 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.713023 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.713045 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.713062 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:17Z","lastTransitionTime":"2026-01-22T05:46:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.714825 4933 generic.go:334] "Generic (PLEG): container finished" podID="83dfdde7-cd49-49e0-85a0-0165d464b2c7" containerID="a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6" exitCode=0
Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.714867 4933 generic.go:334] "Generic (PLEG): container finished" podID="83dfdde7-cd49-49e0-85a0-0165d464b2c7" containerID="7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986" exitCode=0
Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.714926 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" event={"ID":"83dfdde7-cd49-49e0-85a0-0165d464b2c7","Type":"ContainerDied","Data":"a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6"}
Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.715016 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" event={"ID":"83dfdde7-cd49-49e0-85a0-0165d464b2c7","Type":"ContainerDied","Data":"7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986"}
Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.737972 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f2db1d-40cb-4864-917b-3b99f69cdafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b19005c1dbe0b643170cfc21cda4a46a6ab3630efbf5198dc7c19882bb5ff6a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99fe3d10aa7ee2f1de1f5f23ed8c4c7ac9efe69ce9e366f02e2e8efca2c017e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zfnsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:17Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.752813 4933 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-7r526" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2b23423-6793-44cd-b47e-dc4d25bbe3ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78e689c029c4c4b19873187eddbdcd40bbc62b4e93d9a120a801e04db7c56ecc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hfvwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7r526\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:17Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.778264 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jr6rw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a6b69e5a5d7420dbcda9a6a023402f27c1d584233c06bfb3d09ae7efb243e31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v4gxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jr6rw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:17Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.798301 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83dfdde7-cd49-49e0-85a0-0165d464b2c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\
"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed
8ebaae57fb3cbadee691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q8l78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:17Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.814035 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59b05e4bfee6067658a60220a411d49a83c5ca76039e31325ce7646cbbeeba3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71b7f697413549ee75514e57fcb6e2d93c6b7e349b1cf93e8fc8844c96036d60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:17Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.816157 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.816184 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.816194 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.816210 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.816222 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:17Z","lastTransitionTime":"2026-01-22T05:46:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.826532 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c47f771dea621131d66ab686e6c28f7f5aeb129834c0f8308f467c2bd1b871e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:17Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.843344 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to 
patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:17Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.868551 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a721333-1932-4bb0-b384-c034492e59c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:12
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z88sj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:17Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.885037 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-ap
iserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:46:10Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 05:46:04.652334 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:46:04.654188 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-11537279/tls.crt::/tmp/serving-cert-11537279/tls.key\\\\\\\"\\\\nI0122 05:46:10.676992 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:46:10.680821 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:46:10.680851 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:46:10.680897 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:46:10.680911 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:46:10.690531 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 05:46:10.690586 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690615 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 05:46:10.690623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 05:46:10.690632 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 05:46:10.690639 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 05:46:10.691151 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:46:10.696387 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:17Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.897638 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88329ed7-908d-4584-abfb-6c24f9a764f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5e260f75dcd418c5e606ef0ce7c06aa8db34b655acdc7ed4d7540f3ed59d873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ad10cfd00a2bd6dab0704baa3988eab6b7b97812ef8a6074b9bc2c2a03df731\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://401030b22a7ca1989952ec1bf2d9d7d8a5b78e0cba875dcd6b1d2fe4f2548c54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ada618a16d850d1ea940debe486a3836a49697a4742426d29bddb32d496a9d61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:17Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.909177 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27ea089ef37f2af216c553e31f4852fe12489f919f95c9c6ec1c579babcedbb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:17Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.919330 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.919363 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.919372 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.919385 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.919396 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:17Z","lastTransitionTime":"2026-01-22T05:46:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.923778 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:17Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.937818 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:17Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:17 crc kubenswrapper[4933]: I0122 05:46:17.947317 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nvpgt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126473c5-96ff-4ca9-83c0-7548d7e219c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64f3d49f15e75b73f18118e60b0e8cdae794506cff25ef11ea3a217db91a4b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w6xgn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nvpgt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-22T05:46:17Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.021491 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.021518 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.021525 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.021536 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.021544 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:18Z","lastTransitionTime":"2026-01-22T05:46:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.124129 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.124177 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.124187 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.124205 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.124217 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:18Z","lastTransitionTime":"2026-01-22T05:46:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.491255 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 07:35:59.537487264 +0000 UTC Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.493529 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.493591 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.493606 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.493973 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.494062 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:18Z","lastTransitionTime":"2026-01-22T05:46:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.597205 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.597273 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.597289 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.597309 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.597324 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:18Z","lastTransitionTime":"2026-01-22T05:46:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.701330 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.701385 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.701401 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.701423 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.701440 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:18Z","lastTransitionTime":"2026-01-22T05:46:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.723168 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" event={"ID":"6a721333-1932-4bb0-b384-c034492e59c4","Type":"ContainerStarted","Data":"21bc7bfef528e4377c153a93235d9860e5ceb63659dc39bb29fb587a0f5b6b11"} Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.723642 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.723689 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.728267 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" event={"ID":"83dfdde7-cd49-49e0-85a0-0165d464b2c7","Type":"ContainerStarted","Data":"e3de35a667d3620666041da8bcc357baea90e882fba4d33ef6c9f38906699cac"} Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.740513 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f2db1d-40cb-4864-917b-3b99f69cdafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b19005c1dbe0b643170cfc21cda4a46a6ab3630efbf5198dc7c19882bb5ff6a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99fe3d10aa7ee2f1de1f5f23ed8c4c7ac9efe69ce9e366f02e2e8efca2c017e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zfnsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:18Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.752896 4933 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.753526 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7r526" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2b23423-6793-44cd-b47e-dc4d25bbe3ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78e689c029c4c4b19873187eddbdcd40bbc62b4e93d9a120a801e04db7c56ecc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hfvwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7r526\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:18Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.755437 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.768061 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jr6rw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a6b69e5a5d7420dbcda9a6a023402f27c1d584233c06bfb3d09ae7efb243e31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v4gxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jr6rw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:18Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.782574 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83dfdde7-cd49-49e0-85a0-0165d464b2c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\
"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed
8ebaae57fb3cbadee691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q8l78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:18Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.798209 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59b05e4bfee6067658a60220a411d49a83c5ca76039e31325ce7646cbbeeba3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71b7f697413549ee75514e57fcb6e2d93c6b7e349b1cf93e8fc8844c96036d60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:18Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.804231 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.804264 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.804280 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.804302 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.804320 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:18Z","lastTransitionTime":"2026-01-22T05:46:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.816210 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c47f771dea621131d66ab686e6c28f7f5aeb129834c0f8308f467c2bd1b871e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:18Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.831550 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to 
patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:18Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.859495 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a721333-1932-4bb0-b384-c034492e59c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers 
with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15\\\",\\\"image\\\":\\\"quay.
io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21bc7bfe
f528e4377c153a93235d9860e5ceb63659dc39bb29fb587a0f5b6b11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z88sj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:18Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.871626 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:46:10Z\\\",\\\"message\\\":\\\"falling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 05:46:04.652334 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:46:04.654188 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-11537279/tls.crt::/tmp/serving-cert-11537279/tls.key\\\\\\\"\\\\nI0122 05:46:10.676992 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:46:10.680821 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:46:10.680851 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:46:10.680897 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:46:10.680911 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:46:10.690531 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 05:46:10.690586 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690615 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 05:46:10.690623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 05:46:10.690632 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 05:46:10.690639 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 05:46:10.691151 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:46:10.696387 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:18Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.885360 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88329ed7-908d-4584-abfb-6c24f9a764f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5e260f75dcd418c5e606ef0ce7c06aa8db34b655acdc7ed4d7540f3ed59d873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ad10cfd00a2bd6dab0704baa3988eab6b7b97812ef8a6074b9bc2c2a03df731\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://401030b22a7ca1989952ec1bf2d9d7d8a5b78e0cba875dcd6b1d2fe4f2548c54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ada618a16d850d1ea940debe486a3836a49697a4742426d29bddb32d496a9d61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:18Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.897982 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27ea089ef37f2af216c553e31f4852fe12489f919f95c9c6ec1c579babcedbb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:18Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.906805 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.906844 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.906855 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.906873 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.906889 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:18Z","lastTransitionTime":"2026-01-22T05:46:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.912639 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:18Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.925029 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:18Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.938031 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nvpgt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126473c5-96ff-4ca9-83c0-7548d7e219c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64f3d49f15e75b73f18118e60b0e8cdae794506cff25ef11ea3a217db91a4b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w6xgn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nvpgt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-22T05:46:18Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.952519 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:18Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.963694 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nvpgt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"126473c5-96ff-4ca9-83c0-7548d7e219c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64f3d49f15e75b73f18118e60b0e8cdae794506cff25ef11ea3a217db91a4b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w6xgn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nvpgt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:18Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.981776 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a721333-1932-4bb0-b384-c034492e59c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21bc7bfef528e4377c153a93235d9860e5ceb63659dc39bb29fb587a0f5b6b11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPat
h\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z88sj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:18Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:18 crc kubenswrapper[4933]: I0122 05:46:18.996943 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:46:10Z\\\",\\\"message\\\":\\\"falling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 05:46:04.652334 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:46:04.654188 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-11537279/tls.crt::/tmp/serving-cert-11537279/tls.key\\\\\\\"\\\\nI0122 05:46:10.676992 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:46:10.680821 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:46:10.680851 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:46:10.680897 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:46:10.680911 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:46:10.690531 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 05:46:10.690586 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690615 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 05:46:10.690623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 05:46:10.690632 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 05:46:10.690639 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 05:46:10.691151 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:46:10.696387 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:18Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.009869 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.010116 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.010155 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.010184 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.010205 4933 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:19Z","lastTransitionTime":"2026-01-22T05:46:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.011794 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88329ed7-908d-4584-abfb-6c24f9a764f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5e260f75dcd418c5e606ef0ce7c06aa8db34b655acdc7ed4d7540f3ed59d873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ad10cfd00a2bd6dab0704baa3988eab6b7b97812ef8a6074b9bc2c2a03df731\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://401030b22a7ca1989952ec1bf2d9d7d8a5b78e0cba875dcd6b1d2fe4f2548c54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ada618a16d850d1ea940debe486a3836a49697a4742426d29bddb32d496a9d61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:19Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.024048 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27ea089ef37f2af216c553e31f4852fe12489f919f95c9c6ec1c579babcedbb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:19Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.035154 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:19Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.046328 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f2db1d-40cb-4864-917b-3b99f69cdafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b19005c1dbe0b643170cfc21cda4a46a6ab3630efbf5198dc7c19882bb5ff6a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99fe3d10aa7ee2f1de1f5f23ed8c4c7ac9efe69ce9e366f02e2e8efca2c017e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\
":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zfnsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:19Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.057221 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7r526" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2b23423-6793-44cd-b47e-dc4d25bbe3ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78e689c029c4c4b19873187eddbdcd40bbc62b4e93d9a120a801e04db7c56ecc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hfvwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7r526\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:19Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.069725 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jr6rw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a6b69e5a5d7420dbcda9a6a023402f27c1d584233c06bfb3d09ae7efb243e31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access
-v4gxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jr6rw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:19Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.081322 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83dfdde7-cd49-49e0-85a0-0165d464b2c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e3de35a667d3620666041da8bcc357baea90e882fba4d33ef6c9f38906699cac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\"
:\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0
c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running
\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q8l78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:19Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.091965 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59b05e4bfee6067658a60220a411d49a83c5ca76039e31325ce7646cbbeeba3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71b7f697413549ee75514e57fcb6e2d93c6b7e349b1cf93e8fc8844c96036d60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling 
webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:19Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.103578 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c47f771dea621131d66ab686e6c28f7f5aeb129834c0f8308f467c2bd1b871e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:19Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.113420 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.113452 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.113463 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.113478 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.113489 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:19Z","lastTransitionTime":"2026-01-22T05:46:19Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.115790 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:19Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.217200 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.217257 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.217269 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.217288 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.217301 4933 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:19Z","lastTransitionTime":"2026-01-22T05:46:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.299694 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:46:19 crc kubenswrapper[4933]: E0122 05:46:19.299895 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:46:27.299870023 +0000 UTC m=+35.136995376 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.299946 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.299985 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.300020 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.300051 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:46:19 crc kubenswrapper[4933]: E0122 05:46:19.300146 4933 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object 
"openshift-network-console"/"networking-console-plugin" not registered Jan 22 05:46:19 crc kubenswrapper[4933]: E0122 05:46:19.300190 4933 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 22 05:46:19 crc kubenswrapper[4933]: E0122 05:46:19.300216 4933 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 22 05:46:19 crc kubenswrapper[4933]: E0122 05:46:19.300255 4933 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 22 05:46:19 crc kubenswrapper[4933]: E0122 05:46:19.300262 4933 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 22 05:46:19 crc kubenswrapper[4933]: E0122 05:46:19.300193 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 05:46:27.30018178 +0000 UTC m=+35.137307133 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 22 05:46:19 crc kubenswrapper[4933]: E0122 05:46:19.300274 4933 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 22 05:46:19 crc kubenswrapper[4933]: E0122 05:46:19.300274 4933 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:46:19 crc kubenswrapper[4933]: E0122 05:46:19.300287 4933 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:46:19 crc kubenswrapper[4933]: E0122 05:46:19.300300 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 05:46:27.300280992 +0000 UTC m=+35.137406345 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 22 05:46:19 crc kubenswrapper[4933]: E0122 05:46:19.300315 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-22 05:46:27.300308513 +0000 UTC m=+35.137433866 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:46:19 crc kubenswrapper[4933]: E0122 05:46:19.300350 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-22 05:46:27.300328963 +0000 UTC m=+35.137454346 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.319869 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.319950 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.319967 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.319994 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.320012 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:19Z","lastTransitionTime":"2026-01-22T05:46:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.423517 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.423566 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.423581 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.423601 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.423617 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:19Z","lastTransitionTime":"2026-01-22T05:46:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.490053 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.490091 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:46:19 crc kubenswrapper[4933]: E0122 05:46:19.490221 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.490335 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:46:19 crc kubenswrapper[4933]: E0122 05:46:19.490479 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:46:19 crc kubenswrapper[4933]: E0122 05:46:19.490567 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.492374 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-09 01:02:30.682243237 +0000 UTC Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.526856 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.526905 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.529800 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.529998 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.530046 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:19Z","lastTransitionTime":"2026-01-22T05:46:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.633121 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.633177 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.633195 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.633245 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.633264 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:19Z","lastTransitionTime":"2026-01-22T05:46:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.731787 4933 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.735994 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.736038 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.736054 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.736071 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.736102 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:19Z","lastTransitionTime":"2026-01-22T05:46:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.839359 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.839447 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.839500 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.839525 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.839542 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:19Z","lastTransitionTime":"2026-01-22T05:46:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.942589 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.942637 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.942650 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.942667 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:19 crc kubenswrapper[4933]: I0122 05:46:19.942679 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:19Z","lastTransitionTime":"2026-01-22T05:46:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.045917 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.045990 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.046032 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.046063 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.046120 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:20Z","lastTransitionTime":"2026-01-22T05:46:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.149030 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.149089 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.149100 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.149118 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.149128 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:20Z","lastTransitionTime":"2026-01-22T05:46:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.251715 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.251765 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.251779 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.251797 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.251808 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:20Z","lastTransitionTime":"2026-01-22T05:46:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.354363 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.354410 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.354421 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.354436 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.354447 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:20Z","lastTransitionTime":"2026-01-22T05:46:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.456857 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.456895 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.456908 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.456926 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.456938 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:20Z","lastTransitionTime":"2026-01-22T05:46:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.492802 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-01 23:52:19.733797863 +0000 UTC Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.559371 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.559421 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.559436 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.559455 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.559467 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:20Z","lastTransitionTime":"2026-01-22T05:46:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.661583 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.661635 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.661650 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.661668 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.661682 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:20Z","lastTransitionTime":"2026-01-22T05:46:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.734367 4933 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.763728 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.763769 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.763779 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.763797 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.763809 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:20Z","lastTransitionTime":"2026-01-22T05:46:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.866760 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.866832 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.866847 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.866865 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.866879 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:20Z","lastTransitionTime":"2026-01-22T05:46:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.969933 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.969969 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.969977 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.969992 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:20 crc kubenswrapper[4933]: I0122 05:46:20.970001 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:20Z","lastTransitionTime":"2026-01-22T05:46:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.073644 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.073707 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.073730 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.073759 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.073781 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:21Z","lastTransitionTime":"2026-01-22T05:46:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.176226 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.176272 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.176290 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.176316 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.176334 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:21Z","lastTransitionTime":"2026-01-22T05:46:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.278596 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.278638 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.278648 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.278662 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.278682 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:21Z","lastTransitionTime":"2026-01-22T05:46:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.381396 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.381438 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.381451 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.381468 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.381480 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:21Z","lastTransitionTime":"2026-01-22T05:46:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.485134 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.485170 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.485181 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.485201 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.485211 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:21Z","lastTransitionTime":"2026-01-22T05:46:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.490322 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.490366 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:46:21 crc kubenswrapper[4933]: E0122 05:46:21.490423 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:46:21 crc kubenswrapper[4933]: E0122 05:46:21.490538 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.490331 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:46:21 crc kubenswrapper[4933]: E0122 05:46:21.490614 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.493449 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-03 03:30:29.077844248 +0000 UTC Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.588565 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.588615 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.588628 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.588647 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.588659 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:21Z","lastTransitionTime":"2026-01-22T05:46:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.691513 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.691585 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.691598 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.691617 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.691634 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:21Z","lastTransitionTime":"2026-01-22T05:46:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.738972 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-z88sj_6a721333-1932-4bb0-b384-c034492e59c4/ovnkube-controller/0.log" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.742426 4933 generic.go:334] "Generic (PLEG): container finished" podID="6a721333-1932-4bb0-b384-c034492e59c4" containerID="21bc7bfef528e4377c153a93235d9860e5ceb63659dc39bb29fb587a0f5b6b11" exitCode=1 Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.742478 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" event={"ID":"6a721333-1932-4bb0-b384-c034492e59c4","Type":"ContainerDied","Data":"21bc7bfef528e4377c153a93235d9860e5ceb63659dc39bb29fb587a0f5b6b11"} Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.743266 4933 scope.go:117] "RemoveContainer" containerID="21bc7bfef528e4377c153a93235d9860e5ceb63659dc39bb29fb587a0f5b6b11" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.761159 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jr6rw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a6b69e5a5d7420dbcda9a6a023402f27c1d584233c06bfb3d09ae7efb243e31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v4gxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jr6rw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:21Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.783511 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83dfdde7-cd49-49e0-85a0-0165d464b2c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e3de35a667d3620666041da8bcc357baea90e882fba4d33ef6c9f38906699cac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q8l78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-22T05:46:21Z is after 2025-08-24T17:21:41Z"
Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.795245 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.795315 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.795331 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.795351 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.795367 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:21Z","lastTransitionTime":"2026-01-22T05:46:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.801578 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59b05e4bfee6067658a60220a411d49a83c5ca76039e31325ce7646cbbeeba3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71b7f697413549ee75514e57fcb6e2d93c6b7e349b1cf93e8fc8844c96036d60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\"
:\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:21Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.815503 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c47f771dea621131d66ab686e6c28f7f5aeb129834c0f8308f467c2bd1b871e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:21Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.830275 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:21Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.849629 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:46:10Z\\\",\\\"message\\\":\\\"falling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 05:46:04.652334 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:46:04.654188 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-11537279/tls.crt::/tmp/serving-cert-11537279/tls.key\\\\\\\"\\\\nI0122 05:46:10.676992 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:46:10.680821 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:46:10.680851 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:46:10.680897 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:46:10.680911 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:46:10.690531 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 05:46:10.690586 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690615 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 05:46:10.690623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 05:46:10.690632 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 05:46:10.690639 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 05:46:10.691151 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:46:10.696387 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:21Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.865534 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88329ed7-908d-4584-abfb-6c24f9a764f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5e260f75dcd418c5e606ef0ce7c06aa8db34b655acdc7ed4d7540f3ed59d873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ad10cfd00a2bd6dab0704baa3988eab6b7b97812ef8a6074b9bc2c2a03df731\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://401030b22a7ca1989952ec1bf2d9d7d8a5b78e0cba875dcd6b1d2fe4f2548c54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ada618a16d850d1ea940debe486a3836a49697a4742426d29bddb32d496a9d61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:21Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.887261 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27ea089ef37f2af216c553e31f4852fe12489f919f95c9c6ec1c579babcedbb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:21Z is after 2025-08-24T17:21:41Z"
Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.899203 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.899658 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.899764 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.900048 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.900181 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:21Z","lastTransitionTime":"2026-01-22T05:46:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.903428 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:21Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.917557 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:21Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.928580 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nvpgt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126473c5-96ff-4ca9-83c0-7548d7e219c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64f3d49f15e75b73f18118e60b0e8cdae794506cff25ef11ea3a217db91a4b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w6xgn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nvpgt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-22T05:46:21Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.946377 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a721333-1932-4bb0-b384-c034492e59c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\
"containerID\\\":\\\"cri-o://8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21bc7bfef528e4377c153a93235d9860e5ceb63659dc39bb29fb587a0f5b6b11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21bc7bfef528e4377c153a93235d9860e5ceb63659dc39bb29fb587a0f5b6b11\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:46:21Z\\\",\\\"message\\\":\\\" 6209 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 05:46:20.903955 6209 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 05:46:20.903543 6209 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0122 05:46:20.904349 6209 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0122 05:46:20.904405 6209 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI0122 05:46:20.904460 6209 factory.go:656] Stopping watch factory\\\\nI0122 05:46:20.904508 6209 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0122 05:46:20.904297 6209 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 05:46:20.904599 6209 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 05:46:20.904684 6209 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0122 05:46:20.904750 6209 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from 
github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b3
5aa5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z88sj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:21Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.962498 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f2db1d-40cb-4864-917b-3b99f69cdafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b19005c1dbe0b643170cfc21cda4a46a6ab3630efbf5198dc7c19882bb5ff6a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/servicea
ccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99fe3d10aa7ee2f1de1f5f23ed8c4c7ac9efe69ce9e366f02e2e8efca2c017e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zfnsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:21Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:21 crc kubenswrapper[4933]: I0122 05:46:21.973555 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7r526" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2b23423-6793-44cd-b47e-dc4d25bbe3ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78e689c029c4c4b19873187eddbdcd40bbc62b4e93d9a120a801e04db7c56ecc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hfvwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7r526\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:21Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.004058 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.004142 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.004162 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.004188 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.004206 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:22Z","lastTransitionTime":"2026-01-22T05:46:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.009339 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.107608 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.107652 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.107666 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.107684 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.107697 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:22Z","lastTransitionTime":"2026-01-22T05:46:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.209756 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.209802 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.209813 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.209829 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.209840 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:22Z","lastTransitionTime":"2026-01-22T05:46:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.311832 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.312223 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.312362 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.312514 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.312840 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:22Z","lastTransitionTime":"2026-01-22T05:46:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.415600 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.415884 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.415978 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.416093 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.416234 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:22Z","lastTransitionTime":"2026-01-22T05:46:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.493670 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-11 02:02:34.009015232 +0000 UTC Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.505820 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jr6rw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a6b69e5a5d7420dbcda9a6a023402f27c1d584233c06bfb3d09ae7efb243e31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"k
ube-api-access-v4gxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jr6rw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.518770 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.518815 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.518826 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.518844 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.518857 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:22Z","lastTransitionTime":"2026-01-22T05:46:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.522990 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83dfdde7-cd49-49e0-85a0-0165d464b2c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e3de35a667d3620666041da8bcc357baea90e882fba4d33ef6c9f38906699cac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q8l78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.545805 4933 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59b05e4bfee6067658a60220a411d49a83c5ca76039e31325ce7646cbbeeba3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71b7f697413549ee75514e57fcb6e2d93c6b7e349b1cf93e8fc8844c96036d60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.562481 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c47f771dea621131d66ab686e6c28f7f5aeb129834c0f8308f467c2bd1b871e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.579901 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.593601 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.606810 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nvpgt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126473c5-96ff-4ca9-83c0-7548d7e219c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64f3d49f15e75b73f18118e60b0e8cdae794506cff25ef11ea3a217db91a4b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w6xgn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nvpgt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-22T05:46:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.620576 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.620617 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.620631 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.620647 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.620660 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:22Z","lastTransitionTime":"2026-01-22T05:46:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.631237 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a721333-1932-4bb0-b384-c034492e59c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21bc7bfef528e4377c153a93235d9860e5ceb636
59dc39bb29fb587a0f5b6b11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21bc7bfef528e4377c153a93235d9860e5ceb63659dc39bb29fb587a0f5b6b11\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:46:21Z\\\",\\\"message\\\":\\\" 6209 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 05:46:20.903955 6209 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 05:46:20.903543 6209 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0122 05:46:20.904349 6209 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0122 05:46:20.904405 6209 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI0122 05:46:20.904460 6209 factory.go:656] Stopping watch factory\\\\nI0122 05:46:20.904508 6209 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0122 05:46:20.904297 6209 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 05:46:20.904599 6209 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 05:46:20.904684 6209 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0122 05:46:20.904750 6209 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from 
github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b3
5aa5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z88sj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.644732 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:46:10Z\\\",\\\"message\\\":\\\"falling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 05:46:04.652334 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:46:04.654188 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-11537279/tls.crt::/tmp/serving-cert-11537279/tls.key\\\\\\\"\\\\nI0122 05:46:10.676992 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:46:10.680821 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:46:10.680851 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:46:10.680897 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:46:10.680911 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:46:10.690531 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 05:46:10.690586 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690615 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 05:46:10.690623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 05:46:10.690632 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 05:46:10.690639 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 05:46:10.691151 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:46:10.696387 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.658655 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88329ed7-908d-4584-abfb-6c24f9a764f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5e260f75dcd418c5e606ef0ce7c06aa8db34b655acdc7ed4d7540f3ed59d873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ad10cfd00a2bd6dab0704baa3988eab6b7b97812ef8a6074b9bc2c2a03df731\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://401030b22a7ca1989952ec1bf2d9d7d8a5b78e0cba875dcd6b1d2fe4f2548c54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ada618a16d850d1ea940debe486a3836a49697a4742426d29bddb32d496a9d61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.669409 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27ea089ef37f2af216c553e31f4852fe12489f919f95c9c6ec1c579babcedbb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.682489 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.693482 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f2db1d-40cb-4864-917b-3b99f69cdafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b19005c1dbe0b643170cfc21cda4a46a6ab3630efbf5198dc7c19882bb5ff6a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99fe3d10aa7ee2f1de1f5f23ed8c4c7ac9efe69ce9e366f02e2e8efca2c017e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zfnsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.707772 4933 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-7r526" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2b23423-6793-44cd-b47e-dc4d25bbe3ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78e689c029c4c4b19873187eddbdcd40bbc62b4e93d9a120a801e04db7c56ecc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hfvwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7r526\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.723828 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.723860 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.723870 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.723884 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.723893 4933 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:22Z","lastTransitionTime":"2026-01-22T05:46:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.746565 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-z88sj_6a721333-1932-4bb0-b384-c034492e59c4/ovnkube-controller/0.log" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.749583 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" event={"ID":"6a721333-1932-4bb0-b384-c034492e59c4","Type":"ContainerStarted","Data":"894c889f406ee573cc2056089469b86d80c2b1de365691296bd7125d706522db"} Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.749948 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.763347 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88329ed7-908d-4584-abfb-6c24f9a764f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5e260f75dcd418c5e606ef0ce7c06aa8db34b655acdc7ed4d7540f3ed59d873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ad10cfd00a2bd6dab0704baa3988eab6b7b97812ef8a6074b9bc2c2a03df731\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,
\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://401030b22a7ca1989952ec1bf2d9d7d8a5b78e0cba875dcd6b1d2fe4f2548c54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ada618a16d850d1ea940debe486a3836a49697a4742426d29bddb32d496a9d61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.777278 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27ea089ef37f2af216c553e31f4852fe12489f919f95c9c6ec1c579babcedbb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.789787 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.801460 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.809527 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nvpgt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126473c5-96ff-4ca9-83c0-7548d7e219c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64f3d49f15e75b73f18118e60b0e8cdae794506cff25ef11ea3a217db91a4b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w6xgn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nvpgt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-22T05:46:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.826733 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.826766 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.826774 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.826789 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.826799 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:22Z","lastTransitionTime":"2026-01-22T05:46:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.832588 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a721333-1932-4bb0-b384-c034492e59c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://894c889f406ee573cc2056089469b86d80c2b1de
365691296bd7125d706522db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21bc7bfef528e4377c153a93235d9860e5ceb63659dc39bb29fb587a0f5b6b11\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:46:21Z\\\",\\\"message\\\":\\\" 6209 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 05:46:20.903955 6209 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 05:46:20.903543 6209 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0122 05:46:20.904349 6209 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0122 05:46:20.904405 6209 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI0122 05:46:20.904460 6209 factory.go:656] Stopping watch factory\\\\nI0122 05:46:20.904508 6209 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0122 05:46:20.904297 6209 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 05:46:20.904599 6209 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 05:46:20.904684 6209 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0122 05:46:20.904750 6209 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from 
github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:18Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126
.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z88sj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.849429 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:46:10Z\\\",\\\"message\\\":\\\"falling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 05:46:04.652334 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:46:04.654188 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-11537279/tls.crt::/tmp/serving-cert-11537279/tls.key\\\\\\\"\\\\nI0122 05:46:10.676992 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:46:10.680821 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:46:10.680851 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:46:10.680897 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:46:10.680911 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:46:10.690531 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 05:46:10.690586 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690615 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 05:46:10.690623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 05:46:10.690632 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 05:46:10.690639 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 05:46:10.691151 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:46:10.696387 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.859334 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f2db1d-40cb-4864-917b-3b99f69cdafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b19005c1dbe0b643170cfc21cda4a46a6ab3630efbf5198dc7c19882bb5ff6a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99fe3d10aa7ee2f1de1f5f23ed8c4c7ac9efe69ce9e366f02e2e8efca2c017e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zfnsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.868995 4933 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-7r526" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2b23423-6793-44cd-b47e-dc4d25bbe3ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78e689c029c4c4b19873187eddbdcd40bbc62b4e93d9a120a801e04db7c56ecc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hfvwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7r526\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.886278 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83dfdde7-cd49-49e0-85a0-0165d464b2c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e3de35a667d3620666041da8bcc357baea90e882fba4d33ef6c9f38906699cac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q8l78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.898428 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jr6rw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a6b69e5a5d7420dbcda9a6a023402f27c1d584233c06bfb3d09ae7efb243e31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v4gxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jr6rw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.909200 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59b05e4bfee6067658a60220a411d49a83c5ca76039e31325ce7646cbbeeba3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71b7f697413549ee75514e57fcb6e2d93c6b7e349b1cf93e8fc8844c96036d60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:22Z is after 
2025-08-24T17:21:41Z" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.918501 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c47f771dea621131d66ab686e6c28f7f5aeb129834c0f8308f467c2bd1b871e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.928762 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.928811 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.928828 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.928851 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.928868 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:22Z","lastTransitionTime":"2026-01-22T05:46:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:22 crc kubenswrapper[4933]: I0122 05:46:22.935243 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.030726 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.030780 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.030797 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.030818 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.030836 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:23Z","lastTransitionTime":"2026-01-22T05:46:23Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.133559 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.133607 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.133618 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.133633 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.133645 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:23Z","lastTransitionTime":"2026-01-22T05:46:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.236257 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.236306 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.236321 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.236343 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.236358 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:23Z","lastTransitionTime":"2026-01-22T05:46:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
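The repeated NodeNotReady entries above all trace back to the same readiness probe: kubelet reports NetworkReady=false while /etc/kubernetes/cni/net.d/ holds no CNI network configuration. A minimal sketch of that directory check in Go (illustrative only, not kubelet's actual implementation; the path comes from the message above, hasCNIConfig is a made-up helper, and the extension filter follows the usual libcni convention):

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    // hasCNIConfig reports whether dir holds at least one CNI network
    // configuration file (.conf, .conflist, or .json by libcni convention).
    // Illustrative helper, not kubelet's real readiness check.
    func hasCNIConfig(dir string) (bool, error) {
        entries, err := os.ReadDir(dir)
        if err != nil {
            return false, err
        }
        for _, e := range entries {
            if e.IsDir() {
                continue
            }
            switch filepath.Ext(e.Name()) {
            case ".conf", ".conflist", ".json":
                return true, nil
            }
        }
        return false, nil
    }

    func main() {
        ok, err := hasCNIConfig("/etc/kubernetes/cni/net.d")
        if err != nil || !ok {
            // Mirrors the condition reason seen in the log entries above.
            fmt.Println("NetworkReady=false reason:NetworkPluginNotReady")
            return
        }
        fmt.Println("NetworkReady=true")
    }

Once the network plugin (here OVN-Kubernetes) writes its configuration into that directory, the Ready condition can flip back.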
Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.339757 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.339852 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.339871 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.339896 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.339914 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:23Z","lastTransitionTime":"2026-01-22T05:46:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.442732 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.442776 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.442787 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.442803 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.442815 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:23Z","lastTransitionTime":"2026-01-22T05:46:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.490142 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.490184 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.490242 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 22 05:46:23 crc kubenswrapper[4933]: E0122 05:46:23.490335 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
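Every failed status patch in this log carries the same root cause: the node-identity webhook at 127.0.0.1:9743 serves a certificate whose NotAfter (2025-08-24T17:21:41Z) is months behind the node's clock (2026-01-22). A standalone Go sketch of the validity check that fails during the TLS handshake, assuming a PEM-encoded certificate file (the webhook-cert.pem path is hypothetical):

    package main

    import (
        "crypto/x509"
        "encoding/pem"
        "fmt"
        "log"
        "os"
        "time"
    )

    func main() {
        // Hypothetical path; substitute the webhook's serving certificate.
        data, err := os.ReadFile("webhook-cert.pem")
        if err != nil {
            log.Fatal(err)
        }
        block, _ := pem.Decode(data)
        if block == nil {
            log.Fatal("no PEM block found")
        }
        cert, err := x509.ParseCertificate(block.Bytes)
        if err != nil {
            log.Fatal(err)
        }
        now := time.Now()
        switch {
        case now.After(cert.NotAfter):
            // Same shape as the handshake error reported in the log.
            fmt.Printf("certificate has expired: current time %s is after %s\n",
                now.UTC().Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
        case now.Before(cert.NotBefore):
            fmt.Println("certificate is not yet valid")
        default:
            fmt.Println("certificate is within its validity window")
        }
    }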
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:46:23 crc kubenswrapper[4933]: E0122 05:46:23.490430 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:46:23 crc kubenswrapper[4933]: E0122 05:46:23.490553 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.494766 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 05:17:05.543207496 +0000 UTC Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.546270 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.546374 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.546395 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.546421 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.546441 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:23Z","lastTransitionTime":"2026-01-22T05:46:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.649225 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.649275 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.649291 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.649310 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.649324 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:23Z","lastTransitionTime":"2026-01-22T05:46:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.751866 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.753191 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.753407 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.753639 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.753861 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:23Z","lastTransitionTime":"2026-01-22T05:46:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
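Each "Node became not ready" entry above serializes the Ready condition as the JSON object shown after condition=. A dependency-free Go sketch that reproduces that payload, with a local struct standing in for k8s.io/api/core/v1.NodeCondition:

    package main

    import (
        "encoding/json"
        "fmt"
        "time"
    )

    // nodeCondition mirrors the fields printed by the setters.go:603 entries;
    // it is a local stand-in for k8s.io/api/core/v1.NodeCondition.
    type nodeCondition struct {
        Type               string `json:"type"`
        Status             string `json:"status"`
        LastHeartbeatTime  string `json:"lastHeartbeatTime"`
        LastTransitionTime string `json:"lastTransitionTime"`
        Reason             string `json:"reason"`
        Message            string `json:"message"`
    }

    func main() {
        now := time.Now().UTC().Format(time.RFC3339)
        c := nodeCondition{
            Type:               "Ready",
            Status:             "False",
            LastHeartbeatTime:  now,
            LastTransitionTime: now,
            Reason:             "KubeletNotReady",
            Message:            "container runtime network not ready: NetworkReady=false",
        }
        b, err := json.Marshal(c)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(b))
    }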
Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.755753 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-z88sj_6a721333-1932-4bb0-b384-c034492e59c4/ovnkube-controller/1.log"
Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.756404 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-z88sj_6a721333-1932-4bb0-b384-c034492e59c4/ovnkube-controller/0.log"
Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.760782 4933 generic.go:334] "Generic (PLEG): container finished" podID="6a721333-1932-4bb0-b384-c034492e59c4" containerID="894c889f406ee573cc2056089469b86d80c2b1de365691296bd7125d706522db" exitCode=1
Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.760837 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" event={"ID":"6a721333-1932-4bb0-b384-c034492e59c4","Type":"ContainerDied","Data":"894c889f406ee573cc2056089469b86d80c2b1de365691296bd7125d706522db"}
Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.760900 4933 scope.go:117] "RemoveContainer" containerID="21bc7bfef528e4377c153a93235d9860e5ceb63659dc39bb29fb587a0f5b6b11"
Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.761974 4933 scope.go:117] "RemoveContainer" containerID="894c889f406ee573cc2056089469b86d80c2b1de365691296bd7125d706522db"
Jan 22 05:46:23 crc kubenswrapper[4933]: E0122 05:46:23.762347 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-z88sj_openshift-ovn-kubernetes(6a721333-1932-4bb0-b384-c034492e59c4)\"" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" podUID="6a721333-1932-4bb0-b384-c034492e59c4"
Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.786425 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jr6rw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a6b69e5a5d7420dbcda9a6a023402f27c1d584233c06bfb3d09ae7efb243e31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v4gxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jr6rw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:23Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.811713 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83dfdde7-cd49-49e0-85a0-0165d464b2c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e3de35a667d3620666041da8bcc357baea90e882fba4d33ef6c9f38906699cac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q8l78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-22T05:46:23Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.831378 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c47f771dea621131d66ab686e6c28f7f5aeb129834c0f8308f467c2bd1b871e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:23Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.855607 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:23Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.858036 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.858113 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.858141 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.858172 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.858194 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:23Z","lastTransitionTime":"2026-01-22T05:46:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.874158 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59b05e4bfee6067658a60220a411d49a83c5ca76039e31325ce7646cbbeeba3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71b7f697413549ee75514e57fcb6e2d93c6b7e349b1cf93e8fc8844c96036d60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:23Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.895512 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27ea089ef37f2af216c553e31f4852fe12489f919f95c9c6ec1c579babcedbb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:23Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.931386 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:23Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.962809 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.962808 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:23Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.962858 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.963005 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.963025 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.963040 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:23Z","lastTransitionTime":"2026-01-22T05:46:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:23 crc kubenswrapper[4933]: I0122 05:46:23.981655 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nvpgt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126473c5-96ff-4ca9-83c0-7548d7e219c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64f3d49f15e75b73f18118e60b0e8cdae794506cff25ef11ea3a217db91a4b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w6xgn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nvpgt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:23Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.002176 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a721333-1932-4bb0-b384-c034492e59c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://894c889f406ee573cc2056089469b86d80c2b1de365691296bd7125d706522db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21bc7bfef528e4377c153a93235d9860e5ceb63659dc39bb29fb587a0f5b6b11\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:46:21Z\\\",\\\"message\\\":\\\" 6209 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 05:46:20.903955 6209 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 05:46:20.903543 6209 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0122 05:46:20.904349 6209 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0122 05:46:20.904405 6209 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI0122 05:46:20.904460 6209 factory.go:656] Stopping watch factory\\\\nI0122 05:46:20.904508 6209 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0122 05:46:20.904297 6209 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 05:46:20.904599 6209 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 05:46:20.904684 6209 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0122 05:46:20.904750 6209 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:18Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://894c889f406ee573cc2056089469b86d80c2b1de365691296bd7125d706522db\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:46:22Z\\\",\\\"message\\\":\\\".org/owner\\\\\\\":\\\\\\\"openshift-marketplace/marketplace-operator-metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, 
Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.53\\\\\\\", Port:8383, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.53\\\\\\\", Port:8081, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF0122 05:46:22.695952 6325 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha2
56:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z88sj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:23Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.013941 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:46:10Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 05:46:04.652334 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:46:04.654188 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-11537279/tls.crt::/tmp/serving-cert-11537279/tls.key\\\\\\\"\\\\nI0122 05:46:10.676992 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:46:10.680821 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:46:10.680851 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:46:10.680897 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:46:10.680911 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:46:10.690531 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 05:46:10.690586 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690615 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 05:46:10.690623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 05:46:10.690632 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 05:46:10.690639 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 05:46:10.691151 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:46:10.696387 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.024379 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88329ed7-908d-4584-abfb-6c24f9a764f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5e260f75dcd418c5e606ef0ce7c06aa8db34b655acdc7ed4d7540f3ed59d873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ad10cfd00a2bd6dab0704baa3988eab6b7b97812ef8a6074b9bc2c2a03df731\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://401030b22a7ca1989952ec1bf2d9d7d8a5b78e0cba875dcd6b1d2fe4f2548c54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ada618a16d850d1ea940debe486a3836a49697a4742426d29bddb32d496a9d61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.034155 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f2db1d-40cb-4864-917b-3b99f69cdafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b19005c1dbe0b643170cfc21cda4a46a6ab3630efbf5198dc7c19882bb5ff6a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99fe3d10aa7ee2f1de
1f5f23ed8c4c7ac9efe69ce9e366f02e2e8efca2c017e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zfnsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.042258 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7r526" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2b23423-6793-44cd-b47e-dc4d25bbe3ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78e689c029c4c4b19873187eddbdcd40bbc62b4e93d9a120a801e04db7c56ecc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hfvwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Di
sabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7r526\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.065286 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.065337 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.065353 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.065376 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.065392 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:24Z","lastTransitionTime":"2026-01-22T05:46:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.124462 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-28bzv"] Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.125361 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-28bzv" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.130351 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.131495 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.147660 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jr6rw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a6b69e5a5d7420dbcda9a6a023402f27c1d584233c06bfb3d09ae7efb243e31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name
\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v4gxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jr6rw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.147749 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/b9a82407-aef2-4209-bb3f-6c89e11387e3-env-overrides\") pod \"ovnkube-control-plane-749d76644c-28bzv\" (UID: \"b9a82407-aef2-4209-bb3f-6c89e11387e3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-28bzv" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.147817 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2vkg8\" (UniqueName: \"kubernetes.io/projected/b9a82407-aef2-4209-bb3f-6c89e11387e3-kube-api-access-2vkg8\") pod \"ovnkube-control-plane-749d76644c-28bzv\" (UID: \"b9a82407-aef2-4209-bb3f-6c89e11387e3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-28bzv" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.147868 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/b9a82407-aef2-4209-bb3f-6c89e11387e3-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-28bzv\" (UID: \"b9a82407-aef2-4209-bb3f-6c89e11387e3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-28bzv" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.147893 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/b9a82407-aef2-4209-bb3f-6c89e11387e3-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-28bzv\" (UID: \"b9a82407-aef2-4209-bb3f-6c89e11387e3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-28bzv" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.161267 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83dfdde7-cd49-49e0-85a0-0165d464b2c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e3de35a667d3620666041da8bcc357baea90e882fba4d33ef6c9f38906699cac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q8l78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.167908 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.167956 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:24 crc 
kubenswrapper[4933]: I0122 05:46:24.167965 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.167979 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.167988 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:24Z","lastTransitionTime":"2026-01-22T05:46:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.173832 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59b05e4bfee6067658a60220a411d49a83c5ca76039e31325ce7646cbbeeba3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71b7f697413549ee75514e57fcb6e2d93c6b7e349b1cf93e8fc8844c96036d60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/va
r/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.186791 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c47f771dea621131d66ab686e6c28f7f5aeb129834c0f8308f467c2bd1b871e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.208577 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.219872 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nvpgt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"126473c5-96ff-4ca9-83c0-7548d7e219c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64f3d49f15e75b73f18118e60b0e8cdae794506cff25ef11ea3a217db91a4b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w6xgn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nvpgt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.237268 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a721333-1932-4bb0-b384-c034492e59c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://894c889f406ee573cc2056089469b86d80c2b1de365691296bd7125d706522db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21bc7bfef528e4377c153a93235d9860e5ceb63659dc39bb29fb587a0f5b6b11\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:46:21Z\\\",\\\"message\\\":\\\" 6209 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 05:46:20.903955 6209 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 05:46:20.903543 6209 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0122 05:46:20.904349 6209 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0122 05:46:20.904405 6209 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI0122 05:46:20.904460 6209 factory.go:656] Stopping watch factory\\\\nI0122 05:46:20.904508 6209 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0122 05:46:20.904297 6209 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 05:46:20.904599 6209 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 05:46:20.904684 6209 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0122 05:46:20.904750 6209 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:18Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://894c889f406ee573cc2056089469b86d80c2b1de365691296bd7125d706522db\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:46:22Z\\\",\\\"message\\\":\\\".org/owner\\\\\\\":\\\\\\\"openshift-marketplace/marketplace-operator-metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.53\\\\\\\", Port:8383, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.53\\\\\\\", Port:8081, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF0122 05:46:22.695952 6325 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller 
initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overri
des\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z88sj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.249160 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/b9a82407-aef2-4209-bb3f-6c89e11387e3-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-28bzv\" (UID: \"b9a82407-aef2-4209-bb3f-6c89e11387e3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-28bzv" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.249213 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/b9a82407-aef2-4209-bb3f-6c89e11387e3-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-28bzv\" (UID: \"b9a82407-aef2-4209-bb3f-6c89e11387e3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-28bzv" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.249253 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/b9a82407-aef2-4209-bb3f-6c89e11387e3-env-overrides\") pod \"ovnkube-control-plane-749d76644c-28bzv\" (UID: \"b9a82407-aef2-4209-bb3f-6c89e11387e3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-28bzv" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.249297 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2vkg8\" (UniqueName: \"kubernetes.io/projected/b9a82407-aef2-4209-bb3f-6c89e11387e3-kube-api-access-2vkg8\") pod \"ovnkube-control-plane-749d76644c-28bzv\" 
(UID: \"b9a82407-aef2-4209-bb3f-6c89e11387e3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-28bzv" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.249892 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/b9a82407-aef2-4209-bb3f-6c89e11387e3-env-overrides\") pod \"ovnkube-control-plane-749d76644c-28bzv\" (UID: \"b9a82407-aef2-4209-bb3f-6c89e11387e3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-28bzv" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.250281 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/b9a82407-aef2-4209-bb3f-6c89e11387e3-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-28bzv\" (UID: \"b9a82407-aef2-4209-bb3f-6c89e11387e3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-28bzv" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.253726 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:46:10Z\\\",\\\"message\\\":\\\"falling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 05:46:04.652334 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:46:04.654188 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-11537279/tls.crt::/tmp/serving-cert-11537279/tls.key\\\\\\\"\\\\nI0122 05:46:10.676992 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:46:10.680821 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:46:10.680851 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:46:10.680897 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:46:10.680911 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:46:10.690531 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 05:46:10.690586 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690615 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 05:46:10.690623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 05:46:10.690632 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 05:46:10.690639 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 05:46:10.691151 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:46:10.696387 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.255258 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/b9a82407-aef2-4209-bb3f-6c89e11387e3-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-28bzv\" (UID: \"b9a82407-aef2-4209-bb3f-6c89e11387e3\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-28bzv" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.265459 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2vkg8\" (UniqueName: \"kubernetes.io/projected/b9a82407-aef2-4209-bb3f-6c89e11387e3-kube-api-access-2vkg8\") pod \"ovnkube-control-plane-749d76644c-28bzv\" (UID: \"b9a82407-aef2-4209-bb3f-6c89e11387e3\") " 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-28bzv" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.266162 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88329ed7-908d-4584-abfb-6c24f9a764f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5e260f75dcd418c5e606ef0ce7c06aa8db34b655acdc7ed4d7540f3ed59d873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ad10cfd00a2bd6dab0704baa3988eab6b7b97812ef8a6074b9bc2c2a03df731\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://401030b22a7ca1989952ec1bf2d9d7d8a5b78e0cba875dcd6b1d2fe4f2548c54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\
"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ada618a16d850d1ea940debe486a3836a49697a4742426d29bddb32d496a9d61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.270295 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.270324 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.270334 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.270349 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.270360 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:24Z","lastTransitionTime":"2026-01-22T05:46:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.284486 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27ea089ef37f2af216c553e31f4852fe12489f919f95c9c6ec1c579babcedbb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.296321 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.310523 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.321093 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f2db1d-40cb-4864-917b-3b99f69cdafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b19005c1dbe0b643170cfc21cda4a46a6ab3630efbf5198dc7c19882bb5ff6a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99fe3d10aa7ee2f1de1f5f23ed8c4c7ac9efe69ce9e366f02e2e8efca2c017e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zfnsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.333451 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7r526" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2b23423-6793-44cd-b47e-dc4d25bbe3ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78e689c029c4c4b19873187eddbdcd40bbc62b4e93d9a120a801e04db7c56ecc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hfvwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7r526\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.347311 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-28bzv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9a82407-aef2-4209-bb3f-6c89e11387e3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vkg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vkg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-28bzv\": Internal error occurred: 
failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.372873 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.372935 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.372947 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.372964 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.372976 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:24Z","lastTransitionTime":"2026-01-22T05:46:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.443862 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-28bzv" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.475718 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.475765 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.475777 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.476512 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.476555 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:24Z","lastTransitionTime":"2026-01-22T05:46:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.495292 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 18:36:42.118525531 +0000 UTC Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.578799 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.578841 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.578854 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.578873 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.578888 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:24Z","lastTransitionTime":"2026-01-22T05:46:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.595889 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.595932 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.595943 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.595960 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.595972 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:24Z","lastTransitionTime":"2026-01-22T05:46:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:24 crc kubenswrapper[4933]: E0122 05:46:24.615489 4933 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c8062a48-506a-465a-9977-93e8530bae49\\\",\\\"systemUUID\\\":\\\"d621d350-6f7c-490e-a47d-b396db235280\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.619584 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.619633 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.619650 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.619672 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.619687 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:24Z","lastTransitionTime":"2026-01-22T05:46:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:24 crc kubenswrapper[4933]: E0122 05:46:24.639782 4933 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c8062a48-506a-465a-9977-93e8530bae49\\\",\\\"systemUUID\\\":\\\"d621d350-6f7c-490e-a47d-b396db235280\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.643582 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.643635 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.643651 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.643671 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.643687 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:24Z","lastTransitionTime":"2026-01-22T05:46:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:24 crc kubenswrapper[4933]: E0122 05:46:24.658651 4933 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c8062a48-506a-465a-9977-93e8530bae49\\\",\\\"systemUUID\\\":\\\"d621d350-6f7c-490e-a47d-b396db235280\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.664651 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.664852 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.664949 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.665206 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.665298 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:24Z","lastTransitionTime":"2026-01-22T05:46:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:24 crc kubenswrapper[4933]: E0122 05:46:24.678321 4933 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c8062a48-506a-465a-9977-93e8530bae49\\\",\\\"systemUUID\\\":\\\"d621d350-6f7c-490e-a47d-b396db235280\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.682599 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.682630 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.682642 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.682658 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.682670 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:24Z","lastTransitionTime":"2026-01-22T05:46:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:24 crc kubenswrapper[4933]: E0122 05:46:24.697095 4933 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c8062a48-506a-465a-9977-93e8530bae49\\\",\\\"systemUUID\\\":\\\"d621d350-6f7c-490e-a47d-b396db235280\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:24 crc kubenswrapper[4933]: E0122 05:46:24.697233 4933 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.699658 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.699711 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.699726 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.699747 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.699762 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:24Z","lastTransitionTime":"2026-01-22T05:46:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.768625 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-z88sj_6a721333-1932-4bb0-b384-c034492e59c4/ovnkube-controller/1.log" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.773189 4933 scope.go:117] "RemoveContainer" containerID="894c889f406ee573cc2056089469b86d80c2b1de365691296bd7125d706522db" Jan 22 05:46:24 crc kubenswrapper[4933]: E0122 05:46:24.773366 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-z88sj_openshift-ovn-kubernetes(6a721333-1932-4bb0-b384-c034492e59c4)\"" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" podUID="6a721333-1932-4bb0-b384-c034492e59c4" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.773957 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-28bzv" event={"ID":"b9a82407-aef2-4209-bb3f-6c89e11387e3","Type":"ContainerStarted","Data":"d8a9dc50bf9eb2abe7332195d4883af4a3b269da0f32c2ef9ee6072886d45245"} Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.790659 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jr6rw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a6b69e5a5d7420dbcda9a6a023402f27c1d584233c06bfb3d09ae7efb243e31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v4gxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jr6rw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.801931 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.801957 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.801964 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.801978 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.801986 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:24Z","lastTransitionTime":"2026-01-22T05:46:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.811141 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83dfdde7-cd49-49e0-85a0-0165d464b2c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e3de35a667d3620666041da8bcc357baea90e882fba4d33ef6c9f38906699cac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerSta
tuses\\\":[{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:
46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-c
ni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q8l78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.828142 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59b05e4bfee6067658a60220a411d49a83c5ca76039e31325ce7646cbbeeba3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71b7f697413549ee75514e57fcb6e2d93c6b7e349b1cf93e8fc8844c96036d60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\"
:\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.837990 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c47f771dea621131d66ab686e6c28f7f5aeb129834c0f8308f467c2bd1b871e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.848729 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.865323 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a721333-1932-4bb0-b384-c034492e59c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://894c889f406ee573cc2056089469b86d80c2b1de
365691296bd7125d706522db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://894c889f406ee573cc2056089469b86d80c2b1de365691296bd7125d706522db\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:46:22Z\\\",\\\"message\\\":\\\".org/owner\\\\\\\":\\\\\\\"openshift-marketplace/marketplace-operator-metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.53\\\\\\\", Port:8383, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.53\\\\\\\", Port:8081, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF0122 05:46:22.695952 6325 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:21Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-z88sj_openshift-ovn-kubernetes(6a721333-1932-4bb0-b384-c034492e59c4)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z88sj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.886863 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:46:10Z\\\",\\\"message\\\":\\\"falling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 05:46:04.652334 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:46:04.654188 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-11537279/tls.crt::/tmp/serving-cert-11537279/tls.key\\\\\\\"\\\\nI0122 05:46:10.676992 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:46:10.680821 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:46:10.680851 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:46:10.680897 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:46:10.680911 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:46:10.690531 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 05:46:10.690586 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690615 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 05:46:10.690623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 05:46:10.690632 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 05:46:10.690639 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 05:46:10.691151 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:46:10.696387 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.896690 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88329ed7-908d-4584-abfb-6c24f9a764f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5e260f75dcd418c5e606ef0ce7c06aa8db34b655acdc7ed4d7540f3ed59d873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ad10cfd00a2bd6dab0704baa3988eab6b7b97812ef8a6074b9bc2c2a03df731\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://401030b22a7ca1989952ec1bf2d9d7d8a5b78e0cba875dcd6b1d2fe4f2548c54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ada618a16d850d1ea940debe486a3836a49697a4742426d29bddb32d496a9d61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.904209 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.904247 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.904257 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.904272 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.904283 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:24Z","lastTransitionTime":"2026-01-22T05:46:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.910760 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27ea089ef37f2af216c553e31f4852fe12489f919f95c9c6ec1c579babcedbb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.920785 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.929932 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.938135 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nvpgt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126473c5-96ff-4ca9-83c0-7548d7e219c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64f3d49f15e75b73f18118e60b0e8cdae794506cff25ef11ea3a217db91a4b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w6xgn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nvpgt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-22T05:46:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.949004 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f2db1d-40cb-4864-917b-3b99f69cdafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b19005c1dbe0b643170cfc21cda4a46a6ab3630efbf5198dc7c19882bb5ff6a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99fe3d10aa7ee2f1de1f5f23ed8c4c7ac9efe69ce9e366f02e2e8efca2c017e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zfnsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.963715 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7r526" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2b23423-6793-44cd-b47e-dc4d25bbe3ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78e689c029c4c4b19873187eddbdcd40bbc62b4e93d9a120a801e04db7c56ecc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hfvwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7r526\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:24 crc kubenswrapper[4933]: I0122 05:46:24.973551 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-28bzv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9a82407-aef2-4209-bb3f-6c89e11387e3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vkg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vkg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-28bzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.006004 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:25 crc 
kubenswrapper[4933]: I0122 05:46:25.006028 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.006039 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.006054 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.006064 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:25Z","lastTransitionTime":"2026-01-22T05:46:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.108999 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.109053 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.109070 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.109120 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.109139 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:25Z","lastTransitionTime":"2026-01-22T05:46:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.212156 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.212220 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.212237 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.212261 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.212278 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:25Z","lastTransitionTime":"2026-01-22T05:46:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.314912 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.315002 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.315022 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.315047 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.315065 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:25Z","lastTransitionTime":"2026-01-22T05:46:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.419020 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.419099 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.419120 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.419142 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.419159 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:25Z","lastTransitionTime":"2026-01-22T05:46:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.489828 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.489961 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.490142 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 22 05:46:25 crc kubenswrapper[4933]: E0122 05:46:25.490140 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 22 05:46:25 crc kubenswrapper[4933]: E0122 05:46:25.490394 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 22 05:46:25 crc kubenswrapper[4933]: E0122 05:46:25.490647 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.496385 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-28 14:55:02.279340868 +0000 UTC
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.526123 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.526208 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.526236 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.526268 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.526303 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:25Z","lastTransitionTime":"2026-01-22T05:46:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.631773 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-t8rgm"]
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.632570 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm"
Jan 22 05:46:25 crc kubenswrapper[4933]: E0122 05:46:25.632655 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.638726 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.638780 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.638799 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.638824 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.638843 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:25Z","lastTransitionTime":"2026-01-22T05:46:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.652573 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59b05e4bfee6067658a60220a411d49a83c5ca76039e31325ce7646cbbeeba3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71b7f697413549ee75514e57fcb6e2d93c6b7e349b1cf93e8fc8844c96036d60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name
\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:25Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.667666 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4ghgv\" (UniqueName: \"kubernetes.io/projected/0902347a-c5e2-4891-812b-cfe6efc32261-kube-api-access-4ghgv\") pod \"network-metrics-daemon-t8rgm\" (UID: \"0902347a-c5e2-4891-812b-cfe6efc32261\") " pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.667757 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0902347a-c5e2-4891-812b-cfe6efc32261-metrics-certs\") pod \"network-metrics-daemon-t8rgm\" (UID: \"0902347a-c5e2-4891-812b-cfe6efc32261\") " pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.670526 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c47f771dea621131d66ab686e6c28f7f5aeb129834c0f8308f467c2bd1b871e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:25Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.690175 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:25Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.709242 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:25Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.730120 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nvpgt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126473c5-96ff-4ca9-83c0-7548d7e219c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64f3d49f15e75b73f18118e60b0e8cdae794506cff25ef11ea3a217db91a4b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w6xgn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nvpgt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-22T05:46:25Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.742243 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.742313 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.742326 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.742349 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.742364 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:25Z","lastTransitionTime":"2026-01-22T05:46:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.761688 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a721333-1932-4bb0-b384-c034492e59c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://894c889f406ee573cc2056089469b86d80c2b1de
365691296bd7125d706522db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://894c889f406ee573cc2056089469b86d80c2b1de365691296bd7125d706522db\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:46:22Z\\\",\\\"message\\\":\\\".org/owner\\\\\\\":\\\\\\\"openshift-marketplace/marketplace-operator-metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.53\\\\\\\", Port:8383, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.53\\\\\\\", Port:8081, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF0122 05:46:22.695952 6325 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:21Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-z88sj_openshift-ovn-kubernetes(6a721333-1932-4bb0-b384-c034492e59c4)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z88sj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:25Z is after 2025-08-24T17:21:41Z"
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.768936 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0902347a-c5e2-4891-812b-cfe6efc32261-metrics-certs\") pod \"network-metrics-daemon-t8rgm\" (UID: \"0902347a-c5e2-4891-812b-cfe6efc32261\") " pod="openshift-multus/network-metrics-daemon-t8rgm"
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.769052 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4ghgv\" (UniqueName: \"kubernetes.io/projected/0902347a-c5e2-4891-812b-cfe6efc32261-kube-api-access-4ghgv\") pod \"network-metrics-daemon-t8rgm\" (UID: \"0902347a-c5e2-4891-812b-cfe6efc32261\") " pod="openshift-multus/network-metrics-daemon-t8rgm"
Jan 22 05:46:25 crc kubenswrapper[4933]: E0122 05:46:25.769330 4933 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 22 05:46:25 crc kubenswrapper[4933]: E0122 05:46:25.769475 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0902347a-c5e2-4891-812b-cfe6efc32261-metrics-certs podName:0902347a-c5e2-4891-812b-cfe6efc32261 nodeName:}" failed. No retries permitted until 2026-01-22 05:46:26.269435548 +0000 UTC m=+34.106560901 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0902347a-c5e2-4891-812b-cfe6efc32261-metrics-certs") pod "network-metrics-daemon-t8rgm" (UID: "0902347a-c5e2-4891-812b-cfe6efc32261") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.786508 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-28bzv" event={"ID":"b9a82407-aef2-4209-bb3f-6c89e11387e3","Type":"ContainerStarted","Data":"66a005229530dfbb93c94f5b93260aeb17f8852e46eed9c67d1558c04d1552a1"}
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.787870 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:46:10Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 05:46:04.652334 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:46:04.654188 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-11537279/tls.crt::/tmp/serving-cert-11537279/tls.key\\\\\\\"\\\\nI0122 05:46:10.676992 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:46:10.680821 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:46:10.680851 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:46:10.680897 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:46:10.680911 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:46:10.690531 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 05:46:10.690586 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690615 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 05:46:10.690623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 05:46:10.690632 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 05:46:10.690639 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 05:46:10.691151 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all 
endpoints registered and discovery information is complete\\\\nF0122 05:46:10.696387 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:25Z is after 2025-08-24T17:21:41Z"
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.799266 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4ghgv\" (UniqueName: \"kubernetes.io/projected/0902347a-c5e2-4891-812b-cfe6efc32261-kube-api-access-4ghgv\") pod \"network-metrics-daemon-t8rgm\" (UID: \"0902347a-c5e2-4891-812b-cfe6efc32261\") " pod="openshift-multus/network-metrics-daemon-t8rgm"
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.808523 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88329ed7-908d-4584-abfb-6c24f9a764f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5e260f75dcd418c5e606ef0ce7c06aa8db34b655acdc7ed4d7540f3ed59d873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ad10cfd00a2bd6dab0704baa3988eab6b7b97812ef8a6074b9bc2c2a03df731\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://401030b22a7ca1989952ec1bf2d9d7d8a5b78e0cba875dcd6b1d2fe4f2548c54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ada618a16d850d1ea940debe486a3836a49697a4742426d29bddb32d496a9d61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:25Z is after 2025-08-24T17:21:41Z"
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.830658 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27ea089ef37f2af216c553e31f4852fe12489f919f95c9c6ec1c579babcedbb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:25Z is after 2025-08-24T17:21:41Z"
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.845256 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.845333 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.845347 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.845373 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.845391 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:25Z","lastTransitionTime":"2026-01-22T05:46:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.851510 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:25Z is after 2025-08-24T17:21:41Z"
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.868278 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-28bzv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9a82407-aef2-4209-bb3f-6c89e11387e3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vkg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vkg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-28bzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:25Z is after 2025-08-24T17:21:41Z"
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.884302 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-t8rgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0902347a-c5e2-4891-812b-cfe6efc32261\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4ghgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4ghgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-t8rgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:25Z is after 2025-08-24T17:21:41Z"
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.904150 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f2db1d-40cb-4864-917b-3b99f69cdafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b19005c1dbe0b643170cfc21cda4a46a6ab3630efbf5198dc7c19882bb5ff6a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99fe3d10aa7ee2f1de1f5f23ed8c4c7ac9efe69ce9e366f02e2e8efca2c017e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zfnsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:25Z is after 2025-08-24T17:21:41Z"
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.919005 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7r526" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2b23423-6793-44cd-b47e-dc4d25bbe3ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78e689c029c4c4b19873187eddbdcd40bbc62b4e93d9a120a801e04db7c56ecc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hfvwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7r526\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:25Z is after 2025-08-24T17:21:41Z"
Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.935101 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jr6rw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a6b69e5a5d7420dbcda9a6a023402f27c1d584233c06bfb3d09ae7efb243e31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v4gxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jr6rw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:25Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.948940 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.949012 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.949028 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.949054 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.949066 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:25Z","lastTransitionTime":"2026-01-22T05:46:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:25 crc kubenswrapper[4933]: I0122 05:46:25.954575 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83dfdde7-cd49-49e0-85a0-0165d464b2c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e3de35a667d3620666041da8bcc357baea90e882fba4d33ef6c9f38906699cac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerSta
tuses\\\":[{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:
46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-c
ni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q8l78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:25Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.051770 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.051830 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.051849 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.051874 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.051894 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:26Z","lastTransitionTime":"2026-01-22T05:46:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.154444 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.154514 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.154535 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.154565 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.154587 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:26Z","lastTransitionTime":"2026-01-22T05:46:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.258845 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.259314 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.259346 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.259373 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.259391 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:26Z","lastTransitionTime":"2026-01-22T05:46:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.274206 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0902347a-c5e2-4891-812b-cfe6efc32261-metrics-certs\") pod \"network-metrics-daemon-t8rgm\" (UID: \"0902347a-c5e2-4891-812b-cfe6efc32261\") " pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:46:26 crc kubenswrapper[4933]: E0122 05:46:26.274417 4933 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 22 05:46:26 crc kubenswrapper[4933]: E0122 05:46:26.274526 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0902347a-c5e2-4891-812b-cfe6efc32261-metrics-certs podName:0902347a-c5e2-4891-812b-cfe6efc32261 nodeName:}" failed. No retries permitted until 2026-01-22 05:46:27.274496979 +0000 UTC m=+35.111622362 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0902347a-c5e2-4891-812b-cfe6efc32261-metrics-certs") pod "network-metrics-daemon-t8rgm" (UID: "0902347a-c5e2-4891-812b-cfe6efc32261") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.362270 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.362333 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.362350 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.362379 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.362397 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:26Z","lastTransitionTime":"2026-01-22T05:46:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.464629 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.464669 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.464680 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.464697 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.464708 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:26Z","lastTransitionTime":"2026-01-22T05:46:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.497388 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-15 02:42:37.337372896 +0000 UTC Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.566527 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.566558 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.566566 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.566578 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.566587 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:26Z","lastTransitionTime":"2026-01-22T05:46:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.669142 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.669195 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.669218 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.669237 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.669250 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:26Z","lastTransitionTime":"2026-01-22T05:46:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.772270 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.772921 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.773102 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.773243 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.773365 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:26Z","lastTransitionTime":"2026-01-22T05:46:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.801164 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-28bzv" event={"ID":"b9a82407-aef2-4209-bb3f-6c89e11387e3","Type":"ContainerStarted","Data":"5ad051b09250b7060a688905e48c62f57620af633bc9a1e302e91eb076c92981"} Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.820934 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f2db1d-40cb-4864-917b-3b99f69cdafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b19005c1dbe0b643170cfc21cda4a46a6ab3630efbf5198dc7c19882bb5ff6a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99fe3d10aa7ee2f1de1f5f23ed8c4c7ac9efe69ce9e366f02e2e8efca2c017e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zfnsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:26Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.833152 4933 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-7r526" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2b23423-6793-44cd-b47e-dc4d25bbe3ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78e689c029c4c4b19873187eddbdcd40bbc62b4e93d9a120a801e04db7c56ecc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hfvwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7r526\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:26Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.850991 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-28bzv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9a82407-aef2-4209-bb3f-6c89e11387e3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66a005229530dfbb93c94f5b93260aeb17f8852e46eed9c67d1558c04d1552a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vkg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ad051b09250b7060a688905e48c62f57620af633bc9a1e302e91eb076c92981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vkg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-28bzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:26Z is after 2025-08-24T17:21:41Z" Jan 22 
05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.865839 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-t8rgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0902347a-c5e2-4891-812b-cfe6efc32261\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4ghgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4ghgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-t8rgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:26Z is after 2025-08-24T17:21:41Z"
Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.875790 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.875869 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.875904 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.875934 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.875956 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:26Z","lastTransitionTime":"2026-01-22T05:46:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.884919 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jr6rw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a6b69e5a5d7420dbcda9a6a023402f27c1d584233c06bfb3d09ae7efb243e31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\
\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v4gxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jr6rw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:26Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.907127 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83dfdde7-cd49-49e0-85a0-0165d464b2c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e3de35a667d3620666041da8bcc357baea90e882fba4d33ef6c9f38906699cac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"image\\\
":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPat
h\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\"
:\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q8l78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:26Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.924871 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59b05e4bfee6067658a60220a411d49a83c5ca76039e31325ce7646cbbeeba3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71b7f697413549ee75514e57fcb6e2d93c6b7e349b1cf93e8fc8844c96036d60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt
\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:26Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.942287 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c47f771dea621131d66ab686e6c28f7f5aeb129834c0f8308f467c2bd1b871e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:26Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.955411 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:26Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.978292 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.978343 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.978355 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.978370 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.978383 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:26Z","lastTransitionTime":"2026-01-22T05:46:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:26 crc kubenswrapper[4933]: I0122 05:46:26.985672 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a721333-1932-4bb0-b384-c034492e59c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://894c889f406ee573cc2056089469b86d80c2b1de365691296bd7125d706522db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://894c889f406ee573cc2056089469b86d80c2b1de365691296bd7125d706522db\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:46:22Z\\\",\\\"message\\\":\\\".org/owner\\\\\\\":\\\\\\\"openshift-marketplace/marketplace-operator-metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.53\\\\\\\", Port:8383, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.53\\\\\\\", Port:8081, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF0122 05:46:22.695952 6325 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:21Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s 
restarting failed container=ovnkube-controller pod=ovnkube-node-z88sj_openshift-ovn-kubernetes(6a721333-1932-4bb0-b384-c034492e59c4)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\
\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z88sj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:26Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.006801 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:46:10Z\\\",\\\"message\\\":\\\"falling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 05:46:04.652334 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:46:04.654188 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-11537279/tls.crt::/tmp/serving-cert-11537279/tls.key\\\\\\\"\\\\nI0122 05:46:10.676992 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:46:10.680821 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:46:10.680851 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:46:10.680897 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:46:10.680911 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:46:10.690531 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 05:46:10.690586 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690615 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 05:46:10.690623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 05:46:10.690632 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 05:46:10.690639 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 05:46:10.691151 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:46:10.696387 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:27Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.025052 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88329ed7-908d-4584-abfb-6c24f9a764f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5e260f75dcd418c5e606ef0ce7c06aa8db34b655acdc7ed4d7540f3ed59d873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ad10cfd00a2bd6dab0704baa3988eab6b7b97812ef8a6074b9bc2c2a03df731\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://401030b22a7ca1989952ec1bf2d9d7d8a5b78e0cba875dcd6b1d2fe4f2548c54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ada618a16d850d1ea940debe486a3836a49697a4742426d29bddb32d496a9d61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:27Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.043567 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27ea089ef37f2af216c553e31f4852fe12489f919f95c9c6ec1c579babcedbb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:27Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.062303 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:27Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.071213 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.080721 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.080717 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:27Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.080792 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.080818 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.080844 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.080863 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:27Z","lastTransitionTime":"2026-01-22T05:46:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.095887 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nvpgt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126473c5-96ff-4ca9-83c0-7548d7e219c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64f3d49f15e75b73f18118e60b0e8cdae794506cff25ef11ea3a217db91a4b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w6xgn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nvpgt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:27Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.117945 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jr6rw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a6b69e5a5d7420dbcda9a6a023402f27c1d584233c06bfb3d09ae7efb243e31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v4gxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jr6rw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:27Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.138970 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83dfdde7-cd49-49e0-85a0-0165d464b2c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e3de35a667d3620666041da8bcc357baea90e882fba4d33ef6c9f38906699cac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q8l78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-22T05:46:27Z is after 2025-08-24T17:21:41Z"
Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.152824 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c47f771dea621131d66ab686e6c28f7f5aeb129834c0f8308f467c2bd1b871e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:27Z is after 2025-08-24T17:21:41Z"
Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.170975 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:27Z is after 2025-08-24T17:21:41Z"
Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.183587 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.183627 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.183638 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.183653 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.183663 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:27Z","lastTransitionTime":"2026-01-22T05:46:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.188104 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59b05e4bfee6067658a60220a411d49a83c5ca76039e31325ce7646cbbeeba3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71b7f697413549ee75514e57fcb6e2d93c6b7e349b1cf93e8fc8844c96036d60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:27Z is after 2025-08-24T17:21:41Z"
Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.207687 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27ea089ef37f2af216c553e31f4852fe12489f919f95c9c6ec1c579babcedbb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:27Z is after 2025-08-24T17:21:41Z"
Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.221371 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:27Z is after 2025-08-24T17:21:41Z"
Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.234790 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:27Z is after 2025-08-24T17:21:41Z"
Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.249386 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nvpgt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126473c5-96ff-4ca9-83c0-7548d7e219c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64f3d49f15e75b73f18118e60b0e8cdae794506cff25ef11ea3a217db91a4b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w6xgn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nvpgt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:27Z is after 2025-08-24T17:21:41Z"
Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.278634 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a721333-1932-4bb0-b384-c034492e59c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://894c889f406ee573cc2056089469b86d80c2b1de365691296bd7125d706522db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://894c889f406ee573cc2056089469b86d80c2b1de365691296bd7125d706522db\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:46:22Z\\\",\\\"message\\\":\\\".org/owner\\\\\\\":\\\\\\\"openshift-marketplace/marketplace-operator-metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.53\\\\\\\", Port:8383, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.53\\\\\\\", Port:8081, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF0122 05:46:22.695952 6325 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:21Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-z88sj_openshift-ovn-kubernetes(6a721333-1932-4bb0-b384-c034492e59c4)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z88sj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:27Z is after 2025-08-24T17:21:41Z"
Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.285630 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.285689 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.285706 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.285731 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.285750 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:27Z","lastTransitionTime":"2026-01-22T05:46:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.290060 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0902347a-c5e2-4891-812b-cfe6efc32261-metrics-certs\") pod \"network-metrics-daemon-t8rgm\" (UID: \"0902347a-c5e2-4891-812b-cfe6efc32261\") " pod="openshift-multus/network-metrics-daemon-t8rgm"
Jan 22 05:46:27 crc kubenswrapper[4933]: E0122 05:46:27.290337 4933 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 22 05:46:27 crc kubenswrapper[4933]: E0122 05:46:27.291022 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0902347a-c5e2-4891-812b-cfe6efc32261-metrics-certs podName:0902347a-c5e2-4891-812b-cfe6efc32261 nodeName:}" failed. No retries permitted until 2026-01-22 05:46:29.291000919 +0000 UTC m=+37.128126282 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0902347a-c5e2-4891-812b-cfe6efc32261-metrics-certs") pod "network-metrics-daemon-t8rgm" (UID: "0902347a-c5e2-4891-812b-cfe6efc32261") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.299664 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:46:10Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 05:46:04.652334 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:46:04.654188 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-11537279/tls.crt::/tmp/serving-cert-11537279/tls.key\\\\\\\"\\\\nI0122 05:46:10.676992 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:46:10.680821 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:46:10.680851 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:46:10.680897 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:46:10.680911 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:46:10.690531 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 05:46:10.690586 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690615 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 05:46:10.690623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 05:46:10.690632 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 05:46:10.690639 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 05:46:10.691151 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:46:10.696387 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:27Z is after 2025-08-24T17:21:41Z"
Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.315491 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88329ed7-908d-4584-abfb-6c24f9a764f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5e260f75dcd418c5e606ef0ce7c06aa8db34b655acdc7ed4d7540f3ed59d873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ad10cfd00a2bd6dab0704baa3988eab6b7b97812ef8a6074b9bc2c2a03df731\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://401030b22a7ca1989952ec1bf2d9d7d8a5b78e0cba875dcd6b1d2fe4f2548c54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ada618a16d850d1ea940debe486a3836a49697a4742426d29bddb32d496a9d61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:27Z is after 2025-08-24T17:21:41Z"
Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.327644 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f2db1d-40cb-4864-917b-3b99f69cdafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b19005c1dbe0b643170cfc21cda4a46a6ab3630efbf5198dc7c19882bb5ff6a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99fe3d10aa7ee2f1de1f5f23ed8c4c7ac9efe69ce9e366f02e2e8efca2c017e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zfnsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:27Z is after 2025-08-24T17:21:41Z"
Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.342460 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7r526" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2b23423-6793-44cd-b47e-dc4d25bbe3ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78e689c029c4c4b19873187eddbdcd40bbc62b4e93d9a120a801e04db7c56ecc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hfvwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7r526\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:27Z is after 2025-08-24T17:21:41Z"
Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.358030 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-28bzv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9a82407-aef2-4209-bb3f-6c89e11387e3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66a005229530dfbb93c94f5b93260aeb17f8852e46eed9c67d1558c04d1552a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vkg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ad051b09250b7060a688905e48c62f57620af633bc9a1e302e91eb076c92981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vkg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-28bzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:27Z is after 2025-08-24T17:21:41Z"
Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.371429 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-t8rgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0902347a-c5e2-4891-812b-cfe6efc32261\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4ghgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4ghgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-t8rgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:27Z is after 2025-08-24T17:21:41Z"
Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.389622 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.389669 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.389685 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.389701 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.389715 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:27Z","lastTransitionTime":"2026-01-22T05:46:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.391562 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.391740 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.391821 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.391941 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.391989 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 22 05:46:27 crc kubenswrapper[4933]: E0122 05:46:27.392037 4933 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 22 05:46:27 crc kubenswrapper[4933]: E0122 05:46:27.392150 4933 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 22 05:46:27 crc kubenswrapper[4933]: E0122 05:46:27.392169 4933 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 22 05:46:27 crc kubenswrapper[4933]: E0122 05:46:27.392208 4933 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 22 05:46:27 crc kubenswrapper[4933]: E0122 05:46:27.392214 4933 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 22 05:46:27 crc kubenswrapper[4933]:
E0122 05:46:27.392247 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 05:46:43.392218742 +0000 UTC m=+51.229344135 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 22 05:46:27 crc kubenswrapper[4933]: E0122 05:46:27.392289 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-22 05:46:43.392264082 +0000 UTC m=+51.229389476 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:46:27 crc kubenswrapper[4933]: E0122 05:46:27.392208 4933 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 22 05:46:27 crc kubenswrapper[4933]: E0122 05:46:27.392321 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 05:46:43.392306813 +0000 UTC m=+51.229432206 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 22 05:46:27 crc kubenswrapper[4933]: E0122 05:46:27.392346 4933 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 22 05:46:27 crc kubenswrapper[4933]: E0122 05:46:27.392372 4933 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:46:27 crc kubenswrapper[4933]: E0122 05:46:27.392432 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-22 05:46:43.392405865 +0000 UTC m=+51.229531258 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:46:27 crc kubenswrapper[4933]: E0122 05:46:27.392477 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:46:43.392460027 +0000 UTC m=+51.229585500 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.490351 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.490455 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:46:27 crc kubenswrapper[4933]: E0122 05:46:27.490543 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:46:27 crc kubenswrapper[4933]: E0122 05:46:27.490715 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.490684 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.490753 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:46:27 crc kubenswrapper[4933]: E0122 05:46:27.490887 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:46:27 crc kubenswrapper[4933]: E0122 05:46:27.491140 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.492688 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.492765 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.492791 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.492820 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.492843 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:27Z","lastTransitionTime":"2026-01-22T05:46:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.497698 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-02 00:06:08.756781874 +0000 UTC Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.595953 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.596012 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.596029 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.596052 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.596068 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:27Z","lastTransitionTime":"2026-01-22T05:46:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.699401 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.699461 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.699478 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.699501 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.699519 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:27Z","lastTransitionTime":"2026-01-22T05:46:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.802978 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.803044 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.803062 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.803194 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.803242 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:27Z","lastTransitionTime":"2026-01-22T05:46:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.905848 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.905912 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.905928 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.905955 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:27 crc kubenswrapper[4933]: I0122 05:46:27.905979 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:27Z","lastTransitionTime":"2026-01-22T05:46:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.009243 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.009379 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.009403 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.009433 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.009456 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:28Z","lastTransitionTime":"2026-01-22T05:46:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.112246 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.112330 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.112350 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.112640 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.112669 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:28Z","lastTransitionTime":"2026-01-22T05:46:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.215264 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.215325 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.215342 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.215366 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.215385 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:28Z","lastTransitionTime":"2026-01-22T05:46:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.318909 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.318980 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.318999 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.319027 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.319045 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:28Z","lastTransitionTime":"2026-01-22T05:46:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.422049 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.422163 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.422182 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.422208 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.422228 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:28Z","lastTransitionTime":"2026-01-22T05:46:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.498101 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-28 06:23:22.93370255 +0000 UTC Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.524772 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.524808 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.524820 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.524835 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.524846 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:28Z","lastTransitionTime":"2026-01-22T05:46:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.627453 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.627620 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.627640 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.627671 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.627693 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:28Z","lastTransitionTime":"2026-01-22T05:46:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.730725 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.730781 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.730799 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.730827 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.730845 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:28Z","lastTransitionTime":"2026-01-22T05:46:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.833601 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.833653 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.833669 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.833692 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.833710 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:28Z","lastTransitionTime":"2026-01-22T05:46:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.936226 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.936302 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.936324 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.936356 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:28 crc kubenswrapper[4933]: I0122 05:46:28.936381 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:28Z","lastTransitionTime":"2026-01-22T05:46:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.039507 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.039593 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.039610 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.039634 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.039652 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:29Z","lastTransitionTime":"2026-01-22T05:46:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.142830 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.142903 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.142921 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.142948 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.142967 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:29Z","lastTransitionTime":"2026-01-22T05:46:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.246407 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.246451 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.246463 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.246478 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.246490 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:29Z","lastTransitionTime":"2026-01-22T05:46:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.314660 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0902347a-c5e2-4891-812b-cfe6efc32261-metrics-certs\") pod \"network-metrics-daemon-t8rgm\" (UID: \"0902347a-c5e2-4891-812b-cfe6efc32261\") " pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:46:29 crc kubenswrapper[4933]: E0122 05:46:29.314812 4933 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 22 05:46:29 crc kubenswrapper[4933]: E0122 05:46:29.314867 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0902347a-c5e2-4891-812b-cfe6efc32261-metrics-certs podName:0902347a-c5e2-4891-812b-cfe6efc32261 nodeName:}" failed. No retries permitted until 2026-01-22 05:46:33.31485081 +0000 UTC m=+41.151976163 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0902347a-c5e2-4891-812b-cfe6efc32261-metrics-certs") pod "network-metrics-daemon-t8rgm" (UID: "0902347a-c5e2-4891-812b-cfe6efc32261") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.348291 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.348330 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.348341 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.348355 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.348366 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:29Z","lastTransitionTime":"2026-01-22T05:46:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.451657 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.451722 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.451740 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.451768 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.451785 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:29Z","lastTransitionTime":"2026-01-22T05:46:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.490540 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.490586 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.490599 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:46:29 crc kubenswrapper[4933]: E0122 05:46:29.490680 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.490563 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:46:29 crc kubenswrapper[4933]: E0122 05:46:29.490879 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:46:29 crc kubenswrapper[4933]: E0122 05:46:29.491136 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:46:29 crc kubenswrapper[4933]: E0122 05:46:29.491228 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.498995 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-30 18:55:47.647552466 +0000 UTC Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.554587 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.554675 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.554700 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.554734 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.554758 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:29Z","lastTransitionTime":"2026-01-22T05:46:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.657897 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.657969 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.657992 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.658016 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.658034 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:29Z","lastTransitionTime":"2026-01-22T05:46:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.760811 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.760880 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.760902 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.760934 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.760955 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:29Z","lastTransitionTime":"2026-01-22T05:46:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.863901 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.863971 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.863991 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.864018 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.864035 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:29Z","lastTransitionTime":"2026-01-22T05:46:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.967616 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.967688 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.967751 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.967786 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:29 crc kubenswrapper[4933]: I0122 05:46:29.967808 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:29Z","lastTransitionTime":"2026-01-22T05:46:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.070507 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.070586 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.070603 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.070630 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.070652 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:30Z","lastTransitionTime":"2026-01-22T05:46:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.174016 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.174101 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.174120 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.174143 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.174162 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:30Z","lastTransitionTime":"2026-01-22T05:46:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.277298 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.277361 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.277378 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.277402 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.277420 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:30Z","lastTransitionTime":"2026-01-22T05:46:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.380277 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.380404 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.380424 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.380447 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.380464 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:30Z","lastTransitionTime":"2026-01-22T05:46:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.483166 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.483219 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.483236 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.483262 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.483286 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:30Z","lastTransitionTime":"2026-01-22T05:46:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.499591 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-28 12:19:34.539983609 +0000 UTC Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.586279 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.586401 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.586417 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.586435 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.586449 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:30Z","lastTransitionTime":"2026-01-22T05:46:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.688339 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.688378 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.688387 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.688402 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.688412 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:30Z","lastTransitionTime":"2026-01-22T05:46:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.791468 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.791511 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.791521 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.791536 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.791546 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:30Z","lastTransitionTime":"2026-01-22T05:46:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.893719 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.893794 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.893820 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.893851 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.893874 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:30Z","lastTransitionTime":"2026-01-22T05:46:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.996953 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.997026 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.997048 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.997114 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:30 crc kubenswrapper[4933]: I0122 05:46:30.997140 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:30Z","lastTransitionTime":"2026-01-22T05:46:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.100521 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.100599 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.100621 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.100650 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.100671 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:31Z","lastTransitionTime":"2026-01-22T05:46:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.204701 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.204763 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.204781 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.204804 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.204822 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:31Z","lastTransitionTime":"2026-01-22T05:46:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.307282 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.307362 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.307386 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.307413 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.307433 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:31Z","lastTransitionTime":"2026-01-22T05:46:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.410448 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.410493 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.410505 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.410522 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.410534 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:31Z","lastTransitionTime":"2026-01-22T05:46:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.489995 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.490059 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.490001 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:46:31 crc kubenswrapper[4933]: E0122 05:46:31.490181 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.490241 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:46:31 crc kubenswrapper[4933]: E0122 05:46:31.490362 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:46:31 crc kubenswrapper[4933]: E0122 05:46:31.490426 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:46:31 crc kubenswrapper[4933]: E0122 05:46:31.490509 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.499862 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 07:21:35.319331434 +0000 UTC Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.512838 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.512878 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.512889 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.512907 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.512919 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:31Z","lastTransitionTime":"2026-01-22T05:46:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.616472 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.616518 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.616540 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.616571 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.616596 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:31Z","lastTransitionTime":"2026-01-22T05:46:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.719859 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.719930 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.719949 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.719975 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.719994 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:31Z","lastTransitionTime":"2026-01-22T05:46:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.822306 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.822372 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.822395 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.822455 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.822478 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:31Z","lastTransitionTime":"2026-01-22T05:46:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.925411 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.925527 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.925545 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.925574 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:31 crc kubenswrapper[4933]: I0122 05:46:31.925592 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:31Z","lastTransitionTime":"2026-01-22T05:46:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.028010 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.028055 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.028066 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.028107 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.028120 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:32Z","lastTransitionTime":"2026-01-22T05:46:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.130902 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.130965 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.130987 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.131012 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.131031 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:32Z","lastTransitionTime":"2026-01-22T05:46:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.235017 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.235109 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.235127 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.235150 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.235166 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:32Z","lastTransitionTime":"2026-01-22T05:46:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.336901 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.336945 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.336958 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.336976 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.336988 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:32Z","lastTransitionTime":"2026-01-22T05:46:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.443570 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.443660 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.443674 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.443703 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.443720 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:32Z","lastTransitionTime":"2026-01-22T05:46:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.500254 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-16 23:37:42.037838974 +0000 UTC Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.507804 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"star
ted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:46:10Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 05:46:04.652334 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:46:04.654188 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-11537279/tls.crt::/tmp/serving-cert-11537279/tls.key\\\\\\\"\\\\nI0122 05:46:10.676992 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:46:10.680821 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:46:10.680851 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:46:10.680897 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:46:10.680911 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:46:10.690531 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 05:46:10.690586 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690615 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 05:46:10.690623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 05:46:10.690632 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 05:46:10.690639 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 05:46:10.691151 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:46:10.696387 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:32Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.525932 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88329ed7-908d-4584-abfb-6c24f9a764f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5e260f75dcd418c5e606ef0ce7c06aa8db34b655acdc7ed4d7540f3ed59d873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ad10cfd00a2bd6dab0704baa3988eab6b7b97812ef8a6074b9bc2c2a03df731\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://401030b22a7ca1989952ec1bf2d9d7d8a5b78e0cba875dcd6b1d2fe4f2548c54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ada618a16d850d1ea940debe486a3836a49697a4742426d29bddb32d496a9d61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:32Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.541809 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27ea089ef37f2af216c553e31f4852fe12489f919f95c9c6ec1c579babcedbb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:32Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.547583 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.547620 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.547632 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.547651 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.547665 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:32Z","lastTransitionTime":"2026-01-22T05:46:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.556019 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:32Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.568747 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:32Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.578338 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nvpgt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126473c5-96ff-4ca9-83c0-7548d7e219c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64f3d49f15e75b73f18118e60b0e8cdae794506cff25ef11ea3a217db91a4b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w6xgn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nvpgt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-22T05:46:32Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.603309 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a721333-1932-4bb0-b384-c034492e59c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\
"containerID\\\":\\\"cri-o://8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://894c889f406ee573cc2056089469b86d80c2b1de365691296bd7125d706522db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://894c889f406ee573cc2056089469b86d80c2b1de365691296bd7125d706522db\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:46:22Z\\\",\\\"message\\\":\\\".org/owner\\\\\\\":\\\\\\\"openshift-marketplace/marketplace-operator-metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.53\\\\\\\", Port:8383, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.53\\\\\\\", Port:8081, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF0122 05:46:22.695952 6325 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is 
not\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:21Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-z88sj_openshift-ovn-kubernetes(6a721333-1932-4bb0-b384-c034492e59c4)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursive
ReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z88sj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:32Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.616546 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f2db1d-40cb-4864-917b-3b99f69cdafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b19005c1dbe0b643170cfc21cda4a46a6ab3630efbf5198dc7c19882bb5ff6a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99fe3d10aa7ee2f1de1f5f23ed8c4c7ac9efe69ce9e366f02e2e8efca2c017e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zfnsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:32Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.630640 4933 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-7r526" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2b23423-6793-44cd-b47e-dc4d25bbe3ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78e689c029c4c4b19873187eddbdcd40bbc62b4e93d9a120a801e04db7c56ecc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hfvwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7r526\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:32Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.648020 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-28bzv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9a82407-aef2-4209-bb3f-6c89e11387e3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66a005229530dfbb93c94f5b93260aeb17f8852e46eed9c67d1558c04d1552a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vkg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ad051b09250b7060a688905e48c62f57620af633bc9a1e302e91eb076c92981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vkg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-28bzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:32Z is after 2025-08-24T17:21:41Z" Jan 22 
05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.651313 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.651460 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.651631 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.651726 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.651828 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:32Z","lastTransitionTime":"2026-01-22T05:46:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.664223 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-t8rgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0902347a-c5e2-4891-812b-cfe6efc32261\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4ghgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4ghgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-t8rgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:32Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.683370 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jr6rw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a6b69e5a5d7420dbcda9a6a023402f27c1d584233c06bfb3d09ae7efb243e31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v4gxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jr6rw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:32Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.699992 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83dfdde7-cd49-49e0-85a0-0165d464b2c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e3de35a667d3620666041da8bcc357baea90e882fba4d33ef6c9f38906699cac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q8l78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-22T05:46:32Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.716513 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59b05e4bfee6067658a60220a411d49a83c5ca76039e31325ce7646cbbeeba3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71b7f697413549ee75514e57fcb6e2d93c6b7e349b1cf93e8fc8844c96036d60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:32Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.728968 4933 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c47f771dea621131d66ab686e6c28f7f5aeb129834c0f8308f467c2bd1b871e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:32Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.743546 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:32Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.754573 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.754777 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.754911 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.755043 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.755215 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:32Z","lastTransitionTime":"2026-01-22T05:46:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.858395 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.858482 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.858506 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.858534 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.858558 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:32Z","lastTransitionTime":"2026-01-22T05:46:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.961543 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.961598 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.961615 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.961637 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:32 crc kubenswrapper[4933]: I0122 05:46:32.961655 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:32Z","lastTransitionTime":"2026-01-22T05:46:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.064515 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.064590 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.064613 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.064644 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.064670 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:33Z","lastTransitionTime":"2026-01-22T05:46:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.167264 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.167316 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.167325 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.167341 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.167351 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:33Z","lastTransitionTime":"2026-01-22T05:46:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.270828 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.270905 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.270926 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.270949 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.270971 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:33Z","lastTransitionTime":"2026-01-22T05:46:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.359301 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0902347a-c5e2-4891-812b-cfe6efc32261-metrics-certs\") pod \"network-metrics-daemon-t8rgm\" (UID: \"0902347a-c5e2-4891-812b-cfe6efc32261\") " pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:46:33 crc kubenswrapper[4933]: E0122 05:46:33.359611 4933 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 22 05:46:33 crc kubenswrapper[4933]: E0122 05:46:33.359720 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0902347a-c5e2-4891-812b-cfe6efc32261-metrics-certs podName:0902347a-c5e2-4891-812b-cfe6efc32261 nodeName:}" failed. No retries permitted until 2026-01-22 05:46:41.35968812 +0000 UTC m=+49.196813513 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0902347a-c5e2-4891-812b-cfe6efc32261-metrics-certs") pod "network-metrics-daemon-t8rgm" (UID: "0902347a-c5e2-4891-812b-cfe6efc32261") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.374352 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.374423 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.374447 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.374474 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.374494 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:33Z","lastTransitionTime":"2026-01-22T05:46:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.477268 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.477346 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.477370 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.477397 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.477414 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:33Z","lastTransitionTime":"2026-01-22T05:46:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.490831 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 22 05:46:33 crc kubenswrapper[4933]: E0122 05:46:33.491269 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.490881 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 22 05:46:33 crc kubenswrapper[4933]: E0122 05:46:33.491770 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.490865 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm"
Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.490940 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 22 05:46:33 crc kubenswrapper[4933]: E0122 05:46:33.492214 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261"
Jan 22 05:46:33 crc kubenswrapper[4933]: E0122 05:46:33.492370 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.500461 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-05 10:18:30.560403791 +0000 UTC
Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.579966 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.580379 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.580608 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.580810 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.580951 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:33Z","lastTransitionTime":"2026-01-22T05:46:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.684252 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.684348 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.684369 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.684391 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.684408 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:33Z","lastTransitionTime":"2026-01-22T05:46:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.787156 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.787210 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.787231 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.787256 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.787273 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:33Z","lastTransitionTime":"2026-01-22T05:46:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.889503 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.889559 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.889578 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.889602 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.889621 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:33Z","lastTransitionTime":"2026-01-22T05:46:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.992340 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.993338 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.993497 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.993677 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:33 crc kubenswrapper[4933]: I0122 05:46:33.993816 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:33Z","lastTransitionTime":"2026-01-22T05:46:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.096717 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.096755 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.096767 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.096783 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.096793 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:34Z","lastTransitionTime":"2026-01-22T05:46:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.199922 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.199959 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.199970 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.199985 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.199997 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:34Z","lastTransitionTime":"2026-01-22T05:46:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.302960 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.303010 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.303029 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.303052 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.303069 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:34Z","lastTransitionTime":"2026-01-22T05:46:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.406485 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.406523 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.406534 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.406551 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.406564 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:34Z","lastTransitionTime":"2026-01-22T05:46:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.501394 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-09 20:46:56.754642236 +0000 UTC
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.509439 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.509483 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.509500 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.509523 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.509540 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:34Z","lastTransitionTime":"2026-01-22T05:46:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.612153 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.612457 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.612685 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.612888 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.613100 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:34Z","lastTransitionTime":"2026-01-22T05:46:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.715746 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.715782 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.715793 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.715808 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.715822 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:34Z","lastTransitionTime":"2026-01-22T05:46:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.818732 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.818793 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.818810 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.818835 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.818852 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:34Z","lastTransitionTime":"2026-01-22T05:46:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.921003 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.921067 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.921110 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.921186 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:34 crc kubenswrapper[4933]: I0122 05:46:34.921204 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:34Z","lastTransitionTime":"2026-01-22T05:46:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.024459 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.025261 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.025299 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.025323 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.025340 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:35Z","lastTransitionTime":"2026-01-22T05:46:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.067129 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.067208 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.067227 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.067253 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.067270 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:35Z","lastTransitionTime":"2026-01-22T05:46:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:35 crc kubenswrapper[4933]: E0122 05:46:35.085540 4933 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c8062a48-506a-465a-9977-93e8530bae49\\\",\\\"systemUUID\\\":\\\"d621d350-6f7c-490e-a47d-b396db235280\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:35Z is after 
2025-08-24T17:21:41Z"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.091374 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.091563 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.091670 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.091775 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.091874 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:35Z","lastTransitionTime":"2026-01-22T05:46:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
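The NodeNotReady records above all hang off a single condition: the kubelet reports NetworkReady=false, and will not create pod sandboxes, until a CNI network configuration appears in /etc/kubernetes/cni/net.d/. A minimal Go sketch of that readiness probe, assuming the check reduces to "at least one *.conf, *.conflist, or *.json file in the conf dir" (an illustration, not the kubelet's actual implementation):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// cniConfigPresent reports whether confDir holds at least one CNI network
// configuration file; until it does, the node stays NotReady.
func cniConfigPresent(confDir string) bool {
	for _, pattern := range []string{"*.conf", "*.conflist", "*.json"} {
		if matches, err := filepath.Glob(filepath.Join(confDir, pattern)); err == nil && len(matches) > 0 {
			return true
		}
	}
	return false
}

func main() {
	confDir := "/etc/kubernetes/cni/net.d" // path taken from the log records above
	if cniConfigPresent(confDir) {
		fmt.Println("NetworkReady=true")
		return
	}
	fmt.Printf("NetworkReady=false: no CNI configuration file in %s. Has your network provider started?\n", confDir)
	os.Exit(1)
}

While the probe fails, sandbox creation is skipped entirely, which is why the "No sandbox for pod can be found" and "Error syncing pod, skipping" records keep repeating for the openshift-network-diagnostics, openshift-multus, and openshift-network-console pods.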
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.122447 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.123204 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.123242 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.123277 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.123303 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:35Z","lastTransitionTime":"2026-01-22T05:46:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
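Each retry of the "Error updating node status" patch above dies the same way: the patch is intercepted by the node.network-node-identity.openshift.io admission webhook at https://127.0.0.1:9743, and TLS verification of that webhook fails because its serving certificate expired on 2025-08-24 while the node clock reads 2026-01-22. A minimal Go sketch of the validity-window rule behind "x509: certificate has expired or is not yet valid" (the NotBefore value is an assumed one-year window, not something the log reports):

package main

import (
	"fmt"
	"time"
)

// checkValidity applies the x509 rule that a certificate is usable only while
// the verification time falls inside [NotBefore, NotAfter].
func checkValidity(now, notBefore, notAfter time.Time) error {
	switch {
	case now.Before(notBefore):
		return fmt.Errorf("x509: certificate is not yet valid: current time %s is before %s",
			now.Format(time.RFC3339), notBefore.Format(time.RFC3339))
	case now.After(notAfter):
		return fmt.Errorf("x509: certificate has expired: current time %s is after %s",
			now.Format(time.RFC3339), notAfter.Format(time.RFC3339))
	}
	return nil
}

func main() {
	now := time.Date(2026, 1, 22, 5, 46, 35, 0, time.UTC)       // node clock from the log
	notAfter := time.Date(2025, 8, 24, 17, 21, 41, 0, time.UTC) // expiry from the webhook error
	notBefore := notAfter.AddDate(-1, 0, 0)                     // assumption: one-year validity
	if err := checkValidity(now, notBefore, notAfter); err != nil {
		fmt.Println(err) // the same class of failure the kubelet logs while retrying the patch
	}
}

Because the webhook sits in front of every status patch, the kubelet cannot record that memory, disk, and PID pressure are all fine, and the patch retries continue until the certificate is reissued or the clock is corrected.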
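The kubernetes.io/kubelet-serving records earlier in this log show the same clock problem from the rotation side: the certificate expires 2026-02-24, yet the computed rotation deadlines (2026-01-05 and 2025-11-09) already lie behind the node clock of 2026-01-22, so rotation is overdue and the manager keeps re-evaluating, which fits the two records landing about a second apart with different deadlines. A sketch of that deadline computation, assuming the deadline is drawn uniformly from 70-90% of the validity window and assuming a one-year certificate (neither detail is stated in the log):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// rotationDeadline picks a jittered point 70-90% of the way through the
// certificate's validity window; resampling on each evaluation is what makes
// consecutive log records show different deadlines.
func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := notAfter.Sub(notBefore)
	jittered := time.Duration(float64(total) * (0.7 + 0.2*rand.Float64()))
	return notBefore.Add(jittered)
}

func main() {
	notAfter := time.Date(2026, 2, 24, 5, 53, 3, 0, time.UTC) // expiration from the log
	notBefore := notAfter.AddDate(-1, 0, 0)                   // assumption: one-year validity
	now := time.Date(2026, 1, 22, 5, 46, 34, 0, time.UTC)     // node clock from the log
	deadline := rotationDeadline(notBefore, notAfter)
	fmt.Printf("rotation deadline is %s (overdue: %v)\n", deadline, now.After(deadline))
}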
2025-08-24T17:21:41Z" Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.150709 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.150769 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.150787 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.150810 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.150830 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:35Z","lastTransitionTime":"2026-01-22T05:46:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:35 crc kubenswrapper[4933]: E0122 05:46:35.170262 4933 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c8062a48-506a-465a-9977-93e8530bae49\\\",\\\"systemUUID\\\":\\\"d621d350-6f7c-490e-a47d-b396db235280\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:35Z is after 
2025-08-24T17:21:41Z" Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.174814 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.174901 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.174919 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.174943 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.174960 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:35Z","lastTransitionTime":"2026-01-22T05:46:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:35 crc kubenswrapper[4933]: E0122 05:46:35.195582 4933 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:35Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c8062a48-506a-465a-9977-93e8530bae49\\\",\\\"systemUUID\\\":\\\"d621d350-6f7c-490e-a47d-b396db235280\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:35Z is after 
2025-08-24T17:21:41Z"
Jan 22 05:46:35 crc kubenswrapper[4933]: E0122 05:46:35.195772 4933 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.197820 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.197925 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.197947 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.197977 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.197999 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:35Z","lastTransitionTime":"2026-01-22T05:46:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.299975 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.300031 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.300041 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.300054 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.300063 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:35Z","lastTransitionTime":"2026-01-22T05:46:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
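
The status-patch retries above all fail identically: the node.network-node-identity.openshift.io webhook at https://127.0.0.1:9743 presents a serving certificate whose NotAfter (2025-08-24T17:21:41Z) is months behind the node's clock (2026-01-22T05:46:35Z), so each patch is rejected until the kubelet exhausts its small fixed retry budget ("update node status exceeds retry count"). A minimal Go sketch of the same NotBefore/NotAfter window check that crypto/x509 applies during verification; the certificate file path is a placeholder assumption, not taken from this log:

// certcheck: reproduces the x509 validity-window comparison seen in the
// webhook errors above. Not kubelet code; a standalone sketch.
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"log"
	"os"
	"time"
)

func main() {
	// Placeholder path (assumption): point at the webhook's serving cert PEM.
	data, err := os.ReadFile("webhook-serving-cert.pem")
	if err != nil {
		log.Fatal(err)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		log.Fatal("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		log.Fatal(err)
	}
	// A certificate is only valid inside [NotBefore, NotAfter]; outside that
	// window, verification fails exactly as in the log entries above.
	now := time.Now().UTC()
	switch {
	case now.Before(cert.NotBefore):
		fmt.Printf("certificate is not yet valid: current time %s is before %s\n",
			now.Format(time.RFC3339), cert.NotBefore.UTC().Format(time.RFC3339))
	case now.After(cert.NotAfter):
		fmt.Printf("certificate has expired: current time %s is after %s\n",
			now.Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
	default:
		fmt.Printf("certificate valid until %s\n", cert.NotAfter.UTC().Format(time.RFC3339))
	}
}

Until that certificate is rotated (or the node clock corrected), every node-status patch will keep failing the same way, which is why the identical error repeats above.
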
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.401877 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.401908 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.401918 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.401931 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.401940 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:35Z","lastTransitionTime":"2026-01-22T05:46:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.490020 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.490058 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.490136 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.490246 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 22 05:46:35 crc kubenswrapper[4933]: E0122 05:46:35.490238 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 22 05:46:35 crc kubenswrapper[4933]: E0122 05:46:35.490339 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 22 05:46:35 crc kubenswrapper[4933]: E0122 05:46:35.490500 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261"
Jan 22 05:46:35 crc kubenswrapper[4933]: E0122 05:46:35.490574 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.502345 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-12 10:47:15.363377273 +0000 UTC
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.504341 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.504402 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.504421 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.504443 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.504459 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:35Z","lastTransitionTime":"2026-01-22T05:46:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.608042 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.608104 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.608117 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.608145 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.608173 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:35Z","lastTransitionTime":"2026-01-22T05:46:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
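
Alongside the webhook failures, every heartbeat republishes Ready=False with NetworkPluginNotReady: the container runtime finds no CNI network config under /etc/kubernetes/cni/net.d/, so NetworkReady stays false and the sandboxes for the openshift-network-diagnostics, openshift-network-console, and openshift-multus pods above cannot be created. A stdlib-only Go sketch of that directory probe; the *.conf/*.conflist/*.json patterns follow the usual CNI config convention and are an assumption, not read from this log:

// cniprobe: checks a CNI conf directory the way a runtime does before
// reporting NetworkReady. Not kubelet/CRI-O code; a standalone sketch.
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	confDir := "/etc/kubernetes/cni/net.d" // directory named in the log messages
	var found []string
	// Extension list is the common CNI convention (assumption).
	for _, pattern := range []string{"*.conf", "*.conflist", "*.json"} {
		// Glob only errors on a malformed pattern; these literals are valid.
		matches, _ := filepath.Glob(filepath.Join(confDir, pattern))
		found = append(found, matches...)
	}
	if len(found) == 0 {
		// The state the kubelet keeps republishing in the heartbeats above.
		fmt.Printf("no CNI configuration file in %s/. Has your network provider started?\n", confDir)
		return
	}
	for _, f := range found {
		fmt.Println("CNI config present:", f)
	}
}

Once a network provider writes a config into that directory, the runtime reports NetworkReady=true and the NodeNotReady heartbeats above stop.
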
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.710837 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.710874 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.710886 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.710900 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.710913 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:35Z","lastTransitionTime":"2026-01-22T05:46:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.813818 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.813872 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.813894 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.813923 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.813945 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:35Z","lastTransitionTime":"2026-01-22T05:46:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.917273 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.917328 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.917346 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.917372 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:35 crc kubenswrapper[4933]: I0122 05:46:35.917389 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:35Z","lastTransitionTime":"2026-01-22T05:46:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.021180 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.021226 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.021246 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.021268 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.021285 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:36Z","lastTransitionTime":"2026-01-22T05:46:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.124629 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.125536 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.125760 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.125985 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.126171 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:36Z","lastTransitionTime":"2026-01-22T05:46:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.228731 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.228796 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.228814 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.228839 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.228859 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:36Z","lastTransitionTime":"2026-01-22T05:46:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.331311 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.331356 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.331368 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.331385 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.331397 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:36Z","lastTransitionTime":"2026-01-22T05:46:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.434982 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.435052 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.435070 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.435146 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.435165 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:36Z","lastTransitionTime":"2026-01-22T05:46:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.502996 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-11 14:33:47.366440088 +0000 UTC Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.537618 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.537694 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.537717 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.537748 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.537772 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:36Z","lastTransitionTime":"2026-01-22T05:46:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.640928 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.640980 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.640997 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.641019 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.641035 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:36Z","lastTransitionTime":"2026-01-22T05:46:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.744251 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.744337 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.744363 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.744394 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.744416 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:36Z","lastTransitionTime":"2026-01-22T05:46:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.849598 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.849929 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.850114 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.850261 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.850436 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:36Z","lastTransitionTime":"2026-01-22T05:46:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.953748 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.953822 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.953845 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.953873 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:36 crc kubenswrapper[4933]: I0122 05:46:36.953894 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:36Z","lastTransitionTime":"2026-01-22T05:46:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.056903 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.056955 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.056966 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.056983 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.056995 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:37Z","lastTransitionTime":"2026-01-22T05:46:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.160499 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.160554 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.160571 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.160594 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.160611 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:37Z","lastTransitionTime":"2026-01-22T05:46:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.263678 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.263965 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.263999 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.264034 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.264099 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:37Z","lastTransitionTime":"2026-01-22T05:46:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.366980 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.367040 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.367111 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.367154 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.367178 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:37Z","lastTransitionTime":"2026-01-22T05:46:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.470573 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.470648 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.470669 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.470702 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.470726 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:37Z","lastTransitionTime":"2026-01-22T05:46:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.489885 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:46:37 crc kubenswrapper[4933]: E0122 05:46:37.490134 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.490348 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.490409 4933 util.go:30] "No sandbox for pod can be found. 
Jan 22 05:46:37 crc kubenswrapper[4933]: E0122 05:46:37.490885 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 22 05:46:37 crc kubenswrapper[4933]: E0122 05:46:37.490992 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.490636 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 22 05:46:37 crc kubenswrapper[4933]: E0122 05:46:37.492242 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.503797 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-01 07:22:33.989189722 +0000 UTC
Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.573218 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.573293 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.573312 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.573339 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.573360 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:37Z","lastTransitionTime":"2026-01-22T05:46:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.676984 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.677055 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.677122 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.677157 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.677179 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:37Z","lastTransitionTime":"2026-01-22T05:46:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.780606 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.780659 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.780676 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.780700 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.780719 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:37Z","lastTransitionTime":"2026-01-22T05:46:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.883617 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.883689 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.883707 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.883730 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.883747 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:37Z","lastTransitionTime":"2026-01-22T05:46:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.987156 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.987201 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.987211 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.987231 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:37 crc kubenswrapper[4933]: I0122 05:46:37.987244 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:37Z","lastTransitionTime":"2026-01-22T05:46:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.090171 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.090262 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.090289 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.090322 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.090345 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:38Z","lastTransitionTime":"2026-01-22T05:46:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.194555 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.194630 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.194655 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.194685 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.194708 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:38Z","lastTransitionTime":"2026-01-22T05:46:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.298639 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.298699 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.298722 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.298751 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.298774 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:38Z","lastTransitionTime":"2026-01-22T05:46:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.401767 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.401830 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.401855 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.401883 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.401905 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:38Z","lastTransitionTime":"2026-01-22T05:46:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
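Heartbeat groups like the ones above repeat every ~100 ms, so the useful signal is when the embedded Ready condition changes rather than any single record. A short offline helper for this log (a sketch, not part of kubelet; the struct mirrors only the condition fields visible in these records, not the full v1.NodeCondition type):

// Extract and decode the condition={...} JSON from each setters.go
// "Node became not ready" record read on stdin.
package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"os"
	"regexp"
)

type nodeCondition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
	LastTransitionTime string `json:"lastTransitionTime"`
	Reason             string `json:"reason"`
	Message            string `json:"message"`
}

var condRe = regexp.MustCompile(`"Node became not ready" node="([^"]+)" condition=(\{.*\})`)

func main() {
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 1024*1024), 1024*1024) // some records are very long
	for sc.Scan() {
		m := condRe.FindStringSubmatch(sc.Text())
		if m == nil {
			continue
		}
		var c nodeCondition
		if err := json.Unmarshal([]byte(m[2]), &c); err != nil {
			continue // skip records whose JSON was cut off
		}
		fmt.Printf("%s %s=%s reason=%s at %s\n", m[1], c.Type, c.Status, c.Reason, c.LastHeartbeatTime)
	}
}

Run it as "go run extract.go < kubelet.log" (the filename is a placeholder) and it prints one line per NotReady heartbeat, which makes the transition points easy to spot.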
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.503954 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 22:40:18.150379929 +0000 UTC
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.504518 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.504570 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.504589 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.504615 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.504633 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:38Z","lastTransitionTime":"2026-01-22T05:46:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.607676 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.607749 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.607773 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.607804 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.607829 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:38Z","lastTransitionTime":"2026-01-22T05:46:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.711064 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.711180 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.711661 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.711711 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.711731 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:38Z","lastTransitionTime":"2026-01-22T05:46:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.815425 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.815498 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.815522 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.815551 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.815573 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:38Z","lastTransitionTime":"2026-01-22T05:46:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.918736 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.919214 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.919373 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.919523 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:38 crc kubenswrapper[4933]: I0122 05:46:38.919692 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:38Z","lastTransitionTime":"2026-01-22T05:46:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.022887 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.023041 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.023117 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.023151 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.023169 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:39Z","lastTransitionTime":"2026-01-22T05:46:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.126355 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.126426 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.126447 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.126476 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.126493 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:39Z","lastTransitionTime":"2026-01-22T05:46:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.232609 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.232685 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.232703 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.232729 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.232746 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:39Z","lastTransitionTime":"2026-01-22T05:46:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.335611 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.335652 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.335669 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.335694 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.335710 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:39Z","lastTransitionTime":"2026-01-22T05:46:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.439245 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.439305 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.439322 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.439346 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.439363 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:39Z","lastTransitionTime":"2026-01-22T05:46:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.489692 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.489737 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.489775 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 22 05:46:39 crc kubenswrapper[4933]: E0122 05:46:39.489883 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.489919 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 22 05:46:39 crc kubenswrapper[4933]: E0122 05:46:39.490275 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261"
Jan 22 05:46:39 crc kubenswrapper[4933]: E0122 05:46:39.490914 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 22 05:46:39 crc kubenswrapper[4933]: E0122 05:46:39.491062 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.491506 4933 scope.go:117] "RemoveContainer" containerID="894c889f406ee573cc2056089469b86d80c2b1de365691296bd7125d706522db"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.504458 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-24 11:15:15.622612876 +0000 UTC
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.542745 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.542801 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.542819 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.542841 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.542860 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:39Z","lastTransitionTime":"2026-01-22T05:46:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
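The certificate_manager records at 05:46:37.503797, 05:46:38.503954 and 05:46:39.504458 all report the same expiration (2026-02-24 05:53:03 UTC) but a different rotation deadline each time, and every reported deadline is already in the past relative to the wall clock (2026-01-22), which is why the kubelet keeps retrying rotation on every pass. The deadline moves because it is recomputed with random jitter inside the certificate's validity window. The sketch below assumes client-go's usual policy of rotating at a jittered 70-90% of the certificate's lifetime, and assumes a one-year lifetime (consistent with the logged deadlines); neither value is stated in the log itself.

// Sketch of a jittered rotation deadline, under the assumptions above.
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// rotationDeadline picks a random point between 70% and 90% of the
// certificate's lifetime (assumed policy, mirroring client-go's style).
func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := notAfter.Sub(notBefore)
	jittered := time.Duration(float64(total) * (0.7 + 0.2*rand.Float64()))
	return notBefore.Add(jittered)
}

func main() {
	// Expiration as logged; the one-year lifetime is an assumption.
	notAfter := time.Date(2026, 2, 24, 5, 53, 3, 0, time.UTC)
	notBefore := notAfter.AddDate(-1, 0, 0)
	now := time.Date(2026, 1, 22, 5, 46, 39, 0, time.UTC) // clock from the log
	for i := 0; i < 3; i++ {
		d := rotationDeadline(notBefore, notAfter)
		// With these inputs every draw lands before "now", so each
		// iteration reports an overdue deadline, like the log does.
		fmt.Printf("rotation deadline %s overdue=%v\n", d.Format(time.RFC3339), d.Before(now))
	}
}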
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.646431 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.646854 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.646874 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.646901 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.646922 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:39Z","lastTransitionTime":"2026-01-22T05:46:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.750955 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.751022 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.751045 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.751107 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.751133 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:39Z","lastTransitionTime":"2026-01-22T05:46:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.853023 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.853102 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.853110 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-z88sj_6a721333-1932-4bb0-b384-c034492e59c4/ovnkube-controller/1.log"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.853123 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.853221 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.853238 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:39Z","lastTransitionTime":"2026-01-22T05:46:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.856432 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" event={"ID":"6a721333-1932-4bb0-b384-c034492e59c4","Type":"ContainerStarted","Data":"cd9cfc51c4b76a4a0fe6458da0e032e772b1876046969f7bd972e265fa5a8ca0"}
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.857258 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj"
Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.867463 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-t8rgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0902347a-c5e2-4891-812b-cfe6efc32261\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4ghgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4ghgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-t8rgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:39Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.884293 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f2db1d-40cb-4864-917b-3b99f69cdafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b19005c1dbe0b643170cfc21cda4a46a6ab3630efbf5198dc7c19882bb5ff6a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99fe3d10aa7ee2f1de1f5f23ed8c4c7ac9efe69ce9e366f02e2e8efca2c017e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zfnsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:39Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.893629 4933 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-7r526" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2b23423-6793-44cd-b47e-dc4d25bbe3ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78e689c029c4c4b19873187eddbdcd40bbc62b4e93d9a120a801e04db7c56ecc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hfvwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7r526\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:39Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.904425 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-28bzv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9a82407-aef2-4209-bb3f-6c89e11387e3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66a005229530dfbb93c94f5b93260aeb17f8852e46eed9c67d1558c04d1552a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vkg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ad051b09250b7060a688905e48c62f57620af633bc9a1e302e91eb076c92981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vkg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-28bzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:39Z is after 2025-08-24T17:21:41Z" Jan 22 
05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.917468 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jr6rw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a6b69e5a5d7420dbcda9a6a023402f27c1d584233c06bfb3d09ae7efb243e31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v4gxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.
168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jr6rw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:39Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.931105 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83dfdde7-cd49-49e0-85a0-0165d464b2c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e3de35a667d3620666041da8bcc357baea90e882fba4d33ef6c9f38906699cac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/o
s-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":f
alse,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q8l78\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:39Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.945858 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59b05e4bfee6067658a60220a411d49a83c5ca76039e31325ce7646cbbeeba3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71b7f697413549ee75514e57fcb6e2d93c6b7e349b1cf93e8fc8844c96036d60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:39Z is after 
2025-08-24T17:21:41Z" Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.956435 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c47f771dea621131d66ab686e6c28f7f5aeb129834c0f8308f467c2bd1b871e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:39Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.957032 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.957091 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.957103 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.957118 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.957127 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:39Z","lastTransitionTime":"2026-01-22T05:46:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.970641 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:39Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:39 crc kubenswrapper[4933]: I0122 05:46:39.985371 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nvpgt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"126473c5-96ff-4ca9-83c0-7548d7e219c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64f3d49f15e75b73f18118e60b0e8cdae794506cff25ef11ea3a217db91a4b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w6xgn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nvpgt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:39Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.006440 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a721333-1932-4bb0-b384-c034492e59c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9cfc51c4b76a4a0fe6458da0e032e772b1876046969f7bd972e265fa5a8ca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://894c889f406ee573cc2056089469b86d80c2b1de365691296bd7125d706522db\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:46:22Z\\\",\\\"message\\\":\\\".org/owner\\\\\\\":\\\\\\\"openshift-marketplace/marketplace-operator-metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.53\\\\\\\", Port:8383, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.53\\\\\\\", Port:8081, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF0122 05:46:22.695952 6325 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is 
not\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:21Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"co
ntainerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z88sj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:40Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.022769 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:46:10Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 05:46:04.652334 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:46:04.654188 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-11537279/tls.crt::/tmp/serving-cert-11537279/tls.key\\\\\\\"\\\\nI0122 05:46:10.676992 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:46:10.680821 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:46:10.680851 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:46:10.680897 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:46:10.680911 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:46:10.690531 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 05:46:10.690586 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690615 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 05:46:10.690623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 05:46:10.690632 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 05:46:10.690639 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 05:46:10.691151 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:46:10.696387 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:40Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.040465 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88329ed7-908d-4584-abfb-6c24f9a764f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5e260f75dcd418c5e606ef0ce7c06aa8db34b655acdc7ed4d7540f3ed59d873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ad10cfd00a2bd6dab0704baa3988eab6b7b97812ef8a6074b9bc2c2a03df731\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://401030b22a7ca1989952ec1bf2d9d7d8a5b78e0cba875dcd6b1d2fe4f2548c54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ada618a16d850d1ea940debe486a3836a49697a4742426d29bddb32d496a9d61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:40Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.059488 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.059572 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.059589 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.059640 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.059657 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:40Z","lastTransitionTime":"2026-01-22T05:46:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.064356 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27ea089ef37f2af216c553e31f4852fe12489f919f95c9c6ec1c579babcedbb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:40Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.076480 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:40Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.088966 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:40Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.162197 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.162255 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.162266 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.162285 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.162299 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:40Z","lastTransitionTime":"2026-01-22T05:46:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.264492 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.264529 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.264541 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.264556 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.264566 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:40Z","lastTransitionTime":"2026-01-22T05:46:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.366557 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.366630 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.366641 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.366665 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.366679 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:40Z","lastTransitionTime":"2026-01-22T05:46:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.469676 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.469740 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.469750 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.469764 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.469772 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:40Z","lastTransitionTime":"2026-01-22T05:46:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.505323 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-11 05:54:15.635491452 +0000 UTC Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.571476 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.571549 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.571566 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.571594 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.571611 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:40Z","lastTransitionTime":"2026-01-22T05:46:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.674117 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.674169 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.674180 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.674196 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.674208 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:40Z","lastTransitionTime":"2026-01-22T05:46:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.777426 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.777482 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.777505 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.777533 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.777554 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:40Z","lastTransitionTime":"2026-01-22T05:46:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.863596 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-z88sj_6a721333-1932-4bb0-b384-c034492e59c4/ovnkube-controller/2.log" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.865150 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-z88sj_6a721333-1932-4bb0-b384-c034492e59c4/ovnkube-controller/1.log" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.869944 4933 generic.go:334] "Generic (PLEG): container finished" podID="6a721333-1932-4bb0-b384-c034492e59c4" containerID="cd9cfc51c4b76a4a0fe6458da0e032e772b1876046969f7bd972e265fa5a8ca0" exitCode=1 Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.869997 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" event={"ID":"6a721333-1932-4bb0-b384-c034492e59c4","Type":"ContainerDied","Data":"cd9cfc51c4b76a4a0fe6458da0e032e772b1876046969f7bd972e265fa5a8ca0"} Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.870047 4933 scope.go:117] "RemoveContainer" containerID="894c889f406ee573cc2056089469b86d80c2b1de365691296bd7125d706522db" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.871326 4933 scope.go:117] "RemoveContainer" containerID="cd9cfc51c4b76a4a0fe6458da0e032e772b1876046969f7bd972e265fa5a8ca0" Jan 22 05:46:40 crc kubenswrapper[4933]: E0122 05:46:40.871817 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-z88sj_openshift-ovn-kubernetes(6a721333-1932-4bb0-b384-c034492e59c4)\"" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" podUID="6a721333-1932-4bb0-b384-c034492e59c4" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.881798 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.882223 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.882448 4933 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.882626 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.882792 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:40Z","lastTransitionTime":"2026-01-22T05:46:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.899475 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-t8rgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0902347a-c5e2-4891-812b-cfe6efc32261\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4ghgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4ghgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-t8rgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:40Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.922236 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f2db1d-40cb-4864-917b-3b99f69cdafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b19005c1dbe0b643170cfc21cda4a46a6ab3630efbf5198dc7c19882bb5ff6a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99fe3d10aa7ee2f1de1f5f23ed8c4c7ac9efe69ce9e366f02e2e8efca2c017e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zfnsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:40Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.937587 4933 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-7r526" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2b23423-6793-44cd-b47e-dc4d25bbe3ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78e689c029c4c4b19873187eddbdcd40bbc62b4e93d9a120a801e04db7c56ecc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hfvwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7r526\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:40Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.955187 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-28bzv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9a82407-aef2-4209-bb3f-6c89e11387e3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66a005229530dfbb93c94f5b93260aeb17f8852e46eed9c67d1558c04d1552a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vkg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ad051b09250b7060a688905e48c62f57620af633bc9a1e302e91eb076c92981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vkg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-28bzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:40Z is after 2025-08-24T17:21:41Z" Jan 22 
05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.972282 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jr6rw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a6b69e5a5d7420dbcda9a6a023402f27c1d584233c06bfb3d09ae7efb243e31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v4gxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.
168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jr6rw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:40Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.987418 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.987460 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.987473 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.987491 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.987504 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:40Z","lastTransitionTime":"2026-01-22T05:46:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:40 crc kubenswrapper[4933]: I0122 05:46:40.987899 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83dfdde7-cd49-49e0-85a0-0165d464b2c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e3de35a667d3620666041da8bcc357baea90e882fba4d33ef6c9f38906699cac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q8l78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:40Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.002383 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59b05e4bfee6067658a60220a411d49a83c5ca76039e31325ce7646cbbeeba3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71b7f697413549ee75514e57fcb6e2d93c6b7e349b1cf93e8fc8844c96036d60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:41Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.051173 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c47f771dea621131d66ab686e6c28f7f5aeb129834c0f8308f467c2bd1b871e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:41Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.068394 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:41Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.077835 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nvpgt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126473c5-96ff-4ca9-83c0-7548d7e219c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64f3d49f15e75b73f18118e60b0e8cdae794506cff25ef11ea3a217db91a4b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w6xgn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nvpgt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:41Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.090015 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.090051 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.090063 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.090104 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.090114 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:41Z","lastTransitionTime":"2026-01-22T05:46:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.095515 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a721333-1932-4bb0-b384-c034492e59c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9cfc51c4b76a4a0fe6458da0e032e772b18760
46969f7bd972e265fa5a8ca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://894c889f406ee573cc2056089469b86d80c2b1de365691296bd7125d706522db\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:46:22Z\\\",\\\"message\\\":\\\".org/owner\\\\\\\":\\\\\\\"openshift-marketplace/marketplace-operator-metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.53\\\\\\\", Port:8383, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.53\\\\\\\", Port:8081, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF0122 05:46:22.695952 6325 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:21Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd9cfc51c4b76a4a0fe6458da0e032e772b1876046969f7bd972e265fa5a8ca0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:46:40Z\\\",\\\"message\\\":\\\"rce:services.Addr{IP:\\\\\\\"10.217.4.153\\\\\\\", Port:5443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF0122 05:46:40.416204 6546 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:40Z is after 
2025-08-24T17:21:41Z]\\\\nI0122 05:46:40.416173 6546 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-operator-lifecycle-manager/package-server-manager-metrics]} name:Service_openshift-operator-lifecycle-manage\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hos
tIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z88sj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:41Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.107604 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:46:10Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 05:46:04.652334 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:46:04.654188 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-11537279/tls.crt::/tmp/serving-cert-11537279/tls.key\\\\\\\"\\\\nI0122 05:46:10.676992 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:46:10.680821 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:46:10.680851 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:46:10.680897 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:46:10.680911 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:46:10.690531 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 05:46:10.690586 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690615 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 05:46:10.690623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 05:46:10.690632 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 05:46:10.690639 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 05:46:10.691151 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:46:10.696387 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:41Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.120057 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88329ed7-908d-4584-abfb-6c24f9a764f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5e260f75dcd418c5e606ef0ce7c06aa8db34b655acdc7ed4d7540f3ed59d873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ad10cfd00a2bd6dab0704baa3988eab6b7b97812ef8a6074b9bc2c2a03df731\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://401030b22a7ca1989952ec1bf2d9d7d8a5b78e0cba875dcd6b1d2fe4f2548c54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ada618a16d850d1ea940debe486a3836a49697a4742426d29bddb32d496a9d61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:41Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.130504 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27ea089ef37f2af216c553e31f4852fe12489f919f95c9c6ec1c579babcedbb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:41Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.143778 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:41Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.159487 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:41Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.192439 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.192472 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.192488 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.192506 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.192517 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:41Z","lastTransitionTime":"2026-01-22T05:46:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.294500 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.294544 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.294556 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.294572 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.294584 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:41Z","lastTransitionTime":"2026-01-22T05:46:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.397108 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.397152 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.397164 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.397181 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.397192 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:41Z","lastTransitionTime":"2026-01-22T05:46:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.452797 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0902347a-c5e2-4891-812b-cfe6efc32261-metrics-certs\") pod \"network-metrics-daemon-t8rgm\" (UID: \"0902347a-c5e2-4891-812b-cfe6efc32261\") " pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:46:41 crc kubenswrapper[4933]: E0122 05:46:41.453015 4933 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 22 05:46:41 crc kubenswrapper[4933]: E0122 05:46:41.453168 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0902347a-c5e2-4891-812b-cfe6efc32261-metrics-certs podName:0902347a-c5e2-4891-812b-cfe6efc32261 nodeName:}" failed. No retries permitted until 2026-01-22 05:46:57.453138698 +0000 UTC m=+65.290264091 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0902347a-c5e2-4891-812b-cfe6efc32261-metrics-certs") pod "network-metrics-daemon-t8rgm" (UID: "0902347a-c5e2-4891-812b-cfe6efc32261") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.490051 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.490158 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.490213 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.490245 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:46:41 crc kubenswrapper[4933]: E0122 05:46:41.490384 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:46:41 crc kubenswrapper[4933]: E0122 05:46:41.490557 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:46:41 crc kubenswrapper[4933]: E0122 05:46:41.490693 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:46:41 crc kubenswrapper[4933]: E0122 05:46:41.490860 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.499769 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.499826 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.499846 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.499871 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.499892 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:41Z","lastTransitionTime":"2026-01-22T05:46:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.506241 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-04 17:02:14.658770354 +0000 UTC Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.603795 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.603850 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.603886 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.603914 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.603934 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:41Z","lastTransitionTime":"2026-01-22T05:46:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.709630 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.709660 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.709673 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.709686 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.709695 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:41Z","lastTransitionTime":"2026-01-22T05:46:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.811971 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.812021 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.812031 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.812046 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.812057 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:41Z","lastTransitionTime":"2026-01-22T05:46:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.876389 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-z88sj_6a721333-1932-4bb0-b384-c034492e59c4/ovnkube-controller/2.log" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.881937 4933 scope.go:117] "RemoveContainer" containerID="cd9cfc51c4b76a4a0fe6458da0e032e772b1876046969f7bd972e265fa5a8ca0" Jan 22 05:46:41 crc kubenswrapper[4933]: E0122 05:46:41.882229 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-z88sj_openshift-ovn-kubernetes(6a721333-1932-4bb0-b384-c034492e59c4)\"" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" podUID="6a721333-1932-4bb0-b384-c034492e59c4" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.902455 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27ea089ef37f2af216c553e31f4852fe12489f919f95c9c6ec1c579babcedbb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:41Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.914335 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.914423 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.914448 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.914479 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.914505 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:41Z","lastTransitionTime":"2026-01-22T05:46:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.922825 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:41Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.942366 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:41Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.956707 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nvpgt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126473c5-96ff-4ca9-83c0-7548d7e219c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64f3d49f15e75b73f18118e60b0e8cdae794506cff25ef11ea3a217db91a4b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w6xgn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nvpgt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-22T05:46:41Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:41 crc kubenswrapper[4933]: I0122 05:46:41.986920 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a721333-1932-4bb0-b384-c034492e59c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\
"containerID\\\":\\\"cri-o://8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9cfc51c4b76a4a0fe6458da0e032e772b1876046969f7bd972e265fa5a8ca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd9cfc51c4b76a4a0fe6458da0e032e772b1876046969f7bd972e265fa5a8ca0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:46:40Z\\\",\\\"message\\\":\\\"rce:services.Addr{IP:\\\\\\\"10.217.4.153\\\\\\\", Port:5443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF0122 05:46:40.416204 6546 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:40Z is after 2025-08-24T17:21:41Z]\\\\nI0122 05:46:40.416173 6546 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-operator-lifecycle-manager/package-server-manager-metrics]} name:Service_openshift-operator-lifecycle-manage\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:39Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed 
container=ovnkube-controller pod=ovnkube-node-z88sj_openshift-ovn-kubernetes(6a721333-1932-4bb0-b384-c034492e59c4)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"image\\\":\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z88sj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:41Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.010196 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f14
7858b1f716310e77164\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:46:10Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 05:46:04.652334 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:46:04.654188 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-11537279/tls.crt::/tmp/serving-cert-11537279/tls.key\\\\\\\"\\\\nI0122 05:46:10.676992 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:46:10.680821 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:46:10.680851 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:46:10.680897 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:46:10.680911 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:46:10.690531 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 05:46:10.690586 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690599 1 secure_serving.go:69] Use of insecure cipher 
'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690615 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 05:46:10.690623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 05:46:10.690632 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 05:46:10.690639 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 05:46:10.691151 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:46:10.696387 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.018681 4933 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.018843 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.018860 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.018877 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.018888 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:42Z","lastTransitionTime":"2026-01-22T05:46:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.030348 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88329ed7-908d-4584-abfb-6c24f9a764f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5e260f75dcd418c5e606ef0ce7c06aa8db34b655acdc7ed4d7540f3ed59d873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ad10cfd00a2bd6dab0704baa3988eab6b7b97812ef8a6074b9bc2c2a03df731\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":
\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://401030b22a7ca1989952ec1bf2d9d7d8a5b78e0cba875dcd6b1d2fe4f2548c54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ada618a16d850d1ea940debe486a3836a49697a4742426d29bddb32d496a9d61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.049135 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f2db1d-40cb-4864-917b-3b99f69cdafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b19005c1dbe0b643170cfc21cda4a46a6ab3630efbf5198dc7c19882bb5ff6a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99fe3d10aa7ee2f1de1f5f23ed8c4c7ac9efe69ce9e366f02e2e8efca2c017e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zfnsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.061477 4933 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-7r526" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2b23423-6793-44cd-b47e-dc4d25bbe3ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78e689c029c4c4b19873187eddbdcd40bbc62b4e93d9a120a801e04db7c56ecc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hfvwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7r526\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.074860 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-28bzv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9a82407-aef2-4209-bb3f-6c89e11387e3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66a005229530dfbb93c94f5b93260aeb17f8852e46eed9c67d1558c04d1552a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vkg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ad051b09250b7060a688905e48c62f57620af633bc9a1e302e91eb076c92981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vkg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-28bzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 2025-08-24T17:21:41Z" Jan 22 
05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.085757 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-t8rgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0902347a-c5e2-4891-812b-cfe6efc32261\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4ghgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4ghgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-t8rgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.107436 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jr6rw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a6b69e5a5d7420dbcda9a6a023402f27c1d584233c06bfb3d09ae7efb243e31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v4gxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jr6rw\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.122093 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.122144 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.122157 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.122180 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.122198 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:42Z","lastTransitionTime":"2026-01-22T05:46:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.135003 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83dfdde7-cd49-49e0-85a0-0165d464b2c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e3de35a667d3620666041da8bcc357baea90e882fba4d33ef6c9f38906699cac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerSta
tuses\\\":[{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:
46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-c
ni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q8l78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.151379 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c47f771dea621131d66ab686e6c28f7f5aeb129834c0f8308f467c2bd1b871e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 
2025-08-24T17:21:41Z" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.166564 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.185367 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59b05e4bfee6067658a60220a411d49a83c5ca76039e31325ce7646cbbeeba3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71b7f697413549ee75514e57fcb6e2d93c6b7e349b1cf93e8fc8844c96036d60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.225100 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.225395 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.225583 4933 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.225724 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.225842 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:42Z","lastTransitionTime":"2026-01-22T05:46:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.329436 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.329505 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.329522 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.329548 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.329571 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:42Z","lastTransitionTime":"2026-01-22T05:46:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.425655 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.435623 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.436034 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.436302 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.436581 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.436849 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:42Z","lastTransitionTime":"2026-01-22T05:46:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.439726 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.451282 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.471358 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59b05e4bfee6067658a60220a411d49a83c5ca76039e31325ce7646cbbeeba3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71b7f697413549ee75514e57fcb6e2d93c6b7e349b1cf93e8fc8844c96036d60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.488960 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c47f771dea621131d66ab686e6c28f7f5aeb129834c0f8308f467c2bd1b871e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.506598 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 02:29:34.085354476 +0000 UTC Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.512331 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.530663 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.540471 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.540530 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.540544 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.540562 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.540576 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:42Z","lastTransitionTime":"2026-01-22T05:46:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.545505 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nvpgt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126473c5-96ff-4ca9-83c0-7548d7e219c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64f3d49f15e75b73f18118e60b0e8cdae794506cff25ef11ea3a217db91a4b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w6xgn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nvpgt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.572199 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a721333-1932-4bb0-b384-c034492e59c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9cfc51c4b76a4a0fe6458da0e032e772b1876046969f7bd972e265fa5a8ca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd9cfc51c4b76a4a0fe6458da0e032e772b1876046969f7bd972e265fa5a8ca0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:46:40Z\\\",\\\"message\\\":\\\"rce:services.Addr{IP:\\\\\\\"10.217.4.153\\\\\\\", Port:5443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF0122 05:46:40.416204 6546 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:40Z is after 2025-08-24T17:21:41Z]\\\\nI0122 05:46:40.416173 6546 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-operator-lifecycle-manager/package-server-manager-metrics]} name:Service_openshift-operator-lifecycle-manage\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:39Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-z88sj_openshift-ovn-kubernetes(6a721333-1932-4bb0-b384-c034492e59c4)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z88sj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.586550 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:46:10Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 05:46:04.652334 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:46:04.654188 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-11537279/tls.crt::/tmp/serving-cert-11537279/tls.key\\\\\\\"\\\\nI0122 05:46:10.676992 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:46:10.680821 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:46:10.680851 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:46:10.680897 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:46:10.680911 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:46:10.690531 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 05:46:10.690586 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690599 1 secure_serving.go:69] Use of insecure cipher 
'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690615 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 05:46:10.690623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 05:46:10.690632 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 05:46:10.690639 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 05:46:10.691151 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:46:10.696387 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.598809 4933 status_manager.go:875] "Failed to update status 
for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88329ed7-908d-4584-abfb-6c24f9a764f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5e260f75dcd418c5e606ef0ce7c06aa8db34b655acdc7ed4d7540f3ed59d873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ad10cfd00a2bd6dab0704baa3988eab6b7b97812ef8a6074b9bc2c2a03df731\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://401030b22a7ca1989952ec1bf2d9d7d8a5b78e0cba875dcd6b1d2fe4f2548c54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ada618a16d850d1ea940debe486a3836a49697a47
42426d29bddb32d496a9d61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.618766 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27ea089ef37f2af216c553e31f4852fe12489f919f95c9c6ec1c579babcedbb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.632493 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7r526" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2b23423-6793-44cd-b47e-dc4d25bbe3ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78e689c029c4c4b19873187eddbdcd40bbc62b4e93d9a120a801e04db7c56ecc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hfvwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7r526\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.642903 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.642937 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.642946 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:42 crc 
kubenswrapper[4933]: I0122 05:46:42.642972 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.642981 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:42Z","lastTransitionTime":"2026-01-22T05:46:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.648404 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-28bzv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9a82407-aef2-4209-bb3f-6c89e11387e3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66a005229530dfbb93c94f5b93260aeb17f8852e46eed9c67d1558c04d1552a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vkg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ad051b09250b7060a688905e48c62f57620af633bc9a1e302e91eb076c92981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env
-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vkg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-28bzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.658450 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-t8rgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0902347a-c5e2-4891-812b-cfe6efc32261\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4ghgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4ghgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-t8rgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.672181 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f2db1d-40cb-4864-917b-3b99f69cdafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b19005c1dbe0b643170cfc21cda4a46a6ab3630efbf5198dc7c19882bb5ff6a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99fe3d10aa7ee2f1de1f5f23ed8c4c7ac9efe69ce9e366f02e2e8efca2c017e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zfnsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.690789 4933 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-jr6rw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a6b69e5a5d7420dbcda9a6a023402f27c1d584233c06bfb3d09ae7efb243e31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v4gxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-jr6rw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.707307 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83dfdde7-cd49-49e0-85a0-0165d464b2c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e3de35a667d3620666041da8bcc357baea90e882fba4d33ef6c9f38906699cac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4246
deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q8l78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.721468 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-t8rgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0902347a-c5e2-4891-812b-cfe6efc32261\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4ghgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4ghgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-t8rgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.734274 4933 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2b01760-735d-4991-8e66-28149847868b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8018d50ee76b7bed70197f4085cb2157cbad29ecbb0f9c3ea6cb4ed0621877ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9aa4ed2d41484a7f47e481ae23e10c3a264edfdaddeb4c8f6ffd946694c88481\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa57c9c8f2cbba4f1282222cf6f3a29fd59efe4a378222e613eaeb32f0b71877\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerSt
atuses\\\":[{\\\"containerID\\\":\\\"cri-o://171c23458dabebaa0ddea08c10f662edcab7797ef6dffaab101c75d6a0737dff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://171c23458dabebaa0ddea08c10f662edcab7797ef6dffaab101c75d6a0737dff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.745462 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.745493 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.745501 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.745516 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.745528 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:42Z","lastTransitionTime":"2026-01-22T05:46:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.748389 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f2db1d-40cb-4864-917b-3b99f69cdafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b19005c1dbe0b643170cfc21cda4a46a6ab3630efbf5198dc7c19882bb5ff6a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99fe3d10aa7ee2f1de1f5f23ed8c4c7ac9efe69ce9e366f02e2e8efca2c017e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zfnsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.759030 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7r526" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2b23423-6793-44cd-b47e-dc4d25bbe3ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78e689c029c4c4b19873187eddbdcd40bbc62b4e93d9a120a801e04db7c56ecc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hfvwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7r526\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.771595 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-28bzv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9a82407-aef2-4209-bb3f-6c89e11387e3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66a005229530dfbb93c94f5b93260aeb17f8852e46eed9c67d1558c04d1552a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vkg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ad051b09250b7060a688905e48c62f57620af633bc9a1e302e91eb076c92981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vkg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-28bzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 2025-08-24T17:21:41Z" Jan 22 
05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.790593 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jr6rw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a6b69e5a5d7420dbcda9a6a023402f27c1d584233c06bfb3d09ae7efb243e31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v4gxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.
168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jr6rw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.811352 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83dfdde7-cd49-49e0-85a0-0165d464b2c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e3de35a667d3620666041da8bcc357baea90e882fba4d33ef6c9f38906699cac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/o
s-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":f
alse,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q8l78\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.833951 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59b05e4bfee6067658a60220a411d49a83c5ca76039e31325ce7646cbbeeba3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71b7f697413549ee75514e57fcb6e2d93c6b7e349b1cf93e8fc8844c96036d60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 
2025-08-24T17:21:41Z" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.850828 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.850912 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.850949 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.850981 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.851005 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:42Z","lastTransitionTime":"2026-01-22T05:46:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.855647 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c47f771dea621131d66ab686e6c28f7f5aeb129834c0f8308f467c2bd1b871e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 
05:46:42.876927 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.896784 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nvpgt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"126473c5-96ff-4ca9-83c0-7548d7e219c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64f3d49f15e75b73f18118e60b0e8cdae794506cff25ef11ea3a217db91a4b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w6xgn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nvpgt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.937437 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a721333-1932-4bb0-b384-c034492e59c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9cfc51c4b76a4a0fe6458da0e032e772b1876046969f7bd972e265fa5a8ca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd9cfc51c4b76a4a0fe6458da0e032e772b1876046969f7bd972e265fa5a8ca0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:46:40Z\\\",\\\"message\\\":\\\"rce:services.Addr{IP:\\\\\\\"10.217.4.153\\\\\\\", Port:5443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF0122 05:46:40.416204 6546 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:40Z is after 2025-08-24T17:21:41Z]\\\\nI0122 05:46:40.416173 6546 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-operator-lifecycle-manager/package-server-manager-metrics]} name:Service_openshift-operator-lifecycle-manage\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:39Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-z88sj_openshift-ovn-kubernetes(6a721333-1932-4bb0-b384-c034492e59c4)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z88sj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.954715 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.954770 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.954787 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.954809 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.954826 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:42Z","lastTransitionTime":"2026-01-22T05:46:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.958232 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:46:10Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 05:46:04.652334 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:46:04.654188 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-11537279/tls.crt::/tmp/serving-cert-11537279/tls.key\\\\\\\"\\\\nI0122 05:46:10.676992 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:46:10.680821 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:46:10.680851 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:46:10.680897 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:46:10.680911 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:46:10.690531 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 05:46:10.690586 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690615 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 05:46:10.690623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 05:46:10.690632 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 05:46:10.690639 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 05:46:10.691151 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:46:10.696387 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.976889 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88329ed7-908d-4584-abfb-6c24f9a764f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5e260f75dcd418c5e606ef0ce7c06aa8db34b655acdc7ed4d7540f3ed59d873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ad10cfd00a2bd6dab0704baa3988eab6b7b97812ef8a6074b9bc2c2a03df731\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://401030b22a7ca1989952ec1bf2d9d7d8a5b78e0cba875dcd6b1d2fe4f2548c54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ada618a16d850d1ea940debe486a3836a49697a4742426d29bddb32d496a9d61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:42 crc kubenswrapper[4933]: I0122 05:46:42.993645 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27ea089ef37f2af216c553e31f4852fe12489f919f95c9c6ec1c579babcedbb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.013930 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:43Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.034867 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:43Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.057711 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.057761 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.057776 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.057796 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.057810 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:43Z","lastTransitionTime":"2026-01-22T05:46:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.161030 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.161490 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.161508 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.161534 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.161559 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:43Z","lastTransitionTime":"2026-01-22T05:46:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.264572 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.264638 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.264658 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.264682 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.264700 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:43Z","lastTransitionTime":"2026-01-22T05:46:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.367827 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.367861 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.367869 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.367882 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.367891 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:43Z","lastTransitionTime":"2026-01-22T05:46:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.471707 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.471765 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.471782 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.471807 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.471824 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:43Z","lastTransitionTime":"2026-01-22T05:46:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.476203 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.476325 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.476438 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 22 05:46:43 crc kubenswrapper[4933]: E0122 05:46:43.476488 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:47:15.476450376 +0000 UTC m=+83.313575769 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.476547 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 22 05:46:43 crc kubenswrapper[4933]: E0122 05:46:43.476601 4933 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 22 05:46:43 crc kubenswrapper[4933]: E0122 05:46:43.476628 4933 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.476638 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 22 05:46:43 crc kubenswrapper[4933]: E0122 05:46:43.476647 4933 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 22 05:46:43 crc kubenswrapper[4933]: E0122 05:46:43.476723 4933 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 22 05:46:43 crc kubenswrapper[4933]: E0122 05:46:43.476779 4933 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 22 05:46:43 crc kubenswrapper[4933]: E0122 05:46:43.476772 4933 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 22 05:46:43 crc kubenswrapper[4933]: E0122 05:46:43.476764 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-22 05:47:15.476742773 +0000 UTC m=+83.313868166 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 22 05:46:43 crc kubenswrapper[4933]: E0122 05:46:43.476801 4933 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 22 05:46:43 crc kubenswrapper[4933]: E0122 05:46:43.476912 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 05:47:15.476880056 +0000 UTC m=+83.314005449 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 22 05:46:43 crc kubenswrapper[4933]: E0122 05:46:43.476672 4933 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 22 05:46:43 crc kubenswrapper[4933]: E0122 05:46:43.476973 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 05:47:15.476960587 +0000 UTC m=+83.314085970 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 22 05:46:43 crc kubenswrapper[4933]: E0122 05:46:43.476995 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-22 05:47:15.476984008 +0000 UTC m=+83.314109401 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.489927 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm"
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.489972 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 22 05:46:43 crc kubenswrapper[4933]: E0122 05:46:43.490111 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261"
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.490142 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 22 05:46:43 crc kubenswrapper[4933]: E0122 05:46:43.490293 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.490384 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 22 05:46:43 crc kubenswrapper[4933]: E0122 05:46:43.490478 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 22 05:46:43 crc kubenswrapper[4933]: E0122 05:46:43.490541 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.507543 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-12 20:32:21.219772088 +0000 UTC
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.574698 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.574748 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.574763 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.574787 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.574805 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:43Z","lastTransitionTime":"2026-01-22T05:46:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.678550 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.678608 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.678626 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.678649 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.678669 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:43Z","lastTransitionTime":"2026-01-22T05:46:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.781127 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.781194 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.781213 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.781239 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.781256 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:43Z","lastTransitionTime":"2026-01-22T05:46:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.885598 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.885666 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.885684 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.885708 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.885725 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:43Z","lastTransitionTime":"2026-01-22T05:46:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.989188 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.989248 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.989265 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.989291 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:43 crc kubenswrapper[4933]: I0122 05:46:43.989308 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:43Z","lastTransitionTime":"2026-01-22T05:46:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.091861 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.091902 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.091913 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.091928 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.091940 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:44Z","lastTransitionTime":"2026-01-22T05:46:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.194740 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.194803 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.194822 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.194846 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.194865 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:44Z","lastTransitionTime":"2026-01-22T05:46:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.297562 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.297624 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.297642 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.297671 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.297695 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:44Z","lastTransitionTime":"2026-01-22T05:46:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.400908 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.401032 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.401062 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.401124 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.401143 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:44Z","lastTransitionTime":"2026-01-22T05:46:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.504382 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.504475 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.504500 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.504526 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.504548 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:44Z","lastTransitionTime":"2026-01-22T05:46:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.508343 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 10:05:04.050895607 +0000 UTC
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.607496 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.607575 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.607592 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.607618 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.607640 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:44Z","lastTransitionTime":"2026-01-22T05:46:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.710723 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.710804 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.710829 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.710857 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.710879 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:44Z","lastTransitionTime":"2026-01-22T05:46:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.813970 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.814041 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.814060 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.814117 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.814138 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:44Z","lastTransitionTime":"2026-01-22T05:46:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.916309 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.916380 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.916403 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.916433 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:44 crc kubenswrapper[4933]: I0122 05:46:44.916459 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:44Z","lastTransitionTime":"2026-01-22T05:46:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.019864 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.019931 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.019971 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.020007 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.020033 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:45Z","lastTransitionTime":"2026-01-22T05:46:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.122586 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.122648 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.122665 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.122687 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.122704 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:45Z","lastTransitionTime":"2026-01-22T05:46:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.225903 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.225968 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.225986 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.226011 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.226029 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:45Z","lastTransitionTime":"2026-01-22T05:46:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.329143 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.329194 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.329210 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.329232 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.329249 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:45Z","lastTransitionTime":"2026-01-22T05:46:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.432616 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.432680 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.432700 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.432725 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.432742 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:45Z","lastTransitionTime":"2026-01-22T05:46:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.490588 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.490628 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.490707 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm"
Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.490742 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 22 05:46:45 crc kubenswrapper[4933]: E0122 05:46:45.490950 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 22 05:46:45 crc kubenswrapper[4933]: E0122 05:46:45.491184 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261"
Jan 22 05:46:45 crc kubenswrapper[4933]: E0122 05:46:45.491327 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 22 05:46:45 crc kubenswrapper[4933]: E0122 05:46:45.491480 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.508420 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.508454 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.508463 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.508478 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.508491 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:45Z","lastTransitionTime":"2026-01-22T05:46:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.508461 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-15 23:02:08.514427011 +0000 UTC
Jan 22 05:46:45 crc kubenswrapper[4933]: E0122 05:46:45.525747 4933 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056
b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951
},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c8062a48-506a-465a-9977-93e8530bae49\\\",\\\"systemUUID\\\":\\\"d621d350-6f7c-490e-a47d-b396db235280\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-01-22T05:46:45Z is after 2025-08-24T17:21:41Z"
Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.531337 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.531369 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.531379 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.531393 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.531404 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:45Z","lastTransitionTime":"2026-01-22T05:46:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:45 crc kubenswrapper[4933]: E0122 05:46:45.546095 4933 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c8062a48-506a-465a-9977-93e8530bae49\\\",\\\"systemUUID\\\":\\\"d621d350-6f7c-490e-a47d-b396db235280\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:45Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.550571 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.550637 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.550655 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.550680 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.550697 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:45Z","lastTransitionTime":"2026-01-22T05:46:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:45 crc kubenswrapper[4933]: E0122 05:46:45.568901 4933 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c8062a48-506a-465a-9977-93e8530bae49\\\",\\\"systemUUID\\\":\\\"d621d350-6f7c-490e-a47d-b396db235280\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:45Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.573796 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.573868 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
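[editor's note] Every retry above fails at the same point: the node-identity webhook at 127.0.0.1:9743 presents a serving certificate that expired on 2025-08-24, while the node clock reads 2026-01-22, so the API server cannot deliver the kubelet's status patch. Below is a minimal Go sketch for confirming the expiry by hand from the node; the address comes from the error text, everything else is illustrative.

package main

// certcheck.go: a diagnostic sketch, not part of the kubelet. It dials
// the webhook endpoint named in the error above and reports whether
// the serving certificate has expired.

import (
	"crypto/tls"
	"fmt"
	"log"
	"time"
)

func main() {
	// InsecureSkipVerify lets us complete the handshake and inspect a
	// certificate that a verifying client (like the API server here)
	// would reject outright.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	cert := conn.ConnectionState().PeerCertificates[0] // leaf certificate
	fmt.Printf("subject:   %s\n", cert.Subject)
	fmt.Printf("notBefore: %s\n", cert.NotBefore.UTC().Format(time.RFC3339))
	fmt.Printf("notAfter:  %s\n", cert.NotAfter.UTC().Format(time.RFC3339))
	if now := time.Now(); now.After(cert.NotAfter) {
		fmt.Printf("EXPIRED: current time %s is after %s\n",
			now.UTC().Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
	}
}

Run on this node, it should report a notAfter of 2025-08-24T17:21:41Z, matching the x509 error in the entries above.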
event="NodeHasNoDiskPressure" Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.573894 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.573922 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.573938 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:45Z","lastTransitionTime":"2026-01-22T05:46:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:45 crc kubenswrapper[4933]: E0122 05:46:45.591141 4933 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c8062a48-506a-465a-9977-93e8530bae49\\\",\\\"systemUUID\\\":\\\"d621d350-6f7c-490e-a47d-b396db235280\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:45Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.596263 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.596556 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
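[editor's note] Independently of the webhook failure, the Ready condition itself is False because the runtime reports NetworkReady=false: nothing has yet written a CNI config into /etc/kubernetes/cni/net.d/. Below is a sketch of that directory check, assuming the extensions CNI config loaders commonly accept (.conf, .conflist, .json); the exact cri-o/ocicni matching rules are not taken from this log and may differ.

package main

// cnicheck.go: a sketch of the check behind the repeated
// "no CNI configuration file in /etc/kubernetes/cni/net.d/" message.

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	confDir := "/etc/kubernetes/cni/net.d" // directory named in the log
	entries, err := os.ReadDir(confDir)
	if err != nil {
		fmt.Printf("cannot read %s: %v\n", confDir, err)
		os.Exit(1)
	}
	found := 0
	for _, e := range entries {
		// Assumed extension list; real loaders may accept a different set.
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			fmt.Printf("CNI config: %s\n", filepath.Join(confDir, e.Name()))
			found++
		}
	}
	if found == 0 {
		// This is the state the kubelet keeps reporting: the network
		// plugin (OVN-Kubernetes on CRC) has not written its config yet.
		fmt.Println("no CNI configuration file found; NetworkReady stays false")
	}
}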
event="NodeHasNoDiskPressure" Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.596755 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.596964 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.597241 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:45Z","lastTransitionTime":"2026-01-22T05:46:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:45 crc kubenswrapper[4933]: E0122 05:46:45.617946 4933 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:45Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c8062a48-506a-465a-9977-93e8530bae49\\\",\\\"systemUUID\\\":\\\"d621d350-6f7c-490e-a47d-b396db235280\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:45Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:45 crc kubenswrapper[4933]: E0122 05:46:45.618329 4933 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.620833 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
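[editor's note] The sequence above ends with "update node status exceeds retry count": the kubelet attempts the status PATCH a fixed number of times per sync before giving up until the next sync period, which is why the identical payload appears several times. Below is a sketch of that bounded-retry shape; the constant name nodeStatusUpdateRetry and its value 5 come from upstream Kubernetes and are an assumption for this particular build.

package main

// A sketch of the pattern behind the kubelet_node_status.go:585
// ("will retry") and :572 ("exceeds retry count") lines above.
// Not the kubelet's actual code.

import (
	"errors"
	"fmt"
)

// Assumed to match upstream Kubernetes (nodeStatusUpdateRetry = 5).
const nodeStatusUpdateRetry = 5

// tryUpdateNodeStatus stands in for the real PATCH to the API server;
// here it always fails the way this log shows.
func tryUpdateNodeStatus() error {
	return errors.New(`Internal error occurred: failed calling webhook "node.network-node-identity.openshift.io": certificate has expired`)
}

func updateNodeStatus() error {
	for i := 0; i < nodeStatusUpdateRetry; i++ {
		if err := tryUpdateNodeStatus(); err != nil {
			fmt.Printf("Error updating node status, will retry: %v\n", err)
			continue
		}
		return nil
	}
	return fmt.Errorf("update node status exceeds retry count")
}

func main() {
	if err := updateNodeStatus(); err != nil {
		fmt.Println("Unable to update node status:", err)
	}
}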
event="NodeHasSufficientMemory" Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.620896 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.620916 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.620943 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.620961 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:45Z","lastTransitionTime":"2026-01-22T05:46:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.723562 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.723931 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.724026 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.724142 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.724238 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:45Z","lastTransitionTime":"2026-01-22T05:46:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.827593 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.827664 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.827685 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.827714 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.827738 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:45Z","lastTransitionTime":"2026-01-22T05:46:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.930805 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.930859 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.930875 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.930897 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:45 crc kubenswrapper[4933]: I0122 05:46:45.930916 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:45Z","lastTransitionTime":"2026-01-22T05:46:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.034060 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.034170 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.034206 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.034240 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.034263 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:46Z","lastTransitionTime":"2026-01-22T05:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.137378 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.137463 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.137485 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.137515 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.137538 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:46Z","lastTransitionTime":"2026-01-22T05:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.241417 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.241490 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.241511 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.241534 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.241552 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:46Z","lastTransitionTime":"2026-01-22T05:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.344521 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.344587 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.344605 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.344633 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.344652 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:46Z","lastTransitionTime":"2026-01-22T05:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.448436 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.448504 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.448529 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.448562 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.448588 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:46Z","lastTransitionTime":"2026-01-22T05:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.508687 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-09 20:19:28.410087444 +0000 UTC Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.551134 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.551216 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.551241 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.551271 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.551295 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:46Z","lastTransitionTime":"2026-01-22T05:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.653678 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.653725 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.653734 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.653748 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.653757 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:46Z","lastTransitionTime":"2026-01-22T05:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.755386 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.755423 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.755430 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.755445 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.755456 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:46Z","lastTransitionTime":"2026-01-22T05:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.857639 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.857700 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.857717 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.857742 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.857759 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:46Z","lastTransitionTime":"2026-01-22T05:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.960628 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.960684 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.960700 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.960725 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:46 crc kubenswrapper[4933]: I0122 05:46:46.960743 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:46Z","lastTransitionTime":"2026-01-22T05:46:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:47 crc kubenswrapper[4933]: I0122 05:46:47.063150 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:47 crc kubenswrapper[4933]: I0122 05:46:47.063279 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:47 crc kubenswrapper[4933]: I0122 05:46:47.063340 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:47 crc kubenswrapper[4933]: I0122 05:46:47.063362 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:47 crc kubenswrapper[4933]: I0122 05:46:47.063421 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:47Z","lastTransitionTime":"2026-01-22T05:46:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:47 crc kubenswrapper[4933]: I0122 05:46:47.166965 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:47 crc kubenswrapper[4933]: I0122 05:46:47.167442 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:47 crc kubenswrapper[4933]: I0122 05:46:47.167673 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:47 crc kubenswrapper[4933]: I0122 05:46:47.167884 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:47 crc kubenswrapper[4933]: I0122 05:46:47.168164 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:47Z","lastTransitionTime":"2026-01-22T05:46:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:47 crc kubenswrapper[4933]: I0122 05:46:47.271379 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:47 crc kubenswrapper[4933]: I0122 05:46:47.271802 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:47 crc kubenswrapper[4933]: I0122 05:46:47.272015 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:47 crc kubenswrapper[4933]: I0122 05:46:47.272298 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:47 crc kubenswrapper[4933]: I0122 05:46:47.272494 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:47Z","lastTransitionTime":"2026-01-22T05:46:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:47 crc kubenswrapper[4933]: I0122 05:46:47.374688 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:47 crc kubenswrapper[4933]: I0122 05:46:47.374947 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:47 crc kubenswrapper[4933]: I0122 05:46:47.375012 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:47 crc kubenswrapper[4933]: I0122 05:46:47.375098 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:47 crc kubenswrapper[4933]: I0122 05:46:47.375176 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:47Z","lastTransitionTime":"2026-01-22T05:46:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:47 crc kubenswrapper[4933]: I0122 05:46:47.477776 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:47 crc kubenswrapper[4933]: I0122 05:46:47.478398 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:47 crc kubenswrapper[4933]: I0122 05:46:47.478484 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:47 crc kubenswrapper[4933]: I0122 05:46:47.478574 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:47 crc kubenswrapper[4933]: I0122 05:46:47.478640 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:47Z","lastTransitionTime":"2026-01-22T05:46:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:47 crc kubenswrapper[4933]: I0122 05:46:47.490586 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:46:47 crc kubenswrapper[4933]: I0122 05:46:47.490666 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:46:47 crc kubenswrapper[4933]: I0122 05:46:47.490826 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:46:47 crc kubenswrapper[4933]: I0122 05:46:47.490997 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:46:47 crc kubenswrapper[4933]: E0122 05:46:47.491650 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:46:47 crc kubenswrapper[4933]: E0122 05:46:47.491172 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:46:47 crc kubenswrapper[4933]: E0122 05:46:47.491223 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:46:47 crc kubenswrapper[4933]: E0122 05:46:47.490989 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:46:47 crc kubenswrapper[4933]: I0122 05:46:47.509602 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-17 22:57:11.524645741 +0000 UTC Jan 22 05:46:47 crc kubenswrapper[4933]: I0122 05:46:47.581542 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:47 crc kubenswrapper[4933]: I0122 05:46:47.581865 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:47 crc kubenswrapper[4933]: I0122 05:46:47.581944 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:47 crc kubenswrapper[4933]: I0122 05:46:47.582009 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:47 crc kubenswrapper[4933]: I0122 05:46:47.582105 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:47Z","lastTransitionTime":"2026-01-22T05:46:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
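Every entry above reduces to one condition: the kubelet reports NetworkReady=false because nothing has yet written a CNI network configuration into /etc/kubernetes/cni/net.d/, so sandboxes for the four listed pods cannot be created. The Go sketch below shows the shape of file that check is waiting for; it is an assumption-laden illustration (the file name 10-example.conflist, the bridge plugin, and the 10.88.0.0/16 subnet are all invented here), not what this cluster uses — on an OpenShift/crc node the OVN-Kubernetes/Multus network operator writes the real configuration once it starts, which is why the message asks "Has your network provider started?".

package main

// Illustrative sketch only (assumption): writes a minimal generic CNI
// bridge config of the kind the kubelet looks for in
// /etc/kubernetes/cni/net.d/. On an OpenShift node the network operator
// creates the real config; this is not that file.
import (
	"log"
	"os"
	"path/filepath"
)

const conf = `{
  "cniVersion": "0.4.0",
  "name": "example-net",
  "plugins": [
    {
      "type": "bridge",
      "bridge": "cni0",
      "isGateway": true,
      "ipMasq": true,
      "ipam": {
        "type": "host-local",
        "subnet": "10.88.0.0/16",
        "routes": [{ "dst": "0.0.0.0/0" }]
      }
    }
  ]
}`

func main() {
	dir := "/etc/kubernetes/cni/net.d" // directory named in the log errors
	if err := os.MkdirAll(dir, 0o755); err != nil {
		log.Fatal(err)
	}
	// 10-example.conflist is an assumed name; any *.conf/*.conflist is read.
	if err := os.WriteFile(filepath.Join(dir, "10-example.conflist"), []byte(conf), 0o644); err != nil {
		log.Fatal(err)
	}
}

Once a parseable config appears in that directory, the runtime should report NetworkReady=true and the NodeNotReady churn above should stop.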
[... identical node-status block repeated every ~100 ms, 05:46:47.581 – 05:46:48.408 ...]
Jan 22 05:46:48 crc kubenswrapper[4933]: I0122 05:46:48.509834 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 16:48:15.035482752 +0000 UTC
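A side detail worth noticing in these entries: the certificate expiration is constant (2026-02-24 05:53:03 UTC) while the logged rotation deadline jumps around (2026-01-09, 2026-01-17, 2025-12-18, ...). A plausible explanation, sketched below on the assumption that this kubelet's certificate manager behaves like client-go's, is that the deadline is re-drawn on each evaluation at a uniformly random point 70–90% of the way through the certificate's validity; the notBefore time in the sketch is assumed, only the expiry comes from the log.

package main

// Sketch of jittered rotation-deadline selection (assumption: modeled on
// client-go's certificate manager; not extracted from this kubelet build).
import (
	"fmt"
	"math/rand"
	"time"
)

// rotationDeadline picks a random point 70-90% of the way through the
// certificate's validity window, which is why each log line above differs.
func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := float64(notAfter.Sub(notBefore))
	return notBefore.Add(time.Duration(total * (0.7 + 0.3*rand.Float64())))
}

func main() {
	notBefore := time.Date(2025, time.November, 26, 5, 53, 3, 0, time.UTC) // assumed issue time
	notAfter := time.Date(2026, time.February, 24, 5, 53, 3, 0, time.UTC)  // expiry from the log
	for i := 0; i < 3; i++ {
		fmt.Println(rotationDeadline(notBefore, notAfter))
	}
}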
[... identical node-status block repeated every ~100 ms, 05:46:48.512 – 05:46:49.439 ...]
Jan 22 05:46:49 crc kubenswrapper[4933]: I0122 05:46:49.490353 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 22 05:46:49 crc kubenswrapper[4933]: I0122 05:46:49.490460 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm"
Jan 22 05:46:49 crc kubenswrapper[4933]: I0122 05:46:49.490490 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 22 05:46:49 crc kubenswrapper[4933]: I0122 05:46:49.490491 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 22 05:46:49 crc kubenswrapper[4933]: E0122 05:46:49.490562 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 22 05:46:49 crc kubenswrapper[4933]: E0122 05:46:49.490616 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 22 05:46:49 crc kubenswrapper[4933]: E0122 05:46:49.490748 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 22 05:46:49 crc kubenswrapper[4933]: E0122 05:46:49.490900 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261"
Jan 22 05:46:49 crc kubenswrapper[4933]: I0122 05:46:49.510677 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-22 13:06:24.236287326 +0000 UTC
[... identical node-status block repeated every ~100 ms, 05:46:49.542 – 05:46:50.470 ...]
Jan 22 05:46:50 crc kubenswrapper[4933]: I0122 05:46:50.511735 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-13 18:51:30.048395624 +0000 UTC
[... identical node-status block repeated every ~100 ms, 05:46:50.573 – 05:46:51.400 ...]
Jan 22 05:46:51 crc kubenswrapper[4933]: I0122 05:46:51.490381 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm"
Jan 22 05:46:51 crc kubenswrapper[4933]: I0122 05:46:51.490401 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 22 05:46:51 crc kubenswrapper[4933]: E0122 05:46:51.491000 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261"
Jan 22 05:46:51 crc kubenswrapper[4933]: I0122 05:46:51.490600 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 22 05:46:51 crc kubenswrapper[4933]: E0122 05:46:51.491033 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 22 05:46:51 crc kubenswrapper[4933]: I0122 05:46:51.490542 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 22 05:46:51 crc kubenswrapper[4933]: E0122 05:46:51.491508 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 22 05:46:51 crc kubenswrapper[4933]: E0122 05:46:51.491659 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 22 05:46:51 crc kubenswrapper[4933]: I0122 05:46:51.502967 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:51 crc kubenswrapper[4933]: I0122 05:46:51.503312 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:51 crc kubenswrapper[4933]: I0122 05:46:51.503332 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:51 crc kubenswrapper[4933]: I0122 05:46:51.503355 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:51 crc kubenswrapper[4933]: I0122 05:46:51.503368 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:51Z","lastTransitionTime":"2026-01-22T05:46:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:51 crc kubenswrapper[4933]: I0122 05:46:51.511886 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-07 15:24:14.268905138 +0000 UTC
[... identical node-status block repeated every ~100 ms, 05:46:51.606 – 05:46:51.814 ...]
Jan 22 05:46:51 crc kubenswrapper[4933]: I0122 05:46:51.917678 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:51 crc kubenswrapper[4933]: I0122 05:46:51.917965 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:51 crc kubenswrapper[4933]: I0122 05:46:51.918037 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:51 crc kubenswrapper[4933]: I0122 05:46:51.918136 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:51 crc kubenswrapper[4933]: I0122 05:46:51.918230 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:51Z","lastTransitionTime":"2026-01-22T05:46:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.021996 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.022056 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.022096 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.022119 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.022137 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:52Z","lastTransitionTime":"2026-01-22T05:46:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.124259 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.124315 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.124332 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.124355 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.124372 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:52Z","lastTransitionTime":"2026-01-22T05:46:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.227683 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.227728 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.227739 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.227754 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.227767 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:52Z","lastTransitionTime":"2026-01-22T05:46:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.330686 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.330749 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.330761 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.330776 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.330788 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:52Z","lastTransitionTime":"2026-01-22T05:46:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.432883 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.432934 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.432949 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.432967 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.432979 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:52Z","lastTransitionTime":"2026-01-22T05:46:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.508403 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jr6rw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a6b69e5a5d7420dbcda9a6a023402f27c1d584233c06bfb3d09ae7efb243e31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v4gxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jr6rw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:52Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.512624 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-19 03:17:03.539504236 +0000 UTC Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.531971 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83dfdde7-cd49-49e0-85a0-0165d464b2c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e3de35a667d3620666041da8bcc357baea90e882fba4d33ef6c9f38906699cac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",
\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b
2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\
\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q8l78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:52Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.541297 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.541373 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.541394 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.541426 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.541448 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:52Z","lastTransitionTime":"2026-01-22T05:46:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.548899 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59b05e4bfee6067658a60220a411d49a83c5ca76039e31325ce7646cbbeeba3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71b7f697413549ee75514e57fcb6e2d93c6b7e349b1cf93e8fc8844c96036d60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:52Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.565293 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c47f771dea621131d66ab686e6c28f7f5aeb129834c0f8308f467c2bd1b871e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:52Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.582232 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:52Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.597223 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:46:10Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 05:46:04.652334 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:46:04.654188 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-11537279/tls.crt::/tmp/serving-cert-11537279/tls.key\\\\\\\"\\\\nI0122 05:46:10.676992 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:46:10.680821 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:46:10.680851 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:46:10.680897 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:46:10.680911 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:46:10.690531 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 05:46:10.690586 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690615 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 05:46:10.690623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 05:46:10.690632 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 05:46:10.690639 1 
secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 05:46:10.691151 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:46:10.696387 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:52Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.617502 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88329ed7-908d-4584-abfb-6c24f9a764f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5e260f75dcd418c5e606ef0ce7c06aa8db34b655acdc7ed4d7540f3ed59d873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ad10cfd00a2bd6dab0704baa3988eab6b7b97812ef8a6074b9bc2c2a03df731\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://401030b22a7ca1989952ec1bf2d9d7d8a5b78e0cba875dcd6b1d2fe4f2548c54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ada618a16d850d1ea940debe486a3836a49697a4742426d29bddb32d496a9d61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:52Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.635166 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27ea089ef37f2af216c553e31f4852fe12489f919f95c9c6ec1c579babcedbb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:52Z is after 2025-08-24T17:21:41Z"
Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.645541 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.645616 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.645638 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.645665 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.645686 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:52Z","lastTransitionTime":"2026-01-22T05:46:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.651157 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:52Z is after 2025-08-24T17:21:41Z"
Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.666966 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:52Z is after 2025-08-24T17:21:41Z"
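Every "Failed to update status for pod" entry above fails for the same underlying reason: the serving certificate of the webhook at https://127.0.0.1:9743 has a NotAfter of 2025-08-24T17:21:41Z, while the node clock reads 2026-01-22. Below is a minimal Go sketch of the validity-window check that produces this exact error text; the certificate path is hypothetical and the code is an illustration, not the kubelet's own implementation.

```go
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	// Hypothetical path standing in for the webhook's serving certificate.
	pemBytes, err := os.ReadFile("/tmp/webhook-serving.pem")
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		panic("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}
	// crypto/x509 rejects any chain whose certificate is outside its
	// [NotBefore, NotAfter] window; the log entries above show the
	// "current time ... is after ..." branch of that check.
	now := time.Now()
	if now.Before(cert.NotBefore) || now.After(cert.NotAfter) {
		fmt.Printf("x509: certificate has expired or is not yet valid: current time %s is after %s\n",
			now.UTC().Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
	}
}
```

Because this check runs during TLS chain verification, the Post to the webhook fails before any HTTP exchange takes place, which is why the API server reports it as "failed to call webhook".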
Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.680154 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nvpgt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126473c5-96ff-4ca9-83c0-7548d7e219c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64f3d49f15e75b73f18118e60b0e8cdae794506cff25ef11ea3a217db91a4b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w6xgn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nvpgt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:52Z is after 2025-08-24T17:21:41Z"
Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.713435 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a721333-1932-4bb0-b384-c034492e59c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9cfc51c4b76a4a0fe6458da0e032e772b1876046969f7bd972e265fa5a8ca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd9cfc51c4b76a4a0fe6458da0e032e772b1876046969f7bd972e265fa5a8ca0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:46:40Z\\\",\\\"message\\\":\\\"rce:services.Addr{IP:\\\\\\\"10.217.4.153\\\\\\\", Port:5443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF0122 05:46:40.416204 6546 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:40Z is after 2025-08-24T17:21:41Z]\\\\nI0122 05:46:40.416173 6546 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-operator-lifecycle-manager/package-server-manager-metrics]} name:Service_openshift-operator-lifecycle-manage\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:39Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-z88sj_openshift-ovn-kubernetes(6a721333-1932-4bb0-b384-c034492e59c4)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z88sj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:52Z is after 2025-08-24T17:21:41Z"
Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.732256 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2b01760-735d-4991-8e66-28149847868b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8018d50ee76b7bed70197f4085cb2157cbad29ecbb0f9c3ea6cb4ed0621877ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9aa4ed2d41484a7f47e481ae23e10c3a264edfdaddeb4c8f6ffd946694c88481\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa57c9c8f2cbba4f1282222cf6f3a29fd59efe4a378222e613eaeb32f0b71877\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://171c23458dabebaa0ddea08c10f662edcab7797ef6dffaab101c75d6a0737dff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://171c23458dabebaa0ddea08c10f662edcab7797ef6dffaab101c75d6a0737dff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:52Z is after 2025-08-24T17:21:41Z"
Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.750147 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.750216 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.750238 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.750269 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.750293 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:52Z","lastTransitionTime":"2026-01-22T05:46:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.751042 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f2db1d-40cb-4864-917b-3b99f69cdafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b19005c1dbe0b643170cfc21cda4a46a6ab3630efbf5198dc7c19882bb5ff6a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99fe3d10aa7ee2f1de1f5f23ed8c4c7ac9efe69ce9e366f02e2e8efca2c017e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zfnsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:52Z is after 2025-08-24T17:21:41Z"
Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.766516 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7r526" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2b23423-6793-44cd-b47e-dc4d25bbe3ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78e689c029c4c4b19873187eddbdcd40bbc62b4e93d9a120a801e04db7c56ecc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hfvwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7r526\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:52Z is after 2025-08-24T17:21:41Z"
Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.784629 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-28bzv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9a82407-aef2-4209-bb3f-6c89e11387e3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66a005229530dfbb93c94f5b93260aeb17f8852e46eed9c67d1558c04d1552a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vkg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ad051b09250b7060a688905e48c62f57620af633bc9a1e302e91eb076c92981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vkg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-28bzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:52Z is after 2025-08-24T17:21:41Z"
Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.800849 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-t8rgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0902347a-c5e2-4891-812b-cfe6efc32261\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4ghgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4ghgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-t8rgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:52Z is after 2025-08-24T17:21:41Z"
Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.853622 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
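The condition={...} object that setters.go:603 logs each time it marks the node NotReady has the shape of a core/v1 NodeCondition. Below is a self-contained decode of the exact payload from the entries above; plain structs stand in for the k8s.io/api types so the sketch compiles without the Kubernetes module.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// NodeCondition mirrors the core/v1 NodeCondition fields that appear in
// the log; timestamps are kept as strings for simplicity.
type NodeCondition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
	LastTransitionTime string `json:"lastTransitionTime"`
	Reason             string `json:"reason"`
	Message            string `json:"message"`
}

func main() {
	// Payload copied verbatim from the setters.go:603 entries above.
	payload := `{"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:52Z","lastTransitionTime":"2026-01-22T05:46:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}`
	var c NodeCondition
	if err := json.Unmarshal([]byte(payload), &c); err != nil {
		panic(err)
	}
	fmt.Printf("%s=%s reason=%s: %s\n", c.Type, c.Status, c.Reason, c.Message)
}
```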
Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.853675 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.853692 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.853716 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.853733 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:52Z","lastTransitionTime":"2026-01-22T05:46:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.956954 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.957010 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.957020 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.957035 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:52 crc kubenswrapper[4933]: I0122 05:46:52.957044 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:52Z","lastTransitionTime":"2026-01-22T05:46:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.059749 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.059814 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.059826 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.059840 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.059851 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:53Z","lastTransitionTime":"2026-01-22T05:46:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.163223 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.163298 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.163334 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.163364 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.163385 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:53Z","lastTransitionTime":"2026-01-22T05:46:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.267847 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.267914 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.267975 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.267999 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.268013 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:53Z","lastTransitionTime":"2026-01-22T05:46:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.371868 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.371936 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.371953 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.372033 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.372055 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:53Z","lastTransitionTime":"2026-01-22T05:46:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.475355 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.475424 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.475443 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.475467 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.475484 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:53Z","lastTransitionTime":"2026-01-22T05:46:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.490872 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.490966 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm"
Jan 22 05:46:53 crc kubenswrapper[4933]: E0122 05:46:53.491149 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261"
Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.491168 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.491334 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 22 05:46:53 crc kubenswrapper[4933]: E0122 05:46:53.491363 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 22 05:46:53 crc kubenswrapper[4933]: E0122 05:46:53.491536 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 22 05:46:53 crc kubenswrapper[4933]: E0122 05:46:53.491732 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.512984 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-27 22:58:36.204577993 +0000 UTC
Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.578466 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.578525 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.578542 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.578567 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.578584 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:53Z","lastTransitionTime":"2026-01-22T05:46:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.681511 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.681572 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.681588 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.681608 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Has your network provider started?"} Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.784708 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.784758 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.784767 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.784780 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.784789 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:53Z","lastTransitionTime":"2026-01-22T05:46:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.886949 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.886979 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.886987 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.886999 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.887008 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:53Z","lastTransitionTime":"2026-01-22T05:46:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.989961 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.990044 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.990070 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.990173 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:53 crc kubenswrapper[4933]: I0122 05:46:53.990197 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:53Z","lastTransitionTime":"2026-01-22T05:46:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.093557 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.093629 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.093655 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.093683 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.093700 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:54Z","lastTransitionTime":"2026-01-22T05:46:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.197629 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.197670 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.197681 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.197701 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.197716 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:54Z","lastTransitionTime":"2026-01-22T05:46:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.300489 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.300535 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.300549 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.300566 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.300581 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:54Z","lastTransitionTime":"2026-01-22T05:46:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.403110 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.403230 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.403256 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.403283 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.403303 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:54Z","lastTransitionTime":"2026-01-22T05:46:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.505724 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.505763 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.505775 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.505790 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.505802 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:54Z","lastTransitionTime":"2026-01-22T05:46:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.513279 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-14 12:12:54.477528431 +0000 UTC Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.608191 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.608266 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.608297 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.608324 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.608346 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:54Z","lastTransitionTime":"2026-01-22T05:46:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.710947 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.710977 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.710985 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.710998 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.711006 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:54Z","lastTransitionTime":"2026-01-22T05:46:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.813815 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.813886 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.813902 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.813926 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.813946 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:54Z","lastTransitionTime":"2026-01-22T05:46:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.918168 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.918227 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.918244 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.918267 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:54 crc kubenswrapper[4933]: I0122 05:46:54.918283 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:54Z","lastTransitionTime":"2026-01-22T05:46:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.021285 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.021332 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.021344 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.021362 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.021378 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:55Z","lastTransitionTime":"2026-01-22T05:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.125380 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.125427 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.125443 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.125463 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.125475 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:55Z","lastTransitionTime":"2026-01-22T05:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.228752 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.228824 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.228836 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.228861 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.228876 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:55Z","lastTransitionTime":"2026-01-22T05:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.332378 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.332454 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.332468 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.332487 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.332500 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:55Z","lastTransitionTime":"2026-01-22T05:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.434903 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.434957 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.434974 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.434997 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.435013 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:55Z","lastTransitionTime":"2026-01-22T05:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.490634 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.490700 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.490716 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.490734 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:46:55 crc kubenswrapper[4933]: E0122 05:46:55.490865 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:46:55 crc kubenswrapper[4933]: E0122 05:46:55.491027 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:46:55 crc kubenswrapper[4933]: E0122 05:46:55.491234 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:46:55 crc kubenswrapper[4933]: E0122 05:46:55.491351 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.514123 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-17 00:05:40.909936485 +0000 UTC Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.537309 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.537388 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.537411 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.537437 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.537453 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:55Z","lastTransitionTime":"2026-01-22T05:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.639920 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.639961 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.639971 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.639984 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.639994 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:55Z","lastTransitionTime":"2026-01-22T05:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.742867 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.742915 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.742923 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.742939 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.742947 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:55Z","lastTransitionTime":"2026-01-22T05:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.750068 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.750111 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.750120 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.750131 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.750140 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:55Z","lastTransitionTime":"2026-01-22T05:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:55 crc kubenswrapper[4933]: E0122 05:46:55.762840 4933 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c8062a48-506a-465a-9977-93e8530bae49\\\",\\\"systemUUID\\\":\\\"d621d350-6f7c-490e-a47d-b396db235280\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:55Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.766671 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.766749 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
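The node status patch above fails for a reason unrelated to CNI: the node.network-node-identity.openshift.io webhook at 127.0.0.1:9743 serves a TLS certificate that expired 2025-08-24, while the node clock reads 2026-01-22. A sketch that reproduces the same x509 validity-window check against that endpoint; the address is taken from the log, and InsecureSkipVerify is set only so the handshake completes and the peer certificate can be inspected rather than trusted.

// Inspect the certificate served by the webhook endpoint from the log and
// apply the validity-window check that fails above.
package main

import (
	"crypto/tls"
	"fmt"
	"time"
)

func main() {
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()

	certs := conn.ConnectionState().PeerCertificates
	if len(certs) == 0 {
		fmt.Println("no peer certificate presented")
		return
	}
	cert, now := certs[0], time.Now()
	fmt.Println("NotBefore:", cert.NotBefore, "NotAfter:", cert.NotAfter)
	if now.After(cert.NotAfter) {
		fmt.Printf("certificate has expired: current time %s is after %s\n", now.UTC(), cert.NotAfter)
	} else if now.Before(cert.NotBefore) {
		fmt.Println("certificate is not yet valid")
	}
}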
event="NodeHasNoDiskPressure" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.766762 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.766779 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.766840 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:55Z","lastTransitionTime":"2026-01-22T05:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:55 crc kubenswrapper[4933]: E0122 05:46:55.778180 4933 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c8062a48-506a-465a-9977-93e8530bae49\\\",\\\"systemUUID\\\":\\\"d621d350-6f7c-490e-a47d-b396db235280\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:55Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.781642 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.781667 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.781679 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.781693 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.781703 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:55Z","lastTransitionTime":"2026-01-22T05:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:55 crc kubenswrapper[4933]: E0122 05:46:55.800587 4933 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c8062a48-506a-465a-9977-93e8530bae49\\\",\\\"systemUUID\\\":\\\"d621d350-6f7c-490e-a47d-b396db235280\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:55Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.804435 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.804532 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
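The same patch error repeats at 05:46:55.762840, .778180, .800587 and .816206 because the kubelet retries the node status update a small, fixed number of times per sync loop before giving up until the next cycle. A sketch of that retry pattern; the retry budget below is assumed for illustration, not taken from the kubelet source.

// Retry pattern behind the repeated "Error updating node status, will
// retry" entries. The constant is an assumption for this sketch.
package main

import (
	"errors"
	"fmt"
)

const nodeStatusUpdateRetry = 5 // assumed per-sync retry budget

func patchNodeStatus() error {
	// Stand-in for the PATCH that fails above with the expired webhook cert.
	return errors.New(`failed calling webhook "node.network-node-identity.openshift.io": certificate has expired`)
}

func main() {
	for attempt := 1; attempt <= nodeStatusUpdateRetry; attempt++ {
		if err := patchNodeStatus(); err != nil {
			fmt.Printf("Error updating node status, will retry (attempt %d): %v\n", attempt, err)
			continue
		}
		return
	}
	fmt.Println("Unable to update node status after", nodeStatusUpdateRetry, "attempts")
}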
event="NodeHasNoDiskPressure" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.804542 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.804555 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.804565 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:55Z","lastTransitionTime":"2026-01-22T05:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:55 crc kubenswrapper[4933]: E0122 05:46:55.816206 4933 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c8062a48-506a-465a-9977-93e8530bae49\\\",\\\"systemUUID\\\":\\\"d621d350-6f7c-490e-a47d-b396db235280\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:55Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.820925 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.820951 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.820962 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.820976 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.820986 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:55Z","lastTransitionTime":"2026-01-22T05:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:55 crc kubenswrapper[4933]: E0122 05:46:55.837979 4933 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:46:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c8062a48-506a-465a-9977-93e8530bae49\\\",\\\"systemUUID\\\":\\\"d621d350-6f7c-490e-a47d-b396db235280\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:55Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:55 crc kubenswrapper[4933]: E0122 05:46:55.838104 4933 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.844808 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.844829 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.844838 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.844850 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.844858 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:55Z","lastTransitionTime":"2026-01-22T05:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.946494 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.946559 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.946582 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.946611 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:55 crc kubenswrapper[4933]: I0122 05:46:55.946633 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:55Z","lastTransitionTime":"2026-01-22T05:46:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.048897 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.048942 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.048951 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.048965 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.048974 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:56Z","lastTransitionTime":"2026-01-22T05:46:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.151258 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.151283 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.151293 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.151304 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.151313 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:56Z","lastTransitionTime":"2026-01-22T05:46:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.252891 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.252922 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.252931 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.252943 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.252951 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:56Z","lastTransitionTime":"2026-01-22T05:46:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.355847 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.355890 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.355899 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.355915 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.355924 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:56Z","lastTransitionTime":"2026-01-22T05:46:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.458450 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.458491 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.458502 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.458521 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.458532 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:56Z","lastTransitionTime":"2026-01-22T05:46:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.490886 4933 scope.go:117] "RemoveContainer" containerID="cd9cfc51c4b76a4a0fe6458da0e032e772b1876046969f7bd972e265fa5a8ca0" Jan 22 05:46:56 crc kubenswrapper[4933]: E0122 05:46:56.491164 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-z88sj_openshift-ovn-kubernetes(6a721333-1932-4bb0-b384-c034492e59c4)\"" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" podUID="6a721333-1932-4bb0-b384-c034492e59c4" Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.514509 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-08 02:35:13.433469899 +0000 UTC Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.561258 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.561339 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.561375 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.561394 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.561408 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:56Z","lastTransitionTime":"2026-01-22T05:46:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.664141 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.664245 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.664265 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.664285 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.664302 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:56Z","lastTransitionTime":"2026-01-22T05:46:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.766467 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.766508 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.766519 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.766534 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.766544 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:56Z","lastTransitionTime":"2026-01-22T05:46:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.868703 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.868796 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.868810 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.868827 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.868838 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:56Z","lastTransitionTime":"2026-01-22T05:46:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.971927 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.971967 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.971980 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.971995 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:56 crc kubenswrapper[4933]: I0122 05:46:56.972006 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:56Z","lastTransitionTime":"2026-01-22T05:46:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:57 crc kubenswrapper[4933]: I0122 05:46:57.074054 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:57 crc kubenswrapper[4933]: I0122 05:46:57.074139 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:57 crc kubenswrapper[4933]: I0122 05:46:57.074158 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:57 crc kubenswrapper[4933]: I0122 05:46:57.074211 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:57 crc kubenswrapper[4933]: I0122 05:46:57.074232 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:57Z","lastTransitionTime":"2026-01-22T05:46:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:57 crc kubenswrapper[4933]: I0122 05:46:57.177852 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:57 crc kubenswrapper[4933]: I0122 05:46:57.177918 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:57 crc kubenswrapper[4933]: I0122 05:46:57.177932 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:57 crc kubenswrapper[4933]: I0122 05:46:57.177950 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:57 crc kubenswrapper[4933]: I0122 05:46:57.177964 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:57Z","lastTransitionTime":"2026-01-22T05:46:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:57 crc kubenswrapper[4933]: I0122 05:46:57.280036 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:57 crc kubenswrapper[4933]: I0122 05:46:57.280087 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:57 crc kubenswrapper[4933]: I0122 05:46:57.280096 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:57 crc kubenswrapper[4933]: I0122 05:46:57.280110 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:57 crc kubenswrapper[4933]: I0122 05:46:57.280119 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:57Z","lastTransitionTime":"2026-01-22T05:46:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:57 crc kubenswrapper[4933]: I0122 05:46:57.382488 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:57 crc kubenswrapper[4933]: I0122 05:46:57.382531 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:57 crc kubenswrapper[4933]: I0122 05:46:57.382543 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:57 crc kubenswrapper[4933]: I0122 05:46:57.382607 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:57 crc kubenswrapper[4933]: I0122 05:46:57.382622 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:57Z","lastTransitionTime":"2026-01-22T05:46:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:46:57 crc kubenswrapper[4933]: I0122 05:46:57.484956 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:57 crc kubenswrapper[4933]: I0122 05:46:57.484999 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:57 crc kubenswrapper[4933]: I0122 05:46:57.485012 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:57 crc kubenswrapper[4933]: I0122 05:46:57.485027 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:57 crc kubenswrapper[4933]: I0122 05:46:57.485038 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:57Z","lastTransitionTime":"2026-01-22T05:46:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:57 crc kubenswrapper[4933]: I0122 05:46:57.490290 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:46:57 crc kubenswrapper[4933]: E0122 05:46:57.490457 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:46:57 crc kubenswrapper[4933]: I0122 05:46:57.490704 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:46:57 crc kubenswrapper[4933]: E0122 05:46:57.490795 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:46:57 crc kubenswrapper[4933]: I0122 05:46:57.490984 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:46:57 crc kubenswrapper[4933]: E0122 05:46:57.491112 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:46:57 crc kubenswrapper[4933]: I0122 05:46:57.491239 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:46:57 crc kubenswrapper[4933]: E0122 05:46:57.491354 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:46:57 crc kubenswrapper[4933]: I0122 05:46:57.515260 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-28 12:00:53.391745729 +0000 UTC Jan 22 05:46:57 crc kubenswrapper[4933]: I0122 05:46:57.532790 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0902347a-c5e2-4891-812b-cfe6efc32261-metrics-certs\") pod \"network-metrics-daemon-t8rgm\" (UID: \"0902347a-c5e2-4891-812b-cfe6efc32261\") " pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:46:57 crc kubenswrapper[4933]: E0122 05:46:57.532989 4933 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 22 05:46:57 crc kubenswrapper[4933]: E0122 05:46:57.533103 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0902347a-c5e2-4891-812b-cfe6efc32261-metrics-certs podName:0902347a-c5e2-4891-812b-cfe6efc32261 nodeName:}" failed. No retries permitted until 2026-01-22 05:47:29.533059342 +0000 UTC m=+97.370184785 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0902347a-c5e2-4891-812b-cfe6efc32261-metrics-certs") pod "network-metrics-daemon-t8rgm" (UID: "0902347a-c5e2-4891-812b-cfe6efc32261") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 22 05:46:57 crc kubenswrapper[4933]: I0122 05:46:57.587236 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:46:57 crc kubenswrapper[4933]: I0122 05:46:57.587286 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:46:57 crc kubenswrapper[4933]: I0122 05:46:57.587305 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:46:57 crc kubenswrapper[4933]: I0122 05:46:57.587325 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:46:57 crc kubenswrapper[4933]: I0122 05:46:57.587341 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:57Z","lastTransitionTime":"2026-01-22T05:46:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 22 05:46:57 crc kubenswrapper[4933]: I0122 05:46:57.689519 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:57 crc kubenswrapper[4933]: I0122 05:46:57.689567 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:57 crc kubenswrapper[4933]: I0122 05:46:57.689583 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:57 crc kubenswrapper[4933]: I0122 05:46:57.689602 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:57 crc kubenswrapper[4933]: I0122 05:46:57.689616 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:57Z","lastTransitionTime":"2026-01-22T05:46:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:46:58 crc kubenswrapper[4933]: I0122 05:46:58.502683 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"]
Jan 22 05:46:58 crc kubenswrapper[4933]: I0122 05:46:58.516304 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-15 16:57:00.240685769 +0000 UTC
Jan 22 05:46:59 crc kubenswrapper[4933]: I0122 05:46:59.490645 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm"
Jan 22 05:46:59 crc kubenswrapper[4933]: E0122 05:46:59.490769 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261"
Jan 22 05:46:59 crc kubenswrapper[4933]: I0122 05:46:59.491110 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 22 05:46:59 crc kubenswrapper[4933]: E0122 05:46:59.491174 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 22 05:46:59 crc kubenswrapper[4933]: I0122 05:46:59.491224 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 22 05:46:59 crc kubenswrapper[4933]: E0122 05:46:59.491308 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 22 05:46:59 crc kubenswrapper[4933]: I0122 05:46:59.491358 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 22 05:46:59 crc kubenswrapper[4933]: E0122 05:46:59.491506 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 22 05:46:59 crc kubenswrapper[4933]: I0122 05:46:59.517286 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-05 10:18:08.414817002 +0000 UTC
Jan 22 05:46:59 crc kubenswrapper[4933]: I0122 05:46:59.944614 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-jr6rw_f066dd84-0cd5-4e8c-8411-cf12cc83ea7d/kube-multus/0.log"
Jan 22 05:46:59 crc kubenswrapper[4933]: I0122 05:46:59.944669 4933 generic.go:334] "Generic (PLEG): container finished" podID="f066dd84-0cd5-4e8c-8411-cf12cc83ea7d" containerID="3a6b69e5a5d7420dbcda9a6a023402f27c1d584233c06bfb3d09ae7efb243e31" exitCode=1
Jan 22 05:46:59 crc kubenswrapper[4933]: I0122 05:46:59.944705 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-jr6rw" event={"ID":"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d","Type":"ContainerDied","Data":"3a6b69e5a5d7420dbcda9a6a023402f27c1d584233c06bfb3d09ae7efb243e31"}
Jan 22 05:46:59 crc kubenswrapper[4933]: I0122 05:46:59.945128 4933 scope.go:117] "RemoveContainer" containerID="3a6b69e5a5d7420dbcda9a6a023402f27c1d584233c06bfb3d09ae7efb243e31"
Jan 22 05:46:59 crc kubenswrapper[4933]: I0122 05:46:59.945422 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:46:59 crc kubenswrapper[4933]: I0122 05:46:59.945477 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:46:59 crc kubenswrapper[4933]: I0122 05:46:59.945492 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:46:59 crc kubenswrapper[4933]: I0122 05:46:59.945513 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:46:59 crc kubenswrapper[4933]: I0122 05:46:59.945527 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:46:59Z","lastTransitionTime":"2026-01-22T05:46:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:46:59 crc kubenswrapper[4933]: I0122 05:46:59.964159 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:46:10Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 05:46:04.652334 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:46:04.654188 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-11537279/tls.crt::/tmp/serving-cert-11537279/tls.key\\\\\\\"\\\\nI0122 05:46:10.676992 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:46:10.680821 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:46:10.680851 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:46:10.680897 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:46:10.680911 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:46:10.690531 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 05:46:10.690586 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690615 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 05:46:10.690623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 05:46:10.690632 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 05:46:10.690639 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 05:46:10.691151 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:46:10.696387 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:59 crc kubenswrapper[4933]: I0122 05:46:59.980459 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88329ed7-908d-4584-abfb-6c24f9a764f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5e260f75dcd418c5e606ef0ce7c06aa8db34b655acdc7ed4d7540f3ed59d873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ad10cfd00a2bd6dab0704baa3988eab6b7b97812ef8a6074b9bc2c2a03df731\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://401030b22a7ca1989952ec1bf2d9d7d8a5b78e0cba875dcd6b1d2fe4f2548c54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ada618a16d850d1ea940debe486a3836a49697a4742426d29bddb32d496a9d61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:46:59 crc kubenswrapper[4933]: I0122 05:46:59.992804 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27ea089ef37f2af216c553e31f4852fe12489f919f95c9c6ec1c579babcedbb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.006747 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:00Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.020127 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:00Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.032426 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nvpgt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126473c5-96ff-4ca9-83c0-7548d7e219c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64f3d49f15e75b73f18118e60b0e8cdae794506cff25ef11ea3a217db91a4b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w6xgn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.
11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nvpgt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:00Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.048022 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.048104 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.048121 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.048145 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.048162 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:00Z","lastTransitionTime":"2026-01-22T05:47:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.052263 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a721333-1932-4bb0-b384-c034492e59c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9cfc51c4b76a4a0fe6458da0e032e772b18760
46969f7bd972e265fa5a8ca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd9cfc51c4b76a4a0fe6458da0e032e772b1876046969f7bd972e265fa5a8ca0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:46:40Z\\\",\\\"message\\\":\\\"rce:services.Addr{IP:\\\\\\\"10.217.4.153\\\\\\\", Port:5443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF0122 05:46:40.416204 6546 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:40Z is after 2025-08-24T17:21:41Z]\\\\nI0122 05:46:40.416173 6546 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-operator-lifecycle-manager/package-server-manager-metrics]} name:Service_openshift-operator-lifecycle-manage\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:39Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-z88sj_openshift-ovn-kubernetes(6a721333-1932-4bb0-b384-c034492e59c4)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z88sj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:00Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.065238 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2b01760-735d-4991-8e66-28149847868b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8018d50ee76b7bed70197f4085cb2157cbad29ecbb0f9c3ea6cb4ed0621877ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9aa4ed2d41484a7f47e481ae23e10c3a264edfdaddeb4c8f6ffd946694c88481\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c
97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa57c9c8f2cbba4f1282222cf6f3a29fd59efe4a378222e613eaeb32f0b71877\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://171c23458dabebaa0ddea08c10f662edcab7797ef6dffaab101c75d6a0737dff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://171c23458dabebaa0ddea08c10f662edcab7797ef6dffaab101c75d6a0737dff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:00Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.078026 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f2db1d-40cb-4864-917b-3b99f69cdafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b19005c1dbe0b643170cfc21cda4a46a6ab3630efbf5198dc7c19882bb5ff6a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99fe3d10aa7ee2f1de1f5f23ed8c4c7ac9efe69ce9e366f02e2e8efca2c017e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zfnsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:00Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.088334 4933 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-7r526" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2b23423-6793-44cd-b47e-dc4d25bbe3ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78e689c029c4c4b19873187eddbdcd40bbc62b4e93d9a120a801e04db7c56ecc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hfvwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7r526\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:00Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.097929 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-28bzv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9a82407-aef2-4209-bb3f-6c89e11387e3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66a005229530dfbb93c94f5b93260aeb17f8852e46eed9c67d1558c04d1552a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vkg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ad051b09250b7060a688905e48c62f57620af633bc9a1e302e91eb076c92981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vkg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-28bzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:00Z is after 2025-08-24T17:21:41Z" Jan 22 
05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.108944 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-t8rgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0902347a-c5e2-4891-812b-cfe6efc32261\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4ghgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4ghgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-t8rgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:00Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.123049 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jr6rw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:59Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3a6b69e5a5d7420dbcda9a6a023402f27c1d584233c06bfb3d09ae7efb243e31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3a6b69e5a5d7420dbcda9a6a023402f27c1d584233c06bfb3d09ae7efb243e31\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:46:59Z\\\",\\\"message\\\":\\\"2026-01-22T05:46:14+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_c2392d30-71bb-4850-932e-a51973540e40\\\\n2026-01-22T05:46:14+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_c2392d30-71bb-4850-932e-a51973540e40 to /host/opt/cni/bin/\\\\n2026-01-22T05:46:14Z [verbose] multus-daemon started\\\\n2026-01-22T05:46:14Z [verbose] Readiness Indicator file check\\\\n2026-01-22T05:46:59Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v4gxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jr6rw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:00Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.140458 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83dfdde7-cd49-49e0-85a0-0165d464b2c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e3de35a667d3620666041da8bcc357baea90e882fba4d33ef6c9f38906699cac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q8l78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:00Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.150128 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.150192 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:00 crc 
kubenswrapper[4933]: I0122 05:47:00.150209 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.150233 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.150250 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:00Z","lastTransitionTime":"2026-01-22T05:47:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.150948 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be15cabd-6272-4819-9132-cbfe3e8a3b22\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a7388e9b8023ce1ca28ede29592438fb4cd2f85800d46b027cb5b4e8c100ebd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://053a5f03773a50e6add05b0c1d8de76035b560baaa24f51d573ca1f8305a1048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://053a5f03773a50e6add05b0c1d8de76035b560baaa24f51d573ca1f8305a1048\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\
\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:00Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.167632 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59b05e4bfee6067658a60220a411d49a83c5ca76039e31325ce7646cbbeeba3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71b7f697413549ee75514e57fcb6e2d93c6b7e349b1cf93e8fc8844c96036d60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\
\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:00Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.179251 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c47f771dea621131d66ab686e6c28f7f5aeb129834c0f8308f467c2bd1b871e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:00Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.194851 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:00Z is after 2025-08-24T17:21:41Z"
Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.252438 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.252470 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.252481 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.252495 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.252507 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:00Z","lastTransitionTime":"2026-01-22T05:47:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.517619 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-13 17:04:34.024088182 +0000 UTC
Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.521704 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.521730 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.521738 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.521750 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.521760 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:00Z","lastTransitionTime":"2026-01-22T05:47:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.624037 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.624088 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.624100 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.624119 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.624131 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:00Z","lastTransitionTime":"2026-01-22T05:47:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.726796 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.726838 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.726848 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.726862 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.726874 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:00Z","lastTransitionTime":"2026-01-22T05:47:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.830298 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.830350 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.830367 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.830390 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.830408 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:00Z","lastTransitionTime":"2026-01-22T05:47:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.933215 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.933279 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.933297 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.933322 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.933345 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:00Z","lastTransitionTime":"2026-01-22T05:47:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
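The heartbeat block above repeats because kubelet keeps the node NotReady until the container runtime reports a usable CNI configuration, and the message names the directory it is checking: /etc/kubernetes/cni/net.d/. Below is a minimal Go sketch of that kind of readiness test, not kubelet's actual implementation; the accepted file extensions are an assumption modeled on libcni's config loader, and only the directory path is taken from the log.

```go
// cnicheck.go - a minimal sketch (not kubelet's real code) of the check behind
// "no CNI configuration file in /etc/kubernetes/cni/net.d/": the node stays
// NotReady until at least one CNI config file appears in the directory.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// cniConfigPresent reports whether dir contains any plausible CNI config file.
func cniConfigPresent(dir string) (bool, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return false, err
	}
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json": // assumed set, modeled on libcni
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ok, err := cniConfigPresent("/etc/kubernetes/cni/net.d")
	if err != nil || !ok {
		fmt.Println("NetworkReady=false reason:NetworkPluginNotReady")
		return
	}
	fmt.Println("NetworkReady=true")
}
```

Nothing ever appears in that directory here because the network plugin itself is still coming up, which is why the same five events recur at each status sync.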
Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.949545 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-jr6rw_f066dd84-0cd5-4e8c-8411-cf12cc83ea7d/kube-multus/0.log"
Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.949596 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-jr6rw" event={"ID":"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d","Type":"ContainerStarted","Data":"68112ca379a4d93e242173f33fb845379f58d92e02019847ede853e7a61df83f"}
Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.963570 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2b01760-735d-4991-8e66-28149847868b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8018d50ee76b7bed70197f4085cb2157cbad29ecbb0f9c3ea6cb4ed0621877ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9aa4ed2d41484a7f47e481ae23e10c3a264edfdaddeb4c8f6ffd946694c88481\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa57c9c8f2cbba4f1282222cf6f3a29fd59efe4a378222e613eaeb32f0b71877\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://171c23458dabebaa0ddea08c10f662edcab7797ef6dffaab101c75d6a0737dff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://171c23458dabebaa0ddea08c10f662edcab7797ef6dffaab101c75d6a0737dff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:00Z is after 2025-08-24T17:21:41Z"
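Every one of these status_manager failures ends in the same TLS error: before persisting a pod status patch the apiserver must call the pod.network-node-identity.openshift.io webhook, and the webhook's serving certificate expired on 2025-08-24 while the node clock reads 2026-01-22. A minimal Go sketch of the validity-window test that crypto/x509 applies before any signature checking follows; the certificate path used here is an assumption for illustration (the log only shows that the webhook container mounts its cert under /etc/webhook-cert/).

```go
// certwindow.go - a minimal sketch of the check behind "x509: certificate has
// expired or is not yet valid": a chain fails verification whenever the
// current time falls outside the leaf's [NotBefore, NotAfter] window.
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

// checkWindow parses a PEM certificate and applies the same time-window
// comparison x509 verification performs before any signature checks.
func checkWindow(pemPath string, now time.Time) error {
	data, err := os.ReadFile(pemPath)
	if err != nil {
		return err
	}
	block, _ := pem.Decode(data)
	if block == nil {
		return fmt.Errorf("no PEM block in %s", pemPath)
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return err
	}
	if now.Before(cert.NotBefore) || now.After(cert.NotAfter) {
		return fmt.Errorf("certificate has expired or is not yet valid: current time %s is after %s",
			now.UTC().Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
	}
	return nil
}

func main() {
	// Hypothetical path; substitute wherever the webhook serving cert lives.
	if err := checkWindow("/etc/webhook-cert/tls.crt", time.Now()); err != nil {
		fmt.Println("tls: failed to verify certificate: x509:", err)
	}
}
```

The patches themselves are well-formed; until the webhook certificate is rotated (or the clock corrected), every pod status write on this node keeps failing with this same error line.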
Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.973600 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f2db1d-40cb-4864-917b-3b99f69cdafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b19005c1dbe0b643170cfc21cda4a46a6ab3630efbf5198dc7c19882bb5ff6a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99fe3d10aa7ee2f1de1f5f23ed8c4c7ac9efe69ce9e366f02e2e8efca2c017e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zfnsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:00Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.983065 4933 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-7r526" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2b23423-6793-44cd-b47e-dc4d25bbe3ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78e689c029c4c4b19873187eddbdcd40bbc62b4e93d9a120a801e04db7c56ecc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hfvwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7r526\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:00Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:00 crc kubenswrapper[4933]: I0122 05:47:00.993191 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-28bzv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9a82407-aef2-4209-bb3f-6c89e11387e3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66a005229530dfbb93c94f5b93260aeb17f8852e46eed9c67d1558c04d1552a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vkg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ad051b09250b7060a688905e48c62f57620af633bc9a1e302e91eb076c92981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vkg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-28bzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:00Z is after 2025-08-24T17:21:41Z" Jan 22 
05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.002644 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-t8rgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0902347a-c5e2-4891-812b-cfe6efc32261\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4ghgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4ghgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-t8rgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:01Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.014129 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jr6rw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68112ca379a4d93e242173f33fb845379f58d92e02019847ede853e7a61df83f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3a6b69e5a5d7420dbcda9a6a023402f27c1d584233c06bfb3d09ae7efb243e31\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:46:59Z\\\",\\\"message\\\":\\\"2026-01-22T05:46:14+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_c2392d30-71bb-4850-932e-a51973540e40\\\\n2026-01-22T05:46:14+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_c2392d30-71bb-4850-932e-a51973540e40 to /host/opt/cni/bin/\\\\n2026-01-22T05:46:14Z [verbose] multus-daemon started\\\\n2026-01-22T05:46:14Z [verbose] Readiness Indicator file check\\\\n2026-01-22T05:46:59Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v4gxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jr6rw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:01Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.028819 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83dfdde7-cd49-49e0-85a0-0165d464b2c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e3de35a667d3620666041da8bcc357baea90e882fba4d33ef6c9f38906699cac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q8l78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:01Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.037237 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.037263 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:01 crc 
kubenswrapper[4933]: I0122 05:47:01.037271 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.037283 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.037292 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:01Z","lastTransitionTime":"2026-01-22T05:47:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.042780 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be15cabd-6272-4819-9132-cbfe3e8a3b22\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a7388e9b8023ce1ca28ede29592438fb4cd2f85800d46b027cb5b4e8c100ebd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://053a5f03773a50e6add05b0c1d8de76035b560baaa24f51d573ca1f8305a1048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://053a5f03773a50e6add05b0c1d8de76035b560baaa24f51d573ca1f8305a1048\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\
\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:01Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.054891 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59b05e4bfee6067658a60220a411d49a83c5ca76039e31325ce7646cbbeeba3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71b7f697413549ee75514e57fcb6e2d93c6b7e349b1cf93e8fc8844c96036d60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\
\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:01Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.065000 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c47f771dea621131d66ab686e6c28f7f5aeb129834c0f8308f467c2bd1b871e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:01Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.076109 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.076109 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:01Z is after 2025-08-24T17:21:41Z"
Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.088316 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:46:10Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 05:46:04.652334 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:46:04.654188 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-11537279/tls.crt::/tmp/serving-cert-11537279/tls.key\\\\\\\"\\\\nI0122 05:46:10.676992 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:46:10.680821 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:46:10.680851 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:46:10.680897 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:46:10.680911 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:46:10.690531 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 05:46:10.690586 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690615 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 05:46:10.690623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 05:46:10.690632 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 05:46:10.690639 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 05:46:10.691151 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:46:10.696387 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:01Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.101327 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88329ed7-908d-4584-abfb-6c24f9a764f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5e260f75dcd418c5e606ef0ce7c06aa8db34b655acdc7ed4d7540f3ed59d873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ad10cfd00a2bd6dab0704baa3988eab6b7b97812ef8a6074b9bc2c2a03df731\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://401030b22a7ca1989952ec1bf2d9d7d8a5b78e0cba875dcd6b1d2fe4f2548c54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ada618a16d850d1ea940debe486a3836a49697a4742426d29bddb32d496a9d61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:01Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.115961 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27ea089ef37f2af216c553e31f4852fe12489f919f95c9c6ec1c579babcedbb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:01Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.127922 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:01Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.138943 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.138984 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.138994 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.139010 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.139019 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:01Z","lastTransitionTime":"2026-01-22T05:47:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.143458 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:01Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.153115 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nvpgt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"126473c5-96ff-4ca9-83c0-7548d7e219c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64f3d49f15e75b73f18118e60b0e8cdae794506cff25ef11ea3a217db91a4b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w6xgn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nvpgt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:01Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.170477 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a721333-1932-4bb0-b384-c034492e59c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9cfc51c4b76a4a0fe6458da0e032e772b1876046969f7bd972e265fa5a8ca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd9cfc51c4b76a4a0fe6458da0e032e772b1876046969f7bd972e265fa5a8ca0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:46:40Z\\\",\\\"message\\\":\\\"rce:services.Addr{IP:\\\\\\\"10.217.4.153\\\\\\\", Port:5443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF0122 05:46:40.416204 6546 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:40Z is after 2025-08-24T17:21:41Z]\\\\nI0122 05:46:40.416173 6546 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-operator-lifecycle-manager/package-server-manager-metrics]} name:Service_openshift-operator-lifecycle-manage\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:39Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-z88sj_openshift-ovn-kubernetes(6a721333-1932-4bb0-b384-c034492e59c4)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z88sj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:01Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.241043 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.241179 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.241198 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.241224 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.241243 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:01Z","lastTransitionTime":"2026-01-22T05:47:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.343929 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.343968 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.343978 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.343993 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.344004 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:01Z","lastTransitionTime":"2026-01-22T05:47:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.446366 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.446399 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.446412 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.446427 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.446438 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:01Z","lastTransitionTime":"2026-01-22T05:47:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.490268 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.490302 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.490305 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.490423 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:47:01 crc kubenswrapper[4933]: E0122 05:47:01.490427 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:47:01 crc kubenswrapper[4933]: E0122 05:47:01.490518 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:47:01 crc kubenswrapper[4933]: E0122 05:47:01.490702 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:47:01 crc kubenswrapper[4933]: E0122 05:47:01.490749 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.518680 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-10 03:14:00.335176707 +0000 UTC Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.548884 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.548933 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.548942 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.548956 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.548967 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:01Z","lastTransitionTime":"2026-01-22T05:47:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.651623 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.651665 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.651674 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.651689 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.651702 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:01Z","lastTransitionTime":"2026-01-22T05:47:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.754064 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.754182 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.754201 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.754229 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.754246 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:01Z","lastTransitionTime":"2026-01-22T05:47:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.856855 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.856901 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.856911 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.856928 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.856941 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:01Z","lastTransitionTime":"2026-01-22T05:47:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.959198 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.959235 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.959246 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.959261 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:01 crc kubenswrapper[4933]: I0122 05:47:01.959271 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:01Z","lastTransitionTime":"2026-01-22T05:47:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.062614 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.062662 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.062679 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.062706 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.062724 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:02Z","lastTransitionTime":"2026-01-22T05:47:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.165839 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.165889 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.165900 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.165916 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.165927 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:02Z","lastTransitionTime":"2026-01-22T05:47:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.269138 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.269201 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.269219 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.269243 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.269261 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:02Z","lastTransitionTime":"2026-01-22T05:47:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.372270 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.372317 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.372328 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.372344 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.372354 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:02Z","lastTransitionTime":"2026-01-22T05:47:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.475464 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.475581 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.475603 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.475632 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.475649 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:02Z","lastTransitionTime":"2026-01-22T05:47:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.513902 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a721333-1932-4bb0-b384-c034492e59c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9cfc51c4b76a4a0fe6458da0e032e772b1876046969f7bd972e265fa5a8ca0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd9cfc51c4b76a4a0fe6458da0e032e772b1876046969f7bd972e265fa5a8ca0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:46:40Z\\\",\\\"message\\\":\\\"rce:services.Addr{IP:\\\\\\\"10.217.4.153\\\\\\\", Port:5443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF0122 05:46:40.416204 6546 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:40Z is after 2025-08-24T17:21:41Z]\\\\nI0122 05:46:40.416173 6546 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-operator-lifecycle-manager/package-server-manager-metrics]} name:Service_openshift-operator-lifecycle-manage\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:39Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-z88sj_openshift-ovn-kubernetes(6a721333-1932-4bb0-b384-c034492e59c4)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z88sj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:02Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.519797 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 13:29:53.882122178 +0000 UTC Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.527164 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:46:10Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 05:46:04.652334 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:46:04.654188 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-11537279/tls.crt::/tmp/serving-cert-11537279/tls.key\\\\\\\"\\\\nI0122 05:46:10.676992 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:46:10.680821 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:46:10.680851 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:46:10.680897 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:46:10.680911 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:46:10.690531 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 05:46:10.690586 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690615 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 05:46:10.690623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 05:46:10.690632 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 05:46:10.690639 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 05:46:10.691151 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:46:10.696387 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:02Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.537280 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88329ed7-908d-4584-abfb-6c24f9a764f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5e260f75dcd418c5e606ef0ce7c06aa8db34b655acdc7ed4d7540f3ed59d873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ad10cfd00a2bd6dab0704baa3988eab6b7b97812ef8a6074b9bc2c2a03df731\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://401030b22a7ca1989952ec1bf2d9d7d8a5b78e0cba875dcd6b1d2fe4f2548c54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ada618a16d850d1ea940debe486a3836a49697a4742426d29bddb32d496a9d61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:02Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.551317 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27ea089ef37f2af216c553e31f4852fe12489f919f95c9c6ec1c579babcedbb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:02Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.570417 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:02Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.582171 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.582214 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.582225 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.582242 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.582256 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:02Z","lastTransitionTime":"2026-01-22T05:47:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.588622 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:02Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.600543 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nvpgt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"126473c5-96ff-4ca9-83c0-7548d7e219c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64f3d49f15e75b73f18118e60b0e8cdae794506cff25ef11ea3a217db91a4b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w6xgn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nvpgt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:02Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.613378 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2b01760-735d-4991-8e66-28149847868b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8018d50ee76b7bed70197f4085cb2157cbad29ecbb0f9c3ea6cb4ed0621877ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9aa4ed2d41484a7f47e481ae23e10c3a264edfdaddeb4c8f6ffd946694c88481\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa57c9c8f2cbba4f1282222cf6f3a29fd59efe4a378222e613eaeb32f0b71877\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://171c23458dabebaa0ddea08c10f662edcab7797ef6dffaab101c75d6a0737dff\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://171c23458dabebaa0ddea08c10f662edcab7797ef6dffaab101c75d6a0737dff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:02Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.628490 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f2db1d-40cb-4864-917b-3b99f69cdafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b19005c1dbe0b643170cfc21cda4a46a6ab3630efbf5198dc7c19882bb5ff6a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99fe3d10aa7ee2f1de1f5f23ed8c4c7ac9efe69ce9e366f02e2e8efca2c017e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zfnsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:02Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.639007 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7r526" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2b23423-6793-44cd-b47e-dc4d25bbe3ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78e689c029c4c4b19873187eddbdcd40bbc62b4e93d9a120a801e04db7c56ecc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hfvwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\
"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7r526\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:02Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.651216 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-28bzv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9a82407-aef2-4209-bb3f-6c89e11387e3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66a005229530dfbb93c94f5b93260aeb17f8852e46eed9c67d1558c04d1552a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vkg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ad051b09250b7060a688905e48c62f57620af633bc9a1e302e91eb076c92981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"k
ube-api-access-2vkg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-28bzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:02Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.663476 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-t8rgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0902347a-c5e2-4891-812b-cfe6efc32261\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4ghgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4ghgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-t8rgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:02Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.678046 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jr6rw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68112ca379a4d93e242173f33fb845379f58d92e02019847ede853e7a61df83f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3a6b69e5a5d7420dbcda9a6a023402f27c1d584233c06bfb3d09ae7efb243e31\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:46:59Z\\\",\\\"message\\\":\\\"2026-01-22T05:46:14+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_c2392d30-71bb-4850-932e-a51973540e40\\\\n2026-01-22T05:46:14+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_c2392d30-71bb-4850-932e-a51973540e40 to /host/opt/cni/bin/\\\\n2026-01-22T05:46:14Z [verbose] multus-daemon started\\\\n2026-01-22T05:46:14Z [verbose] Readiness Indicator file check\\\\n2026-01-22T05:46:59Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v4gxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jr6rw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:02Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.684326 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.684368 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.684385 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.684406 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.684423 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:02Z","lastTransitionTime":"2026-01-22T05:47:02Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.694682 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83dfdde7-cd49-49e0-85a0-0165d464b2c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e3de35a667d3620666041da8bcc357baea90e882fba4d33ef6c9f38906699cac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\
\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\
\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q8l78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-22T05:47:02Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.706526 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be15cabd-6272-4819-9132-cbfe3e8a3b22\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a7388e9b8023ce1ca28ede29592438fb4cd2f85800d46b027cb5b4e8c100ebd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://053a5f03773a50e6add05b0c1d8de76035b560baaa24f51d573ca1f8305a1048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://053a5f03773a50e6add05b0c1d8de76035b560baaa24f51d573ca1f8305a1048\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:02Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 
05:47:02.719315 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59b05e4bfee6067658a60220a411d49a83c5ca76039e31325ce7646cbbeeba3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71b7f697413549ee75514e57fcb6e2d93c6b7e349b1cf93e8fc8844c96036d60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:02Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.730135 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c47f771dea621131d66ab686e6c28f7f5aeb129834c0f8308f467c2bd1b871e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:02Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.740677 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:02Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.786762 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.786801 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.786810 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.786824 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.786833 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:02Z","lastTransitionTime":"2026-01-22T05:47:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.888807 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.888851 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.888863 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.888878 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.888889 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:02Z","lastTransitionTime":"2026-01-22T05:47:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.990671 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.990708 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.990718 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.990733 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:02 crc kubenswrapper[4933]: I0122 05:47:02.990746 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:02Z","lastTransitionTime":"2026-01-22T05:47:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.093682 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.093764 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.093788 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.093820 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.093838 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:03Z","lastTransitionTime":"2026-01-22T05:47:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.195974 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.196023 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.196038 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.196057 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.196089 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:03Z","lastTransitionTime":"2026-01-22T05:47:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.298760 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.298796 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.298804 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.298826 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.298836 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:03Z","lastTransitionTime":"2026-01-22T05:47:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.400759 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.400817 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.400836 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.400864 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.400883 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:03Z","lastTransitionTime":"2026-01-22T05:47:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.490418 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.490457 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.490420 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:47:03 crc kubenswrapper[4933]: E0122 05:47:03.490534 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.490599 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:47:03 crc kubenswrapper[4933]: E0122 05:47:03.490779 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:47:03 crc kubenswrapper[4933]: E0122 05:47:03.490768 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:47:03 crc kubenswrapper[4933]: E0122 05:47:03.490925 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.503595 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.503659 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.503675 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.503698 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.503717 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:03Z","lastTransitionTime":"2026-01-22T05:47:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.520130 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-19 04:09:26.530716646 +0000 UTC Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.606308 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.606369 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.606384 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.606447 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.606461 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:03Z","lastTransitionTime":"2026-01-22T05:47:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.708651 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.708704 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.708728 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.708751 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.708769 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:03Z","lastTransitionTime":"2026-01-22T05:47:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.810789 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.810833 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.810845 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.810860 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.810873 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:03Z","lastTransitionTime":"2026-01-22T05:47:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.912708 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.912742 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.912753 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.912768 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:03 crc kubenswrapper[4933]: I0122 05:47:03.912777 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:03Z","lastTransitionTime":"2026-01-22T05:47:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.015790 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.015829 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.015840 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.015856 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.015865 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:04Z","lastTransitionTime":"2026-01-22T05:47:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.117812 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.118022 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.118128 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.118204 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.118277 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:04Z","lastTransitionTime":"2026-01-22T05:47:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.221184 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.221418 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.221486 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.221553 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.221608 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:04Z","lastTransitionTime":"2026-01-22T05:47:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.323387 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.323626 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.323683 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.323753 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.323839 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:04Z","lastTransitionTime":"2026-01-22T05:47:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.427183 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.427245 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.427263 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.427292 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.427310 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:04Z","lastTransitionTime":"2026-01-22T05:47:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.521146 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-03 19:58:00.421718827 +0000 UTC Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.530014 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.530042 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.530052 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.530067 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.530095 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:04Z","lastTransitionTime":"2026-01-22T05:47:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.633017 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.633368 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.633485 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.633594 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.633674 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:04Z","lastTransitionTime":"2026-01-22T05:47:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.736912 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.737422 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.737639 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.738173 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.738444 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:04Z","lastTransitionTime":"2026-01-22T05:47:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.841453 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.841514 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.841532 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.841557 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.841574 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:04Z","lastTransitionTime":"2026-01-22T05:47:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.943749 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.943787 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.943800 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.943816 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:04 crc kubenswrapper[4933]: I0122 05:47:04.943828 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:04Z","lastTransitionTime":"2026-01-22T05:47:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.046342 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.046402 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.046415 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.046431 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.046443 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:05Z","lastTransitionTime":"2026-01-22T05:47:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.148255 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.148295 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.148309 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.148325 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.148336 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:05Z","lastTransitionTime":"2026-01-22T05:47:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.250619 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.250652 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.250663 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.250680 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.250692 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:05Z","lastTransitionTime":"2026-01-22T05:47:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.352573 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.352752 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.352886 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.352976 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.353100 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:05Z","lastTransitionTime":"2026-01-22T05:47:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.456452 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.456522 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.456542 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.456568 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.456588 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:05Z","lastTransitionTime":"2026-01-22T05:47:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.490626 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.490716 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.490771 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.490782 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:47:05 crc kubenswrapper[4933]: E0122 05:47:05.490938 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:47:05 crc kubenswrapper[4933]: E0122 05:47:05.490870 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:47:05 crc kubenswrapper[4933]: E0122 05:47:05.491177 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:47:05 crc kubenswrapper[4933]: E0122 05:47:05.491230 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.524340 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-07 21:15:26.691320798 +0000 UTC Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.558788 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.558813 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.558824 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.558836 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.558845 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:05Z","lastTransitionTime":"2026-01-22T05:47:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.661243 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.661302 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.661318 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.661341 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.661358 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:05Z","lastTransitionTime":"2026-01-22T05:47:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.763682 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.763729 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.763740 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.763755 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.763767 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:05Z","lastTransitionTime":"2026-01-22T05:47:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.866598 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.866663 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.866683 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.866715 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.866733 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:05Z","lastTransitionTime":"2026-01-22T05:47:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.969196 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.969270 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.969296 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.969330 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:05 crc kubenswrapper[4933]: I0122 05:47:05.969353 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:05Z","lastTransitionTime":"2026-01-22T05:47:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.072825 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.072868 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.072888 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.072910 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.072927 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:06Z","lastTransitionTime":"2026-01-22T05:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.176107 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.176159 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.176176 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.176235 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.176254 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:06Z","lastTransitionTime":"2026-01-22T05:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.220194 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.220266 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.220284 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.220310 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.220331 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:06Z","lastTransitionTime":"2026-01-22T05:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:06 crc kubenswrapper[4933]: E0122 05:47:06.235228 4933 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c8062a48-506a-465a-9977-93e8530bae49\\\",\\\"systemUUID\\\":\\\"d621d350-6f7c-490e-a47d-b396db235280\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:06Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.240928 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.240990 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.241007 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.241037 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.241055 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:06Z","lastTransitionTime":"2026-01-22T05:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:06 crc kubenswrapper[4933]: E0122 05:47:06.261630 4933 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c8062a48-506a-465a-9977-93e8530bae49\\\",\\\"systemUUID\\\":\\\"d621d350-6f7c-490e-a47d-b396db235280\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:06Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.265942 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.265979 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.265994 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.266013 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.266024 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:06Z","lastTransitionTime":"2026-01-22T05:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:06 crc kubenswrapper[4933]: E0122 05:47:06.284798 4933 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c8062a48-506a-465a-9977-93e8530bae49\\\",\\\"systemUUID\\\":\\\"d621d350-6f7c-490e-a47d-b396db235280\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:06Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.289231 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.289309 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.289336 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.289366 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.289389 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:06Z","lastTransitionTime":"2026-01-22T05:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:06 crc kubenswrapper[4933]: E0122 05:47:06.306744 4933 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c8062a48-506a-465a-9977-93e8530bae49\\\",\\\"systemUUID\\\":\\\"d621d350-6f7c-490e-a47d-b396db235280\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:06Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.310135 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.310196 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.310220 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.310249 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.310267 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:06Z","lastTransitionTime":"2026-01-22T05:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:06 crc kubenswrapper[4933]: E0122 05:47:06.324141 4933 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:47:06Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c8062a48-506a-465a-9977-93e8530bae49\\\",\\\"systemUUID\\\":\\\"d621d350-6f7c-490e-a47d-b396db235280\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:06Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:06 crc kubenswrapper[4933]: E0122 05:47:06.324401 4933 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.326397 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.326435 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.326451 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.326470 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.326505 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:06Z","lastTransitionTime":"2026-01-22T05:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.429875 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.429945 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.429963 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.429988 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.430004 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:06Z","lastTransitionTime":"2026-01-22T05:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.430004 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:06Z","lastTransitionTime":"2026-01-22T05:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.525816 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 08:17:21.971452913 +0000 UTC
Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.532781 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.532832 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.532848 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.532868 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.532885 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:06Z","lastTransitionTime":"2026-01-22T05:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.635483 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.635545 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.635565 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.635591 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.635610 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:06Z","lastTransitionTime":"2026-01-22T05:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.738100 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.738144 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.738157 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.738176 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.738192 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:06Z","lastTransitionTime":"2026-01-22T05:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.840570 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.840643 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.840662 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.840687 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.840706 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:06Z","lastTransitionTime":"2026-01-22T05:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.943386 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.943500 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.943525 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.943557 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:06 crc kubenswrapper[4933]: I0122 05:47:06.943582 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:06Z","lastTransitionTime":"2026-01-22T05:47:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.046029 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.046109 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.046126 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.046148 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.046171 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:07Z","lastTransitionTime":"2026-01-22T05:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.148508 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.148569 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.148587 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.148611 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.148629 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:07Z","lastTransitionTime":"2026-01-22T05:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.251851 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.251915 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.251932 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.251956 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.251974 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:07Z","lastTransitionTime":"2026-01-22T05:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.354992 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.355275 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.355312 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.355343 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.355364 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:07Z","lastTransitionTime":"2026-01-22T05:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.458318 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.458351 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.458359 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.458371 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.458380 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:07Z","lastTransitionTime":"2026-01-22T05:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.489869 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm"
Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.489929 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.489878 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
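[editor's note] The "No sandbox for pod can be found" entries above and the "Error syncing pod, skipping" entries that follow are all downstream of a single condition: the container runtime reports NetworkReady=false because /etc/kubernetes/cni/net.d/ holds no CNI network configuration, so the kubelet refuses to create sandboxes for non-host-network pods. A rough sketch of that readiness signal, assuming the conventional CNI loader behavior of scanning the conf directory for .conf/.conflist/.json files (the extension set and first-match rule are assumptions; the directory name comes from the log).

// cni_ready_check.go — rough sketch of the NetworkReady probe described above.
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"sort"
)

func main() {
	confDir := "/etc/kubernetes/cni/net.d" // directory named in the log message
	entries, err := os.ReadDir(confDir)
	if err != nil {
		fmt.Println("NetworkReady=false:", err)
		return
	}
	var confs []string
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json": // assumed loader extensions
			confs = append(confs, e.Name())
		}
	}
	sort.Strings(confs) // loaders typically prefer the lexically first config
	if len(confs) == 0 {
		fmt.Println("NetworkReady=false: no CNI configuration file in", confDir)
		return
	}
	fmt.Println("NetworkReady=true, using", confs[0])
}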
pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:47:07 crc kubenswrapper[4933]: E0122 05:47:07.490195 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.490258 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:47:07 crc kubenswrapper[4933]: E0122 05:47:07.490334 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:47:07 crc kubenswrapper[4933]: E0122 05:47:07.490410 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.526563 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-10 03:06:17.154551472 +0000 UTC Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.561164 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.561223 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.561242 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.561265 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.561282 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:07Z","lastTransitionTime":"2026-01-22T05:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.663516 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.663558 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.663569 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.663586 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.663598 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:07Z","lastTransitionTime":"2026-01-22T05:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.766739 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.766813 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.766836 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.766858 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.766869 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:07Z","lastTransitionTime":"2026-01-22T05:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.869670 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.869747 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.869771 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.869802 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.869825 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:07Z","lastTransitionTime":"2026-01-22T05:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.972804 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.972879 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.972891 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.972940 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:07 crc kubenswrapper[4933]: I0122 05:47:07.972952 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:07Z","lastTransitionTime":"2026-01-22T05:47:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.075034 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.075118 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.075139 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.075164 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.075182 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:08Z","lastTransitionTime":"2026-01-22T05:47:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.177898 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.177930 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.177939 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.177952 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
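[editor's note] The block above is one beat of the cadence that dominates this stretch of the log: four "Recording event message for node" entries plus one "Node became not ready" condition write, repeating roughly every 100 ms. For logs like this it is usually faster to bucket entries by klog source location than to read linearly; a small sketch follows (the regex targets the header format visible here, i.e. severity letter, MMDD date, wall-clock time, pid, file:line, and would need adjusting for other formats). Run it as "go run klog_buckets.go < kubelet.log" to see how heavily kubelet_node_status.go:724 and setters.go:603 dominate.

// klog_buckets.go — count log entries per source location, reading stdin.
package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

// Matches headers like: I0122 05:47:08.075034 4933 kubelet_node_status.go:724] ...
var klogHeader = regexp.MustCompile(`([IWEF])(\d{4}) (\d{2}:\d{2}:\d{2}\.\d+)\s+(\d+) ([\w./-]+:\d+)\] `)

func main() {
	counts := map[string]int{}
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // some entries in this log are very long
	for sc.Scan() {
		if m := klogHeader.FindStringSubmatch(sc.Text()); m != nil {
			counts[m[5]]++ // bucket by file:line, e.g. setters.go:603
		}
	}
	for loc, n := range counts {
		fmt.Printf("%6d %s\n", n, loc)
	}
}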
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.177961 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:08Z","lastTransitionTime":"2026-01-22T05:47:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.280394 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.280829 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.281038 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.281305 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.281466 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:08Z","lastTransitionTime":"2026-01-22T05:47:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.384673 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.384741 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.384759 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.384787 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.384806 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:08Z","lastTransitionTime":"2026-01-22T05:47:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.487398 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.487451 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.487466 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.487485 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.487502 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:08Z","lastTransitionTime":"2026-01-22T05:47:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.527572 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-11 05:53:29.276439801 +0000 UTC
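[editor's note] This is the third certificate_manager.go:356 line in three seconds, each reporting the same kubelet-serving expiration (2026-02-24 05:53:03 UTC) but a different rotation deadline (2025-12-15, 2025-12-10, 2025-12-11), every one already behind the node clock of 2026-01-22. The deadline moves because it is recomputed with jitter on each pass, and once it is in the past the manager attempts rotation on every loop. Below is a sketch under the assumption that the deadline is drawn from roughly the 70-90% point of the certificate lifetime; the fractions and the issue time are illustrative, not taken from this log.

// rotation_deadline.go — illustrative sketch of a jittered rotation deadline.
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// nextRotationDeadline picks a random point late in the certificate lifetime,
// which is why each pass above logs a different deadline for the same expiry.
func nextRotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := notAfter.Sub(notBefore)
	jittered := time.Duration((0.7 + 0.2*rand.Float64()) * float64(total)) // assumed window
	return notBefore.Add(jittered)
}

func main() {
	expiry := time.Date(2026, 2, 24, 5, 53, 3, 0, time.UTC) // expiration from the log
	issued := expiry.AddDate(0, 0, -140)                    // placeholder issue time, not from the log
	nodeClock := time.Date(2026, 1, 22, 5, 47, 8, 0, time.UTC)
	for i := 0; i < 3; i++ {
		d := nextRotationDeadline(issued, expiry)
		fmt.Printf("candidate deadline %s (already due: %v)\n", d, nodeClock.After(d))
	}
}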
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.590873 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.590952 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.590975 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.591003 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.591025 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:08Z","lastTransitionTime":"2026-01-22T05:47:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.693774 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.693835 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.693853 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.693877 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.693894 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:08Z","lastTransitionTime":"2026-01-22T05:47:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.797059 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.797125 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.797135 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.797150 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.797160 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:08Z","lastTransitionTime":"2026-01-22T05:47:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.899490 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.899518 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.899527 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.899540 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:08 crc kubenswrapper[4933]: I0122 05:47:08.899550 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:08Z","lastTransitionTime":"2026-01-22T05:47:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.002105 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.002135 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.002143 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.002155 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.002164 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:09Z","lastTransitionTime":"2026-01-22T05:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.104666 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.104710 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.104721 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.104736 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.104746 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:09Z","lastTransitionTime":"2026-01-22T05:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.207302 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.207378 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.207396 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.207424 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.207442 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:09Z","lastTransitionTime":"2026-01-22T05:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.309641 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.309701 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.309718 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.309741 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.309760 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:09Z","lastTransitionTime":"2026-01-22T05:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.412336 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.412374 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.412384 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.412399 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.412411 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:09Z","lastTransitionTime":"2026-01-22T05:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.489894 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.489975 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.490520 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.490595 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:47:09 crc kubenswrapper[4933]: E0122 05:47:09.490730 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:47:09 crc kubenswrapper[4933]: E0122 05:47:09.491005 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:47:09 crc kubenswrapper[4933]: E0122 05:47:09.491265 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.491296 4933 scope.go:117] "RemoveContainer" containerID="cd9cfc51c4b76a4a0fe6458da0e032e772b1876046969f7bd972e265fa5a8ca0" Jan 22 05:47:09 crc kubenswrapper[4933]: E0122 05:47:09.491444 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.515359 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.515392 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.515400 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.515417 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.515427 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:09Z","lastTransitionTime":"2026-01-22T05:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.527734 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 15:31:17.877121286 +0000 UTC Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.618201 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.618261 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.618271 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.618287 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.618296 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:09Z","lastTransitionTime":"2026-01-22T05:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.721714 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.721805 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.721831 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.721862 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.721880 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:09Z","lastTransitionTime":"2026-01-22T05:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.824730 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.824918 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.824960 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.824992 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.825029 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:09Z","lastTransitionTime":"2026-01-22T05:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.930258 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.930298 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.930307 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.930322 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:09 crc kubenswrapper[4933]: I0122 05:47:09.930334 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:09Z","lastTransitionTime":"2026-01-22T05:47:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.033289 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.033358 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.033381 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.033413 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.033436 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:10Z","lastTransitionTime":"2026-01-22T05:47:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.135486 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.135519 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.135529 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.135542 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.135552 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:10Z","lastTransitionTime":"2026-01-22T05:47:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.237523 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.237567 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.237575 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.237589 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.237599 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:10Z","lastTransitionTime":"2026-01-22T05:47:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.340496 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.340563 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.340577 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.340595 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.340609 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:10Z","lastTransitionTime":"2026-01-22T05:47:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.443701 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.443745 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.443755 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.443773 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.443785 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:10Z","lastTransitionTime":"2026-01-22T05:47:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.528519 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-25 14:09:51.550760165 +0000 UTC Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.546256 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.546286 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.546296 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.546309 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.546317 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:10Z","lastTransitionTime":"2026-01-22T05:47:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.649782 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.650189 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.650400 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.650608 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.650773 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:10Z","lastTransitionTime":"2026-01-22T05:47:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.753673 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.753732 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.753744 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.753766 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.753779 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:10Z","lastTransitionTime":"2026-01-22T05:47:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.857018 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.857332 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.857491 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.857635 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.857803 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:10Z","lastTransitionTime":"2026-01-22T05:47:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.985559 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.985624 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.985636 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.985654 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.985666 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:10Z","lastTransitionTime":"2026-01-22T05:47:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:10 crc kubenswrapper[4933]: I0122 05:47:10.989944 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-z88sj_6a721333-1932-4bb0-b384-c034492e59c4/ovnkube-controller/2.log" Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.002730 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" event={"ID":"6a721333-1932-4bb0-b384-c034492e59c4","Type":"ContainerStarted","Data":"2344ed0bcca60932e6bdea03fbc86ae7bd867dd841e5bb1bd3544dac4543329c"} Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.003489 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.024660 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jr6rw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68112ca379a4d93e242173f33fb845379f58d92e02019847ede853e7a61df83f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3a6b69e5a5d7420dbcda9a6a023402f27c1d584233c06bfb3d09ae7efb243e31\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:46:59Z\\\",\\\"message\\\":\\\"2026-01-22T05:46:14+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_c2392d30-71bb-4850-932e-a51973540e40\\\\n2026-01-22T05:46:14+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_c2392d30-71bb-4850-932e-a51973540e40 to /host/opt/cni/bin/\\\\n2026-01-22T05:46:14Z [verbose] multus-daemon started\\\\n2026-01-22T05:46:14Z [verbose] Readiness Indicator file check\\\\n2026-01-22T05:46:59Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v4gxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jr6rw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:11Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.037791 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83dfdde7-cd49-49e0-85a0-0165d464b2c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e3de35a667d3620666041da8bcc357baea90e882fba4d33ef6c9f38906699cac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q8l78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:11Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.046400 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"be15cabd-6272-4819-9132-cbfe3e8a3b22\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a7388e9b8023ce1ca28ede29592438fb4cd2f85800d46b027cb5b4e8c100ebd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://053a5f03773a50e6add05b0c1d8de76035b560baaa24f51d573ca1f8305a1048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://053a5f03773a50e6add05b0c1d8de76035b560baaa24f51d573ca1f8305a1048\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:11Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.057178 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59b05e4bfee6067658a60220a411d49a83c5ca76039e31325ce7646cbbeeba3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71b7f697413549ee75514e57fcb6e2d93c6b7e349b1cf93e8fc8844c96036d60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:11Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.067927 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c47f771dea621131d66ab686e6c28f7f5aeb129834c0f8308f467c2bd1b871e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:11Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.079557 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:11Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.088234 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.088268 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.088277 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.088292 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.088300 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:11Z","lastTransitionTime":"2026-01-22T05:47:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.090954 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nvpgt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126473c5-96ff-4ca9-83c0-7548d7e219c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64f3d49f15e75b73f18118e60b0e8cdae794506cff25ef11ea3a217db91a4b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w6xgn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nvpgt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:11Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.107991 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a721333-1932-4bb0-b384-c034492e59c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2344ed0bcca60932e6bdea03fbc86ae7bd867dd841e5bb1bd3544dac4543329c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd9cfc51c4b76a4a0fe6458da0e032e772b1876046969f7bd972e265fa5a8ca0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:46:40Z\\\",\\\"message\\\":\\\"rce:services.Addr{IP:\\\\\\\"10.217.4.153\\\\\\\", Port:5443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF0122 05:46:40.416204 6546 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:40Z is after 2025-08-24T17:21:41Z]\\\\nI0122 05:46:40.416173 6546 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-operator-lifecycle-manager/package-server-manager-metrics]} 
name:Service_openshift-operator-lifecycle-manage\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:39Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11
\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z88sj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:11Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.122428 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:46:10Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 05:46:04.652334 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:46:04.654188 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-11537279/tls.crt::/tmp/serving-cert-11537279/tls.key\\\\\\\"\\\\nI0122 05:46:10.676992 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:46:10.680821 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:46:10.680851 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:46:10.680897 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:46:10.680911 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:46:10.690531 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 05:46:10.690586 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690615 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 05:46:10.690623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 05:46:10.690632 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 05:46:10.690639 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 05:46:10.691151 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:46:10.696387 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:11Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.134234 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88329ed7-908d-4584-abfb-6c24f9a764f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5e260f75dcd418c5e606ef0ce7c06aa8db34b655acdc7ed4d7540f3ed59d873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ad10cfd00a2bd6dab0704baa3988eab6b7b97812ef8a6074b9bc2c2a03df731\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://401030b22a7ca1989952ec1bf2d9d7d8a5b78e0cba875dcd6b1d2fe4f2548c54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ada618a16d850d1ea940debe486a3836a49697a4742426d29bddb32d496a9d61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:11Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.156122 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27ea089ef37f2af216c553e31f4852fe12489f919f95c9c6ec1c579babcedbb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:11Z is after 2025-08-24T17:21:41Z"
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.182753 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:11Z is after 2025-08-24T17:21:41Z"
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.190133 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.190196 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.190210 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.190228 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.190238 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:11Z","lastTransitionTime":"2026-01-22T05:47:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.194164 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:11Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.202655 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-t8rgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0902347a-c5e2-4891-812b-cfe6efc32261\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"message\\\":\\\"containers with unready 
status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4ghgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4ghgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-t8rgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:11Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.211429 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2b01760-735d-4991-8e66-28149847868b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8018d50ee76b7bed70197f4085cb2157cbad29ecbb0f9c3ea6cb4ed0621877ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9aa4ed2d41484a7f47e481ae23e10c3a264edfdaddeb4c8f6ffd946694c88481\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa57c9c8f2cbba4f1282222cf6f3a29fd59efe4a378222e613eaeb32f0b71877\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://171c23458dabebaa0ddea08c10f662edcab7797ef6dffaab101c75d6a0737dff\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://171c23458dabebaa0ddea08c10f662edcab7797ef6dffaab101c75d6a0737dff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:11Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.220793 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f2db1d-40cb-4864-917b-3b99f69cdafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b19005c1dbe0b643170cfc21cda4a46a6ab3630efbf5198dc7c19882bb5ff6a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99fe3d10aa7ee2f1de1f5f23ed8c4c7ac9efe69ce9e366f02e2e8efca2c017e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zfnsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:11Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.229781 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7r526" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2b23423-6793-44cd-b47e-dc4d25bbe3ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78e689c029c4c4b19873187eddbdcd40bbc62b4e93d9a120a801e04db7c56ecc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hfvwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\
"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7r526\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:11Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.238741 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-28bzv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9a82407-aef2-4209-bb3f-6c89e11387e3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66a005229530dfbb93c94f5b93260aeb17f8852e46eed9c67d1558c04d1552a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vkg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ad051b09250b7060a688905e48c62f57620af633bc9a1e302e91eb076c92981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"k
ube-api-access-2vkg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-28bzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:11Z is after 2025-08-24T17:21:41Z"
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.300395 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.300437 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.300446 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.300461 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.300471 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:11Z","lastTransitionTime":"2026-01-22T05:47:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.403702 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.403764 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.403785 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.403812 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.403832 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:11Z","lastTransitionTime":"2026-01-22T05:47:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.490650 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 22 05:47:11 crc kubenswrapper[4933]: E0122 05:47:11.490782 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.490979 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 22 05:47:11 crc kubenswrapper[4933]: E0122 05:47:11.491037 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.491201 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 22 05:47:11 crc kubenswrapper[4933]: E0122 05:47:11.491266 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.491393 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm"
Jan 22 05:47:11 crc kubenswrapper[4933]: E0122 05:47:11.491462 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261"
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.506068 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.506120 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.506131 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.506145 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.506157 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:11Z","lastTransitionTime":"2026-01-22T05:47:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.529171 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-11 21:54:36.858367812 +0000 UTC
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.608787 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.608847 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.608865 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.608890 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.608909 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:11Z","lastTransitionTime":"2026-01-22T05:47:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.710578 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.710638 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.710650 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.710665 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.710674 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:11Z","lastTransitionTime":"2026-01-22T05:47:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.813573 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.813627 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.813648 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.813675 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.813696 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:11Z","lastTransitionTime":"2026-01-22T05:47:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.916696 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.916757 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.916775 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.916799 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:11 crc kubenswrapper[4933]: I0122 05:47:11.916818 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:11Z","lastTransitionTime":"2026-01-22T05:47:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.018741 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.018801 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.018816 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.018838 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.018851 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:12Z","lastTransitionTime":"2026-01-22T05:47:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.121512 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.121553 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.121562 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.121577 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.121586 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:12Z","lastTransitionTime":"2026-01-22T05:47:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.223456 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.223499 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.223508 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.223521 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.223532 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:12Z","lastTransitionTime":"2026-01-22T05:47:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.326351 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.326759 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.326778 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.326800 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.326817 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:12Z","lastTransitionTime":"2026-01-22T05:47:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.429482 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.429596 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.429617 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.429643 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.429663 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:12Z","lastTransitionTime":"2026-01-22T05:47:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.505338 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.518622 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.529348 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nvpgt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126473c5-96ff-4ca9-83c0-7548d7e219c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64f3d49f15e75b73f18118e60b0e8cdae794506cff25ef11ea3a217db91a4b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w6xgn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.
11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nvpgt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:12Z is after 2025-08-24T17:21:41Z"
Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.529353 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-28 22:36:28.724685255 +0000 UTC
Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.532601 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.532639 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.532650 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.532668 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.532680 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:12Z","lastTransitionTime":"2026-01-22T05:47:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.553615 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a721333-1932-4bb0-b384-c034492e59c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2344ed0bcca60932e6bdea03fbc86ae7bd867dd841e5bb1bd3544dac4543329c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd9cfc51c4b76a4a0fe6458da0e032e772b1876046969f7bd972e265fa5a8ca0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:46:40Z\\\",\\\"message\\\":\\\"rce:services.Addr{IP:\\\\\\\"10.217.4.153\\\\\\\", Port:5443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF0122 05:46:40.416204 6546 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:40Z is after 2025-08-24T17:21:41Z]\\\\nI0122 05:46:40.416173 6546 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-operator-lifecycle-manager/package-server-manager-metrics]} 
name:Service_openshift-operator-lifecycle-manage\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:39Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11
\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z88sj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.572403 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:46:10Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 05:46:04.652334 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:46:04.654188 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-11537279/tls.crt::/tmp/serving-cert-11537279/tls.key\\\\\\\"\\\\nI0122 05:46:10.676992 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:46:10.680821 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:46:10.680851 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:46:10.680897 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:46:10.680911 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:46:10.690531 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 05:46:10.690586 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690615 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 05:46:10.690623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 05:46:10.690632 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 05:46:10.690639 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 05:46:10.691151 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:46:10.696387 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.588993 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"88329ed7-908d-4584-abfb-6c24f9a764f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5e260f75dcd418c5e606ef0ce7c06aa8db34b655acdc7ed4d7540f3ed59d873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ad10cfd00a2bd6dab0704baa3988eab6b7b97812ef8a6074b9bc2c2a03df731\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://401030b22a7ca1989952ec1bf2d9d7d8a5b78e0cba875dcd6b1d2fe4f2548c54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ada618a16d850d1ea940debe486a3836a49697a4742426d29bddb32d496a9d61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.604645 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27ea089ef37f2af216c553e31f4852fe12489f919f95c9c6ec1c579babcedbb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.616634 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-7r526" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2b23423-6793-44cd-b47e-dc4d25bbe3ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78e689c029c4c4b19873187eddbdcd40bbc62b4e93d9a120a801e04db7c56ecc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hfvwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7r526\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.625596 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-28bzv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9a82407-aef2-4209-bb3f-6c89e11387e3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66a005229530dfbb93c94f5b93260aeb17f8852e46eed9c67d1558c04d1552a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vkg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ad051b09250b7060a688905e48c62f57620af633bc9a1e302e91eb076c92981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vkg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-28bzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:12Z is after 2025-08-24T17:21:41Z" Jan 22 
05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.635018 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.635058 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.635086 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.635107 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.635120 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:12Z","lastTransitionTime":"2026-01-22T05:47:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.636268 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-t8rgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0902347a-c5e2-4891-812b-cfe6efc32261\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4ghgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4ghgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-t8rgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.648430 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2b01760-735d-4991-8e66-28149847868b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8018d50ee76b7bed70197f4085cb2157cbad29ecbb0f9c3ea6cb4ed0621877ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9aa4ed2d41484a7f47e481ae23e10c3a264edfdaddeb4c8f6ffd946694c88481\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa57c9c8f2cbba4f1282222cf6f3a29fd59efe4a378222e613eaeb32f0b71877\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://171c23458dabebaa0ddea08c10f662edcab7797ef6dffaab101c75d6a0737dff\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://171c23458dabebaa0ddea08c10f662edcab7797ef6dffaab101c75d6a0737dff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.658787 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f2db1d-40cb-4864-917b-3b99f69cdafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b19005c1dbe0b643170cfc21cda4a46a6ab3630efbf5198dc7c19882bb5ff6a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99fe3d10aa7ee2f1de1f5f23ed8c4c7ac9efe69ce9e366f02e2e8efca2c017e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zfnsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.672376 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jr6rw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68112ca379a4d93e242173f33fb845379f58d92e02019847ede853e7a61df83f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3a6b69e5a5d7420dbcda9a6a023402f27c1d584233c06bfb3d09ae7efb243e31\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:46:59Z\\\",\\\"message\\\":\\\"2026-01-22T05:46:14+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_c2392d30-71bb-4850-932e-a51973540e40\\\\n2026-01-22T05:46:14+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_c2392d30-71bb-4850-932e-a51973540e40 to /host/opt/cni/bin/\\\\n2026-01-22T05:46:14Z [verbose] multus-daemon started\\\\n2026-01-22T05:46:14Z [verbose] Readiness 
Indicator file check\\\\n2026-01-22T05:46:59Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v4gxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jr6rw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.686872 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83dfdde7-cd49-49e0-85a0-0165d464b2c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e3de35a667d3620666041da8bcc357baea90e882fba4d33ef6c9f38906699cac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q8l78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.699681 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.711967 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"be15cabd-6272-4819-9132-cbfe3e8a3b22\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a7388e9b8023ce1ca28ede29592438fb4cd2f85800d46b027cb5b4e8c100ebd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://053a5f03773a50e6add05b0c1d8de76035b560baaa24f51d573ca1f8305a1048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://053a5f03773a50e6add05b0c1d8de76035b560baaa24f51d573ca1f8305a1048\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.725702 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59b05e4bfee6067658a60220a411d49a83c5ca76039e31325ce7646cbbeeba3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71b7f697413549ee75514e57fcb6e2d93c6b7e349b1cf93e8fc8844c96036d60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.737427 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.737487 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.737505 4933 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.737531 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.737551 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:12Z","lastTransitionTime":"2026-01-22T05:47:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.743511 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c47f771dea621131d66ab686e6c28f7f5aeb129834c0f8308f467c2bd1b871e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:12Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.848333 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.848401 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.848423 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:12 crc 
Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.848450 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.848470 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:12Z","lastTransitionTime":"2026-01-22T05:47:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.952017 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.952145 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.952175 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.952206 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:12 crc kubenswrapper[4933]: I0122 05:47:12.952229 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:12Z","lastTransitionTime":"2026-01-22T05:47:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.055682 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.055935 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.056120 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.056242 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.056374 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:13Z","lastTransitionTime":"2026-01-22T05:47:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.159886 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.160273 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.160430 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.160719 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.160929 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:13Z","lastTransitionTime":"2026-01-22T05:47:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.264415 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.264856 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.265004 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.265395 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.265572 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:13Z","lastTransitionTime":"2026-01-22T05:47:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.368310 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.368602 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.368756 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.368881 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.368990 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:13Z","lastTransitionTime":"2026-01-22T05:47:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.472446 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.472763 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.472982 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.473243 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.473530 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:13Z","lastTransitionTime":"2026-01-22T05:47:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.490018 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.490127 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.490127 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 22 05:47:13 crc kubenswrapper[4933]: E0122 05:47:13.490218 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 22 05:47:13 crc kubenswrapper[4933]: E0122 05:47:13.490323 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 22 05:47:13 crc kubenswrapper[4933]: E0122 05:47:13.490400 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.490676 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm"
Jan 22 05:47:13 crc kubenswrapper[4933]: E0122 05:47:13.490952 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.530180 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 05:14:49.841865562 +0000 UTC
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.576336 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.576415 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.576449 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.576480 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.576502 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:13Z","lastTransitionTime":"2026-01-22T05:47:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.679523 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.679865 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.680157 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.680429 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.680695 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:13Z","lastTransitionTime":"2026-01-22T05:47:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.784037 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.784690 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.784880 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.785027 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.785313 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:13Z","lastTransitionTime":"2026-01-22T05:47:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.889046 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.889180 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.889209 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.889243 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.889267 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:13Z","lastTransitionTime":"2026-01-22T05:47:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.992560 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.992630 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.992653 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:13 crc kubenswrapper[4933]: I0122 05:47:13.992681 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Has your network provider started?"} Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.095603 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.095661 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.095678 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.095700 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.095716 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:14Z","lastTransitionTime":"2026-01-22T05:47:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.198923 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.198971 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.198989 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.199011 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.199033 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:14Z","lastTransitionTime":"2026-01-22T05:47:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.302022 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.302133 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.302151 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.302178 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.302194 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:14Z","lastTransitionTime":"2026-01-22T05:47:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.405258 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.405615 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.405850 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.406055 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.406280 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:14Z","lastTransitionTime":"2026-01-22T05:47:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.508898 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.508928 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.508937 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.508950 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.508958 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:14Z","lastTransitionTime":"2026-01-22T05:47:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.531376 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-29 01:46:02.374978299 +0000 UTC
Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.611258 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.611336 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.611353 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.611376 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.611393 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:14Z","lastTransitionTime":"2026-01-22T05:47:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.713735 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.713782 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.713794 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.713812 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.713825 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:14Z","lastTransitionTime":"2026-01-22T05:47:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.816586 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.816963 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.817148 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.817311 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.817447 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:14Z","lastTransitionTime":"2026-01-22T05:47:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.919781 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.919838 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.919855 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.919877 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:14 crc kubenswrapper[4933]: I0122 05:47:14.919894 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:14Z","lastTransitionTime":"2026-01-22T05:47:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.022507 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.022652 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.022681 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.022709 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.022728 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:15Z","lastTransitionTime":"2026-01-22T05:47:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.126345 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.126400 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.126415 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.126435 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.126451 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:15Z","lastTransitionTime":"2026-01-22T05:47:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.229922 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.229985 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.230010 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.230041 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.230071 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:15Z","lastTransitionTime":"2026-01-22T05:47:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.333770 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.333823 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.333841 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.333867 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.333885 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:15Z","lastTransitionTime":"2026-01-22T05:47:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.437445 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.437519 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.437542 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.437574 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.437597 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:15Z","lastTransitionTime":"2026-01-22T05:47:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.490415 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.490480 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.490492 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm"
Jan 22 05:47:15 crc kubenswrapper[4933]: E0122 05:47:15.491473 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 22 05:47:15 crc kubenswrapper[4933]: E0122 05:47:15.490998 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 22 05:47:15 crc kubenswrapper[4933]: E0122 05:47:15.491613 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261"
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.490577 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:47:15 crc kubenswrapper[4933]: E0122 05:47:15.491735 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.532022 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 20:16:54.169743795 +0000 UTC Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.540298 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.540343 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.540357 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.540376 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.540391 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:15Z","lastTransitionTime":"2026-01-22T05:47:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.553562 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.553712 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.553762 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:47:15 crc kubenswrapper[4933]: E0122 05:47:15.553871 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:19.553835067 +0000 UTC m=+147.390960460 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:47:15 crc kubenswrapper[4933]: E0122 05:47:15.553928 4933 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.553979 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:47:15 crc kubenswrapper[4933]: E0122 05:47:15.553933 4933 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 22 05:47:15 crc kubenswrapper[4933]: E0122 05:47:15.554044 4933 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 22 05:47:15 crc kubenswrapper[4933]: E0122 05:47:15.554047 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert 
podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 05:48:19.554008201 +0000 UTC m=+147.391133594 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 22 05:47:15 crc kubenswrapper[4933]: E0122 05:47:15.554064 4933 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.554157 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:47:15 crc kubenswrapper[4933]: E0122 05:47:15.554169 4933 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 22 05:47:15 crc kubenswrapper[4933]: E0122 05:47:15.554250 4933 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 22 05:47:15 crc kubenswrapper[4933]: E0122 05:47:15.554268 4933 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 22 05:47:15 crc kubenswrapper[4933]: E0122 05:47:15.554292 4933 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:47:15 crc kubenswrapper[4933]: E0122 05:47:15.554311 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-22 05:48:19.554155304 +0000 UTC m=+147.391280687 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:47:15 crc kubenswrapper[4933]: E0122 05:47:15.554339 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2026-01-22 05:48:19.554325118 +0000 UTC m=+147.391450501 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 22 05:47:15 crc kubenswrapper[4933]: E0122 05:47:15.554361 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-22 05:48:19.554350018 +0000 UTC m=+147.391475411 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.643438 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.643522 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.643545 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.643574 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.643596 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:15Z","lastTransitionTime":"2026-01-22T05:47:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.745741 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.745780 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.745790 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.745807 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.745818 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:15Z","lastTransitionTime":"2026-01-22T05:47:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.848931 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.849379 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.849398 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.849423 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.849443 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:15Z","lastTransitionTime":"2026-01-22T05:47:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.952910 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.953015 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.953038 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.953131 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:15 crc kubenswrapper[4933]: I0122 05:47:15.953152 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:15Z","lastTransitionTime":"2026-01-22T05:47:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.056273 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.056326 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.056337 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.056354 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.056365 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:16Z","lastTransitionTime":"2026-01-22T05:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.159811 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.159856 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.159867 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.159882 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.159893 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:16Z","lastTransitionTime":"2026-01-22T05:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.263335 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.263390 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.263407 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.263431 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.263447 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:16Z","lastTransitionTime":"2026-01-22T05:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.366161 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.366220 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.366238 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.366262 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.366279 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:16Z","lastTransitionTime":"2026-01-22T05:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.469333 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.469636 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.469654 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.469684 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.469706 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:16Z","lastTransitionTime":"2026-01-22T05:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.532658 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-10 09:36:47.64376631 +0000 UTC
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.571616 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.571672 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.571685 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.571715 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.571726 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:16Z","lastTransitionTime":"2026-01-22T05:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.674252 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.674296 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.674305 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.674321 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.674336 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:16Z","lastTransitionTime":"2026-01-22T05:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.692621 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.692677 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.692693 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.692715 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.692732 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:16Z","lastTransitionTime":"2026-01-22T05:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:47:16 crc kubenswrapper[4933]: E0122 05:47:16.715305 4933 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:47:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:47:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:47:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:47:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c8062a48-506a-465a-9977-93e8530bae49\\\",\\\"systemUUID\\\":\\\"d621d350-6f7c-490e-a47d-b396db235280\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:16Z is after 2025-08-24T17:21:41Z"
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.718729 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.718788 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
event="NodeHasNoDiskPressure" Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.718800 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.718814 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.718844 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:16Z","lastTransitionTime":"2026-01-22T05:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:16 crc kubenswrapper[4933]: E0122 05:47:16.731022 4933 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:47:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:47:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:47:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:47:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c8062a48-506a-465a-9977-93e8530bae49\\\",\\\"systemUUID\\\":\\\"d621d350-6f7c-490e-a47d-b396db235280\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:16Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.734906 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.734963 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.734976 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.734988 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.735010 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:16Z","lastTransitionTime":"2026-01-22T05:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:16 crc kubenswrapper[4933]: E0122 05:47:16.751772 4933 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:47:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:47:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:47:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:47:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c8062a48-506a-465a-9977-93e8530bae49\\\",\\\"systemUUID\\\":\\\"d621d350-6f7c-490e-a47d-b396db235280\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:16Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.755301 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.755348 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.755364 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.755385 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.755401 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:16Z","lastTransitionTime":"2026-01-22T05:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:16 crc kubenswrapper[4933]: E0122 05:47:16.778030 4933 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:47:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:47:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:47:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:47:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c8062a48-506a-465a-9977-93e8530bae49\\\",\\\"systemUUID\\\":\\\"d621d350-6f7c-490e-a47d-b396db235280\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:16Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.795452 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.795482 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.795491 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.795506 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.795516 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:16Z","lastTransitionTime":"2026-01-22T05:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:16 crc kubenswrapper[4933]: E0122 05:47:16.825252 4933 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:47:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:47:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:47:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:47:16Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"c8062a48-506a-465a-9977-93e8530bae49\\\",\\\"systemUUID\\\":\\\"d621d350-6f7c-490e-a47d-b396db235280\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:16Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:16 crc kubenswrapper[4933]: E0122 05:47:16.825618 4933 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.828207 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.828466 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.828656 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.828844 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.829027 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:16Z","lastTransitionTime":"2026-01-22T05:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.932496 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.932563 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.932583 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.932607 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:16 crc kubenswrapper[4933]: I0122 05:47:16.932626 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:16Z","lastTransitionTime":"2026-01-22T05:47:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.035870 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.035927 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.035938 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.035954 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.035967 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:17Z","lastTransitionTime":"2026-01-22T05:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.139489 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.139752 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.139862 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.139958 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.140059 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:17Z","lastTransitionTime":"2026-01-22T05:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.243798 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.243853 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.243877 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.243906 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.243928 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:17Z","lastTransitionTime":"2026-01-22T05:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.346287 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.346538 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.346560 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.346590 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.346611 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:17Z","lastTransitionTime":"2026-01-22T05:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.449758 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.449817 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.449836 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.449860 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.449876 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:17Z","lastTransitionTime":"2026-01-22T05:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.490431 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.490537 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.490564 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.490582 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:47:17 crc kubenswrapper[4933]: E0122 05:47:17.490653 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:47:17 crc kubenswrapper[4933]: E0122 05:47:17.491186 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:47:17 crc kubenswrapper[4933]: E0122 05:47:17.491306 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
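Everything the kubelet is waiting on here is the appearance of a CNI config file: the runtime reports NetworkReady=false until something (on this cluster, the OVN-Kubernetes pods seen later in the log) writes a network configuration into /etc/kubernetes/cni/net.d/. A minimal sketch of that check, assuming the usual libcni file conventions (*.conf, *.conflist, *.json); this is an illustration, not the kubelet's or the runtime's actual code:

```go
// cnicheck.go - report whether a CNI conf directory would satisfy a
// "network ready" check. A sketch of the standard libcni convention
// (*.conf, *.conflist, *.json), not CRI-O's exact logic.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir := "/etc/kubernetes/cni/net.d" // directory named in the log above
	if len(os.Args) > 1 {
		dir = os.Args[1]
	}
	var confs []string
	for _, pat := range []string{"*.conf", "*.conflist", "*.json"} {
		m, err := filepath.Glob(filepath.Join(dir, pat))
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		confs = append(confs, m...)
	}
	if len(confs) == 0 {
		// This is the state the kubelet keeps reporting: NetworkReady=false.
		fmt.Printf("no CNI configuration file in %s: network plugin not ready\n", dir)
		os.Exit(2)
	}
	fmt.Printf("found %d CNI config file(s): %v\n", len(confs), confs)
}
```

Run against the directory named in the log, a non-zero exit mirrors the not-ready state the condition message describes.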
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:47:17 crc kubenswrapper[4933]: E0122 05:47:17.491497 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.533365 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-18 09:08:56.261842498 +0000 UTC Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.553497 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.553547 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.553558 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.553576 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.553586 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:17Z","lastTransitionTime":"2026-01-22T05:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.656728 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.656785 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.656801 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.656824 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.656840 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:17Z","lastTransitionTime":"2026-01-22T05:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.759441 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.759483 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.759507 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.759525 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.759540 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:17Z","lastTransitionTime":"2026-01-22T05:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.863230 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.863294 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.863315 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.863340 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.863359 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:17Z","lastTransitionTime":"2026-01-22T05:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.966842 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.966900 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.966916 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.966937 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:17 crc kubenswrapper[4933]: I0122 05:47:17.966952 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:17Z","lastTransitionTime":"2026-01-22T05:47:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:18 crc kubenswrapper[4933]: I0122 05:47:18.190235 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:18 crc kubenswrapper[4933]: I0122 05:47:18.190269 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:18 crc kubenswrapper[4933]: I0122 05:47:18.190277 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:18 crc kubenswrapper[4933]: I0122 05:47:18.190290 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:18 crc kubenswrapper[4933]: I0122 05:47:18.190298 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:18Z","lastTransitionTime":"2026-01-22T05:47:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:18 crc kubenswrapper[4933]: I0122 05:47:18.292570 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:18 crc kubenswrapper[4933]: I0122 05:47:18.292601 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:18 crc kubenswrapper[4933]: I0122 05:47:18.292609 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:18 crc kubenswrapper[4933]: I0122 05:47:18.292622 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:18 crc kubenswrapper[4933]: I0122 05:47:18.292631 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:18Z","lastTransitionTime":"2026-01-22T05:47:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:18 crc kubenswrapper[4933]: I0122 05:47:18.395619 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:18 crc kubenswrapper[4933]: I0122 05:47:18.395680 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:18 crc kubenswrapper[4933]: I0122 05:47:18.395698 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:18 crc kubenswrapper[4933]: I0122 05:47:18.395722 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:18 crc kubenswrapper[4933]: I0122 05:47:18.395741 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:18Z","lastTransitionTime":"2026-01-22T05:47:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:18 crc kubenswrapper[4933]: I0122 05:47:18.498192 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:18 crc kubenswrapper[4933]: I0122 05:47:18.498271 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:18 crc kubenswrapper[4933]: I0122 05:47:18.498295 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:18 crc kubenswrapper[4933]: I0122 05:47:18.498322 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:18 crc kubenswrapper[4933]: I0122 05:47:18.498343 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:18Z","lastTransitionTime":"2026-01-22T05:47:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:18 crc kubenswrapper[4933]: I0122 05:47:18.537460 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 14:42:09.627761552 +0000 UTC Jan 22 05:47:18 crc kubenswrapper[4933]: I0122 05:47:18.601330 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:18 crc kubenswrapper[4933]: I0122 05:47:18.601391 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:18 crc kubenswrapper[4933]: I0122 05:47:18.601413 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:18 crc kubenswrapper[4933]: I0122 05:47:18.601444 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:18 crc kubenswrapper[4933]: I0122 05:47:18.601466 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:18Z","lastTransitionTime":"2026-01-22T05:47:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
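The certificate_manager.go line recurs about once a second with the same expiration but a different rotation deadline. That pattern matches client-go's certificate manager, which re-draws a jittered deadline uniformly between 70% and 90% of the certificate's validity window on each pass (the 70/90 split is an assumption taken from upstream client-go, not something this log states); every drawn deadline already lying in the past is why rotation keeps being attempted. A sketch of that computation:

```go
// jitter.go - recompute a jittered rotation deadline the way client-go's
// kubelet-serving certificate manager does (a sketch, not the real code).
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// nextRotationDeadline draws a deadline at 70-90% of the certificate's
// validity window. The 0.7/0.2 constants are assumed from upstream
// client-go's jittered computation, not quoted from this log.
func nextRotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := notAfter.Sub(notBefore)
	jittered := time.Duration(float64(total) * (0.7 + 0.2*rand.Float64()))
	return notBefore.Add(jittered)
}

func main() {
	// Expiry taken from the log; the issue time is an assumption for the demo.
	notAfter, _ := time.Parse(time.RFC3339, "2026-02-24T05:53:03Z")
	notBefore := notAfter.Add(-30 * 24 * time.Hour)
	for i := 0; i < 3; i++ {
		d := nextRotationDeadline(notBefore, notAfter)
		fmt.Printf("rotation deadline is %s (past due: %v)\n", d, time.Now().After(d))
	}
}
```

Each call prints a different deadline for the same certificate, which is exactly the shape of the repeated log line.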
[... node-status cycle repeats at ~100 ms intervals, 05:47:18.601 through 05:47:19.433 ...]
[... the "No sandbox for pod can be found. Need to start a new one" / "Error syncing pod, skipping" batch recurs at 05:47:19.489-19.490 for the same four pods (network-check-source, networking-console-plugin, network-check-target, network-metrics-daemon) with identical errors and podUIDs ...]
[... node-status cycle at 05:47:19.536 ...]
Jan 22 05:47:19 crc kubenswrapper[4933]: I0122 05:47:19.538493 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-17 23:38:14.232734334 +0000 UTC
[... node-status cycle repeats at ~100 ms intervals, 05:47:19.638 through 05:47:20.461 ...]
Jan 22 05:47:20 crc kubenswrapper[4933]: I0122 05:47:20.539628 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-19 19:30:17.465623986 +0000 UTC
[... node-status cycle repeats at ~100 ms intervals, 05:47:20.564 through 05:47:21.389 ...]
[... the "No sandbox for pod can be found. Need to start a new one" / "Error syncing pod, skipping" batch recurs at 05:47:21.490 for the same four pods, identical errors and podUIDs ...]
[... node-status cycle at 05:47:21.491 ...]
Jan 22 05:47:21 crc kubenswrapper[4933]: I0122 05:47:21.539744 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-07 19:53:11.420265713 +0000 UTC
[... node-status cycle repeats at ~100 ms intervals, 05:47:21.594 through 05:47:22.006 ...]
Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.036110 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="ovnkube-controller" probeResult="failure" output=""
[... node-status cycle repeats at 05:47:22.109 and 05:47:22.212; the excerpt ends mid-entry ...]
Has your network provider started?"} Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.315179 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.315244 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.315265 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.315295 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.315318 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:22Z","lastTransitionTime":"2026-01-22T05:47:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.418748 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.419067 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.419212 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.419327 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.419443 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:22Z","lastTransitionTime":"2026-01-22T05:47:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.509625 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jr6rw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:47:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68112ca379a4d93e242173f33fb845379f58d92e02019847ede853e7a61df83f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3a6b69e5a5d7420dbcda9a6a023402f27c1d584233c06bfb3d09ae7efb243e31\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:46:59Z\\\",\\\"message\\\":\\\"2026-01-22T05:46:14+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_c2392d30-71bb-4850-932e-a51973540e40\\\\n2026-01-22T05:46:14+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_c2392d30-71bb-4850-932e-a51973540e40 to /host/opt/cni/bin/\\\\n2026-01-22T05:46:14Z [verbose] multus-daemon started\\\\n2026-01-22T05:46:14Z [verbose] Readiness Indicator file check\\\\n2026-01-22T05:46:59Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:47:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v4gxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jr6rw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.522983 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.523406 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.523556 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.523687 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.523845 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:22Z","lastTransitionTime":"2026-01-22T05:47:22Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.528934 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-q8l78" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83dfdde7-cd49-49e0-85a0-0165d464b2c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e3de35a667d3620666041da8bcc357baea90e882fba4d33ef6c9f38906699cac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08f02fb16a690440d7d8d645b0b93f1be8ae0d89661e8aecd0b65d0b998f0691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\
\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eafffe51e6a21f4470340a4e940639f0ac1a739ca098079c03b49ad27a1b0e02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10a5e929d6c7ba0e26e7e0e774833dcedafbd54359b64726e8ad97e6f7812365\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4246deb41f01235a6da71afc61877966e4d06317f6ed8ebaae57fb3cbadee691\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"reason\\\":\
\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6c957ddf8a5cbd4c7422c0cfc99bb5b49b5df4cd6356664e664be3388dd88f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7faa60875b881d13045a1dda5118abb7083a598c70d29edef401375867c13986\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kqn9c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-q8l78\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-22T05:47:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.540510 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 15:30:50.888245154 +0000 UTC Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.543619 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be15cabd-6272-4819-9132-cbfe3e8a3b22\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a7388e9b8023ce1ca28ede29592438fb4cd2f85800d46b027cb5b4e8c100ebd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://053a5f03773a50e6add05b0c1d8de76035b560baaa24f51d573ca1f8305a1048\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://053a5f03773a50e6add05b0c1d8de76035b560baaa24f51d573ca1f8305a1048\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.558415 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59b05e4bfee6067658a60220a411d49a83c5ca76039e31325ce7646cbbeeba3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://71b7f697413549ee75514e57fcb6e2d93c6b7e349b1cf93e8fc8844c96036d60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.570214 
4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c47f771dea621131d66ab686e6c28f7f5aeb129834c0f8308f467c2bd1b871e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.587695 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.607278 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"container
ID\\\":\\\"cri-o://9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:46:10Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0122 05:46:04.652334 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:46:04.654188 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-11537279/tls.crt::/tmp/serving-cert-11537279/tls.key\\\\\\\"\\\\nI0122 05:46:10.676992 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:46:10.680821 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:46:10.680851 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:46:10.680897 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:46:10.680911 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:46:10.690531 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0122 05:46:10.690586 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' 
detected.\\\\nW0122 05:46:10.690599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0122 05:46:10.690615 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0122 05:46:10.690623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0122 05:46:10.690632 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0122 05:46:10.690639 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0122 05:46:10.691151 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:46:10.696387 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:22 crc 
kubenswrapper[4933]: I0122 05:47:22.623451 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"88329ed7-908d-4584-abfb-6c24f9a764f8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5e260f75dcd418c5e606ef0ce7c06aa8db34b655acdc7ed4d7540f3ed59d873\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ad10cfd00a2bd6dab0704baa3988eab6b7b97812ef8a6074b9bc2c2a03df731\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://401030b22a7ca1989952ec1bf2d9d7d8a5b78e0cba875dcd6b1d2fe4f2548c54\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\
":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ada618a16d850d1ea940debe486a3836a49697a4742426d29bddb32d496a9d61\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.626542 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.626576 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.626588 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.626605 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.626617 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:22Z","lastTransitionTime":"2026-01-22T05:47:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.642534 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27ea089ef37f2af216c553e31f4852fe12489f919f95c9c6ec1c579babcedbb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.663177 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.676129 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.688823 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-nvpgt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"126473c5-96ff-4ca9-83c0-7548d7e219c0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64f3d49f15e75b73f18118e60b0e8cdae794506cff25ef11ea3a217db91a4b82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w6xgn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-nvpgt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-22T05:47:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.711273 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a721333-1932-4bb0-b384-c034492e59c4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\
"containerID\\\":\\\"cri-o://8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2344ed0bcca60932e6bdea03fbc86ae7bd867dd841e5bb1bd3544dac4543329c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd9cfc51c4b76a4a0fe6458da0e032e772b1876046969f7bd972e265fa5a8ca0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:46:40Z\\\",\\\"message\\\":\\\"rce:services.Addr{IP:\\\\\\\"10.217.4.153\\\\\\\", Port:5443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF0122 05:46:40.416204 6546 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:46:40Z is after 2025-08-24T17:21:41Z]\\\\nI0122 05:46:40.416173 6546 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-operator-lifecycle-manager/package-server-manager-metrics]} 
name:Service_openshift-operator-lifecycle-manage\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:39Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:47:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11
\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7lwp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z88sj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.729180 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.729217 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.729225 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.729238 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.729125 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2b01760-735d-4991-8e66-28149847868b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:45:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8018d50ee76b7bed70197f4085cb2157cbad29ecbb0f9c3ea6cb4ed0621877ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9aa4ed2d41484a7f47e481ae23e10c3a264edfdaddeb4c8f6ffd946694c88481\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa57c9c8f2cbba4f1282222cf6f3a29fd59efe4a378222e613eaeb32f0b71877\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:45:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://171c23458dabebaa0ddea08c10f662edcab7797ef6dffaab101c75d6a0737dff\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://171c23458dabebaa0ddea08c10f662edcab7797ef6dffaab101c75d6a0737dff\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:45:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:45:53Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:45:52Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.729248 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:22Z","lastTransitionTime":"2026-01-22T05:47:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.743403 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70f2db1d-40cb-4864-917b-3b99f69cdafb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b19005c1dbe0b643170cfc21cda4a46a6ab3630efbf5198dc7c19882bb5ff6a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://99fe3d10aa7ee2f1de1f5f23ed8c4c7ac9efe69ce9e366f02e2e8efca2c017e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dcfhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:11Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zfnsx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.753748 4933 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-7r526" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a2b23423-6793-44cd-b47e-dc4d25bbe3ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://78e689c029c4c4b19873187eddbdcd40bbc62b4e93d9a120a801e04db7c56ecc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hfvwc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-7r526\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.766012 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-28bzv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9a82407-aef2-4209-bb3f-6c89e11387e3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66a005229530dfbb93c94f5b93260aeb17f8852e46eed9c67d1558c04d1552a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vkg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ad051b09250b7060a688905e48c62f57620af633bc9a1e302e91eb076c92981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:46:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2vkg8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:24Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-28bzv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:22Z is after 2025-08-24T17:21:41Z" Jan 22 
05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.777570 4933 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-t8rgm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0902347a-c5e2-4891-812b-cfe6efc32261\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:46:25Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4ghgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4ghgv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:46:25Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-t8rgm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:47:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.832221 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.832281 4933 kubelet_node_status.go:724] "Recording event message 
for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.832296 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.832316 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.832331 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:22Z","lastTransitionTime":"2026-01-22T05:47:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.935497 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.935547 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.935563 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.935586 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:22 crc kubenswrapper[4933]: I0122 05:47:22.935603 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:22Z","lastTransitionTime":"2026-01-22T05:47:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.037967 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.038304 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.038403 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.038494 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.038573 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:23Z","lastTransitionTime":"2026-01-22T05:47:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.140878 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.140920 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.140933 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.140950 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.140965 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:23Z","lastTransitionTime":"2026-01-22T05:47:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.243788 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.243877 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.243900 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.243930 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.243952 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:23Z","lastTransitionTime":"2026-01-22T05:47:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.347002 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.347049 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.347062 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.347099 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.347111 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:23Z","lastTransitionTime":"2026-01-22T05:47:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
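The bodies the status manager is trying to send are strategic merge patches, which is why each one carries a $setElementOrder/conditions directive: it pins the final ordering of the conditions list while only the changed entries need to be spelled out under conditions. A sketch of that patch shape, built from placeholder values rather than the exact UIDs and timestamps above:

    // patchshape.go: illustrate the $setElementOrder strategic-merge-patch
    // layout used by the status updates in this log (all values below are
    // placeholders).
    package main

    import (
        "encoding/json"
        "fmt"
    )

    func main() {
        patch := map[string]any{
            "metadata": map[string]any{"uid": "00000000-0000-0000-0000-000000000000"},
            "status": map[string]any{
                // Ordering directive: names every condition type in final order.
                "$setElementOrder/conditions": []map[string]string{
                    {"type": "PodReadyToStartContainers"},
                    {"type": "Initialized"},
                    {"type": "Ready"},
                    {"type": "ContainersReady"},
                    {"type": "PodScheduled"},
                },
                // Only conditions whose fields changed are listed here.
                "conditions": []map[string]any{
                    {"type": "Ready", "status": "True",
                        "lastTransitionTime": "2026-01-22T05:46:12Z"},
                },
            },
        }
        out, err := json.Marshal(patch)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(out))
    }

The patches themselves are well formed; they are rejected only because the admission webhook that must approve them cannot complete its TLS handshake.
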
Has your network provider started?"} Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.450480 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.450553 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.450569 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.450618 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.450637 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:23Z","lastTransitionTime":"2026-01-22T05:47:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.490264 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.490400 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:47:23 crc kubenswrapper[4933]: E0122 05:47:23.490469 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:47:23 crc kubenswrapper[4933]: E0122 05:47:23.490591 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.490658 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:47:23 crc kubenswrapper[4933]: E0122 05:47:23.490777 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.491398 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:47:23 crc kubenswrapper[4933]: E0122 05:47:23.491701 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.541573 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-05 16:45:37.602740563 +0000 UTC Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.554358 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.554412 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.554429 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.554452 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.554469 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:23Z","lastTransitionTime":"2026-01-22T05:47:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.658226 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.658536 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.658614 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.658688 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.658759 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:23Z","lastTransitionTime":"2026-01-22T05:47:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
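The certificate_manager.go lines deserve a second look: the kubelet-serving certificate itself is fine until 2026-02-24, but the manager recomputes a randomized rotation deadline on each pass (05:47:23 drew 2026-01-05, 05:47:24 draws 2025-11-23), and both draws fall before the current time, so rotation is already due. Client-go style certificate managers pick the deadline at a jittered fraction of the certificate's validity period; a sketch of that idea follows, with an assumed NotBefore (one-year validity) and illustrative jitter bounds rather than the kubelet's exact policy:

    // rotation.go: jittered rotation deadline, drawn at a random point late
    // in the validity window (the fractions are illustrative; NotBefore is
    // assumed, and only the expiry below comes from the log).
    package main

    import (
        "fmt"
        "math/rand"
        "time"
    )

    func rotationDeadline(notBefore, notAfter time.Time) time.Time {
        validity := notAfter.Sub(notBefore)
        // Uniformly between 70% and 90% of the validity window.
        frac := 0.7 + 0.2*rand.Float64()
        return notBefore.Add(time.Duration(float64(validity) * frac))
    }

    func main() {
        notBefore := time.Date(2025, 2, 24, 5, 53, 3, 0, time.UTC) // assumed
        notAfter := time.Date(2026, 2, 24, 5, 53, 3, 0, time.UTC)  // from the log
        deadline := rotationDeadline(notBefore, notAfter)
        fmt.Printf("rotation deadline %s, already due: %v\n",
            deadline.Format(time.RFC3339), time.Now().After(deadline))
    }

A fresh jitter draw on each attempt would explain why the logged deadline moves between 05:47:23 and 05:47:24 while the expiry stays fixed.
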
Has your network provider started?"} Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.762055 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.762156 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.762180 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.762209 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.762231 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:23Z","lastTransitionTime":"2026-01-22T05:47:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.865871 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.866240 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.866389 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.866529 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.866660 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:23Z","lastTransitionTime":"2026-01-22T05:47:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.970040 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.970128 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.970151 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.970178 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:23 crc kubenswrapper[4933]: I0122 05:47:23.970195 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:23Z","lastTransitionTime":"2026-01-22T05:47:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
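The NotReady heartbeat repeating through this stretch comes from the runtime network check: the kubelet keeps the node's Ready condition False until a CNI configuration file appears in /etc/kubernetes/cni/net.d/, and on an OVN-Kubernetes cluster that file is written by ovnkube, whose controller is crash-looping on the same expired certificate (see the ovnkube-controller exit message above). A stdlib sketch of the directory test, not the libcni code the kubelet actually uses:

    // cniready.go: look for CNI config files in a conf dir, mirroring the
    // "no CNI configuration file in ..." condition (stdlib sketch, not the
    // actual libcni implementation).
    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    func hasCNIConfig(dir string) bool {
        entries, err := os.ReadDir(dir)
        if err != nil {
            return false // a missing or unreadable dir counts as "not ready"
        }
        for _, e := range entries {
            if e.IsDir() {
                continue
            }
            switch filepath.Ext(e.Name()) {
            case ".conf", ".conflist", ".json": // extensions libcni accepts
                return true
            }
        }
        return false
    }

    func main() {
        dir := "/etc/kubernetes/cni/net.d"
        if !hasCNIConfig(dir) {
            fmt.Printf("no CNI configuration file in %s. Has your network provider started?\n", dir)
            return
        }
        fmt.Println("CNI configuration present")
    }
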
Has your network provider started?"} Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.073689 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.074111 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.074303 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.074506 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.074695 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:24Z","lastTransitionTime":"2026-01-22T05:47:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.178004 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.178106 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.178124 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.178147 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.178165 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:24Z","lastTransitionTime":"2026-01-22T05:47:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.280775 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.281225 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.281429 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.281634 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.281832 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:24Z","lastTransitionTime":"2026-01-22T05:47:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.384887 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.385255 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.385427 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.385607 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.385738 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:24Z","lastTransitionTime":"2026-01-22T05:47:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.488480 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.488545 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.488571 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.488617 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.488642 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:24Z","lastTransitionTime":"2026-01-22T05:47:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.541831 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-23 13:54:36.183576845 +0000 UTC Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.591551 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.591606 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.591616 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.591633 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.591643 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:24Z","lastTransitionTime":"2026-01-22T05:47:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.695223 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.695299 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.695322 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.695352 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.695374 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:24Z","lastTransitionTime":"2026-01-22T05:47:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.798197 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.798235 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.798246 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.798262 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.798273 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:24Z","lastTransitionTime":"2026-01-22T05:47:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.901227 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.901316 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.901346 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.901378 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:24 crc kubenswrapper[4933]: I0122 05:47:24.901400 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:24Z","lastTransitionTime":"2026-01-22T05:47:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.004818 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.004874 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.004900 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.004948 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.004966 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:25Z","lastTransitionTime":"2026-01-22T05:47:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.108403 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.108485 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.108508 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.108537 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.108558 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:25Z","lastTransitionTime":"2026-01-22T05:47:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.210930 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.210969 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.210978 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.210991 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.211004 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:25Z","lastTransitionTime":"2026-01-22T05:47:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.313671 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.313746 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.313771 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.313802 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.313828 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:25Z","lastTransitionTime":"2026-01-22T05:47:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.415789 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.415822 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.415831 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.415843 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.415852 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:25Z","lastTransitionTime":"2026-01-22T05:47:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.489661 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.489725 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:47:25 crc kubenswrapper[4933]: E0122 05:47:25.489809 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.489850 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.489916 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:47:25 crc kubenswrapper[4933]: E0122 05:47:25.490052 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:47:25 crc kubenswrapper[4933]: E0122 05:47:25.490267 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:47:25 crc kubenswrapper[4933]: E0122 05:47:25.490310 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.519036 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.519125 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.519142 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.519165 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.519184 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:25Z","lastTransitionTime":"2026-01-22T05:47:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.542107 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-13 16:41:43.468834937 +0000 UTC Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.625401 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.625441 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.625453 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.625470 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.625481 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:25Z","lastTransitionTime":"2026-01-22T05:47:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.728736 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.728781 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.728798 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.728820 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.728839 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:25Z","lastTransitionTime":"2026-01-22T05:47:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.831755 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.831806 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.831824 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.831847 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.831864 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:25Z","lastTransitionTime":"2026-01-22T05:47:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.934778 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.934825 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.934843 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.934865 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:25 crc kubenswrapper[4933]: I0122 05:47:25.934881 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:25Z","lastTransitionTime":"2026-01-22T05:47:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.038401 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.038462 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.038488 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.038511 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.038528 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:26Z","lastTransitionTime":"2026-01-22T05:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.141141 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.141263 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.141287 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.141316 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.141337 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:26Z","lastTransitionTime":"2026-01-22T05:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.244487 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.244612 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.244638 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.244680 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.244707 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:26Z","lastTransitionTime":"2026-01-22T05:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.348422 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.348492 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.348510 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.348536 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.348553 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:26Z","lastTransitionTime":"2026-01-22T05:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.452268 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.452624 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.452811 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.452976 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.453356 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:26Z","lastTransitionTime":"2026-01-22T05:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.542598 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 20:38:45.131166342 +0000 UTC Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.556796 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.557111 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.557313 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.557525 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.557749 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:26Z","lastTransitionTime":"2026-01-22T05:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.661242 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.662136 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.662354 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.662529 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.662725 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:26Z","lastTransitionTime":"2026-01-22T05:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.766024 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.766069 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.766100 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.766118 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.766130 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:26Z","lastTransitionTime":"2026-01-22T05:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.851786 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.851836 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.851854 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.851871 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.851882 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:26Z","lastTransitionTime":"2026-01-22T05:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.876181 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.876212 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.876223 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.876241 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.876272 4933 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:47:26Z","lastTransitionTime":"2026-01-22T05:47:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.909684 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-8pnx6"] Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.910807 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-8pnx6" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.914568 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.915429 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.915930 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.915960 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.934950 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=28.934930998 podStartE2EDuration="28.934930998s" podCreationTimestamp="2026-01-22 05:46:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:47:26.934879567 +0000 UTC m=+94.772004950" watchObservedRunningTime="2026-01-22 05:47:26.934930998 +0000 UTC m=+94.772056351" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.980921 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1af961d2-576b-42e0-8eda-73c134fb8471-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-8pnx6\" (UID: \"1af961d2-576b-42e0-8eda-73c134fb8471\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-8pnx6" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.980997 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/1af961d2-576b-42e0-8eda-73c134fb8471-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-8pnx6\" (UID: \"1af961d2-576b-42e0-8eda-73c134fb8471\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-8pnx6" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.981023 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1af961d2-576b-42e0-8eda-73c134fb8471-service-ca\") pod \"cluster-version-operator-5c965bbfc6-8pnx6\" (UID: \"1af961d2-576b-42e0-8eda-73c134fb8471\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-8pnx6" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.981044 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1af961d2-576b-42e0-8eda-73c134fb8471-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-8pnx6\" (UID: \"1af961d2-576b-42e0-8eda-73c134fb8471\") " 
pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-8pnx6" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.981118 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/1af961d2-576b-42e0-8eda-73c134fb8471-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-8pnx6\" (UID: \"1af961d2-576b-42e0-8eda-73c134fb8471\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-8pnx6" Jan 22 05:47:26 crc kubenswrapper[4933]: I0122 05:47:26.995251 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=75.995230011 podStartE2EDuration="1m15.995230011s" podCreationTimestamp="2026-01-22 05:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:47:26.994472773 +0000 UTC m=+94.831598126" watchObservedRunningTime="2026-01-22 05:47:26.995230011 +0000 UTC m=+94.832355394" Jan 22 05:47:27 crc kubenswrapper[4933]: I0122 05:47:27.016372 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=71.016348645 podStartE2EDuration="1m11.016348645s" podCreationTimestamp="2026-01-22 05:46:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:47:27.015612127 +0000 UTC m=+94.852737480" watchObservedRunningTime="2026-01-22 05:47:27.016348645 +0000 UTC m=+94.853474038" Jan 22 05:47:27 crc kubenswrapper[4933]: I0122 05:47:27.071989 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-nvpgt" podStartSLOduration=77.071975399 podStartE2EDuration="1m17.071975399s" podCreationTimestamp="2026-01-22 05:46:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:47:27.071971529 +0000 UTC m=+94.909096922" watchObservedRunningTime="2026-01-22 05:47:27.071975399 +0000 UTC m=+94.909100752" Jan 22 05:47:27 crc kubenswrapper[4933]: I0122 05:47:27.082172 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/1af961d2-576b-42e0-8eda-73c134fb8471-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-8pnx6\" (UID: \"1af961d2-576b-42e0-8eda-73c134fb8471\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-8pnx6" Jan 22 05:47:27 crc kubenswrapper[4933]: I0122 05:47:27.082226 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1af961d2-576b-42e0-8eda-73c134fb8471-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-8pnx6\" (UID: \"1af961d2-576b-42e0-8eda-73c134fb8471\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-8pnx6" Jan 22 05:47:27 crc kubenswrapper[4933]: I0122 05:47:27.082273 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/1af961d2-576b-42e0-8eda-73c134fb8471-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-8pnx6\" (UID: \"1af961d2-576b-42e0-8eda-73c134fb8471\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-8pnx6" 
Jan 22 05:47:27 crc kubenswrapper[4933]: I0122 05:47:27.082297 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1af961d2-576b-42e0-8eda-73c134fb8471-service-ca\") pod \"cluster-version-operator-5c965bbfc6-8pnx6\" (UID: \"1af961d2-576b-42e0-8eda-73c134fb8471\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-8pnx6"
Jan 22 05:47:27 crc kubenswrapper[4933]: I0122 05:47:27.082317 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1af961d2-576b-42e0-8eda-73c134fb8471-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-8pnx6\" (UID: \"1af961d2-576b-42e0-8eda-73c134fb8471\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-8pnx6"
Jan 22 05:47:27 crc kubenswrapper[4933]: I0122 05:47:27.082351 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/1af961d2-576b-42e0-8eda-73c134fb8471-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-8pnx6\" (UID: \"1af961d2-576b-42e0-8eda-73c134fb8471\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-8pnx6"
Jan 22 05:47:27 crc kubenswrapper[4933]: I0122 05:47:27.082435 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/1af961d2-576b-42e0-8eda-73c134fb8471-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-8pnx6\" (UID: \"1af961d2-576b-42e0-8eda-73c134fb8471\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-8pnx6"
Jan 22 05:47:27 crc kubenswrapper[4933]: I0122 05:47:27.083627 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1af961d2-576b-42e0-8eda-73c134fb8471-service-ca\") pod \"cluster-version-operator-5c965bbfc6-8pnx6\" (UID: \"1af961d2-576b-42e0-8eda-73c134fb8471\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-8pnx6"
Jan 22 05:47:27 crc kubenswrapper[4933]: I0122 05:47:27.095064 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" podStartSLOduration=76.095049199 podStartE2EDuration="1m16.095049199s" podCreationTimestamp="2026-01-22 05:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:47:27.094290081 +0000 UTC m=+94.931415434" watchObservedRunningTime="2026-01-22 05:47:27.095049199 +0000 UTC m=+94.932174552"
Jan 22 05:47:27 crc kubenswrapper[4933]: I0122 05:47:27.097022 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1af961d2-576b-42e0-8eda-73c134fb8471-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-8pnx6\" (UID: \"1af961d2-576b-42e0-8eda-73c134fb8471\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-8pnx6"
Jan 22 05:47:27 crc kubenswrapper[4933]: I0122 05:47:27.098025 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1af961d2-576b-42e0-8eda-73c134fb8471-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-8pnx6\" (UID: \"1af961d2-576b-42e0-8eda-73c134fb8471\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-8pnx6"
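The reconciler entries above walk each of the five CVO volumes through the same three stages: VerifyControllerAttachedVolume, then MountVolume started, then MountVolume.SetUp succeeded. A toy Go sketch of that per-volume progression follows; it is illustrative only, not kubelet code, and the two-pass loop is an assumption about how repeated reconcile passes drive every volume to done.

// volstages: trace volumes through the three stages seen in the log.
package main

import "fmt"

type volumeState int

const (
	attachedVerified volumeState = iota
	mountStarted
	setupSucceeded
)

// advance moves one volume a single stage forward, logging the transition.
func advance(name string, s volumeState) volumeState {
	switch s {
	case attachedVerified:
		fmt.Printf("operationExecutor.MountVolume started for volume %q\n", name)
		return mountStarted
	case mountStarted:
		fmt.Printf("MountVolume.SetUp succeeded for volume %q\n", name)
		return setupSucceeded
	}
	return s
}

func main() {
	vols := []string{"etc-ssl-certs", "serving-cert", "etc-cvo-updatepayloads", "service-ca", "kube-api-access"}
	state := map[string]volumeState{}
	for _, v := range vols {
		fmt.Printf("operationExecutor.VerifyControllerAttachedVolume started for volume %q\n", v)
		state[v] = attachedVerified
	}
	for pass := 0; pass < 2; pass++ { // two passes: start mounts, then finish setup
		for _, v := range vols {
			state[v] = advance(v, state[v])
		}
	}
}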
Jan 22 05:47:27 crc kubenswrapper[4933]: I0122 05:47:27.107605 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=45.107585476 podStartE2EDuration="45.107585476s" podCreationTimestamp="2026-01-22 05:46:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:47:27.107012083 +0000 UTC m=+94.944137436" watchObservedRunningTime="2026-01-22 05:47:27.107585476 +0000 UTC m=+94.944710839"
Jan 22 05:47:27 crc kubenswrapper[4933]: I0122 05:47:27.119878 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podStartSLOduration=76.119832637 podStartE2EDuration="1m16.119832637s" podCreationTimestamp="2026-01-22 05:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:47:27.119733805 +0000 UTC m=+94.956859158" watchObservedRunningTime="2026-01-22 05:47:27.119832637 +0000 UTC m=+94.956957990"
Jan 22 05:47:27 crc kubenswrapper[4933]: I0122 05:47:27.129465 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-7r526" podStartSLOduration=77.129448898 podStartE2EDuration="1m17.129448898s" podCreationTimestamp="2026-01-22 05:46:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:47:27.128537997 +0000 UTC m=+94.965663350" watchObservedRunningTime="2026-01-22 05:47:27.129448898 +0000 UTC m=+94.966574251"
Jan 22 05:47:27 crc kubenswrapper[4933]: I0122 05:47:27.153942 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-28bzv" podStartSLOduration=76.153927319 podStartE2EDuration="1m16.153927319s" podCreationTimestamp="2026-01-22 05:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:47:27.140791358 +0000 UTC m=+94.977916721" watchObservedRunningTime="2026-01-22 05:47:27.153927319 +0000 UTC m=+94.991052672"
Jan 22 05:47:27 crc kubenswrapper[4933]: I0122 05:47:27.167873 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-jr6rw" podStartSLOduration=76.167855718 podStartE2EDuration="1m16.167855718s" podCreationTimestamp="2026-01-22 05:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:47:27.167003948 +0000 UTC m=+95.004129311" watchObservedRunningTime="2026-01-22 05:47:27.167855718 +0000 UTC m=+95.004981071"
Jan 22 05:47:27 crc kubenswrapper[4933]: I0122 05:47:27.182057 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-q8l78" podStartSLOduration=76.182040583 podStartE2EDuration="1m16.182040583s" podCreationTimestamp="2026-01-22 05:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:47:27.181321277 +0000 UTC m=+95.018446680" watchObservedRunningTime="2026-01-22 05:47:27.182040583 +0000 UTC m=+95.019165946"
Jan 22 05:47:27 crc kubenswrapper[4933]: I0122 05:47:27.237804 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-8pnx6"
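In every "Observed pod startup duration" entry above, both pull timestamps are the Go zero time, so podStartSLOduration comes out equal to the end-to-end figure: watchObservedRunningTime minus podCreationTimestamp. The sketch below checks the kube-rbac-proxy-crio-crc numbers; the subtraction uses values copied from the log, while treating the SLO duration as E2E-minus-pull-time is an assumption about the tracker, not read from its source.

// startupslo: reproduce the arithmetic behind podStartSLOduration.
package main

import (
	"fmt"
	"time"
)

func main() {
	layout := "2006-01-02 15:04:05 -0700 MST"
	created, _ := time.Parse(layout, "2026-01-22 05:46:58 +0000 UTC")
	running, _ := time.Parse(layout, "2026-01-22 05:47:26.934930998 +0000 UTC")
	e2e := running.Sub(created)
	var pullTime time.Duration // firstStartedPulling/lastFinishedPulling are zero here
	slo := e2e - pullTime
	fmt.Println(e2e, slo) // both 28.934930998s, matching podStartSLOduration
}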
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-8pnx6" Jan 22 05:47:27 crc kubenswrapper[4933]: I0122 05:47:27.489853 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:47:27 crc kubenswrapper[4933]: I0122 05:47:27.490162 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:47:27 crc kubenswrapper[4933]: E0122 05:47:27.490357 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:47:27 crc kubenswrapper[4933]: I0122 05:47:27.490422 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:47:27 crc kubenswrapper[4933]: E0122 05:47:27.490559 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:47:27 crc kubenswrapper[4933]: E0122 05:47:27.490870 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:47:27 crc kubenswrapper[4933]: I0122 05:47:27.490977 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:47:27 crc kubenswrapper[4933]: E0122 05:47:27.491366 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:47:27 crc kubenswrapper[4933]: I0122 05:47:27.543179 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-12 05:03:49.599470935 +0000 UTC Jan 22 05:47:27 crc kubenswrapper[4933]: I0122 05:47:27.543262 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Rotating certificates Jan 22 05:47:27 crc kubenswrapper[4933]: I0122 05:47:27.554991 4933 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Jan 22 05:47:28 crc kubenswrapper[4933]: I0122 05:47:28.230612 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-8pnx6" event={"ID":"1af961d2-576b-42e0-8eda-73c134fb8471","Type":"ContainerStarted","Data":"bedd3a13bccde4296d737ddcdbbdd3de810846dc763e0e93012def259d04cfa7"} Jan 22 05:47:28 crc kubenswrapper[4933]: I0122 05:47:28.232188 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-8pnx6" event={"ID":"1af961d2-576b-42e0-8eda-73c134fb8471","Type":"ContainerStarted","Data":"7392bf700b6cb45420e7f5b420e22673a2f679e74c9459569d45abee12f2a5a5"} Jan 22 05:47:28 crc kubenswrapper[4933]: I0122 05:47:28.247975 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-8pnx6" podStartSLOduration=77.247953223 podStartE2EDuration="1m17.247953223s" podCreationTimestamp="2026-01-22 05:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:47:28.247136014 +0000 UTC m=+96.084261437" watchObservedRunningTime="2026-01-22 05:47:28.247953223 +0000 UTC m=+96.085078586" Jan 22 05:47:29 crc kubenswrapper[4933]: I0122 05:47:29.489790 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:47:29 crc kubenswrapper[4933]: I0122 05:47:29.489818 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:47:29 crc kubenswrapper[4933]: I0122 05:47:29.489952 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:47:29 crc kubenswrapper[4933]: E0122 05:47:29.490063 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:47:29 crc kubenswrapper[4933]: I0122 05:47:29.490127 4933 util.go:30] "No sandbox for pod can be found. 
Jan 22 05:47:29 crc kubenswrapper[4933]: E0122 05:47:29.490293 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 22 05:47:29 crc kubenswrapper[4933]: E0122 05:47:29.490434 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261"
Jan 22 05:47:29 crc kubenswrapper[4933]: E0122 05:47:29.490550 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 22 05:47:29 crc kubenswrapper[4933]: I0122 05:47:29.590686 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0902347a-c5e2-4891-812b-cfe6efc32261-metrics-certs\") pod \"network-metrics-daemon-t8rgm\" (UID: \"0902347a-c5e2-4891-812b-cfe6efc32261\") " pod="openshift-multus/network-metrics-daemon-t8rgm"
Jan 22 05:47:29 crc kubenswrapper[4933]: E0122 05:47:29.590942 4933 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 22 05:47:29 crc kubenswrapper[4933]: E0122 05:47:29.591039 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0902347a-c5e2-4891-812b-cfe6efc32261-metrics-certs podName:0902347a-c5e2-4891-812b-cfe6efc32261 nodeName:}" failed. No retries permitted until 2026-01-22 05:48:33.591015828 +0000 UTC m=+161.428141191 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0902347a-c5e2-4891-812b-cfe6efc32261-metrics-certs") pod "network-metrics-daemon-t8rgm" (UID: "0902347a-c5e2-4891-812b-cfe6efc32261") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 22 05:47:30 crc kubenswrapper[4933]: I0122 05:47:30.518787 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"]
Jan 22 05:47:31 crc kubenswrapper[4933]: I0122 05:47:31.490277 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 22 05:47:31 crc kubenswrapper[4933]: I0122 05:47:31.490345 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 22 05:47:31 crc kubenswrapper[4933]: I0122 05:47:31.490352 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm"
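The nestedpendingoperations entry above defers the failed metrics-certs mount with durationBeforeRetry 1m4s, i.e. 64s, which is the shape of a doubling backoff: a 500ms delay doubled once per consecutive failure reaches 64s on the eighth failure. The sketch below reproduces that shape; the 500ms base and the cap are assumptions for illustration, not constants taken from the log or from kubelet source.

// retrybackoff: doubling delay per consecutive failure, with a cap.
package main

import (
	"fmt"
	"time"
)

// durationBeforeRetry doubles the initial delay once per failure beyond
// the first, clamping at max.
func durationBeforeRetry(failures int, initial, max time.Duration) time.Duration {
	d := initial
	for i := 1; i < failures; i++ {
		d *= 2
		if d > max {
			return max
		}
	}
	return d
}

func main() {
	for n := 1; n <= 9; n++ {
		fmt.Printf("failure %d -> %v\n", n, durationBeforeRetry(n, 500*time.Millisecond, 2*time.Minute+2*time.Second))
	}
	// failure 8 -> 1m4s, matching the log's durationBeforeRetry
}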
Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:47:31 crc kubenswrapper[4933]: E0122 05:47:31.490432 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:47:31 crc kubenswrapper[4933]: E0122 05:47:31.490481 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:47:31 crc kubenswrapper[4933]: E0122 05:47:31.490542 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:47:31 crc kubenswrapper[4933]: I0122 05:47:31.490294 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:47:31 crc kubenswrapper[4933]: E0122 05:47:31.491420 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:47:32 crc kubenswrapper[4933]: I0122 05:47:32.245982 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-z88sj_6a721333-1932-4bb0-b384-c034492e59c4/ovnkube-controller/3.log" Jan 22 05:47:32 crc kubenswrapper[4933]: I0122 05:47:32.247850 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-z88sj_6a721333-1932-4bb0-b384-c034492e59c4/ovnkube-controller/2.log" Jan 22 05:47:32 crc kubenswrapper[4933]: I0122 05:47:32.251384 4933 generic.go:334] "Generic (PLEG): container finished" podID="6a721333-1932-4bb0-b384-c034492e59c4" containerID="2344ed0bcca60932e6bdea03fbc86ae7bd867dd841e5bb1bd3544dac4543329c" exitCode=1 Jan 22 05:47:32 crc kubenswrapper[4933]: I0122 05:47:32.251421 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" event={"ID":"6a721333-1932-4bb0-b384-c034492e59c4","Type":"ContainerDied","Data":"2344ed0bcca60932e6bdea03fbc86ae7bd867dd841e5bb1bd3544dac4543329c"} Jan 22 05:47:32 crc kubenswrapper[4933]: I0122 05:47:32.251459 4933 scope.go:117] "RemoveContainer" containerID="cd9cfc51c4b76a4a0fe6458da0e032e772b1876046969f7bd972e265fa5a8ca0" Jan 22 05:47:32 crc kubenswrapper[4933]: I0122 05:47:32.252622 4933 scope.go:117] "RemoveContainer" containerID="2344ed0bcca60932e6bdea03fbc86ae7bd867dd841e5bb1bd3544dac4543329c" Jan 22 05:47:32 crc kubenswrapper[4933]: E0122 05:47:32.252871 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-z88sj_openshift-ovn-kubernetes(6a721333-1932-4bb0-b384-c034492e59c4)\"" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" podUID="6a721333-1932-4bb0-b384-c034492e59c4" Jan 22 05:47:33 crc kubenswrapper[4933]: I0122 05:47:33.260003 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-z88sj_6a721333-1932-4bb0-b384-c034492e59c4/ovnkube-controller/3.log" Jan 22 05:47:33 crc kubenswrapper[4933]: I0122 05:47:33.490580 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:47:33 crc kubenswrapper[4933]: I0122 05:47:33.490627 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:47:33 crc kubenswrapper[4933]: E0122 05:47:33.490755 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:47:33 crc kubenswrapper[4933]: I0122 05:47:33.490809 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:47:33 crc kubenswrapper[4933]: E0122 05:47:33.490977 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:47:33 crc kubenswrapper[4933]: E0122 05:47:33.490872 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:47:33 crc kubenswrapper[4933]: I0122 05:47:33.490825 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:47:33 crc kubenswrapper[4933]: E0122 05:47:33.491055 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:47:35 crc kubenswrapper[4933]: I0122 05:47:35.490453 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:47:35 crc kubenswrapper[4933]: I0122 05:47:35.490461 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:47:35 crc kubenswrapper[4933]: I0122 05:47:35.490531 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:47:35 crc kubenswrapper[4933]: I0122 05:47:35.490514 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:47:35 crc kubenswrapper[4933]: E0122 05:47:35.490774 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:47:35 crc kubenswrapper[4933]: E0122 05:47:35.490938 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:47:35 crc kubenswrapper[4933]: E0122 05:47:35.491137 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:47:35 crc kubenswrapper[4933]: E0122 05:47:35.491642 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:47:37 crc kubenswrapper[4933]: I0122 05:47:37.489849 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:47:37 crc kubenswrapper[4933]: I0122 05:47:37.490206 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:47:37 crc kubenswrapper[4933]: E0122 05:47:37.490257 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:47:37 crc kubenswrapper[4933]: I0122 05:47:37.490328 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:47:37 crc kubenswrapper[4933]: I0122 05:47:37.490333 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:47:37 crc kubenswrapper[4933]: E0122 05:47:37.490556 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:47:37 crc kubenswrapper[4933]: E0122 05:47:37.490897 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:47:37 crc kubenswrapper[4933]: E0122 05:47:37.491039 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:47:39 crc kubenswrapper[4933]: I0122 05:47:39.490487 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:47:39 crc kubenswrapper[4933]: I0122 05:47:39.490650 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:47:39 crc kubenswrapper[4933]: I0122 05:47:39.490725 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:47:39 crc kubenswrapper[4933]: I0122 05:47:39.490802 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:47:39 crc kubenswrapper[4933]: E0122 05:47:39.490802 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:47:39 crc kubenswrapper[4933]: E0122 05:47:39.490952 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:47:39 crc kubenswrapper[4933]: E0122 05:47:39.491060 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:47:39 crc kubenswrapper[4933]: E0122 05:47:39.491414 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:47:41 crc kubenswrapper[4933]: I0122 05:47:41.489777 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:47:41 crc kubenswrapper[4933]: E0122 05:47:41.489940 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:47:41 crc kubenswrapper[4933]: I0122 05:47:41.489790 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:47:41 crc kubenswrapper[4933]: I0122 05:47:41.489798 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:47:41 crc kubenswrapper[4933]: E0122 05:47:41.490128 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:47:41 crc kubenswrapper[4933]: E0122 05:47:41.490185 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:47:41 crc kubenswrapper[4933]: I0122 05:47:41.489774 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:47:41 crc kubenswrapper[4933]: E0122 05:47:41.490306 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:47:43 crc kubenswrapper[4933]: I0122 05:47:43.490172 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:47:43 crc kubenswrapper[4933]: E0122 05:47:43.490354 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:47:43 crc kubenswrapper[4933]: I0122 05:47:43.490667 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:47:43 crc kubenswrapper[4933]: E0122 05:47:43.490767 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:47:43 crc kubenswrapper[4933]: I0122 05:47:43.490943 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:47:43 crc kubenswrapper[4933]: E0122 05:47:43.491023 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:47:43 crc kubenswrapper[4933]: I0122 05:47:43.490188 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:47:43 crc kubenswrapper[4933]: I0122 05:47:43.491242 4933 scope.go:117] "RemoveContainer" containerID="2344ed0bcca60932e6bdea03fbc86ae7bd867dd841e5bb1bd3544dac4543329c" Jan 22 05:47:43 crc kubenswrapper[4933]: E0122 05:47:43.491298 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:47:43 crc kubenswrapper[4933]: E0122 05:47:43.491473 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-z88sj_openshift-ovn-kubernetes(6a721333-1932-4bb0-b384-c034492e59c4)\"" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" podUID="6a721333-1932-4bb0-b384-c034492e59c4" Jan 22 05:47:43 crc kubenswrapper[4933]: I0122 05:47:43.522867 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=13.522850671 podStartE2EDuration="13.522850671s" podCreationTimestamp="2026-01-22 05:47:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:47:32.346704171 +0000 UTC m=+100.183829534" watchObservedRunningTime="2026-01-22 05:47:43.522850671 +0000 UTC m=+111.359976024" Jan 22 05:47:45 crc kubenswrapper[4933]: I0122 05:47:45.490457 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:47:45 crc kubenswrapper[4933]: I0122 05:47:45.490501 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:47:45 crc kubenswrapper[4933]: I0122 05:47:45.490475 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:47:45 crc kubenswrapper[4933]: E0122 05:47:45.490598 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:47:45 crc kubenswrapper[4933]: I0122 05:47:45.490606 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:47:45 crc kubenswrapper[4933]: E0122 05:47:45.490698 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:47:45 crc kubenswrapper[4933]: E0122 05:47:45.490839 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:47:45 crc kubenswrapper[4933]: E0122 05:47:45.490970 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:47:46 crc kubenswrapper[4933]: I0122 05:47:46.308756 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-jr6rw_f066dd84-0cd5-4e8c-8411-cf12cc83ea7d/kube-multus/1.log" Jan 22 05:47:46 crc kubenswrapper[4933]: I0122 05:47:46.309436 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-jr6rw_f066dd84-0cd5-4e8c-8411-cf12cc83ea7d/kube-multus/0.log" Jan 22 05:47:46 crc kubenswrapper[4933]: I0122 05:47:46.309512 4933 generic.go:334] "Generic (PLEG): container finished" podID="f066dd84-0cd5-4e8c-8411-cf12cc83ea7d" containerID="68112ca379a4d93e242173f33fb845379f58d92e02019847ede853e7a61df83f" exitCode=1 Jan 22 05:47:46 crc kubenswrapper[4933]: I0122 05:47:46.309555 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-jr6rw" event={"ID":"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d","Type":"ContainerDied","Data":"68112ca379a4d93e242173f33fb845379f58d92e02019847ede853e7a61df83f"} Jan 22 05:47:46 crc kubenswrapper[4933]: I0122 05:47:46.309616 4933 scope.go:117] "RemoveContainer" containerID="3a6b69e5a5d7420dbcda9a6a023402f27c1d584233c06bfb3d09ae7efb243e31" Jan 22 05:47:46 crc kubenswrapper[4933]: I0122 05:47:46.310354 4933 scope.go:117] "RemoveContainer" containerID="68112ca379a4d93e242173f33fb845379f58d92e02019847ede853e7a61df83f" Jan 22 05:47:46 crc kubenswrapper[4933]: E0122 05:47:46.310669 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-jr6rw_openshift-multus(f066dd84-0cd5-4e8c-8411-cf12cc83ea7d)\"" pod="openshift-multus/multus-jr6rw" podUID="f066dd84-0cd5-4e8c-8411-cf12cc83ea7d" Jan 22 05:47:47 crc kubenswrapper[4933]: I0122 05:47:47.314193 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-jr6rw_f066dd84-0cd5-4e8c-8411-cf12cc83ea7d/kube-multus/1.log" Jan 22 05:47:47 crc kubenswrapper[4933]: I0122 05:47:47.490181 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:47:47 crc kubenswrapper[4933]: I0122 05:47:47.490242 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:47:47 crc kubenswrapper[4933]: I0122 05:47:47.490201 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:47:47 crc kubenswrapper[4933]: E0122 05:47:47.490383 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:47:47 crc kubenswrapper[4933]: I0122 05:47:47.490250 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:47:47 crc kubenswrapper[4933]: E0122 05:47:47.490594 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:47:47 crc kubenswrapper[4933]: E0122 05:47:47.490764 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:47:47 crc kubenswrapper[4933]: E0122 05:47:47.490940 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:47:49 crc kubenswrapper[4933]: I0122 05:47:49.490234 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:47:49 crc kubenswrapper[4933]: E0122 05:47:49.490371 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:47:49 crc kubenswrapper[4933]: I0122 05:47:49.490448 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:47:49 crc kubenswrapper[4933]: E0122 05:47:49.490580 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:47:49 crc kubenswrapper[4933]: I0122 05:47:49.490451 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:47:49 crc kubenswrapper[4933]: I0122 05:47:49.490451 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:47:49 crc kubenswrapper[4933]: E0122 05:47:49.490694 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:47:49 crc kubenswrapper[4933]: E0122 05:47:49.490889 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:47:51 crc kubenswrapper[4933]: I0122 05:47:51.489952 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:47:51 crc kubenswrapper[4933]: I0122 05:47:51.490348 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:47:51 crc kubenswrapper[4933]: I0122 05:47:51.490382 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:47:51 crc kubenswrapper[4933]: I0122 05:47:51.490452 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:47:51 crc kubenswrapper[4933]: E0122 05:47:51.490633 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:47:51 crc kubenswrapper[4933]: E0122 05:47:51.490798 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:47:51 crc kubenswrapper[4933]: E0122 05:47:51.490908 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:47:51 crc kubenswrapper[4933]: E0122 05:47:51.491097 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:47:52 crc kubenswrapper[4933]: E0122 05:47:52.501952 4933 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Jan 22 05:47:52 crc kubenswrapper[4933]: E0122 05:47:52.564869 4933 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 22 05:47:53 crc kubenswrapper[4933]: I0122 05:47:53.490828 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:47:53 crc kubenswrapper[4933]: I0122 05:47:53.490831 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:47:53 crc kubenswrapper[4933]: I0122 05:47:53.490883 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:47:53 crc kubenswrapper[4933]: I0122 05:47:53.491017 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:47:53 crc kubenswrapper[4933]: E0122 05:47:53.491402 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:47:53 crc kubenswrapper[4933]: E0122 05:47:53.491752 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:47:53 crc kubenswrapper[4933]: E0122 05:47:53.492627 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:47:53 crc kubenswrapper[4933]: E0122 05:47:53.492739 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:47:55 crc kubenswrapper[4933]: I0122 05:47:55.489691 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:47:55 crc kubenswrapper[4933]: E0122 05:47:55.489871 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:47:55 crc kubenswrapper[4933]: I0122 05:47:55.489896 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:47:55 crc kubenswrapper[4933]: I0122 05:47:55.489971 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:47:55 crc kubenswrapper[4933]: E0122 05:47:55.490165 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:47:55 crc kubenswrapper[4933]: E0122 05:47:55.490247 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:47:55 crc kubenswrapper[4933]: I0122 05:47:55.490659 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:47:55 crc kubenswrapper[4933]: E0122 05:47:55.490815 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:47:57 crc kubenswrapper[4933]: I0122 05:47:57.490497 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:47:57 crc kubenswrapper[4933]: I0122 05:47:57.490553 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:47:57 crc kubenswrapper[4933]: I0122 05:47:57.490716 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:47:57 crc kubenswrapper[4933]: E0122 05:47:57.490997 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:47:57 crc kubenswrapper[4933]: I0122 05:47:57.491198 4933 scope.go:117] "RemoveContainer" containerID="68112ca379a4d93e242173f33fb845379f58d92e02019847ede853e7a61df83f" Jan 22 05:47:57 crc kubenswrapper[4933]: I0122 05:47:57.491611 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:47:57 crc kubenswrapper[4933]: E0122 05:47:57.491676 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:47:57 crc kubenswrapper[4933]: E0122 05:47:57.491721 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:47:57 crc kubenswrapper[4933]: E0122 05:47:57.493329 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:47:57 crc kubenswrapper[4933]: E0122 05:47:57.566288 4933 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Jan 22 05:47:58 crc kubenswrapper[4933]: I0122 05:47:58.371035 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-jr6rw_f066dd84-0cd5-4e8c-8411-cf12cc83ea7d/kube-multus/1.log" Jan 22 05:47:58 crc kubenswrapper[4933]: I0122 05:47:58.371174 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-jr6rw" event={"ID":"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d","Type":"ContainerStarted","Data":"554f6ede0a925394463d392b48700a7c5dcd211ebffe4e3dc51046123872907f"} Jan 22 05:47:58 crc kubenswrapper[4933]: I0122 05:47:58.491602 4933 scope.go:117] "RemoveContainer" containerID="2344ed0bcca60932e6bdea03fbc86ae7bd867dd841e5bb1bd3544dac4543329c" Jan 22 05:47:58 crc kubenswrapper[4933]: E0122 05:47:58.491925 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-z88sj_openshift-ovn-kubernetes(6a721333-1932-4bb0-b384-c034492e59c4)\"" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" podUID="6a721333-1932-4bb0-b384-c034492e59c4" Jan 22 05:47:59 crc kubenswrapper[4933]: I0122 05:47:59.489941 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:47:59 crc kubenswrapper[4933]: I0122 05:47:59.489937 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:47:59 crc kubenswrapper[4933]: I0122 05:47:59.489937 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:47:59 crc kubenswrapper[4933]: I0122 05:47:59.490115 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:47:59 crc kubenswrapper[4933]: E0122 05:47:59.490268 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:47:59 crc kubenswrapper[4933]: E0122 05:47:59.490447 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:47:59 crc kubenswrapper[4933]: E0122 05:47:59.490578 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:47:59 crc kubenswrapper[4933]: E0122 05:47:59.490678 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:48:01 crc kubenswrapper[4933]: I0122 05:48:01.489753 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:48:01 crc kubenswrapper[4933]: E0122 05:48:01.490302 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:48:01 crc kubenswrapper[4933]: I0122 05:48:01.489794 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:48:01 crc kubenswrapper[4933]: I0122 05:48:01.489917 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:48:01 crc kubenswrapper[4933]: E0122 05:48:01.490500 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:48:01 crc kubenswrapper[4933]: E0122 05:48:01.490418 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:48:01 crc kubenswrapper[4933]: I0122 05:48:01.489760 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:48:01 crc kubenswrapper[4933]: E0122 05:48:01.490562 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:48:02 crc kubenswrapper[4933]: E0122 05:48:02.566859 4933 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 22 05:48:03 crc kubenswrapper[4933]: I0122 05:48:03.489790 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:48:03 crc kubenswrapper[4933]: I0122 05:48:03.489843 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:48:03 crc kubenswrapper[4933]: I0122 05:48:03.489900 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:48:03 crc kubenswrapper[4933]: I0122 05:48:03.489901 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:48:03 crc kubenswrapper[4933]: E0122 05:48:03.490016 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:48:03 crc kubenswrapper[4933]: E0122 05:48:03.490166 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:48:03 crc kubenswrapper[4933]: E0122 05:48:03.490280 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:48:03 crc kubenswrapper[4933]: E0122 05:48:03.490422 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:48:05 crc kubenswrapper[4933]: I0122 05:48:05.489976 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:48:05 crc kubenswrapper[4933]: I0122 05:48:05.489976 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:48:05 crc kubenswrapper[4933]: I0122 05:48:05.490179 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:48:05 crc kubenswrapper[4933]: I0122 05:48:05.489995 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:48:05 crc kubenswrapper[4933]: E0122 05:48:05.490248 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:48:05 crc kubenswrapper[4933]: E0122 05:48:05.490348 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:48:05 crc kubenswrapper[4933]: E0122 05:48:05.490493 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:48:05 crc kubenswrapper[4933]: E0122 05:48:05.490649 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:48:07 crc kubenswrapper[4933]: I0122 05:48:07.489769 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:48:07 crc kubenswrapper[4933]: I0122 05:48:07.489820 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:48:07 crc kubenswrapper[4933]: I0122 05:48:07.489858 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:48:07 crc kubenswrapper[4933]: E0122 05:48:07.489922 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:48:07 crc kubenswrapper[4933]: I0122 05:48:07.489956 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:48:07 crc kubenswrapper[4933]: E0122 05:48:07.490031 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:48:07 crc kubenswrapper[4933]: E0122 05:48:07.490169 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:48:07 crc kubenswrapper[4933]: E0122 05:48:07.490304 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:48:07 crc kubenswrapper[4933]: E0122 05:48:07.568325 4933 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 22 05:48:09 crc kubenswrapper[4933]: I0122 05:48:09.489899 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:48:09 crc kubenswrapper[4933]: E0122 05:48:09.490608 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:48:09 crc kubenswrapper[4933]: I0122 05:48:09.489964 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:48:09 crc kubenswrapper[4933]: E0122 05:48:09.490834 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:48:09 crc kubenswrapper[4933]: I0122 05:48:09.490063 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:48:09 crc kubenswrapper[4933]: E0122 05:48:09.491257 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:48:09 crc kubenswrapper[4933]: I0122 05:48:09.489933 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:48:09 crc kubenswrapper[4933]: E0122 05:48:09.491863 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:48:11 crc kubenswrapper[4933]: I0122 05:48:11.490731 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:48:11 crc kubenswrapper[4933]: I0122 05:48:11.490777 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:48:11 crc kubenswrapper[4933]: I0122 05:48:11.490792 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:48:11 crc kubenswrapper[4933]: I0122 05:48:11.490803 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:48:11 crc kubenswrapper[4933]: E0122 05:48:11.490893 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:48:11 crc kubenswrapper[4933]: E0122 05:48:11.491120 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:48:11 crc kubenswrapper[4933]: E0122 05:48:11.491235 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:48:11 crc kubenswrapper[4933]: E0122 05:48:11.491338 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:48:12 crc kubenswrapper[4933]: I0122 05:48:12.491342 4933 scope.go:117] "RemoveContainer" containerID="2344ed0bcca60932e6bdea03fbc86ae7bd867dd841e5bb1bd3544dac4543329c" Jan 22 05:48:12 crc kubenswrapper[4933]: E0122 05:48:12.569099 4933 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 22 05:48:13 crc kubenswrapper[4933]: I0122 05:48:13.426286 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-z88sj_6a721333-1932-4bb0-b384-c034492e59c4/ovnkube-controller/3.log" Jan 22 05:48:13 crc kubenswrapper[4933]: I0122 05:48:13.429740 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" event={"ID":"6a721333-1932-4bb0-b384-c034492e59c4","Type":"ContainerStarted","Data":"5757e900b218b813cdd63de12317a7cff0730380dd4c56139a108acd13a145d6"} Jan 22 05:48:13 crc kubenswrapper[4933]: I0122 05:48:13.430181 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:48:13 crc kubenswrapper[4933]: I0122 05:48:13.470515 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-t8rgm"] Jan 22 05:48:13 crc kubenswrapper[4933]: I0122 05:48:13.470716 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:48:13 crc kubenswrapper[4933]: E0122 05:48:13.470957 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:48:13 crc kubenswrapper[4933]: I0122 05:48:13.490618 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:48:13 crc kubenswrapper[4933]: E0122 05:48:13.490999 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:48:13 crc kubenswrapper[4933]: I0122 05:48:13.491056 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:48:13 crc kubenswrapper[4933]: I0122 05:48:13.491088 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:48:13 crc kubenswrapper[4933]: E0122 05:48:13.491196 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:48:13 crc kubenswrapper[4933]: E0122 05:48:13.491358 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:48:15 crc kubenswrapper[4933]: I0122 05:48:15.490859 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:48:15 crc kubenswrapper[4933]: E0122 05:48:15.491620 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:48:15 crc kubenswrapper[4933]: I0122 05:48:15.491866 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:48:15 crc kubenswrapper[4933]: E0122 05:48:15.491928 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:48:15 crc kubenswrapper[4933]: I0122 05:48:15.492049 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:48:15 crc kubenswrapper[4933]: E0122 05:48:15.492132 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:48:15 crc kubenswrapper[4933]: I0122 05:48:15.492324 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:48:15 crc kubenswrapper[4933]: E0122 05:48:15.492491 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.490792 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.490850 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:48:17 crc kubenswrapper[4933]: E0122 05:48:17.490994 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t8rgm" podUID="0902347a-c5e2-4891-812b-cfe6efc32261" Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.491021 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:48:17 crc kubenswrapper[4933]: E0122 05:48:17.491389 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.491680 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:48:17 crc kubenswrapper[4933]: E0122 05:48:17.491845 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:48:17 crc kubenswrapper[4933]: E0122 05:48:17.491306 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.920724 4933 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.968229 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-6r979"] Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.968598 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-llbq4"] Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.968969 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-llbq4" Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.969353 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-6r979" Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.971146 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9j8zq"] Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.971465 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9j8zq" Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.972757 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.972796 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.972953 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.974599 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.974787 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.974921 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.975863 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.976049 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.976289 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.977599 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-jt4v9"] Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.977900 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-jt4v9" Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.978193 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.978417 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.978758 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.978759 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.979261 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.979945 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.981321 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.982060 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.982636 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.982770 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.982941 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.987449 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-tzlk9"] Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.988071 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tzlk9" Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.988695 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zshgr"] Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.995945 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.996774 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zshgr" Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.997855 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.998288 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.998683 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.998695 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-jtr4k"] Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.998863 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.998752 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 22 05:48:17 crc kubenswrapper[4933]: I0122 05:48:17.999510 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-jtr4k" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.007789 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.009586 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.009949 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.010606 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.051784 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.010861 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.011309 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.011879 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.011935 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.012028 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.012810 4933 reflector.go:368] 
Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.012932 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.012988 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.023375 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.013018 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-gj98r"] Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.059599 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.061366 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3af890e6-9547-4a96-8719-a7599d1b1701-client-ca\") pod \"controller-manager-879f6c89f-jt4v9\" (UID: \"3af890e6-9547-4a96-8719-a7599d1b1701\") " pod="openshift-controller-manager/controller-manager-879f6c89f-jt4v9" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.061418 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3af890e6-9547-4a96-8719-a7599d1b1701-config\") pod \"controller-manager-879f6c89f-jt4v9\" (UID: \"3af890e6-9547-4a96-8719-a7599d1b1701\") " pod="openshift-controller-manager/controller-manager-879f6c89f-jt4v9" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.061536 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3af890e6-9547-4a96-8719-a7599d1b1701-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-jt4v9\" (UID: \"3af890e6-9547-4a96-8719-a7599d1b1701\") " pod="openshift-controller-manager/controller-manager-879f6c89f-jt4v9" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.061643 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3af890e6-9547-4a96-8719-a7599d1b1701-serving-cert\") pod \"controller-manager-879f6c89f-jt4v9\" (UID: \"3af890e6-9547-4a96-8719-a7599d1b1701\") " pod="openshift-controller-manager/controller-manager-879f6c89f-jt4v9" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.061730 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hc95z\" (UniqueName: \"kubernetes.io/projected/3af890e6-9547-4a96-8719-a7599d1b1701-kube-api-access-hc95z\") pod \"controller-manager-879f6c89f-jt4v9\" (UID: \"3af890e6-9547-4a96-8719-a7599d1b1701\") " pod="openshift-controller-manager/controller-manager-879f6c89f-jt4v9" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.062376 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-77xhn"] Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.062717 4933 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-74lmm"] Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.062966 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-pl49j"] Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.063234 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-77xhn" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.063275 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-gj98r" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.063932 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-gqpfp"] Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.064210 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-fntp2"] Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.064410 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-74lmm" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.064533 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-fntp2" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.064727 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-gqpfp" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.065124 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-pl49j" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.068233 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-tnl5g"] Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.068660 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-skfkj"] Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.068862 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-dgpsx"] Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.069125 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-dgpsx" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.069199 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-tnl5g" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.069508 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-skfkj" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.070619 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.070820 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-x5npd"] Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.071354 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-x5npd" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.073202 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-sk5qf"] Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.073678 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8ctmc"] Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.074040 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8ctmc" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.074287 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-sk5qf" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.075670 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.075834 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.076143 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.076340 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.076542 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.076865 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.077014 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.077171 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.077322 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-2z9q7"] Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.077351 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.077484 4933 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.077688 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.077759 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-2z9q7" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.077873 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.077688 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.078143 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-tm764"] Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.078323 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.078610 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-tm764" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.078615 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.078909 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.081023 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-6r979"] Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.081054 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-vb2np"] Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.081364 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-rng6t"] Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.081641 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-rng6t" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.084472 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.085521 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.085659 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.086133 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.086229 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.086242 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.086383 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.086409 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.086493 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-vb2np" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.086498 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.086529 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.086557 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.086590 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.086607 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.086644 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.087185 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.087260 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.087329 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.087443 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" 
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.087748 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.087811 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.087945 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.088013 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.088088 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.088150 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.088302 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.088326 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.088406 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.088425 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.088764 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.088898 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.089012 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.088769 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.089207 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.089320 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.089426 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.089531 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.089675 4933 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.097593 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.102437 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-cgnhf"] Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.103092 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cgnhf" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.128398 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.127798 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-khtrj"] Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.129110 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.131542 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.133006 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.134238 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-wc9dx"] Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.134382 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-khtrj" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.135109 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.135357 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-rhsbf"] Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.135559 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-wc9dx" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.136036 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-lpvhh"] Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.136527 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.136680 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.137889 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.138231 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rhsbf"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.138329 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.138912 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-t7f4w"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.139102 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-lpvhh"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.158521 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.162832 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0f030cb7-e6bb-47bb-b4a4-821f4a21ad42-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-2z9q7\" (UID: \"0f030cb7-e6bb-47bb-b4a4-821f4a21ad42\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-2z9q7"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.162902 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " pod="openshift-authentication/oauth-openshift-558db77b4-gj98r"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.162955 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4a7dbd55-b911-42ff-a5e7-ceb61a071343-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-skfkj\" (UID: \"4a7dbd55-b911-42ff-a5e7-ceb61a071343\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-skfkj"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.162973 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/8bdbecf3-1fbc-4184-b33b-94031b7e3845-audit-dir\") pod \"apiserver-76f77b778f-llbq4\" (UID: \"8bdbecf3-1fbc-4184-b33b-94031b7e3845\") " pod="openshift-apiserver/apiserver-76f77b778f-llbq4"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.163024 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " pod="openshift-authentication/oauth-openshift-558db77b4-gj98r"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.163042 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8bdbecf3-1fbc-4184-b33b-94031b7e3845-trusted-ca-bundle\") pod \"apiserver-76f77b778f-llbq4\" (UID: \"8bdbecf3-1fbc-4184-b33b-94031b7e3845\") " pod="openshift-apiserver/apiserver-76f77b778f-llbq4"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.163148 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f9fsn\" (UniqueName: \"kubernetes.io/projected/4c3867cf-bc74-4587-aa51-d30bf357f0f2-kube-api-access-f9fsn\") pod \"openshift-controller-manager-operator-756b6f6bc6-dgpsx\" (UID: \"4c3867cf-bc74-4587-aa51-d30bf357f0f2\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-dgpsx"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.163168 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fjdmh\" (UniqueName: \"kubernetes.io/projected/5f757f81-9a44-488a-8a60-4814d2bc418d-kube-api-access-fjdmh\") pod \"multus-admission-controller-857f4d67dd-sk5qf\" (UID: \"5f757f81-9a44-488a-8a60-4814d2bc418d\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-sk5qf"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.163397 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/1ba473a8-8e29-49fe-a1b8-8cc7e422037f-available-featuregates\") pod \"openshift-config-operator-7777fb866f-jtr4k\" (UID: \"1ba473a8-8e29-49fe-a1b8-8cc7e422037f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-jtr4k"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.163421 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/5f757f81-9a44-488a-8a60-4814d2bc418d-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-sk5qf\" (UID: \"5f757f81-9a44-488a-8a60-4814d2bc418d\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-sk5qf"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.163439 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f030cb7-e6bb-47bb-b4a4-821f4a21ad42-config\") pod \"authentication-operator-69f744f599-2z9q7\" (UID: \"0f030cb7-e6bb-47bb-b4a4-821f4a21ad42\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-2z9q7"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.163465 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2edc4443-850c-48d0-a605-3debd5d38299-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-8ctmc\" (UID: \"2edc4443-850c-48d0-a605-3debd5d38299\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8ctmc"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.163504 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r4mtq\" (UniqueName: \"kubernetes.io/projected/2edc4443-850c-48d0-a605-3debd5d38299-kube-api-access-r4mtq\") pod \"cluster-image-registry-operator-dc59b4c8b-8ctmc\" (UID: \"2edc4443-850c-48d0-a605-3debd5d38299\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8ctmc"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.163529 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3af890e6-9547-4a96-8719-a7599d1b1701-config\") pod \"controller-manager-879f6c89f-jt4v9\" (UID: \"3af890e6-9547-4a96-8719-a7599d1b1701\") " pod="openshift-controller-manager/controller-manager-879f6c89f-jt4v9"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.163564 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f030cb7-e6bb-47bb-b4a4-821f4a21ad42-serving-cert\") pod \"authentication-operator-69f744f599-2z9q7\" (UID: \"0f030cb7-e6bb-47bb-b4a4-821f4a21ad42\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-2z9q7"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.163579 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1825f935-ad5f-4d85-aeef-e368841c5547-etcd-client\") pod \"etcd-operator-b45778765-x5npd\" (UID: \"1825f935-ad5f-4d85-aeef-e368841c5547\") " pod="openshift-etcd-operator/etcd-operator-b45778765-x5npd"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.163598 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3c7d8ca9-574e-4b56-8ed6-b9155509a740-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-zshgr\" (UID: \"3c7d8ca9-574e-4b56-8ed6-b9155509a740\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zshgr"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.163612 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/8bdbecf3-1fbc-4184-b33b-94031b7e3845-node-pullsecrets\") pod \"apiserver-76f77b778f-llbq4\" (UID: \"8bdbecf3-1fbc-4184-b33b-94031b7e3845\") " pod="openshift-apiserver/apiserver-76f77b778f-llbq4"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.163627 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/2860560e-929a-4dbe-84c0-23326bf7cdf4-stats-auth\") pod \"router-default-5444994796-77xhn\" (UID: \"2860560e-929a-4dbe-84c0-23326bf7cdf4\") " pod="openshift-ingress/router-default-5444994796-77xhn"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.163650 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/2edc4443-850c-48d0-a605-3debd5d38299-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-8ctmc\" (UID: \"2edc4443-850c-48d0-a605-3debd5d38299\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8ctmc"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.163665 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/8bdbecf3-1fbc-4184-b33b-94031b7e3845-encryption-config\") pod \"apiserver-76f77b778f-llbq4\" (UID: \"8bdbecf3-1fbc-4184-b33b-94031b7e3845\") " pod="openshift-apiserver/apiserver-76f77b778f-llbq4"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.163680 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0f030cb7-e6bb-47bb-b4a4-821f4a21ad42-service-ca-bundle\") pod \"authentication-operator-69f744f599-2z9q7\" (UID: \"0f030cb7-e6bb-47bb-b4a4-821f4a21ad42\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-2z9q7"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.163695 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2860560e-929a-4dbe-84c0-23326bf7cdf4-metrics-certs\") pod \"router-default-5444994796-77xhn\" (UID: \"2860560e-929a-4dbe-84c0-23326bf7cdf4\") " pod="openshift-ingress/router-default-5444994796-77xhn"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.164229 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1825f935-ad5f-4d85-aeef-e368841c5547-serving-cert\") pod \"etcd-operator-b45778765-x5npd\" (UID: \"1825f935-ad5f-4d85-aeef-e368841c5547\") " pod="openshift-etcd-operator/etcd-operator-b45778765-x5npd"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.164289 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3af890e6-9547-4a96-8719-a7599d1b1701-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-jt4v9\" (UID: \"3af890e6-9547-4a96-8719-a7599d1b1701\") " pod="openshift-controller-manager/controller-manager-879f6c89f-jt4v9"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.164511 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k9c2p\" (UniqueName: \"kubernetes.io/projected/2c4864a0-5981-4eef-a0db-c33a535e02de-kube-api-access-k9c2p\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " pod="openshift-authentication/oauth-openshift-558db77b4-gj98r"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.164661 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pqst8\" (UniqueName: \"kubernetes.io/projected/c800ab14-5d0a-4078-91f5-b47d05d15ccc-kube-api-access-pqst8\") pod \"machine-api-operator-5694c8668f-6r979\" (UID: \"c800ab14-5d0a-4078-91f5-b47d05d15ccc\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6r979"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.164699 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/12969fb5-7977-4b64-baed-ad39d9524369-auth-proxy-config\") pod \"machine-approver-56656f9798-cgnhf\" (UID: \"12969fb5-7977-4b64-baed-ad39d9524369\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cgnhf"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.164721 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4a7dbd55-b911-42ff-a5e7-ceb61a071343-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-skfkj\" (UID: \"4a7dbd55-b911-42ff-a5e7-ceb61a071343\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-skfkj"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.164743 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/44595319-f5f2-4db3-9671-9c8680c2dfc7-serving-cert\") pod \"route-controller-manager-6576b87f9c-tzlk9\" (UID: \"44595319-f5f2-4db3-9671-9c8680c2dfc7\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tzlk9"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.164763 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1825f935-ad5f-4d85-aeef-e368841c5547-config\") pod \"etcd-operator-b45778765-x5npd\" (UID: \"1825f935-ad5f-4d85-aeef-e368841c5547\") " pod="openshift-etcd-operator/etcd-operator-b45778765-x5npd"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.164783 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " pod="openshift-authentication/oauth-openshift-558db77b4-gj98r"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.164834 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3af890e6-9547-4a96-8719-a7599d1b1701-serving-cert\") pod \"controller-manager-879f6c89f-jt4v9\" (UID: \"3af890e6-9547-4a96-8719-a7599d1b1701\") " pod="openshift-controller-manager/controller-manager-879f6c89f-jt4v9"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.164871 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-khqg7\" (UniqueName: \"kubernetes.io/projected/b3240d3a-10ab-4df0-9d7b-69e3cfb1aaf8-kube-api-access-khqg7\") pod \"machine-config-operator-74547568cd-wc9dx\" (UID: \"b3240d3a-10ab-4df0-9d7b-69e3cfb1aaf8\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-wc9dx"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.164935 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3af890e6-9547-4a96-8719-a7599d1b1701-config\") pod \"controller-manager-879f6c89f-jt4v9\" (UID: \"3af890e6-9547-4a96-8719-a7599d1b1701\") " pod="openshift-controller-manager/controller-manager-879f6c89f-jt4v9"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.165245 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3af890e6-9547-4a96-8719-a7599d1b1701-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-jt4v9\" (UID: \"3af890e6-9547-4a96-8719-a7599d1b1701\") " pod="openshift-controller-manager/controller-manager-879f6c89f-jt4v9"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.165257 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " pod="openshift-authentication/oauth-openshift-558db77b4-gj98r"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.165286 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w5z5f\" (UniqueName: \"kubernetes.io/projected/1825f935-ad5f-4d85-aeef-e368841c5547-kube-api-access-w5z5f\") pod \"etcd-operator-b45778765-x5npd\" (UID: \"1825f935-ad5f-4d85-aeef-e368841c5547\") " pod="openshift-etcd-operator/etcd-operator-b45778765-x5npd"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.165309 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " pod="openshift-authentication/oauth-openshift-558db77b4-gj98r"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.165349 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/8bdbecf3-1fbc-4184-b33b-94031b7e3845-image-import-ca\") pod \"apiserver-76f77b778f-llbq4\" (UID: \"8bdbecf3-1fbc-4184-b33b-94031b7e3845\") " pod="openshift-apiserver/apiserver-76f77b778f-llbq4"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.165371 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " pod="openshift-authentication/oauth-openshift-558db77b4-gj98r"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.165387 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bjjtz\" (UniqueName: \"kubernetes.io/projected/80358407-ad1b-499f-868f-44e3388b0fac-kube-api-access-bjjtz\") pod \"downloads-7954f5f757-tm764\" (UID: \"80358407-ad1b-499f-868f-44e3388b0fac\") " pod="openshift-console/downloads-7954f5f757-tm764"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.165409 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4x6f4\" (UniqueName: \"kubernetes.io/projected/12969fb5-7977-4b64-baed-ad39d9524369-kube-api-access-4x6f4\") pod \"machine-approver-56656f9798-cgnhf\" (UID: \"12969fb5-7977-4b64-baed-ad39d9524369\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cgnhf"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.165438 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3c7d8ca9-574e-4b56-8ed6-b9155509a740-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-zshgr\" (UID: \"3c7d8ca9-574e-4b56-8ed6-b9155509a740\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zshgr"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.165455 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xrrmr\" (UniqueName: \"kubernetes.io/projected/0f030cb7-e6bb-47bb-b4a4-821f4a21ad42-kube-api-access-xrrmr\") pod \"authentication-operator-69f744f599-2z9q7\" (UID: \"0f030cb7-e6bb-47bb-b4a4-821f4a21ad42\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-2z9q7"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.165471 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2860560e-929a-4dbe-84c0-23326bf7cdf4-service-ca-bundle\") pod \"router-default-5444994796-77xhn\" (UID: \"2860560e-929a-4dbe-84c0-23326bf7cdf4\") " pod="openshift-ingress/router-default-5444994796-77xhn"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.165511 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4t957\" (UniqueName: \"kubernetes.io/projected/8bdbecf3-1fbc-4184-b33b-94031b7e3845-kube-api-access-4t957\") pod \"apiserver-76f77b778f-llbq4\" (UID: \"8bdbecf3-1fbc-4184-b33b-94031b7e3845\") " pod="openshift-apiserver/apiserver-76f77b778f-llbq4"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.165558 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tj882\" (UniqueName: \"kubernetes.io/projected/1ba473a8-8e29-49fe-a1b8-8cc7e422037f-kube-api-access-tj882\") pod \"openshift-config-operator-7777fb866f-jtr4k\" (UID: \"1ba473a8-8e29-49fe-a1b8-8cc7e422037f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-jtr4k"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.165580 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hc95z\" (UniqueName: \"kubernetes.io/projected/3af890e6-9547-4a96-8719-a7599d1b1701-kube-api-access-hc95z\") pod \"controller-manager-879f6c89f-jt4v9\" (UID: \"3af890e6-9547-4a96-8719-a7599d1b1701\") " pod="openshift-controller-manager/controller-manager-879f6c89f-jt4v9"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.165609 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zc9dp\" (UniqueName: \"kubernetes.io/projected/6b02d39a-2e64-4035-abf2-99dcc7f32194-kube-api-access-zc9dp\") pod \"cluster-samples-operator-665b6dd947-9j8zq\" (UID: \"6b02d39a-2e64-4035-abf2-99dcc7f32194\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9j8zq"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.165624 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4c3867cf-bc74-4587-aa51-d30bf357f0f2-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-dgpsx\" (UID: \"4c3867cf-bc74-4587-aa51-d30bf357f0f2\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-dgpsx"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.165732 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3af890e6-9547-4a96-8719-a7599d1b1701-client-ca\") pod \"controller-manager-879f6c89f-jt4v9\" (UID: \"3af890e6-9547-4a96-8719-a7599d1b1701\") " pod="openshift-controller-manager/controller-manager-879f6c89f-jt4v9"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.165760 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/c800ab14-5d0a-4078-91f5-b47d05d15ccc-images\") pod \"machine-api-operator-5694c8668f-6r979\" (UID: \"c800ab14-5d0a-4078-91f5-b47d05d15ccc\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6r979"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.165779 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/44595319-f5f2-4db3-9671-9c8680c2dfc7-client-ca\") pod \"route-controller-manager-6576b87f9c-tzlk9\" (UID: \"44595319-f5f2-4db3-9671-9c8680c2dfc7\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tzlk9"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.165800 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c800ab14-5d0a-4078-91f5-b47d05d15ccc-config\") pod \"machine-api-operator-5694c8668f-6r979\" (UID: \"c800ab14-5d0a-4078-91f5-b47d05d15ccc\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6r979"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.165815 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/2860560e-929a-4dbe-84c0-23326bf7cdf4-default-certificate\") pod \"router-default-5444994796-77xhn\" (UID: \"2860560e-929a-4dbe-84c0-23326bf7cdf4\") " pod="openshift-ingress/router-default-5444994796-77xhn"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.165830 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/12969fb5-7977-4b64-baed-ad39d9524369-config\") pod \"machine-approver-56656f9798-cgnhf\" (UID: \"12969fb5-7977-4b64-baed-ad39d9524369\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cgnhf"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.166299 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3c7d8ca9-574e-4b56-8ed6-b9155509a740-config\") pod \"kube-controller-manager-operator-78b949d7b-zshgr\" (UID: \"3c7d8ca9-574e-4b56-8ed6-b9155509a740\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zshgr"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.166331 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9cstl\" (UniqueName: \"kubernetes.io/projected/dbf1986a-8c34-4d34-bbf0-eaaf9d89eb8c-kube-api-access-9cstl\") pod \"openshift-apiserver-operator-796bbdcf4f-74lmm\" (UID: \"dbf1986a-8c34-4d34-bbf0-eaaf9d89eb8c\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-74lmm"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.166350 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dbf1986a-8c34-4d34-bbf0-eaaf9d89eb8c-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-74lmm\" (UID: \"dbf1986a-8c34-4d34-bbf0-eaaf9d89eb8c\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-74lmm"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.166358 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3af890e6-9547-4a96-8719-a7599d1b1701-client-ca\") pod \"controller-manager-879f6c89f-jt4v9\" (UID: \"3af890e6-9547-4a96-8719-a7599d1b1701\") " pod="openshift-controller-manager/controller-manager-879f6c89f-jt4v9"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.166370 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/2c4864a0-5981-4eef-a0db-c33a535e02de-audit-policies\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " pod="openshift-authentication/oauth-openshift-558db77b4-gj98r"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.166388 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/99c9edc1-b1d6-459e-957f-aab91850d2e5-serving-cert\") pod \"console-operator-58897d9998-fntp2\" (UID: \"99c9edc1-b1d6-459e-957f-aab91850d2e5\") " pod="openshift-console-operator/console-operator-58897d9998-fntp2"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.166417 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1ba473a8-8e29-49fe-a1b8-8cc7e422037f-serving-cert\") pod \"openshift-config-operator-7777fb866f-jtr4k\" (UID: \"1ba473a8-8e29-49fe-a1b8-8cc7e422037f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-jtr4k"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.166434 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/c800ab14-5d0a-4078-91f5-b47d05d15ccc-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-6r979\" (UID: \"c800ab14-5d0a-4078-91f5-b47d05d15ccc\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6r979"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.166450 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/44595319-f5f2-4db3-9671-9c8680c2dfc7-config\") pod \"route-controller-manager-6576b87f9c-tzlk9\" (UID: \"44595319-f5f2-4db3-9671-9c8680c2dfc7\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tzlk9"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.166467 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8bdbecf3-1fbc-4184-b33b-94031b7e3845-serving-cert\") pod \"apiserver-76f77b778f-llbq4\" (UID: \"8bdbecf3-1fbc-4184-b33b-94031b7e3845\") " pod="openshift-apiserver/apiserver-76f77b778f-llbq4"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.166483 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/99c9edc1-b1d6-459e-957f-aab91850d2e5-trusted-ca\") pod \"console-operator-58897d9998-fntp2\" (UID: \"99c9edc1-b1d6-459e-957f-aab91850d2e5\") " pod="openshift-console-operator/console-operator-58897d9998-fntp2"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.166498 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sn4l5\" (UniqueName: \"kubernetes.io/projected/44595319-f5f2-4db3-9671-9c8680c2dfc7-kube-api-access-sn4l5\") pod \"route-controller-manager-6576b87f9c-tzlk9\" (UID: \"44595319-f5f2-4db3-9671-9c8680c2dfc7\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tzlk9"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.166533 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/8bdbecf3-1fbc-4184-b33b-94031b7e3845-audit\") pod \"apiserver-76f77b778f-llbq4\" (UID: \"8bdbecf3-1fbc-4184-b33b-94031b7e3845\") " pod="openshift-apiserver/apiserver-76f77b778f-llbq4"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.166557 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " pod="openshift-authentication/oauth-openshift-558db77b4-gj98r"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.166586 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/6b02d39a-2e64-4035-abf2-99dcc7f32194-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-9j8zq\" (UID: \"6b02d39a-2e64-4035-abf2-99dcc7f32194\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9j8zq"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.166605 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " pod="openshift-authentication/oauth-openshift-558db77b4-gj98r"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.166621 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/99c9edc1-b1d6-459e-957f-aab91850d2e5-config\") pod \"console-operator-58897d9998-fntp2\" (UID: \"99c9edc1-b1d6-459e-957f-aab91850d2e5\") " pod="openshift-console-operator/console-operator-58897d9998-fntp2"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.166650 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4a7dbd55-b911-42ff-a5e7-ceb61a071343-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-skfkj\" (UID: \"4a7dbd55-b911-42ff-a5e7-ceb61a071343\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-skfkj"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.166666 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c3867cf-bc74-4587-aa51-d30bf357f0f2-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-dgpsx\" (UID: \"4c3867cf-bc74-4587-aa51-d30bf357f0f2\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-dgpsx"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.166684 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-txqcb\" (UniqueName: \"kubernetes.io/projected/2860560e-929a-4dbe-84c0-23326bf7cdf4-kube-api-access-txqcb\") pod \"router-default-5444994796-77xhn\" (UID: \"2860560e-929a-4dbe-84c0-23326bf7cdf4\") " pod="openshift-ingress/router-default-5444994796-77xhn"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.166698 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/1825f935-ad5f-4d85-aeef-e368841c5547-etcd-ca\") pod \"etcd-operator-b45778765-x5npd\" (UID: \"1825f935-ad5f-4d85-aeef-e368841c5547\") " pod="openshift-etcd-operator/etcd-operator-b45778765-x5npd"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.166716 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2edc4443-850c-48d0-a605-3debd5d38299-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-8ctmc\" (UID: \"2edc4443-850c-48d0-a605-3debd5d38299\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8ctmc"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.166731 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/12969fb5-7977-4b64-baed-ad39d9524369-machine-approver-tls\") pod \"machine-approver-56656f9798-cgnhf\" (UID: \"12969fb5-7977-4b64-baed-ad39d9524369\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cgnhf"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.166766 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/b3240d3a-10ab-4df0-9d7b-69e3cfb1aaf8-images\") pod \"machine-config-operator-74547568cd-wc9dx\" (UID: \"b3240d3a-10ab-4df0-9d7b-69e3cfb1aaf8\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-wc9dx"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.166781 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/b3240d3a-10ab-4df0-9d7b-69e3cfb1aaf8-auth-proxy-config\") pod \"machine-config-operator-74547568cd-wc9dx\" (UID: \"b3240d3a-10ab-4df0-9d7b-69e3cfb1aaf8\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-wc9dx"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.166798 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8bdbecf3-1fbc-4184-b33b-94031b7e3845-config\") pod \"apiserver-76f77b778f-llbq4\" (UID: \"8bdbecf3-1fbc-4184-b33b-94031b7e3845\") " pod="openshift-apiserver/apiserver-76f77b778f-llbq4"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.166812 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dbf1986a-8c34-4d34-bbf0-eaaf9d89eb8c-config\") pod \"openshift-apiserver-operator-796bbdcf4f-74lmm\" (UID: \"dbf1986a-8c34-4d34-bbf0-eaaf9d89eb8c\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-74lmm"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.166827 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " pod="openshift-authentication/oauth-openshift-558db77b4-gj98r"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.166843 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t4n8w\" (UniqueName: \"kubernetes.io/projected/99c9edc1-b1d6-459e-957f-aab91850d2e5-kube-api-access-t4n8w\") pod \"console-operator-58897d9998-fntp2\" (UID: \"99c9edc1-b1d6-459e-957f-aab91850d2e5\") " pod="openshift-console-operator/console-operator-58897d9998-fntp2"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.166860 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/8bdbecf3-1fbc-4184-b33b-94031b7e3845-etcd-client\") pod \"apiserver-76f77b778f-llbq4\" (UID: \"8bdbecf3-1fbc-4184-b33b-94031b7e3845\") " pod="openshift-apiserver/apiserver-76f77b778f-llbq4"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.166875 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/1825f935-ad5f-4d85-aeef-e368841c5547-etcd-service-ca\") pod \"etcd-operator-b45778765-x5npd\" (UID: \"1825f935-ad5f-4d85-aeef-e368841c5547\") " pod="openshift-etcd-operator/etcd-operator-b45778765-x5npd"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.166891 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " pod="openshift-authentication/oauth-openshift-558db77b4-gj98r"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.166910 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/b3240d3a-10ab-4df0-9d7b-69e3cfb1aaf8-proxy-tls\") pod \"machine-config-operator-74547568cd-wc9dx\" (UID: \"b3240d3a-10ab-4df0-9d7b-69e3cfb1aaf8\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-wc9dx"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.166974 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/8bdbecf3-1fbc-4184-b33b-94031b7e3845-etcd-serving-ca\") pod \"apiserver-76f77b778f-llbq4\" (UID: \"8bdbecf3-1fbc-4184-b33b-94031b7e3845\") " pod="openshift-apiserver/apiserver-76f77b778f-llbq4"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.166997 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/2c4864a0-5981-4eef-a0db-c33a535e02de-audit-dir\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " pod="openshift-authentication/oauth-openshift-558db77b4-gj98r"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.167013 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " pod="openshift-authentication/oauth-openshift-558db77b4-gj98r"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.169472 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-kq95z"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.172825 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-t7f4w"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.175775 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.189217 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3af890e6-9547-4a96-8719-a7599d1b1701-serving-cert\") pod \"controller-manager-879f6c89f-jt4v9\" (UID: \"3af890e6-9547-4a96-8719-a7599d1b1701\") " pod="openshift-controller-manager/controller-manager-879f6c89f-jt4v9"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.192332 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-jt4v9"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.192376 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zxdg4"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.192761 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-4ptvd"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.193041 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mppr6"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.193141 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zxdg4"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.193159 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-kq95z"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.193631 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.193991 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484345-n7rrt"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.194393 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-46xcf"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.195158 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-94w5l"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.195272 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mppr6"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.195307 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-46xcf"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.195309 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484345-n7rrt"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.195813 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-q2rvb"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.196224 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dr2tg"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.196562 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-llbq4"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.196581 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9j8zq"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.196593 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-tzlk9"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.196604 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-tnl5g"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.196612 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-sk5qf"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.196621 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-fntp2"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.196631 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zshgr"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.196638 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-rng6t"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.196647 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-74lmm"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.196659 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-tm7xw"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.198135 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-94w5l"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.199876 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-q2rvb"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.199213 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dr2tg"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.205517 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-skfkj"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.205635 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-pl49j"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.205735 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-tm7xw"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.206046 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-dgpsx"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.206094 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-gj98r"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.207196 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mppr6"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.212854 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.215419 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zxdg4"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.215730 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.216362 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-jtr4k"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.221314 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8ctmc"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.221364 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-tm764"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.221377 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-kq95z"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.222537 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-rhsbf"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.223525 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-4ptvd"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.224623 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-khtrj"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.225704 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-94w5l"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.226801 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-x5npd"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.228662 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-vb2np"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.229866 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-wc9dx"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.231359 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.231726 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-2z9q7"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.233033 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484345-n7rrt"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.234518 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-gqpfp"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.235507 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-jhjlm"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.236360 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-jhjlm"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.236604 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-lpvhh"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.237684 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-t7f4w"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.238707 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-48tlb"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.240032 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-q2rvb"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.240139 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-48tlb"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.241330 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-46xcf"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.242833 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-tm7xw"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.243961 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-jhjlm"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.245169 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dr2tg"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.246217 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-48tlb"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.247260 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-gzpgq"]
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.247798 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-gzpgq"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.251124 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.267944 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/c800ab14-5d0a-4078-91f5-b47d05d15ccc-images\") pod \"machine-api-operator-5694c8668f-6r979\" (UID: \"c800ab14-5d0a-4078-91f5-b47d05d15ccc\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6r979"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.267979 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d24a8b16-9687-49f3-bed0-888340007876-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-khtrj\" (UID: \"d24a8b16-9687-49f3-bed0-888340007876\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-khtrj"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.267999 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/63e15a9d-3476-43c1-93e0-6453f0fc9adb-console-config\") pod \"console-f9d7485db-gqpfp\" (UID: \"63e15a9d-3476-43c1-93e0-6453f0fc9adb\") " pod="openshift-console/console-f9d7485db-gqpfp"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268014 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pkd78\" (UniqueName: \"kubernetes.io/projected/ae503948-5876-4b1e-ba9f-23ebb0e05b94-kube-api-access-pkd78\") pod \"collect-profiles-29484345-n7rrt\" (UID: \"ae503948-5876-4b1e-ba9f-23ebb0e05b94\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484345-n7rrt"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268032 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c800ab14-5d0a-4078-91f5-b47d05d15ccc-config\") pod \"machine-api-operator-5694c8668f-6r979\" (UID: \"c800ab14-5d0a-4078-91f5-b47d05d15ccc\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6r979"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268048 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/2860560e-929a-4dbe-84c0-23326bf7cdf4-default-certificate\") pod \"router-default-5444994796-77xhn\" (UID: \"2860560e-929a-4dbe-84c0-23326bf7cdf4\") " pod="openshift-ingress/router-default-5444994796-77xhn"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268065 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b2d54262-2b73-4ffa-80e6-beb88d8fa5b7-config-volume\") pod \"dns-default-jhjlm\" (UID: \"b2d54262-2b73-4ffa-80e6-beb88d8fa5b7\") " pod="openshift-dns/dns-default-jhjlm"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268117 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dbf1986a-8c34-4d34-bbf0-eaaf9d89eb8c-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-74lmm\" (UID: \"dbf1986a-8c34-4d34-bbf0-eaaf9d89eb8c\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-74lmm"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268134 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/99c9edc1-b1d6-459e-957f-aab91850d2e5-trusted-ca\") pod \"console-operator-58897d9998-fntp2\" (UID: \"99c9edc1-b1d6-459e-957f-aab91850d2e5\") " pod="openshift-console-operator/console-operator-58897d9998-fntp2"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268149 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/63e15a9d-3476-43c1-93e0-6453f0fc9adb-oauth-serving-cert\") pod \"console-f9d7485db-gqpfp\" (UID: \"63e15a9d-3476-43c1-93e0-6453f0fc9adb\") " pod="openshift-console/console-f9d7485db-gqpfp"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268184 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/c800ab14-5d0a-4078-91f5-b47d05d15ccc-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-6r979\" (UID: \"c800ab14-5d0a-4078-91f5-b47d05d15ccc\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6r979"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268204 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8bdbecf3-1fbc-4184-b33b-94031b7e3845-serving-cert\") pod \"apiserver-76f77b778f-llbq4\" (UID: \"8bdbecf3-1fbc-4184-b33b-94031b7e3845\") " pod="openshift-apiserver/apiserver-76f77b778f-llbq4"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268220 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/8bdbecf3-1fbc-4184-b33b-94031b7e3845-audit\") pod \"apiserver-76f77b778f-llbq4\" (UID: \"8bdbecf3-1fbc-4184-b33b-94031b7e3845\") " pod="openshift-apiserver/apiserver-76f77b778f-llbq4"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268235 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " pod="openshift-authentication/oauth-openshift-558db77b4-gj98r"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268251 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " pod="openshift-authentication/oauth-openshift-558db77b4-gj98r"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268267 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6x9zh\" (UniqueName: \"kubernetes.io/projected/63e15a9d-3476-43c1-93e0-6453f0fc9adb-kube-api-access-6x9zh\") pod \"console-f9d7485db-gqpfp\" (UID: \"63e15a9d-3476-43c1-93e0-6453f0fc9adb\") " pod="openshift-console/console-f9d7485db-gqpfp"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268290 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/6b02d39a-2e64-4035-abf2-99dcc7f32194-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-9j8zq\" (UID: \"6b02d39a-2e64-4035-abf2-99dcc7f32194\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9j8zq"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268322 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sn4l5\" (UniqueName: \"kubernetes.io/projected/44595319-f5f2-4db3-9671-9c8680c2dfc7-kube-api-access-sn4l5\") pod \"route-controller-manager-6576b87f9c-tzlk9\" (UID: \"44595319-f5f2-4db3-9671-9c8680c2dfc7\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tzlk9"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268341 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c3867cf-bc74-4587-aa51-d30bf357f0f2-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-dgpsx\" (UID: \"4c3867cf-bc74-4587-aa51-d30bf357f0f2\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-dgpsx"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268362 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d24a8b16-9687-49f3-bed0-888340007876-serving-cert\") pod \"apiserver-7bbb656c7d-khtrj\" (UID: \"d24a8b16-9687-49f3-bed0-888340007876\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-khtrj"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268379 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/1825f935-ad5f-4d85-aeef-e368841c5547-etcd-ca\") pod \"etcd-operator-b45778765-x5npd\" (UID: \"1825f935-ad5f-4d85-aeef-e368841c5547\") " pod="openshift-etcd-operator/etcd-operator-b45778765-x5npd"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268395 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/12969fb5-7977-4b64-baed-ad39d9524369-machine-approver-tls\") pod \"machine-approver-56656f9798-cgnhf\" (UID: \"12969fb5-7977-4b64-baed-ad39d9524369\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cgnhf"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268411 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/63e15a9d-3476-43c1-93e0-6453f0fc9adb-service-ca\") pod \"console-f9d7485db-gqpfp\" (UID: \"63e15a9d-3476-43c1-93e0-6453f0fc9adb\") " pod="openshift-console/console-f9d7485db-gqpfp"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268431 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/b3240d3a-10ab-4df0-9d7b-69e3cfb1aaf8-auth-proxy-config\") pod \"machine-config-operator-74547568cd-wc9dx\" (UID: \"b3240d3a-10ab-4df0-9d7b-69e3cfb1aaf8\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-wc9dx"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268446 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8bdbecf3-1fbc-4184-b33b-94031b7e3845-config\") pod \"apiserver-76f77b778f-llbq4\" (UID: \"8bdbecf3-1fbc-4184-b33b-94031b7e3845\") " pod="openshift-apiserver/apiserver-76f77b778f-llbq4"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268462 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dbf1986a-8c34-4d34-bbf0-eaaf9d89eb8c-config\") pod \"openshift-apiserver-operator-796bbdcf4f-74lmm\" (UID: \"dbf1986a-8c34-4d34-bbf0-eaaf9d89eb8c\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-74lmm"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268510 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " pod="openshift-authentication/oauth-openshift-558db77b4-gj98r"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268528 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " pod="openshift-authentication/oauth-openshift-558db77b4-gj98r"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268542 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/8bdbecf3-1fbc-4184-b33b-94031b7e3845-etcd-client\") pod \"apiserver-76f77b778f-llbq4\" (UID: \"8bdbecf3-1fbc-4184-b33b-94031b7e3845\") " pod="openshift-apiserver/apiserver-76f77b778f-llbq4"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268559 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/8bdbecf3-1fbc-4184-b33b-94031b7e3845-etcd-serving-ca\") pod \"apiserver-76f77b778f-llbq4\" (UID: \"8bdbecf3-1fbc-4184-b33b-94031b7e3845\") " pod="openshift-apiserver/apiserver-76f77b778f-llbq4"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268574 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/2c4864a0-5981-4eef-a0db-c33a535e02de-audit-dir\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " pod="openshift-authentication/oauth-openshift-558db77b4-gj98r"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268591 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g96n4\" (UniqueName: \"kubernetes.io/projected/d24a8b16-9687-49f3-bed0-888340007876-kube-api-access-g96n4\") pod \"apiserver-7bbb656c7d-khtrj\" (UID: \"d24a8b16-9687-49f3-bed0-888340007876\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-khtrj"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268609 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4a7dbd55-b911-42ff-a5e7-ceb61a071343-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-skfkj\" (UID: \"4a7dbd55-b911-42ff-a5e7-ceb61a071343\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-skfkj"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268625 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wpkpl\" (UniqueName: \"kubernetes.io/projected/4fdcd737-7ae0-40f3-9b0c-7e1cacafebdc-kube-api-access-wpkpl\") pod \"ingress-operator-5b745b69d9-rhsbf\" (UID: \"4fdcd737-7ae0-40f3-9b0c-7e1cacafebdc\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rhsbf"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268643 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " pod="openshift-authentication/oauth-openshift-558db77b4-gj98r"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268660 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " pod="openshift-authentication/oauth-openshift-558db77b4-gj98r"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268692 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/d24a8b16-9687-49f3-bed0-888340007876-etcd-client\") pod \"apiserver-7bbb656c7d-khtrj\" (UID: \"d24a8b16-9687-49f3-bed0-888340007876\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-khtrj"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268707 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/245c05e3-0c9d-4b20-8bef-b16bb0b492c1-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-vb2np\" (UID: \"245c05e3-0c9d-4b20-8bef-b16bb0b492c1\") " pod="openshift-marketplace/marketplace-operator-79b997595-vb2np"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268723 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f9fsn\" (UniqueName: \"kubernetes.io/projected/4c3867cf-bc74-4587-aa51-d30bf357f0f2-kube-api-access-f9fsn\") pod \"openshift-controller-manager-operator-756b6f6bc6-dgpsx\" (UID: \"4c3867cf-bc74-4587-aa51-d30bf357f0f2\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-dgpsx"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268740 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/5f757f81-9a44-488a-8a60-4814d2bc418d-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-sk5qf\" (UID: \"5f757f81-9a44-488a-8a60-4814d2bc418d\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-sk5qf"
Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268761 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fjdmh\" (UniqueName: \"kubernetes.io/projected/5f757f81-9a44-488a-8a60-4814d2bc418d-kube-api-access-fjdmh\") pod \"multus-admission-controller-857f4d67dd-sk5qf\" (UID:
\"5f757f81-9a44-488a-8a60-4814d2bc418d\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-sk5qf" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268798 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fbl9z\" (UniqueName: \"kubernetes.io/projected/8796a7df-de04-490e-b5a9-c9fad5483d61-kube-api-access-fbl9z\") pod \"control-plane-machine-set-operator-78cbb6b69f-kq95z\" (UID: \"8796a7df-de04-490e-b5a9-c9fad5483d61\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-kq95z" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268821 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b2d54262-2b73-4ffa-80e6-beb88d8fa5b7-metrics-tls\") pod \"dns-default-jhjlm\" (UID: \"b2d54262-2b73-4ffa-80e6-beb88d8fa5b7\") " pod="openshift-dns/dns-default-jhjlm" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268849 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/1ba473a8-8e29-49fe-a1b8-8cc7e422037f-available-featuregates\") pod \"openshift-config-operator-7777fb866f-jtr4k\" (UID: \"1ba473a8-8e29-49fe-a1b8-8cc7e422037f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-jtr4k" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268873 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/c800ab14-5d0a-4078-91f5-b47d05d15ccc-images\") pod \"machine-api-operator-5694c8668f-6r979\" (UID: \"c800ab14-5d0a-4078-91f5-b47d05d15ccc\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6r979" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268881 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2edc4443-850c-48d0-a605-3debd5d38299-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-8ctmc\" (UID: \"2edc4443-850c-48d0-a605-3debd5d38299\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8ctmc" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268924 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mdcg5\" (UniqueName: \"kubernetes.io/projected/3bd845c3-ce08-43a5-ad61-3fa92c8604fa-kube-api-access-mdcg5\") pod \"service-ca-operator-777779d784-rng6t\" (UID: \"3bd845c3-ce08-43a5-ad61-3fa92c8604fa\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-rng6t" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268955 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r4mtq\" (UniqueName: \"kubernetes.io/projected/2edc4443-850c-48d0-a605-3debd5d38299-kube-api-access-r4mtq\") pod \"cluster-image-registry-operator-dc59b4c8b-8ctmc\" (UID: \"2edc4443-850c-48d0-a605-3debd5d38299\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8ctmc" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.268996 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/8796a7df-de04-490e-b5a9-c9fad5483d61-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-kq95z\" (UID: 
\"8796a7df-de04-490e-b5a9-c9fad5483d61\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-kq95z" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.269025 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-957m4\" (UniqueName: \"kubernetes.io/projected/48976c66-6d66-4d5b-bb2c-84f7b0cb292f-kube-api-access-957m4\") pod \"migrator-59844c95c7-46xcf\" (UID: \"48976c66-6d66-4d5b-bb2c-84f7b0cb292f\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-46xcf" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.269047 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f030cb7-e6bb-47bb-b4a4-821f4a21ad42-serving-cert\") pod \"authentication-operator-69f744f599-2z9q7\" (UID: \"0f030cb7-e6bb-47bb-b4a4-821f4a21ad42\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-2z9q7" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.269068 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1825f935-ad5f-4d85-aeef-e368841c5547-etcd-client\") pod \"etcd-operator-b45778765-x5npd\" (UID: \"1825f935-ad5f-4d85-aeef-e368841c5547\") " pod="openshift-etcd-operator/etcd-operator-b45778765-x5npd" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.269113 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/2860560e-929a-4dbe-84c0-23326bf7cdf4-stats-auth\") pod \"router-default-5444994796-77xhn\" (UID: \"2860560e-929a-4dbe-84c0-23326bf7cdf4\") " pod="openshift-ingress/router-default-5444994796-77xhn" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.269135 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/2edc4443-850c-48d0-a605-3debd5d38299-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-8ctmc\" (UID: \"2edc4443-850c-48d0-a605-3debd5d38299\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8ctmc" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.269160 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0f030cb7-e6bb-47bb-b4a4-821f4a21ad42-service-ca-bundle\") pod \"authentication-operator-69f744f599-2z9q7\" (UID: \"0f030cb7-e6bb-47bb-b4a4-821f4a21ad42\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-2z9q7" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.269181 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2860560e-929a-4dbe-84c0-23326bf7cdf4-metrics-certs\") pod \"router-default-5444994796-77xhn\" (UID: \"2860560e-929a-4dbe-84c0-23326bf7cdf4\") " pod="openshift-ingress/router-default-5444994796-77xhn" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.269204 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/5a8e383c-706b-43f4-ac19-2bc2e4e83115-proxy-tls\") pod \"machine-config-controller-84d6567774-pl49j\" (UID: \"5a8e383c-706b-43f4-ac19-2bc2e4e83115\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-pl49j" Jan 22 
05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.269236 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-np4r6\" (UniqueName: \"kubernetes.io/projected/b2d54262-2b73-4ffa-80e6-beb88d8fa5b7-kube-api-access-np4r6\") pod \"dns-default-jhjlm\" (UID: \"b2d54262-2b73-4ffa-80e6-beb88d8fa5b7\") " pod="openshift-dns/dns-default-jhjlm" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.269260 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wnrrs\" (UniqueName: \"kubernetes.io/projected/193f8e22-42fc-444d-92b6-7b44fcdc8200-kube-api-access-wnrrs\") pod \"catalog-operator-68c6474976-mppr6\" (UID: \"193f8e22-42fc-444d-92b6-7b44fcdc8200\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mppr6" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.269287 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k9c2p\" (UniqueName: \"kubernetes.io/projected/2c4864a0-5981-4eef-a0db-c33a535e02de-kube-api-access-k9c2p\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " pod="openshift-authentication/oauth-openshift-558db77b4-gj98r" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.269311 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4a7dbd55-b911-42ff-a5e7-ceb61a071343-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-skfkj\" (UID: \"4a7dbd55-b911-42ff-a5e7-ceb61a071343\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-skfkj" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.269334 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ae503948-5876-4b1e-ba9f-23ebb0e05b94-secret-volume\") pod \"collect-profiles-29484345-n7rrt\" (UID: \"ae503948-5876-4b1e-ba9f-23ebb0e05b94\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484345-n7rrt" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.269359 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/193f8e22-42fc-444d-92b6-7b44fcdc8200-profile-collector-cert\") pod \"catalog-operator-68c6474976-mppr6\" (UID: \"193f8e22-42fc-444d-92b6-7b44fcdc8200\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mppr6" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.269385 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1825f935-ad5f-4d85-aeef-e368841c5547-config\") pod \"etcd-operator-b45778765-x5npd\" (UID: \"1825f935-ad5f-4d85-aeef-e368841c5547\") " pod="openshift-etcd-operator/etcd-operator-b45778765-x5npd" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.269411 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " pod="openshift-authentication/oauth-openshift-558db77b4-gj98r" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.269437 4933 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-chxqs\" (UniqueName: \"kubernetes.io/projected/245c05e3-0c9d-4b20-8bef-b16bb0b492c1-kube-api-access-chxqs\") pod \"marketplace-operator-79b997595-vb2np\" (UID: \"245c05e3-0c9d-4b20-8bef-b16bb0b492c1\") " pod="openshift-marketplace/marketplace-operator-79b997595-vb2np" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.269464 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xrrmr\" (UniqueName: \"kubernetes.io/projected/0f030cb7-e6bb-47bb-b4a4-821f4a21ad42-kube-api-access-xrrmr\") pod \"authentication-operator-69f744f599-2z9q7\" (UID: \"0f030cb7-e6bb-47bb-b4a4-821f4a21ad42\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-2z9q7" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.269502 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4x6f4\" (UniqueName: \"kubernetes.io/projected/12969fb5-7977-4b64-baed-ad39d9524369-kube-api-access-4x6f4\") pod \"machine-approver-56656f9798-cgnhf\" (UID: \"12969fb5-7977-4b64-baed-ad39d9524369\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cgnhf" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.269536 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2860560e-929a-4dbe-84c0-23326bf7cdf4-service-ca-bundle\") pod \"router-default-5444994796-77xhn\" (UID: \"2860560e-929a-4dbe-84c0-23326bf7cdf4\") " pod="openshift-ingress/router-default-5444994796-77xhn" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.269575 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3bd845c3-ce08-43a5-ad61-3fa92c8604fa-config\") pod \"service-ca-operator-777779d784-rng6t\" (UID: \"3bd845c3-ce08-43a5-ad61-3fa92c8604fa\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-rng6t" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.269605 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4t957\" (UniqueName: \"kubernetes.io/projected/8bdbecf3-1fbc-4184-b33b-94031b7e3845-kube-api-access-4t957\") pod \"apiserver-76f77b778f-llbq4\" (UID: \"8bdbecf3-1fbc-4184-b33b-94031b7e3845\") " pod="openshift-apiserver/apiserver-76f77b778f-llbq4" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.269628 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tj882\" (UniqueName: \"kubernetes.io/projected/1ba473a8-8e29-49fe-a1b8-8cc7e422037f-kube-api-access-tj882\") pod \"openshift-config-operator-7777fb866f-jtr4k\" (UID: \"1ba473a8-8e29-49fe-a1b8-8cc7e422037f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-jtr4k" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.269669 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zc9dp\" (UniqueName: \"kubernetes.io/projected/6b02d39a-2e64-4035-abf2-99dcc7f32194-kube-api-access-zc9dp\") pod \"cluster-samples-operator-665b6dd947-9j8zq\" (UID: \"6b02d39a-2e64-4035-abf2-99dcc7f32194\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9j8zq" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.269682 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/c800ab14-5d0a-4078-91f5-b47d05d15ccc-config\") pod \"machine-api-operator-5694c8668f-6r979\" (UID: \"c800ab14-5d0a-4078-91f5-b47d05d15ccc\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6r979" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.269694 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/44595319-f5f2-4db3-9671-9c8680c2dfc7-client-ca\") pod \"route-controller-manager-6576b87f9c-tzlk9\" (UID: \"44595319-f5f2-4db3-9671-9c8680c2dfc7\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tzlk9" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.269719 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/63e15a9d-3476-43c1-93e0-6453f0fc9adb-console-oauth-config\") pod \"console-f9d7485db-gqpfp\" (UID: \"63e15a9d-3476-43c1-93e0-6453f0fc9adb\") " pod="openshift-console/console-f9d7485db-gqpfp" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.269743 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/12969fb5-7977-4b64-baed-ad39d9524369-config\") pod \"machine-approver-56656f9798-cgnhf\" (UID: \"12969fb5-7977-4b64-baed-ad39d9524369\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cgnhf" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.269765 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vtmlz\" (UniqueName: \"kubernetes.io/projected/5a8e383c-706b-43f4-ac19-2bc2e4e83115-kube-api-access-vtmlz\") pod \"machine-config-controller-84d6567774-pl49j\" (UID: \"5a8e383c-706b-43f4-ac19-2bc2e4e83115\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-pl49j" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.269787 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/efc95799-4d8b-4adf-9d64-28717c1bdd76-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-q2rvb\" (UID: \"efc95799-4d8b-4adf-9d64-28717c1bdd76\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-q2rvb" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.269813 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9cstl\" (UniqueName: \"kubernetes.io/projected/dbf1986a-8c34-4d34-bbf0-eaaf9d89eb8c-kube-api-access-9cstl\") pod \"openshift-apiserver-operator-796bbdcf4f-74lmm\" (UID: \"dbf1986a-8c34-4d34-bbf0-eaaf9d89eb8c\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-74lmm" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.269836 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3c7d8ca9-574e-4b56-8ed6-b9155509a740-config\") pod \"kube-controller-manager-operator-78b949d7b-zshgr\" (UID: \"3c7d8ca9-574e-4b56-8ed6-b9155509a740\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zshgr" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.269859 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/99c9edc1-b1d6-459e-957f-aab91850d2e5-serving-cert\") pod \"console-operator-58897d9998-fntp2\" (UID: \"99c9edc1-b1d6-459e-957f-aab91850d2e5\") " pod="openshift-console-operator/console-operator-58897d9998-fntp2" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.269881 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/28216100-24cb-4a52-af53-0a6fe5b54e4e-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-lpvhh\" (UID: \"28216100-24cb-4a52-af53-0a6fe5b54e4e\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-lpvhh" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.269902 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5726f2e0-cd2a-4dd2-923d-8b27042e3223-node-bootstrap-token\") pod \"machine-config-server-gzpgq\" (UID: \"5726f2e0-cd2a-4dd2-923d-8b27042e3223\") " pod="openshift-machine-config-operator/machine-config-server-gzpgq" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.269921 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5726f2e0-cd2a-4dd2-923d-8b27042e3223-certs\") pod \"machine-config-server-gzpgq\" (UID: \"5726f2e0-cd2a-4dd2-923d-8b27042e3223\") " pod="openshift-machine-config-operator/machine-config-server-gzpgq" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.269943 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1ba473a8-8e29-49fe-a1b8-8cc7e422037f-serving-cert\") pod \"openshift-config-operator-7777fb866f-jtr4k\" (UID: \"1ba473a8-8e29-49fe-a1b8-8cc7e422037f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-jtr4k" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.269966 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/44595319-f5f2-4db3-9671-9c8680c2dfc7-config\") pod \"route-controller-manager-6576b87f9c-tzlk9\" (UID: \"44595319-f5f2-4db3-9671-9c8680c2dfc7\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tzlk9" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.269988 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/2c4864a0-5981-4eef-a0db-c33a535e02de-audit-policies\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " pod="openshift-authentication/oauth-openshift-558db77b4-gj98r" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.270009 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/99c9edc1-b1d6-459e-957f-aab91850d2e5-config\") pod \"console-operator-58897d9998-fntp2\" (UID: \"99c9edc1-b1d6-459e-957f-aab91850d2e5\") " pod="openshift-console-operator/console-operator-58897d9998-fntp2" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.270033 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4a7dbd55-b911-42ff-a5e7-ceb61a071343-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-skfkj\" 
(UID: \"4a7dbd55-b911-42ff-a5e7-ceb61a071343\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-skfkj" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.270055 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gck98\" (UniqueName: \"kubernetes.io/projected/5726f2e0-cd2a-4dd2-923d-8b27042e3223-kube-api-access-gck98\") pod \"machine-config-server-gzpgq\" (UID: \"5726f2e0-cd2a-4dd2-923d-8b27042e3223\") " pod="openshift-machine-config-operator/machine-config-server-gzpgq" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.270112 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/4fdcd737-7ae0-40f3-9b0c-7e1cacafebdc-metrics-tls\") pod \"ingress-operator-5b745b69d9-rhsbf\" (UID: \"4fdcd737-7ae0-40f3-9b0c-7e1cacafebdc\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rhsbf" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.270131 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/193f8e22-42fc-444d-92b6-7b44fcdc8200-srv-cert\") pod \"catalog-operator-68c6474976-mppr6\" (UID: \"193f8e22-42fc-444d-92b6-7b44fcdc8200\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mppr6" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.270148 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-txqcb\" (UniqueName: \"kubernetes.io/projected/2860560e-929a-4dbe-84c0-23326bf7cdf4-kube-api-access-txqcb\") pod \"router-default-5444994796-77xhn\" (UID: \"2860560e-929a-4dbe-84c0-23326bf7cdf4\") " pod="openshift-ingress/router-default-5444994796-77xhn" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.270175 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2edc4443-850c-48d0-a605-3debd5d38299-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-8ctmc\" (UID: \"2edc4443-850c-48d0-a605-3debd5d38299\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8ctmc" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.270191 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/b3240d3a-10ab-4df0-9d7b-69e3cfb1aaf8-images\") pod \"machine-config-operator-74547568cd-wc9dx\" (UID: \"b3240d3a-10ab-4df0-9d7b-69e3cfb1aaf8\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-wc9dx" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.270207 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/245c05e3-0c9d-4b20-8bef-b16bb0b492c1-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-vb2np\" (UID: \"245c05e3-0c9d-4b20-8bef-b16bb0b492c1\") " pod="openshift-marketplace/marketplace-operator-79b997595-vb2np" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.270225 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/1825f935-ad5f-4d85-aeef-e368841c5547-etcd-service-ca\") pod \"etcd-operator-b45778765-x5npd\" (UID: \"1825f935-ad5f-4d85-aeef-e368841c5547\") " 
pod="openshift-etcd-operator/etcd-operator-b45778765-x5npd" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.270216 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/b3240d3a-10ab-4df0-9d7b-69e3cfb1aaf8-auth-proxy-config\") pod \"machine-config-operator-74547568cd-wc9dx\" (UID: \"b3240d3a-10ab-4df0-9d7b-69e3cfb1aaf8\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-wc9dx" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.270247 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t4n8w\" (UniqueName: \"kubernetes.io/projected/99c9edc1-b1d6-459e-957f-aab91850d2e5-kube-api-access-t4n8w\") pod \"console-operator-58897d9998-fntp2\" (UID: \"99c9edc1-b1d6-459e-957f-aab91850d2e5\") " pod="openshift-console-operator/console-operator-58897d9998-fntp2" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.270266 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9hhh8\" (UniqueName: \"kubernetes.io/projected/4bbb457e-b621-4447-ab9b-c3337ff62905-kube-api-access-9hhh8\") pod \"dns-operator-744455d44c-tnl5g\" (UID: \"4bbb457e-b621-4447-ab9b-c3337ff62905\") " pod="openshift-dns-operator/dns-operator-744455d44c-tnl5g" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.270288 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/b3240d3a-10ab-4df0-9d7b-69e3cfb1aaf8-proxy-tls\") pod \"machine-config-operator-74547568cd-wc9dx\" (UID: \"b3240d3a-10ab-4df0-9d7b-69e3cfb1aaf8\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-wc9dx" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.270309 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3bd845c3-ce08-43a5-ad61-3fa92c8604fa-serving-cert\") pod \"service-ca-operator-777779d784-rng6t\" (UID: \"3bd845c3-ce08-43a5-ad61-3fa92c8604fa\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-rng6t" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.270348 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mvb9n\" (UniqueName: \"kubernetes.io/projected/28216100-24cb-4a52-af53-0a6fe5b54e4e-kube-api-access-mvb9n\") pod \"kube-storage-version-migrator-operator-b67b599dd-lpvhh\" (UID: \"28216100-24cb-4a52-af53-0a6fe5b54e4e\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-lpvhh" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.270368 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ae503948-5876-4b1e-ba9f-23ebb0e05b94-config-volume\") pod \"collect-profiles-29484345-n7rrt\" (UID: \"ae503948-5876-4b1e-ba9f-23ebb0e05b94\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484345-n7rrt" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.270388 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r5wbm\" (UniqueName: \"kubernetes.io/projected/efc95799-4d8b-4adf-9d64-28717c1bdd76-kube-api-access-r5wbm\") pod \"package-server-manager-789f6589d5-q2rvb\" (UID: 
\"efc95799-4d8b-4adf-9d64-28717c1bdd76\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-q2rvb" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.270408 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5xtgd\" (UniqueName: \"kubernetes.io/projected/3be04461-5ea1-4f2c-b87b-bf955dcd9bf1-kube-api-access-5xtgd\") pod \"ingress-canary-tm7xw\" (UID: \"3be04461-5ea1-4f2c-b87b-bf955dcd9bf1\") " pod="openshift-ingress-canary/ingress-canary-tm7xw" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.270427 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " pod="openshift-authentication/oauth-openshift-558db77b4-gj98r" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.270448 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/63e15a9d-3476-43c1-93e0-6453f0fc9adb-console-serving-cert\") pod \"console-f9d7485db-gqpfp\" (UID: \"63e15a9d-3476-43c1-93e0-6453f0fc9adb\") " pod="openshift-console/console-f9d7485db-gqpfp" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.270471 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0f030cb7-e6bb-47bb-b4a4-821f4a21ad42-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-2z9q7\" (UID: \"0f030cb7-e6bb-47bb-b4a4-821f4a21ad42\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-2z9q7" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.270487 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/d24a8b16-9687-49f3-bed0-888340007876-audit-policies\") pod \"apiserver-7bbb656c7d-khtrj\" (UID: \"d24a8b16-9687-49f3-bed0-888340007876\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-khtrj" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.270503 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/8bdbecf3-1fbc-4184-b33b-94031b7e3845-audit-dir\") pod \"apiserver-76f77b778f-llbq4\" (UID: \"8bdbecf3-1fbc-4184-b33b-94031b7e3845\") " pod="openshift-apiserver/apiserver-76f77b778f-llbq4" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.270519 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/4fdcd737-7ae0-40f3-9b0c-7e1cacafebdc-bound-sa-token\") pod \"ingress-operator-5b745b69d9-rhsbf\" (UID: \"4fdcd737-7ae0-40f3-9b0c-7e1cacafebdc\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rhsbf" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.270534 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8bdbecf3-1fbc-4184-b33b-94031b7e3845-trusted-ca-bundle\") pod \"apiserver-76f77b778f-llbq4\" (UID: \"8bdbecf3-1fbc-4184-b33b-94031b7e3845\") " pod="openshift-apiserver/apiserver-76f77b778f-llbq4" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.270550 
4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/4bbb457e-b621-4447-ab9b-c3337ff62905-metrics-tls\") pod \"dns-operator-744455d44c-tnl5g\" (UID: \"4bbb457e-b621-4447-ab9b-c3337ff62905\") " pod="openshift-dns-operator/dns-operator-744455d44c-tnl5g" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.270570 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f030cb7-e6bb-47bb-b4a4-821f4a21ad42-config\") pod \"authentication-operator-69f744f599-2z9q7\" (UID: \"0f030cb7-e6bb-47bb-b4a4-821f4a21ad42\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-2z9q7" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.270587 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q99gq\" (UniqueName: \"kubernetes.io/projected/2a0dd9a3-0af3-4ee5-bbad-7ee1126de8b7-kube-api-access-q99gq\") pod \"olm-operator-6b444d44fb-dr2tg\" (UID: \"2a0dd9a3-0af3-4ee5-bbad-7ee1126de8b7\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dr2tg" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.270606 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/63e15a9d-3476-43c1-93e0-6453f0fc9adb-trusted-ca-bundle\") pod \"console-f9d7485db-gqpfp\" (UID: \"63e15a9d-3476-43c1-93e0-6453f0fc9adb\") " pod="openshift-console/console-f9d7485db-gqpfp" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.270625 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3c7d8ca9-574e-4b56-8ed6-b9155509a740-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-zshgr\" (UID: \"3c7d8ca9-574e-4b56-8ed6-b9155509a740\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zshgr" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.270641 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/8bdbecf3-1fbc-4184-b33b-94031b7e3845-node-pullsecrets\") pod \"apiserver-76f77b778f-llbq4\" (UID: \"8bdbecf3-1fbc-4184-b33b-94031b7e3845\") " pod="openshift-apiserver/apiserver-76f77b778f-llbq4" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.270657 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/2a0dd9a3-0af3-4ee5-bbad-7ee1126de8b7-srv-cert\") pod \"olm-operator-6b444d44fb-dr2tg\" (UID: \"2a0dd9a3-0af3-4ee5-bbad-7ee1126de8b7\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dr2tg" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.270671 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d24a8b16-9687-49f3-bed0-888340007876-audit-dir\") pod \"apiserver-7bbb656c7d-khtrj\" (UID: \"d24a8b16-9687-49f3-bed0-888340007876\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-khtrj" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.270689 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: 
\"kubernetes.io/secret/8bdbecf3-1fbc-4184-b33b-94031b7e3845-encryption-config\") pod \"apiserver-76f77b778f-llbq4\" (UID: \"8bdbecf3-1fbc-4184-b33b-94031b7e3845\") " pod="openshift-apiserver/apiserver-76f77b778f-llbq4" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.270704 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1825f935-ad5f-4d85-aeef-e368841c5547-serving-cert\") pod \"etcd-operator-b45778765-x5npd\" (UID: \"1825f935-ad5f-4d85-aeef-e368841c5547\") " pod="openshift-etcd-operator/etcd-operator-b45778765-x5npd" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.270711 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8bdbecf3-1fbc-4184-b33b-94031b7e3845-config\") pod \"apiserver-76f77b778f-llbq4\" (UID: \"8bdbecf3-1fbc-4184-b33b-94031b7e3845\") " pod="openshift-apiserver/apiserver-76f77b778f-llbq4" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.270721 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/d24a8b16-9687-49f3-bed0-888340007876-encryption-config\") pod \"apiserver-7bbb656c7d-khtrj\" (UID: \"d24a8b16-9687-49f3-bed0-888340007876\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-khtrj" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.270755 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4fdcd737-7ae0-40f3-9b0c-7e1cacafebdc-trusted-ca\") pod \"ingress-operator-5b745b69d9-rhsbf\" (UID: \"4fdcd737-7ae0-40f3-9b0c-7e1cacafebdc\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rhsbf" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.271242 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/1ba473a8-8e29-49fe-a1b8-8cc7e422037f-available-featuregates\") pod \"openshift-config-operator-7777fb866f-jtr4k\" (UID: \"1ba473a8-8e29-49fe-a1b8-8cc7e422037f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-jtr4k" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.271715 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/44595319-f5f2-4db3-9671-9c8680c2dfc7-client-ca\") pod \"route-controller-manager-6576b87f9c-tzlk9\" (UID: \"44595319-f5f2-4db3-9671-9c8680c2dfc7\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tzlk9" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.272016 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2edc4443-850c-48d0-a605-3debd5d38299-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-8ctmc\" (UID: \"2edc4443-850c-48d0-a605-3debd5d38299\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8ctmc" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.272156 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pqst8\" (UniqueName: \"kubernetes.io/projected/c800ab14-5d0a-4078-91f5-b47d05d15ccc-kube-api-access-pqst8\") pod \"machine-api-operator-5694c8668f-6r979\" (UID: \"c800ab14-5d0a-4078-91f5-b47d05d15ccc\") " 
pod="openshift-machine-api/machine-api-operator-5694c8668f-6r979" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.272176 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/12969fb5-7977-4b64-baed-ad39d9524369-auth-proxy-config\") pod \"machine-approver-56656f9798-cgnhf\" (UID: \"12969fb5-7977-4b64-baed-ad39d9524369\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cgnhf" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.272210 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/44595319-f5f2-4db3-9671-9c8680c2dfc7-serving-cert\") pod \"route-controller-manager-6576b87f9c-tzlk9\" (UID: \"44595319-f5f2-4db3-9671-9c8680c2dfc7\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tzlk9" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.272229 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " pod="openshift-authentication/oauth-openshift-558db77b4-gj98r" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.272247 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-khqg7\" (UniqueName: \"kubernetes.io/projected/b3240d3a-10ab-4df0-9d7b-69e3cfb1aaf8-kube-api-access-khqg7\") pod \"machine-config-operator-74547568cd-wc9dx\" (UID: \"b3240d3a-10ab-4df0-9d7b-69e3cfb1aaf8\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-wc9dx" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.272268 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3c7d8ca9-574e-4b56-8ed6-b9155509a740-config\") pod \"kube-controller-manager-operator-78b949d7b-zshgr\" (UID: \"3c7d8ca9-574e-4b56-8ed6-b9155509a740\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zshgr" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.272266 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3be04461-5ea1-4f2c-b87b-bf955dcd9bf1-cert\") pod \"ingress-canary-tm7xw\" (UID: \"3be04461-5ea1-4f2c-b87b-bf955dcd9bf1\") " pod="openshift-ingress-canary/ingress-canary-tm7xw" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.272304 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/28216100-24cb-4a52-af53-0a6fe5b54e4e-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-lpvhh\" (UID: \"28216100-24cb-4a52-af53-0a6fe5b54e4e\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-lpvhh" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.272334 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-gj98r" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.272357 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/d24a8b16-9687-49f3-bed0-888340007876-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-khtrj\" (UID: \"d24a8b16-9687-49f3-bed0-888340007876\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-khtrj" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.272374 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/2a0dd9a3-0af3-4ee5-bbad-7ee1126de8b7-profile-collector-cert\") pod \"olm-operator-6b444d44fb-dr2tg\" (UID: \"2a0dd9a3-0af3-4ee5-bbad-7ee1126de8b7\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dr2tg" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.272391 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/8bdbecf3-1fbc-4184-b33b-94031b7e3845-image-import-ca\") pod \"apiserver-76f77b778f-llbq4\" (UID: \"8bdbecf3-1fbc-4184-b33b-94031b7e3845\") " pod="openshift-apiserver/apiserver-76f77b778f-llbq4" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.272410 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w5z5f\" (UniqueName: \"kubernetes.io/projected/1825f935-ad5f-4d85-aeef-e368841c5547-kube-api-access-w5z5f\") pod \"etcd-operator-b45778765-x5npd\" (UID: \"1825f935-ad5f-4d85-aeef-e368841c5547\") " pod="openshift-etcd-operator/etcd-operator-b45778765-x5npd" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.272428 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/5a8e383c-706b-43f4-ac19-2bc2e4e83115-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-pl49j\" (UID: \"5a8e383c-706b-43f4-ac19-2bc2e4e83115\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-pl49j" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.272446 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3c7d8ca9-574e-4b56-8ed6-b9155509a740-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-zshgr\" (UID: \"3c7d8ca9-574e-4b56-8ed6-b9155509a740\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zshgr" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.272510 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " pod="openshift-authentication/oauth-openshift-558db77b4-gj98r" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.272529 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bjjtz\" (UniqueName: \"kubernetes.io/projected/80358407-ad1b-499f-868f-44e3388b0fac-kube-api-access-bjjtz\") pod \"downloads-7954f5f757-tm764\" (UID: \"80358407-ad1b-499f-868f-44e3388b0fac\") " pod="openshift-console/downloads-7954f5f757-tm764" Jan 22 05:48:18 crc 
kubenswrapper[4933]: I0122 05:48:18.272834 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/2860560e-929a-4dbe-84c0-23326bf7cdf4-default-certificate\") pod \"router-default-5444994796-77xhn\" (UID: \"2860560e-929a-4dbe-84c0-23326bf7cdf4\") " pod="openshift-ingress/router-default-5444994796-77xhn" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.273018 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dbf1986a-8c34-4d34-bbf0-eaaf9d89eb8c-config\") pod \"openshift-apiserver-operator-796bbdcf4f-74lmm\" (UID: \"dbf1986a-8c34-4d34-bbf0-eaaf9d89eb8c\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-74lmm" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.275002 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2860560e-929a-4dbe-84c0-23326bf7cdf4-service-ca-bundle\") pod \"router-default-5444994796-77xhn\" (UID: \"2860560e-929a-4dbe-84c0-23326bf7cdf4\") " pod="openshift-ingress/router-default-5444994796-77xhn" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.275436 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.275826 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dbf1986a-8c34-4d34-bbf0-eaaf9d89eb8c-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-74lmm\" (UID: \"dbf1986a-8c34-4d34-bbf0-eaaf9d89eb8c\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-74lmm" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.275838 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/1825f935-ad5f-4d85-aeef-e368841c5547-etcd-service-ca\") pod \"etcd-operator-b45778765-x5npd\" (UID: \"1825f935-ad5f-4d85-aeef-e368841c5547\") " pod="openshift-etcd-operator/etcd-operator-b45778765-x5npd" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.275935 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1ba473a8-8e29-49fe-a1b8-8cc7e422037f-serving-cert\") pod \"openshift-config-operator-7777fb866f-jtr4k\" (UID: \"1ba473a8-8e29-49fe-a1b8-8cc7e422037f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-jtr4k" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.276346 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/44595319-f5f2-4db3-9671-9c8680c2dfc7-config\") pod \"route-controller-manager-6576b87f9c-tzlk9\" (UID: \"44595319-f5f2-4db3-9671-9c8680c2dfc7\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tzlk9" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.276444 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8bdbecf3-1fbc-4184-b33b-94031b7e3845-trusted-ca-bundle\") pod \"apiserver-76f77b778f-llbq4\" (UID: \"8bdbecf3-1fbc-4184-b33b-94031b7e3845\") " pod="openshift-apiserver/apiserver-76f77b778f-llbq4" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.277061 4933 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/8bdbecf3-1fbc-4184-b33b-94031b7e3845-image-import-ca\") pod \"apiserver-76f77b778f-llbq4\" (UID: \"8bdbecf3-1fbc-4184-b33b-94031b7e3845\") " pod="openshift-apiserver/apiserver-76f77b778f-llbq4" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.277130 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/99c9edc1-b1d6-459e-957f-aab91850d2e5-trusted-ca\") pod \"console-operator-58897d9998-fntp2\" (UID: \"99c9edc1-b1d6-459e-957f-aab91850d2e5\") " pod="openshift-console-operator/console-operator-58897d9998-fntp2" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.277653 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/44595319-f5f2-4db3-9671-9c8680c2dfc7-serving-cert\") pod \"route-controller-manager-6576b87f9c-tzlk9\" (UID: \"44595319-f5f2-4db3-9671-9c8680c2dfc7\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tzlk9" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.277837 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " pod="openshift-authentication/oauth-openshift-558db77b4-gj98r" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.278260 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/2c4864a0-5981-4eef-a0db-c33a535e02de-audit-policies\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " pod="openshift-authentication/oauth-openshift-558db77b4-gj98r" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.278262 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " pod="openshift-authentication/oauth-openshift-558db77b4-gj98r" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.278537 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/2860560e-929a-4dbe-84c0-23326bf7cdf4-stats-auth\") pod \"router-default-5444994796-77xhn\" (UID: \"2860560e-929a-4dbe-84c0-23326bf7cdf4\") " pod="openshift-ingress/router-default-5444994796-77xhn" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.279128 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/99c9edc1-b1d6-459e-957f-aab91850d2e5-config\") pod \"console-operator-58897d9998-fntp2\" (UID: \"99c9edc1-b1d6-459e-957f-aab91850d2e5\") " pod="openshift-console-operator/console-operator-58897d9998-fntp2" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.279261 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-gj98r" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.279294 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/99c9edc1-b1d6-459e-957f-aab91850d2e5-serving-cert\") pod \"console-operator-58897d9998-fntp2\" (UID: \"99c9edc1-b1d6-459e-957f-aab91850d2e5\") " pod="openshift-console-operator/console-operator-58897d9998-fntp2" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.279330 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1825f935-ad5f-4d85-aeef-e368841c5547-etcd-client\") pod \"etcd-operator-b45778765-x5npd\" (UID: \"1825f935-ad5f-4d85-aeef-e368841c5547\") " pod="openshift-etcd-operator/etcd-operator-b45778765-x5npd" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.279476 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " pod="openshift-authentication/oauth-openshift-558db77b4-gj98r" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.280166 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c3867cf-bc74-4587-aa51-d30bf357f0f2-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-dgpsx\" (UID: \"4c3867cf-bc74-4587-aa51-d30bf357f0f2\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-dgpsx" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.280179 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4a7dbd55-b911-42ff-a5e7-ceb61a071343-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-skfkj\" (UID: \"4a7dbd55-b911-42ff-a5e7-ceb61a071343\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-skfkj" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.280281 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " pod="openshift-authentication/oauth-openshift-558db77b4-gj98r" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.280849 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/8bdbecf3-1fbc-4184-b33b-94031b7e3845-audit-dir\") pod \"apiserver-76f77b778f-llbq4\" (UID: \"8bdbecf3-1fbc-4184-b33b-94031b7e3845\") " pod="openshift-apiserver/apiserver-76f77b778f-llbq4" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.280875 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/2c4864a0-5981-4eef-a0db-c33a535e02de-audit-dir\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " pod="openshift-authentication/oauth-openshift-558db77b4-gj98r" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.281060 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" 
(UniqueName: \"kubernetes.io/secret/4c3867cf-bc74-4587-aa51-d30bf357f0f2-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-dgpsx\" (UID: \"4c3867cf-bc74-4587-aa51-d30bf357f0f2\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-dgpsx" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.281144 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " pod="openshift-authentication/oauth-openshift-558db77b4-gj98r" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.281189 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/1825f935-ad5f-4d85-aeef-e368841c5547-etcd-ca\") pod \"etcd-operator-b45778765-x5npd\" (UID: \"1825f935-ad5f-4d85-aeef-e368841c5547\") " pod="openshift-etcd-operator/etcd-operator-b45778765-x5npd" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.281773 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4a7dbd55-b911-42ff-a5e7-ceb61a071343-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-skfkj\" (UID: \"4a7dbd55-b911-42ff-a5e7-ceb61a071343\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-skfkj" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.281818 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/2edc4443-850c-48d0-a605-3debd5d38299-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-8ctmc\" (UID: \"2edc4443-850c-48d0-a605-3debd5d38299\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8ctmc" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.281932 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " pod="openshift-authentication/oauth-openshift-558db77b4-gj98r" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.282299 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/5f757f81-9a44-488a-8a60-4814d2bc418d-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-sk5qf\" (UID: \"5f757f81-9a44-488a-8a60-4814d2bc418d\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-sk5qf" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.282506 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1825f935-ad5f-4d85-aeef-e368841c5547-config\") pod \"etcd-operator-b45778765-x5npd\" (UID: \"1825f935-ad5f-4d85-aeef-e368841c5547\") " pod="openshift-etcd-operator/etcd-operator-b45778765-x5npd" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.282559 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/8bdbecf3-1fbc-4184-b33b-94031b7e3845-node-pullsecrets\") pod \"apiserver-76f77b778f-llbq4\" (UID: 
\"8bdbecf3-1fbc-4184-b33b-94031b7e3845\") " pod="openshift-apiserver/apiserver-76f77b778f-llbq4" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.282740 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/8bdbecf3-1fbc-4184-b33b-94031b7e3845-etcd-client\") pod \"apiserver-76f77b778f-llbq4\" (UID: \"8bdbecf3-1fbc-4184-b33b-94031b7e3845\") " pod="openshift-apiserver/apiserver-76f77b778f-llbq4" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.282844 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1825f935-ad5f-4d85-aeef-e368841c5547-serving-cert\") pod \"etcd-operator-b45778765-x5npd\" (UID: \"1825f935-ad5f-4d85-aeef-e368841c5547\") " pod="openshift-etcd-operator/etcd-operator-b45778765-x5npd" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.283291 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/8bdbecf3-1fbc-4184-b33b-94031b7e3845-audit\") pod \"apiserver-76f77b778f-llbq4\" (UID: \"8bdbecf3-1fbc-4184-b33b-94031b7e3845\") " pod="openshift-apiserver/apiserver-76f77b778f-llbq4" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.283568 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/8bdbecf3-1fbc-4184-b33b-94031b7e3845-etcd-serving-ca\") pod \"apiserver-76f77b778f-llbq4\" (UID: \"8bdbecf3-1fbc-4184-b33b-94031b7e3845\") " pod="openshift-apiserver/apiserver-76f77b778f-llbq4" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.283889 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/6b02d39a-2e64-4035-abf2-99dcc7f32194-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-9j8zq\" (UID: \"6b02d39a-2e64-4035-abf2-99dcc7f32194\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9j8zq" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.284319 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3c7d8ca9-574e-4b56-8ed6-b9155509a740-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-zshgr\" (UID: \"3c7d8ca9-574e-4b56-8ed6-b9155509a740\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zshgr" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.284683 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " pod="openshift-authentication/oauth-openshift-558db77b4-gj98r" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.284734 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " pod="openshift-authentication/oauth-openshift-558db77b4-gj98r" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.284993 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: 
\"kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " pod="openshift-authentication/oauth-openshift-558db77b4-gj98r" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.285009 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/c800ab14-5d0a-4078-91f5-b47d05d15ccc-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-6r979\" (UID: \"c800ab14-5d0a-4078-91f5-b47d05d15ccc\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6r979" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.285351 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/8bdbecf3-1fbc-4184-b33b-94031b7e3845-encryption-config\") pod \"apiserver-76f77b778f-llbq4\" (UID: \"8bdbecf3-1fbc-4184-b33b-94031b7e3845\") " pod="openshift-apiserver/apiserver-76f77b778f-llbq4" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.285789 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2860560e-929a-4dbe-84c0-23326bf7cdf4-metrics-certs\") pod \"router-default-5444994796-77xhn\" (UID: \"2860560e-929a-4dbe-84c0-23326bf7cdf4\") " pod="openshift-ingress/router-default-5444994796-77xhn" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.286177 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8bdbecf3-1fbc-4184-b33b-94031b7e3845-serving-cert\") pod \"apiserver-76f77b778f-llbq4\" (UID: \"8bdbecf3-1fbc-4184-b33b-94031b7e3845\") " pod="openshift-apiserver/apiserver-76f77b778f-llbq4" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.286580 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " pod="openshift-authentication/oauth-openshift-558db77b4-gj98r" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.286779 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4c3867cf-bc74-4587-aa51-d30bf357f0f2-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-dgpsx\" (UID: \"4c3867cf-bc74-4587-aa51-d30bf357f0f2\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-dgpsx" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.301820 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.310811 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.331740 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.335781 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/0f030cb7-e6bb-47bb-b4a4-821f4a21ad42-serving-cert\") pod \"authentication-operator-69f744f599-2z9q7\" (UID: \"0f030cb7-e6bb-47bb-b4a4-821f4a21ad42\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-2z9q7" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.351027 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.361155 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0f030cb7-e6bb-47bb-b4a4-821f4a21ad42-service-ca-bundle\") pod \"authentication-operator-69f744f599-2z9q7\" (UID: \"0f030cb7-e6bb-47bb-b4a4-821f4a21ad42\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-2z9q7" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.372145 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.376552 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f030cb7-e6bb-47bb-b4a4-821f4a21ad42-config\") pod \"authentication-operator-69f744f599-2z9q7\" (UID: \"0f030cb7-e6bb-47bb-b4a4-821f4a21ad42\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-2z9q7" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.381823 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/d24a8b16-9687-49f3-bed0-888340007876-encryption-config\") pod \"apiserver-7bbb656c7d-khtrj\" (UID: \"d24a8b16-9687-49f3-bed0-888340007876\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-khtrj" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.381875 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4fdcd737-7ae0-40f3-9b0c-7e1cacafebdc-trusted-ca\") pod \"ingress-operator-5b745b69d9-rhsbf\" (UID: \"4fdcd737-7ae0-40f3-9b0c-7e1cacafebdc\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rhsbf" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.381957 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3be04461-5ea1-4f2c-b87b-bf955dcd9bf1-cert\") pod \"ingress-canary-tm7xw\" (UID: \"3be04461-5ea1-4f2c-b87b-bf955dcd9bf1\") " pod="openshift-ingress-canary/ingress-canary-tm7xw" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.381984 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/28216100-24cb-4a52-af53-0a6fe5b54e4e-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-lpvhh\" (UID: \"28216100-24cb-4a52-af53-0a6fe5b54e4e\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-lpvhh" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.382017 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/d24a8b16-9687-49f3-bed0-888340007876-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-khtrj\" (UID: \"d24a8b16-9687-49f3-bed0-888340007876\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-khtrj" Jan 22 05:48:18 
crc kubenswrapper[4933]: I0122 05:48:18.382054 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/2a0dd9a3-0af3-4ee5-bbad-7ee1126de8b7-profile-collector-cert\") pod \"olm-operator-6b444d44fb-dr2tg\" (UID: \"2a0dd9a3-0af3-4ee5-bbad-7ee1126de8b7\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dr2tg" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.382114 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/5a8e383c-706b-43f4-ac19-2bc2e4e83115-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-pl49j\" (UID: \"5a8e383c-706b-43f4-ac19-2bc2e4e83115\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-pl49j" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.382181 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d24a8b16-9687-49f3-bed0-888340007876-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-khtrj\" (UID: \"d24a8b16-9687-49f3-bed0-888340007876\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-khtrj" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.382254 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/63e15a9d-3476-43c1-93e0-6453f0fc9adb-console-config\") pod \"console-f9d7485db-gqpfp\" (UID: \"63e15a9d-3476-43c1-93e0-6453f0fc9adb\") " pod="openshift-console/console-f9d7485db-gqpfp" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.382287 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pkd78\" (UniqueName: \"kubernetes.io/projected/ae503948-5876-4b1e-ba9f-23ebb0e05b94-kube-api-access-pkd78\") pod \"collect-profiles-29484345-n7rrt\" (UID: \"ae503948-5876-4b1e-ba9f-23ebb0e05b94\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484345-n7rrt" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.382377 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b2d54262-2b73-4ffa-80e6-beb88d8fa5b7-config-volume\") pod \"dns-default-jhjlm\" (UID: \"b2d54262-2b73-4ffa-80e6-beb88d8fa5b7\") " pod="openshift-dns/dns-default-jhjlm" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.382445 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/63e15a9d-3476-43c1-93e0-6453f0fc9adb-oauth-serving-cert\") pod \"console-f9d7485db-gqpfp\" (UID: \"63e15a9d-3476-43c1-93e0-6453f0fc9adb\") " pod="openshift-console/console-f9d7485db-gqpfp" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.382511 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6x9zh\" (UniqueName: \"kubernetes.io/projected/63e15a9d-3476-43c1-93e0-6453f0fc9adb-kube-api-access-6x9zh\") pod \"console-f9d7485db-gqpfp\" (UID: \"63e15a9d-3476-43c1-93e0-6453f0fc9adb\") " pod="openshift-console/console-f9d7485db-gqpfp" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.382568 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d24a8b16-9687-49f3-bed0-888340007876-serving-cert\") pod \"apiserver-7bbb656c7d-khtrj\" 
(UID: \"d24a8b16-9687-49f3-bed0-888340007876\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-khtrj" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.382663 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/63e15a9d-3476-43c1-93e0-6453f0fc9adb-service-ca\") pod \"console-f9d7485db-gqpfp\" (UID: \"63e15a9d-3476-43c1-93e0-6453f0fc9adb\") " pod="openshift-console/console-f9d7485db-gqpfp" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.382734 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g96n4\" (UniqueName: \"kubernetes.io/projected/d24a8b16-9687-49f3-bed0-888340007876-kube-api-access-g96n4\") pod \"apiserver-7bbb656c7d-khtrj\" (UID: \"d24a8b16-9687-49f3-bed0-888340007876\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-khtrj" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.382766 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wpkpl\" (UniqueName: \"kubernetes.io/projected/4fdcd737-7ae0-40f3-9b0c-7e1cacafebdc-kube-api-access-wpkpl\") pod \"ingress-operator-5b745b69d9-rhsbf\" (UID: \"4fdcd737-7ae0-40f3-9b0c-7e1cacafebdc\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rhsbf" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.382801 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/d24a8b16-9687-49f3-bed0-888340007876-etcd-client\") pod \"apiserver-7bbb656c7d-khtrj\" (UID: \"d24a8b16-9687-49f3-bed0-888340007876\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-khtrj" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.382866 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/245c05e3-0c9d-4b20-8bef-b16bb0b492c1-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-vb2np\" (UID: \"245c05e3-0c9d-4b20-8bef-b16bb0b492c1\") " pod="openshift-marketplace/marketplace-operator-79b997595-vb2np" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.382917 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fbl9z\" (UniqueName: \"kubernetes.io/projected/8796a7df-de04-490e-b5a9-c9fad5483d61-kube-api-access-fbl9z\") pod \"control-plane-machine-set-operator-78cbb6b69f-kq95z\" (UID: \"8796a7df-de04-490e-b5a9-c9fad5483d61\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-kq95z" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.382964 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/63e15a9d-3476-43c1-93e0-6453f0fc9adb-console-config\") pod \"console-f9d7485db-gqpfp\" (UID: \"63e15a9d-3476-43c1-93e0-6453f0fc9adb\") " pod="openshift-console/console-f9d7485db-gqpfp" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.383017 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b2d54262-2b73-4ffa-80e6-beb88d8fa5b7-metrics-tls\") pod \"dns-default-jhjlm\" (UID: \"b2d54262-2b73-4ffa-80e6-beb88d8fa5b7\") " pod="openshift-dns/dns-default-jhjlm" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.383055 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mdcg5\" (UniqueName: 
\"kubernetes.io/projected/3bd845c3-ce08-43a5-ad61-3fa92c8604fa-kube-api-access-mdcg5\") pod \"service-ca-operator-777779d784-rng6t\" (UID: \"3bd845c3-ce08-43a5-ad61-3fa92c8604fa\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-rng6t" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.383433 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/8796a7df-de04-490e-b5a9-c9fad5483d61-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-kq95z\" (UID: \"8796a7df-de04-490e-b5a9-c9fad5483d61\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-kq95z" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.383545 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-957m4\" (UniqueName: \"kubernetes.io/projected/48976c66-6d66-4d5b-bb2c-84f7b0cb292f-kube-api-access-957m4\") pod \"migrator-59844c95c7-46xcf\" (UID: \"48976c66-6d66-4d5b-bb2c-84f7b0cb292f\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-46xcf" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.383579 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/5a8e383c-706b-43f4-ac19-2bc2e4e83115-proxy-tls\") pod \"machine-config-controller-84d6567774-pl49j\" (UID: \"5a8e383c-706b-43f4-ac19-2bc2e4e83115\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-pl49j" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.383633 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-np4r6\" (UniqueName: \"kubernetes.io/projected/b2d54262-2b73-4ffa-80e6-beb88d8fa5b7-kube-api-access-np4r6\") pod \"dns-default-jhjlm\" (UID: \"b2d54262-2b73-4ffa-80e6-beb88d8fa5b7\") " pod="openshift-dns/dns-default-jhjlm" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.383660 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wnrrs\" (UniqueName: \"kubernetes.io/projected/193f8e22-42fc-444d-92b6-7b44fcdc8200-kube-api-access-wnrrs\") pod \"catalog-operator-68c6474976-mppr6\" (UID: \"193f8e22-42fc-444d-92b6-7b44fcdc8200\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mppr6" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.383727 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ae503948-5876-4b1e-ba9f-23ebb0e05b94-secret-volume\") pod \"collect-profiles-29484345-n7rrt\" (UID: \"ae503948-5876-4b1e-ba9f-23ebb0e05b94\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484345-n7rrt" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.383757 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/193f8e22-42fc-444d-92b6-7b44fcdc8200-profile-collector-cert\") pod \"catalog-operator-68c6474976-mppr6\" (UID: \"193f8e22-42fc-444d-92b6-7b44fcdc8200\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mppr6" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.383804 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-chxqs\" (UniqueName: \"kubernetes.io/projected/245c05e3-0c9d-4b20-8bef-b16bb0b492c1-kube-api-access-chxqs\") 
pod \"marketplace-operator-79b997595-vb2np\" (UID: \"245c05e3-0c9d-4b20-8bef-b16bb0b492c1\") " pod="openshift-marketplace/marketplace-operator-79b997595-vb2np" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.383859 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3bd845c3-ce08-43a5-ad61-3fa92c8604fa-config\") pod \"service-ca-operator-777779d784-rng6t\" (UID: \"3bd845c3-ce08-43a5-ad61-3fa92c8604fa\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-rng6t" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.384041 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/5a8e383c-706b-43f4-ac19-2bc2e4e83115-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-pl49j\" (UID: \"5a8e383c-706b-43f4-ac19-2bc2e4e83115\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-pl49j" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.384060 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/63e15a9d-3476-43c1-93e0-6453f0fc9adb-oauth-serving-cert\") pod \"console-f9d7485db-gqpfp\" (UID: \"63e15a9d-3476-43c1-93e0-6453f0fc9adb\") " pod="openshift-console/console-f9d7485db-gqpfp" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.384131 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/63e15a9d-3476-43c1-93e0-6453f0fc9adb-service-ca\") pod \"console-f9d7485db-gqpfp\" (UID: \"63e15a9d-3476-43c1-93e0-6453f0fc9adb\") " pod="openshift-console/console-f9d7485db-gqpfp" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.384353 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/63e15a9d-3476-43c1-93e0-6453f0fc9adb-console-oauth-config\") pod \"console-f9d7485db-gqpfp\" (UID: \"63e15a9d-3476-43c1-93e0-6453f0fc9adb\") " pod="openshift-console/console-f9d7485db-gqpfp" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.384471 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vtmlz\" (UniqueName: \"kubernetes.io/projected/5a8e383c-706b-43f4-ac19-2bc2e4e83115-kube-api-access-vtmlz\") pod \"machine-config-controller-84d6567774-pl49j\" (UID: \"5a8e383c-706b-43f4-ac19-2bc2e4e83115\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-pl49j" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.384597 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/efc95799-4d8b-4adf-9d64-28717c1bdd76-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-q2rvb\" (UID: \"efc95799-4d8b-4adf-9d64-28717c1bdd76\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-q2rvb" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.384805 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/28216100-24cb-4a52-af53-0a6fe5b54e4e-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-lpvhh\" (UID: \"28216100-24cb-4a52-af53-0a6fe5b54e4e\") " 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-lpvhh" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.384886 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5726f2e0-cd2a-4dd2-923d-8b27042e3223-node-bootstrap-token\") pod \"machine-config-server-gzpgq\" (UID: \"5726f2e0-cd2a-4dd2-923d-8b27042e3223\") " pod="openshift-machine-config-operator/machine-config-server-gzpgq" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.385116 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5726f2e0-cd2a-4dd2-923d-8b27042e3223-certs\") pod \"machine-config-server-gzpgq\" (UID: \"5726f2e0-cd2a-4dd2-923d-8b27042e3223\") " pod="openshift-machine-config-operator/machine-config-server-gzpgq" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.385362 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gck98\" (UniqueName: \"kubernetes.io/projected/5726f2e0-cd2a-4dd2-923d-8b27042e3223-kube-api-access-gck98\") pod \"machine-config-server-gzpgq\" (UID: \"5726f2e0-cd2a-4dd2-923d-8b27042e3223\") " pod="openshift-machine-config-operator/machine-config-server-gzpgq" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.385412 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/4fdcd737-7ae0-40f3-9b0c-7e1cacafebdc-metrics-tls\") pod \"ingress-operator-5b745b69d9-rhsbf\" (UID: \"4fdcd737-7ae0-40f3-9b0c-7e1cacafebdc\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rhsbf" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.385527 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/193f8e22-42fc-444d-92b6-7b44fcdc8200-srv-cert\") pod \"catalog-operator-68c6474976-mppr6\" (UID: \"193f8e22-42fc-444d-92b6-7b44fcdc8200\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mppr6" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.385670 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/245c05e3-0c9d-4b20-8bef-b16bb0b492c1-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-vb2np\" (UID: \"245c05e3-0c9d-4b20-8bef-b16bb0b492c1\") " pod="openshift-marketplace/marketplace-operator-79b997595-vb2np" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.385858 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9hhh8\" (UniqueName: \"kubernetes.io/projected/4bbb457e-b621-4447-ab9b-c3337ff62905-kube-api-access-9hhh8\") pod \"dns-operator-744455d44c-tnl5g\" (UID: \"4bbb457e-b621-4447-ab9b-c3337ff62905\") " pod="openshift-dns-operator/dns-operator-744455d44c-tnl5g" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.386061 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3bd845c3-ce08-43a5-ad61-3fa92c8604fa-serving-cert\") pod \"service-ca-operator-777779d784-rng6t\" (UID: \"3bd845c3-ce08-43a5-ad61-3fa92c8604fa\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-rng6t" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.386446 4933 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-mvb9n\" (UniqueName: \"kubernetes.io/projected/28216100-24cb-4a52-af53-0a6fe5b54e4e-kube-api-access-mvb9n\") pod \"kube-storage-version-migrator-operator-b67b599dd-lpvhh\" (UID: \"28216100-24cb-4a52-af53-0a6fe5b54e4e\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-lpvhh" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.386561 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ae503948-5876-4b1e-ba9f-23ebb0e05b94-config-volume\") pod \"collect-profiles-29484345-n7rrt\" (UID: \"ae503948-5876-4b1e-ba9f-23ebb0e05b94\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484345-n7rrt" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.386619 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r5wbm\" (UniqueName: \"kubernetes.io/projected/efc95799-4d8b-4adf-9d64-28717c1bdd76-kube-api-access-r5wbm\") pod \"package-server-manager-789f6589d5-q2rvb\" (UID: \"efc95799-4d8b-4adf-9d64-28717c1bdd76\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-q2rvb" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.386700 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5xtgd\" (UniqueName: \"kubernetes.io/projected/3be04461-5ea1-4f2c-b87b-bf955dcd9bf1-kube-api-access-5xtgd\") pod \"ingress-canary-tm7xw\" (UID: \"3be04461-5ea1-4f2c-b87b-bf955dcd9bf1\") " pod="openshift-ingress-canary/ingress-canary-tm7xw" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.386926 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/63e15a9d-3476-43c1-93e0-6453f0fc9adb-console-serving-cert\") pod \"console-f9d7485db-gqpfp\" (UID: \"63e15a9d-3476-43c1-93e0-6453f0fc9adb\") " pod="openshift-console/console-f9d7485db-gqpfp" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.386997 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/d24a8b16-9687-49f3-bed0-888340007876-audit-policies\") pod \"apiserver-7bbb656c7d-khtrj\" (UID: \"d24a8b16-9687-49f3-bed0-888340007876\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-khtrj" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.387244 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/4fdcd737-7ae0-40f3-9b0c-7e1cacafebdc-bound-sa-token\") pod \"ingress-operator-5b745b69d9-rhsbf\" (UID: \"4fdcd737-7ae0-40f3-9b0c-7e1cacafebdc\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rhsbf" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.387453 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/4bbb457e-b621-4447-ab9b-c3337ff62905-metrics-tls\") pod \"dns-operator-744455d44c-tnl5g\" (UID: \"4bbb457e-b621-4447-ab9b-c3337ff62905\") " pod="openshift-dns-operator/dns-operator-744455d44c-tnl5g" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.387513 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q99gq\" (UniqueName: \"kubernetes.io/projected/2a0dd9a3-0af3-4ee5-bbad-7ee1126de8b7-kube-api-access-q99gq\") 
pod \"olm-operator-6b444d44fb-dr2tg\" (UID: \"2a0dd9a3-0af3-4ee5-bbad-7ee1126de8b7\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dr2tg" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.387630 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/63e15a9d-3476-43c1-93e0-6453f0fc9adb-trusted-ca-bundle\") pod \"console-f9d7485db-gqpfp\" (UID: \"63e15a9d-3476-43c1-93e0-6453f0fc9adb\") " pod="openshift-console/console-f9d7485db-gqpfp" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.387741 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/2a0dd9a3-0af3-4ee5-bbad-7ee1126de8b7-srv-cert\") pod \"olm-operator-6b444d44fb-dr2tg\" (UID: \"2a0dd9a3-0af3-4ee5-bbad-7ee1126de8b7\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dr2tg" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.387788 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d24a8b16-9687-49f3-bed0-888340007876-audit-dir\") pod \"apiserver-7bbb656c7d-khtrj\" (UID: \"d24a8b16-9687-49f3-bed0-888340007876\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-khtrj" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.387995 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d24a8b16-9687-49f3-bed0-888340007876-audit-dir\") pod \"apiserver-7bbb656c7d-khtrj\" (UID: \"d24a8b16-9687-49f3-bed0-888340007876\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-khtrj" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.388563 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/63e15a9d-3476-43c1-93e0-6453f0fc9adb-console-oauth-config\") pod \"console-f9d7485db-gqpfp\" (UID: \"63e15a9d-3476-43c1-93e0-6453f0fc9adb\") " pod="openshift-console/console-f9d7485db-gqpfp" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.389341 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/5a8e383c-706b-43f4-ac19-2bc2e4e83115-proxy-tls\") pod \"machine-config-controller-84d6567774-pl49j\" (UID: \"5a8e383c-706b-43f4-ac19-2bc2e4e83115\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-pl49j" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.392608 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/63e15a9d-3476-43c1-93e0-6453f0fc9adb-console-serving-cert\") pod \"console-f9d7485db-gqpfp\" (UID: \"63e15a9d-3476-43c1-93e0-6453f0fc9adb\") " pod="openshift-console/console-f9d7485db-gqpfp" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.393723 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/63e15a9d-3476-43c1-93e0-6453f0fc9adb-trusted-ca-bundle\") pod \"console-f9d7485db-gqpfp\" (UID: \"63e15a9d-3476-43c1-93e0-6453f0fc9adb\") " pod="openshift-console/console-f9d7485db-gqpfp" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.400307 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/4bbb457e-b621-4447-ab9b-c3337ff62905-metrics-tls\") 
pod \"dns-operator-744455d44c-tnl5g\" (UID: \"4bbb457e-b621-4447-ab9b-c3337ff62905\") " pod="openshift-dns-operator/dns-operator-744455d44c-tnl5g" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.403814 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.410134 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0f030cb7-e6bb-47bb-b4a4-821f4a21ad42-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-2z9q7\" (UID: \"0f030cb7-e6bb-47bb-b4a4-821f4a21ad42\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-2z9q7" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.411497 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.431668 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.451039 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.473432 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.475197 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3bd845c3-ce08-43a5-ad61-3fa92c8604fa-config\") pod \"service-ca-operator-777779d784-rng6t\" (UID: \"3bd845c3-ce08-43a5-ad61-3fa92c8604fa\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-rng6t" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.491836 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.511820 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.520691 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3bd845c3-ce08-43a5-ad61-3fa92c8604fa-serving-cert\") pod \"service-ca-operator-777779d784-rng6t\" (UID: \"3bd845c3-ce08-43a5-ad61-3fa92c8604fa\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-rng6t" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.532312 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.551027 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.560882 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/245c05e3-0c9d-4b20-8bef-b16bb0b492c1-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-vb2np\" (UID: \"245c05e3-0c9d-4b20-8bef-b16bb0b492c1\") " pod="openshift-marketplace/marketplace-operator-79b997595-vb2np" Jan 22 05:48:18 crc kubenswrapper[4933]: 
I0122 05:48:18.582926 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.585315 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/245c05e3-0c9d-4b20-8bef-b16bb0b492c1-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-vb2np\" (UID: \"245c05e3-0c9d-4b20-8bef-b16bb0b492c1\") " pod="openshift-marketplace/marketplace-operator-79b997595-vb2np" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.591697 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.612744 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.632513 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.672686 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.692454 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.713449 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.716442 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/12969fb5-7977-4b64-baed-ad39d9524369-auth-proxy-config\") pod \"machine-approver-56656f9798-cgnhf\" (UID: \"12969fb5-7977-4b64-baed-ad39d9524369\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cgnhf" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.732401 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.741366 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/12969fb5-7977-4b64-baed-ad39d9524369-machine-approver-tls\") pod \"machine-approver-56656f9798-cgnhf\" (UID: \"12969fb5-7977-4b64-baed-ad39d9524369\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cgnhf" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.752019 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.762544 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/12969fb5-7977-4b64-baed-ad39d9524369-config\") pod \"machine-approver-56656f9798-cgnhf\" (UID: \"12969fb5-7977-4b64-baed-ad39d9524369\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cgnhf" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.771004 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Jan 22 05:48:18 crc 
kubenswrapper[4933]: I0122 05:48:18.791832 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.812525 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.830830 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.835697 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/d24a8b16-9687-49f3-bed0-888340007876-etcd-client\") pod \"apiserver-7bbb656c7d-khtrj\" (UID: \"d24a8b16-9687-49f3-bed0-888340007876\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-khtrj" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.851005 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.856307 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d24a8b16-9687-49f3-bed0-888340007876-serving-cert\") pod \"apiserver-7bbb656c7d-khtrj\" (UID: \"d24a8b16-9687-49f3-bed0-888340007876\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-khtrj" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.871548 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.874888 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/d24a8b16-9687-49f3-bed0-888340007876-encryption-config\") pod \"apiserver-7bbb656c7d-khtrj\" (UID: \"d24a8b16-9687-49f3-bed0-888340007876\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-khtrj" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.892573 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.911537 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.913253 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d24a8b16-9687-49f3-bed0-888340007876-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-khtrj\" (UID: \"d24a8b16-9687-49f3-bed0-888340007876\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-khtrj" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.931716 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.934620 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/d24a8b16-9687-49f3-bed0-888340007876-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-khtrj\" (UID: \"d24a8b16-9687-49f3-bed0-888340007876\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-khtrj" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.952487 4933 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-oauth-apiserver"/"audit-1" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.958843 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/d24a8b16-9687-49f3-bed0-888340007876-audit-policies\") pod \"apiserver-7bbb656c7d-khtrj\" (UID: \"d24a8b16-9687-49f3-bed0-888340007876\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-khtrj" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.973121 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Jan 22 05:48:18 crc kubenswrapper[4933]: I0122 05:48:18.992489 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.000606 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/b3240d3a-10ab-4df0-9d7b-69e3cfb1aaf8-proxy-tls\") pod \"machine-config-operator-74547568cd-wc9dx\" (UID: \"b3240d3a-10ab-4df0-9d7b-69e3cfb1aaf8\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-wc9dx" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.012652 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.016743 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/b3240d3a-10ab-4df0-9d7b-69e3cfb1aaf8-images\") pod \"machine-config-operator-74547568cd-wc9dx\" (UID: \"b3240d3a-10ab-4df0-9d7b-69e3cfb1aaf8\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-wc9dx" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.032047 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.040404 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/4fdcd737-7ae0-40f3-9b0c-7e1cacafebdc-metrics-tls\") pod \"ingress-operator-5b745b69d9-rhsbf\" (UID: \"4fdcd737-7ae0-40f3-9b0c-7e1cacafebdc\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rhsbf" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.061052 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.064656 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4fdcd737-7ae0-40f3-9b0c-7e1cacafebdc-trusted-ca\") pod \"ingress-operator-5b745b69d9-rhsbf\" (UID: \"4fdcd737-7ae0-40f3-9b0c-7e1cacafebdc\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rhsbf" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.072053 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.091839 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.112009 4933 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-ingress-operator"/"openshift-service-ca.crt" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.132106 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.140711 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/28216100-24cb-4a52-af53-0a6fe5b54e4e-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-lpvhh\" (UID: \"28216100-24cb-4a52-af53-0a6fe5b54e4e\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-lpvhh" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.149805 4933 request.go:700] Waited for 1.010487658s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-storage-version-migrator-operator/secrets?fieldSelector=metadata.name%3Dkube-storage-version-migrator-operator-dockercfg-2bh8d&limit=500&resourceVersion=0 Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.152013 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.171951 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.173403 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/28216100-24cb-4a52-af53-0a6fe5b54e4e-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-lpvhh\" (UID: \"28216100-24cb-4a52-af53-0a6fe5b54e4e\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-lpvhh" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.191717 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.211861 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.250689 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hc95z\" (UniqueName: \"kubernetes.io/projected/3af890e6-9547-4a96-8719-a7599d1b1701-kube-api-access-hc95z\") pod \"controller-manager-879f6c89f-jt4v9\" (UID: \"3af890e6-9547-4a96-8719-a7599d1b1701\") " pod="openshift-controller-manager/controller-manager-879f6c89f-jt4v9" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.253182 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.272102 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.292823 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.312271 4933 reflector.go:368] Caches populated 
for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.331924 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.339948 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/8796a7df-de04-490e-b5a9-c9fad5483d61-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-kq95z\" (UID: \"8796a7df-de04-490e-b5a9-c9fad5483d61\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-kq95z" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.352241 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.374144 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Jan 22 05:48:19 crc kubenswrapper[4933]: E0122 05:48:19.384337 4933 configmap.go:193] Couldn't get configMap openshift-dns/dns-default: failed to sync configmap cache: timed out waiting for the condition Jan 22 05:48:19 crc kubenswrapper[4933]: E0122 05:48:19.384382 4933 secret.go:188] Couldn't get secret openshift-ingress-canary/canary-serving-cert: failed to sync secret cache: timed out waiting for the condition Jan 22 05:48:19 crc kubenswrapper[4933]: E0122 05:48:19.384435 4933 secret.go:188] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: failed to sync secret cache: timed out waiting for the condition Jan 22 05:48:19 crc kubenswrapper[4933]: E0122 05:48:19.384343 4933 secret.go:188] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: failed to sync secret cache: timed out waiting for the condition Jan 22 05:48:19 crc kubenswrapper[4933]: E0122 05:48:19.384546 4933 secret.go:188] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: failed to sync secret cache: timed out waiting for the condition Jan 22 05:48:19 crc kubenswrapper[4933]: E0122 05:48:19.384501 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/b2d54262-2b73-4ffa-80e6-beb88d8fa5b7-config-volume podName:b2d54262-2b73-4ffa-80e6-beb88d8fa5b7 nodeName:}" failed. No retries permitted until 2026-01-22 05:48:19.884463433 +0000 UTC m=+147.721588826 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-volume" (UniqueName: "kubernetes.io/configmap/b2d54262-2b73-4ffa-80e6-beb88d8fa5b7-config-volume") pod "dns-default-jhjlm" (UID: "b2d54262-2b73-4ffa-80e6-beb88d8fa5b7") : failed to sync configmap cache: timed out waiting for the condition Jan 22 05:48:19 crc kubenswrapper[4933]: E0122 05:48:19.384614 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ae503948-5876-4b1e-ba9f-23ebb0e05b94-secret-volume podName:ae503948-5876-4b1e-ba9f-23ebb0e05b94 nodeName:}" failed. No retries permitted until 2026-01-22 05:48:19.884589396 +0000 UTC m=+147.721714789 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "secret-volume" (UniqueName: "kubernetes.io/secret/ae503948-5876-4b1e-ba9f-23ebb0e05b94-secret-volume") pod "collect-profiles-29484345-n7rrt" (UID: "ae503948-5876-4b1e-ba9f-23ebb0e05b94") : failed to sync secret cache: timed out waiting for the condition Jan 22 05:48:19 crc kubenswrapper[4933]: E0122 05:48:19.384641 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3be04461-5ea1-4f2c-b87b-bf955dcd9bf1-cert podName:3be04461-5ea1-4f2c-b87b-bf955dcd9bf1 nodeName:}" failed. No retries permitted until 2026-01-22 05:48:19.884628297 +0000 UTC m=+147.721753690 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/3be04461-5ea1-4f2c-b87b-bf955dcd9bf1-cert") pod "ingress-canary-tm7xw" (UID: "3be04461-5ea1-4f2c-b87b-bf955dcd9bf1") : failed to sync secret cache: timed out waiting for the condition Jan 22 05:48:19 crc kubenswrapper[4933]: E0122 05:48:19.384672 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/193f8e22-42fc-444d-92b6-7b44fcdc8200-profile-collector-cert podName:193f8e22-42fc-444d-92b6-7b44fcdc8200 nodeName:}" failed. No retries permitted until 2026-01-22 05:48:19.884651998 +0000 UTC m=+147.721777391 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/193f8e22-42fc-444d-92b6-7b44fcdc8200-profile-collector-cert") pod "catalog-operator-68c6474976-mppr6" (UID: "193f8e22-42fc-444d-92b6-7b44fcdc8200") : failed to sync secret cache: timed out waiting for the condition Jan 22 05:48:19 crc kubenswrapper[4933]: E0122 05:48:19.384694 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2a0dd9a3-0af3-4ee5-bbad-7ee1126de8b7-profile-collector-cert podName:2a0dd9a3-0af3-4ee5-bbad-7ee1126de8b7 nodeName:}" failed. No retries permitted until 2026-01-22 05:48:19.884683728 +0000 UTC m=+147.721809111 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/2a0dd9a3-0af3-4ee5-bbad-7ee1126de8b7-profile-collector-cert") pod "olm-operator-6b444d44fb-dr2tg" (UID: "2a0dd9a3-0af3-4ee5-bbad-7ee1126de8b7") : failed to sync secret cache: timed out waiting for the condition Jan 22 05:48:19 crc kubenswrapper[4933]: E0122 05:48:19.385521 4933 secret.go:188] Couldn't get secret openshift-operator-lifecycle-manager/package-server-manager-serving-cert: failed to sync secret cache: timed out waiting for the condition Jan 22 05:48:19 crc kubenswrapper[4933]: E0122 05:48:19.385642 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/efc95799-4d8b-4adf-9d64-28717c1bdd76-package-server-manager-serving-cert podName:efc95799-4d8b-4adf-9d64-28717c1bdd76 nodeName:}" failed. No retries permitted until 2026-01-22 05:48:19.88560015 +0000 UTC m=+147.722725533 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "package-server-manager-serving-cert" (UniqueName: "kubernetes.io/secret/efc95799-4d8b-4adf-9d64-28717c1bdd76-package-server-manager-serving-cert") pod "package-server-manager-789f6589d5-q2rvb" (UID: "efc95799-4d8b-4adf-9d64-28717c1bdd76") : failed to sync secret cache: timed out waiting for the condition Jan 22 05:48:19 crc kubenswrapper[4933]: E0122 05:48:19.385694 4933 secret.go:188] Couldn't get secret openshift-machine-config-operator/machine-config-server-tls: failed to sync secret cache: timed out waiting for the condition Jan 22 05:48:19 crc kubenswrapper[4933]: E0122 05:48:19.385749 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5726f2e0-cd2a-4dd2-923d-8b27042e3223-certs podName:5726f2e0-cd2a-4dd2-923d-8b27042e3223 nodeName:}" failed. No retries permitted until 2026-01-22 05:48:19.885734893 +0000 UTC m=+147.722860276 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "certs" (UniqueName: "kubernetes.io/secret/5726f2e0-cd2a-4dd2-923d-8b27042e3223-certs") pod "machine-config-server-gzpgq" (UID: "5726f2e0-cd2a-4dd2-923d-8b27042e3223") : failed to sync secret cache: timed out waiting for the condition Jan 22 05:48:19 crc kubenswrapper[4933]: E0122 05:48:19.385783 4933 secret.go:188] Couldn't get secret openshift-machine-config-operator/node-bootstrapper-token: failed to sync secret cache: timed out waiting for the condition Jan 22 05:48:19 crc kubenswrapper[4933]: E0122 05:48:19.385833 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5726f2e0-cd2a-4dd2-923d-8b27042e3223-node-bootstrap-token podName:5726f2e0-cd2a-4dd2-923d-8b27042e3223 nodeName:}" failed. No retries permitted until 2026-01-22 05:48:19.885818785 +0000 UTC m=+147.722944168 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "node-bootstrap-token" (UniqueName: "kubernetes.io/secret/5726f2e0-cd2a-4dd2-923d-8b27042e3223-node-bootstrap-token") pod "machine-config-server-gzpgq" (UID: "5726f2e0-cd2a-4dd2-923d-8b27042e3223") : failed to sync secret cache: timed out waiting for the condition Jan 22 05:48:19 crc kubenswrapper[4933]: E0122 05:48:19.386338 4933 secret.go:188] Couldn't get secret openshift-dns/dns-default-metrics-tls: failed to sync secret cache: timed out waiting for the condition Jan 22 05:48:19 crc kubenswrapper[4933]: E0122 05:48:19.386409 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b2d54262-2b73-4ffa-80e6-beb88d8fa5b7-metrics-tls podName:b2d54262-2b73-4ffa-80e6-beb88d8fa5b7 nodeName:}" failed. No retries permitted until 2026-01-22 05:48:19.886386038 +0000 UTC m=+147.723511421 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/b2d54262-2b73-4ffa-80e6-beb88d8fa5b7-metrics-tls") pod "dns-default-jhjlm" (UID: "b2d54262-2b73-4ffa-80e6-beb88d8fa5b7") : failed to sync secret cache: timed out waiting for the condition Jan 22 05:48:19 crc kubenswrapper[4933]: E0122 05:48:19.386929 4933 secret.go:188] Couldn't get secret openshift-operator-lifecycle-manager/catalog-operator-serving-cert: failed to sync secret cache: timed out waiting for the condition Jan 22 05:48:19 crc kubenswrapper[4933]: E0122 05:48:19.387423 4933 configmap.go:193] Couldn't get configMap openshift-operator-lifecycle-manager/collect-profiles-config: failed to sync configmap cache: timed out waiting for the condition Jan 22 05:48:19 crc kubenswrapper[4933]: E0122 05:48:19.389317 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/193f8e22-42fc-444d-92b6-7b44fcdc8200-srv-cert podName:193f8e22-42fc-444d-92b6-7b44fcdc8200 nodeName:}" failed. No retries permitted until 2026-01-22 05:48:19.887276029 +0000 UTC m=+147.724401412 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/193f8e22-42fc-444d-92b6-7b44fcdc8200-srv-cert") pod "catalog-operator-68c6474976-mppr6" (UID: "193f8e22-42fc-444d-92b6-7b44fcdc8200") : failed to sync secret cache: timed out waiting for the condition Jan 22 05:48:19 crc kubenswrapper[4933]: E0122 05:48:19.389372 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ae503948-5876-4b1e-ba9f-23ebb0e05b94-config-volume podName:ae503948-5876-4b1e-ba9f-23ebb0e05b94 nodeName:}" failed. No retries permitted until 2026-01-22 05:48:19.889350697 +0000 UTC m=+147.726476090 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-volume" (UniqueName: "kubernetes.io/configmap/ae503948-5876-4b1e-ba9f-23ebb0e05b94-config-volume") pod "collect-profiles-29484345-n7rrt" (UID: "ae503948-5876-4b1e-ba9f-23ebb0e05b94") : failed to sync configmap cache: timed out waiting for the condition Jan 22 05:48:19 crc kubenswrapper[4933]: E0122 05:48:19.389424 4933 secret.go:188] Couldn't get secret openshift-operator-lifecycle-manager/olm-operator-serving-cert: failed to sync secret cache: timed out waiting for the condition Jan 22 05:48:19 crc kubenswrapper[4933]: E0122 05:48:19.393025 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2a0dd9a3-0af3-4ee5-bbad-7ee1126de8b7-srv-cert podName:2a0dd9a3-0af3-4ee5-bbad-7ee1126de8b7 nodeName:}" failed. No retries permitted until 2026-01-22 05:48:19.889503591 +0000 UTC m=+147.726628974 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/2a0dd9a3-0af3-4ee5-bbad-7ee1126de8b7-srv-cert") pod "olm-operator-6b444d44fb-dr2tg" (UID: "2a0dd9a3-0af3-4ee5-bbad-7ee1126de8b7") : failed to sync secret cache: timed out waiting for the condition Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.393401 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.413346 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.431835 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.451391 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.472991 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.490170 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.490263 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.490169 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.491119 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.491544 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.498759 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-jt4v9" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.511534 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.531591 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.552182 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.572434 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.593026 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.611061 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:48:19 crc kubenswrapper[4933]: E0122 05:48:19.612084 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:50:21.612048916 +0000 UTC m=+269.449174279 (durationBeforeRetry 2m2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.612236 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.612399 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.613424 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.613549 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.613669 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.632380 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.652542 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.671187 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.692021 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.696764 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-jt4v9"] Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.711675 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.731510 4933 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.751829 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.771331 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.792452 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.812470 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.831838 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.852166 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.872034 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.892581 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.912416 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.917675 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b2d54262-2b73-4ffa-80e6-beb88d8fa5b7-config-volume\") pod \"dns-default-jhjlm\" (UID: \"b2d54262-2b73-4ffa-80e6-beb88d8fa5b7\") " pod="openshift-dns/dns-default-jhjlm" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.917898 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b2d54262-2b73-4ffa-80e6-beb88d8fa5b7-metrics-tls\") pod \"dns-default-jhjlm\" (UID: \"b2d54262-2b73-4ffa-80e6-beb88d8fa5b7\") " pod="openshift-dns/dns-default-jhjlm" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.918151 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ae503948-5876-4b1e-ba9f-23ebb0e05b94-secret-volume\") pod \"collect-profiles-29484345-n7rrt\" (UID: \"ae503948-5876-4b1e-ba9f-23ebb0e05b94\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484345-n7rrt" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.918227 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/193f8e22-42fc-444d-92b6-7b44fcdc8200-profile-collector-cert\") pod \"catalog-operator-68c6474976-mppr6\" (UID: \"193f8e22-42fc-444d-92b6-7b44fcdc8200\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mppr6" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.918411 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/efc95799-4d8b-4adf-9d64-28717c1bdd76-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-q2rvb\" (UID: \"efc95799-4d8b-4adf-9d64-28717c1bdd76\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-q2rvb" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.918505 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5726f2e0-cd2a-4dd2-923d-8b27042e3223-node-bootstrap-token\") pod \"machine-config-server-gzpgq\" (UID: \"5726f2e0-cd2a-4dd2-923d-8b27042e3223\") " pod="openshift-machine-config-operator/machine-config-server-gzpgq" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.918564 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5726f2e0-cd2a-4dd2-923d-8b27042e3223-certs\") pod \"machine-config-server-gzpgq\" (UID: \"5726f2e0-cd2a-4dd2-923d-8b27042e3223\") " pod="openshift-machine-config-operator/machine-config-server-gzpgq" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.918694 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/193f8e22-42fc-444d-92b6-7b44fcdc8200-srv-cert\") pod \"catalog-operator-68c6474976-mppr6\" (UID: \"193f8e22-42fc-444d-92b6-7b44fcdc8200\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mppr6" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.918861 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ae503948-5876-4b1e-ba9f-23ebb0e05b94-config-volume\") pod \"collect-profiles-29484345-n7rrt\" (UID: \"ae503948-5876-4b1e-ba9f-23ebb0e05b94\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484345-n7rrt" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.918986 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/2a0dd9a3-0af3-4ee5-bbad-7ee1126de8b7-srv-cert\") pod \"olm-operator-6b444d44fb-dr2tg\" (UID: \"2a0dd9a3-0af3-4ee5-bbad-7ee1126de8b7\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dr2tg" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.919132 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3be04461-5ea1-4f2c-b87b-bf955dcd9bf1-cert\") pod \"ingress-canary-tm7xw\" (UID: \"3be04461-5ea1-4f2c-b87b-bf955dcd9bf1\") " pod="openshift-ingress-canary/ingress-canary-tm7xw" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.919225 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/2a0dd9a3-0af3-4ee5-bbad-7ee1126de8b7-profile-collector-cert\") pod \"olm-operator-6b444d44fb-dr2tg\" (UID: \"2a0dd9a3-0af3-4ee5-bbad-7ee1126de8b7\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dr2tg" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.920707 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b2d54262-2b73-4ffa-80e6-beb88d8fa5b7-config-volume\") pod \"dns-default-jhjlm\" (UID: \"b2d54262-2b73-4ffa-80e6-beb88d8fa5b7\") " pod="openshift-dns/dns-default-jhjlm" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.923046 4933 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ae503948-5876-4b1e-ba9f-23ebb0e05b94-config-volume\") pod \"collect-profiles-29484345-n7rrt\" (UID: \"ae503948-5876-4b1e-ba9f-23ebb0e05b94\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484345-n7rrt" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.925236 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b2d54262-2b73-4ffa-80e6-beb88d8fa5b7-metrics-tls\") pod \"dns-default-jhjlm\" (UID: \"b2d54262-2b73-4ffa-80e6-beb88d8fa5b7\") " pod="openshift-dns/dns-default-jhjlm" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.925430 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/2a0dd9a3-0af3-4ee5-bbad-7ee1126de8b7-srv-cert\") pod \"olm-operator-6b444d44fb-dr2tg\" (UID: \"2a0dd9a3-0af3-4ee5-bbad-7ee1126de8b7\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dr2tg" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.926117 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3be04461-5ea1-4f2c-b87b-bf955dcd9bf1-cert\") pod \"ingress-canary-tm7xw\" (UID: \"3be04461-5ea1-4f2c-b87b-bf955dcd9bf1\") " pod="openshift-ingress-canary/ingress-canary-tm7xw" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.926572 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/193f8e22-42fc-444d-92b6-7b44fcdc8200-srv-cert\") pod \"catalog-operator-68c6474976-mppr6\" (UID: \"193f8e22-42fc-444d-92b6-7b44fcdc8200\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mppr6" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.927236 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/2a0dd9a3-0af3-4ee5-bbad-7ee1126de8b7-profile-collector-cert\") pod \"olm-operator-6b444d44fb-dr2tg\" (UID: \"2a0dd9a3-0af3-4ee5-bbad-7ee1126de8b7\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dr2tg" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.927646 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ae503948-5876-4b1e-ba9f-23ebb0e05b94-secret-volume\") pod \"collect-profiles-29484345-n7rrt\" (UID: \"ae503948-5876-4b1e-ba9f-23ebb0e05b94\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484345-n7rrt" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.927723 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/efc95799-4d8b-4adf-9d64-28717c1bdd76-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-q2rvb\" (UID: \"efc95799-4d8b-4adf-9d64-28717c1bdd76\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-q2rvb" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.927674 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/193f8e22-42fc-444d-92b6-7b44fcdc8200-profile-collector-cert\") pod \"catalog-operator-68c6474976-mppr6\" (UID: \"193f8e22-42fc-444d-92b6-7b44fcdc8200\") " 
pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mppr6" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.932684 4933 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.951632 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.971200 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Jan 22 05:48:19 crc kubenswrapper[4933]: I0122 05:48:19.991868 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.018593 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.025662 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5726f2e0-cd2a-4dd2-923d-8b27042e3223-node-bootstrap-token\") pod \"machine-config-server-gzpgq\" (UID: \"5726f2e0-cd2a-4dd2-923d-8b27042e3223\") " pod="openshift-machine-config-operator/machine-config-server-gzpgq" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.032068 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.045151 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5726f2e0-cd2a-4dd2-923d-8b27042e3223-certs\") pod \"machine-config-server-gzpgq\" (UID: \"5726f2e0-cd2a-4dd2-923d-8b27042e3223\") " pod="openshift-machine-config-operator/machine-config-server-gzpgq" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.099314 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tj882\" (UniqueName: \"kubernetes.io/projected/1ba473a8-8e29-49fe-a1b8-8cc7e422037f-kube-api-access-tj882\") pod \"openshift-config-operator-7777fb866f-jtr4k\" (UID: \"1ba473a8-8e29-49fe-a1b8-8cc7e422037f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-jtr4k" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.116209 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zc9dp\" (UniqueName: \"kubernetes.io/projected/6b02d39a-2e64-4035-abf2-99dcc7f32194-kube-api-access-zc9dp\") pod \"cluster-samples-operator-665b6dd947-9j8zq\" (UID: \"6b02d39a-2e64-4035-abf2-99dcc7f32194\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9j8zq" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.139191 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9cstl\" (UniqueName: \"kubernetes.io/projected/dbf1986a-8c34-4d34-bbf0-eaaf9d89eb8c-kube-api-access-9cstl\") pod \"openshift-apiserver-operator-796bbdcf4f-74lmm\" (UID: \"dbf1986a-8c34-4d34-bbf0-eaaf9d89eb8c\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-74lmm" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.154291 4933 request.go:700] Waited for 1.880239401s due to client-side throttling, not priority and fairness, request: 
POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-ingress/serviceaccounts/router/token Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.159332 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r4mtq\" (UniqueName: \"kubernetes.io/projected/2edc4443-850c-48d0-a605-3debd5d38299-kube-api-access-r4mtq\") pod \"cluster-image-registry-operator-dc59b4c8b-8ctmc\" (UID: \"2edc4443-850c-48d0-a605-3debd5d38299\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8ctmc" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.164896 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9j8zq" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.175961 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-txqcb\" (UniqueName: \"kubernetes.io/projected/2860560e-929a-4dbe-84c0-23326bf7cdf4-kube-api-access-txqcb\") pod \"router-default-5444994796-77xhn\" (UID: \"2860560e-929a-4dbe-84c0-23326bf7cdf4\") " pod="openshift-ingress/router-default-5444994796-77xhn" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.197646 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-jtr4k" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.202480 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-77xhn" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.203210 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-khqg7\" (UniqueName: \"kubernetes.io/projected/b3240d3a-10ab-4df0-9d7b-69e3cfb1aaf8-kube-api-access-khqg7\") pod \"machine-config-operator-74547568cd-wc9dx\" (UID: \"b3240d3a-10ab-4df0-9d7b-69e3cfb1aaf8\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-wc9dx" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.234795 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4x6f4\" (UniqueName: \"kubernetes.io/projected/12969fb5-7977-4b64-baed-ad39d9524369-kube-api-access-4x6f4\") pod \"machine-approver-56656f9798-cgnhf\" (UID: \"12969fb5-7977-4b64-baed-ad39d9524369\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cgnhf" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.254615 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-74lmm" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.255349 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f9fsn\" (UniqueName: \"kubernetes.io/projected/4c3867cf-bc74-4587-aa51-d30bf357f0f2-kube-api-access-f9fsn\") pod \"openshift-controller-manager-operator-756b6f6bc6-dgpsx\" (UID: \"4c3867cf-bc74-4587-aa51-d30bf357f0f2\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-dgpsx" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.260261 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4t957\" (UniqueName: \"kubernetes.io/projected/8bdbecf3-1fbc-4184-b33b-94031b7e3845-kube-api-access-4t957\") pod \"apiserver-76f77b778f-llbq4\" (UID: \"8bdbecf3-1fbc-4184-b33b-94031b7e3845\") " pod="openshift-apiserver/apiserver-76f77b778f-llbq4" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.275839 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pqst8\" (UniqueName: \"kubernetes.io/projected/c800ab14-5d0a-4078-91f5-b47d05d15ccc-kube-api-access-pqst8\") pod \"machine-api-operator-5694c8668f-6r979\" (UID: \"c800ab14-5d0a-4078-91f5-b47d05d15ccc\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6r979" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.290494 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fjdmh\" (UniqueName: \"kubernetes.io/projected/5f757f81-9a44-488a-8a60-4814d2bc418d-kube-api-access-fjdmh\") pod \"multus-admission-controller-857f4d67dd-sk5qf\" (UID: \"5f757f81-9a44-488a-8a60-4814d2bc418d\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-sk5qf" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.304526 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-dgpsx" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.318031 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2edc4443-850c-48d0-a605-3debd5d38299-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-8ctmc\" (UID: \"2edc4443-850c-48d0-a605-3debd5d38299\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8ctmc" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.327033 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t4n8w\" (UniqueName: \"kubernetes.io/projected/99c9edc1-b1d6-459e-957f-aab91850d2e5-kube-api-access-t4n8w\") pod \"console-operator-58897d9998-fntp2\" (UID: \"99c9edc1-b1d6-459e-957f-aab91850d2e5\") " pod="openshift-console-operator/console-operator-58897d9998-fntp2" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.337190 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8ctmc" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.349722 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-sk5qf" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.350533 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3c7d8ca9-574e-4b56-8ed6-b9155509a740-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-zshgr\" (UID: \"3c7d8ca9-574e-4b56-8ed6-b9155509a740\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zshgr" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.405609 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-llbq4" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.408373 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-6r979" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.450685 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cgnhf" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.466061 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-wc9dx" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.483461 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zshgr" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.562185 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-fntp2" Jan 22 05:48:20 crc kubenswrapper[4933]: E0122 05:48:20.613513 4933 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Jan 22 05:48:20 crc kubenswrapper[4933]: E0122 05:48:20.621071 4933 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: failed to sync configmap cache: timed out waiting for the condition Jan 22 05:48:20 crc kubenswrapper[4933]: E0122 05:48:20.621099 4933 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Jan 22 05:48:20 crc kubenswrapper[4933]: E0122 05:48:20.621166 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 05:50:22.621147561 +0000 UTC m=+270.458272914 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : failed to sync configmap cache: timed out waiting for the condition Jan 22 05:48:20 crc kubenswrapper[4933]: E0122 05:48:20.621301 4933 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: failed to sync secret cache: timed out waiting for the condition Jan 22 05:48:20 crc kubenswrapper[4933]: E0122 05:48:20.621388 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 05:50:22.621366176 +0000 UTC m=+270.458491589 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : failed to sync secret cache: timed out waiting for the condition Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.643957 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k9c2p\" (UniqueName: \"kubernetes.io/projected/2c4864a0-5981-4eef-a0db-c33a535e02de-kube-api-access-k9c2p\") pod \"oauth-openshift-558db77b4-gj98r\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " pod="openshift-authentication/oauth-openshift-558db77b4-gj98r" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.650908 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xrrmr\" (UniqueName: \"kubernetes.io/projected/0f030cb7-e6bb-47bb-b4a4-821f4a21ad42-kube-api-access-xrrmr\") pod \"authentication-operator-69f744f599-2z9q7\" (UID: \"0f030cb7-e6bb-47bb-b4a4-821f4a21ad42\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-2z9q7" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.653495 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-chxqs\" (UniqueName: \"kubernetes.io/projected/245c05e3-0c9d-4b20-8bef-b16bb0b492c1-kube-api-access-chxqs\") pod \"marketplace-operator-79b997595-vb2np\" (UID: \"245c05e3-0c9d-4b20-8bef-b16bb0b492c1\") " pod="openshift-marketplace/marketplace-operator-79b997595-vb2np" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.653688 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-2z9q7" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.656719 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bjjtz\" (UniqueName: \"kubernetes.io/projected/80358407-ad1b-499f-868f-44e3388b0fac-kube-api-access-bjjtz\") pod \"downloads-7954f5f757-tm764\" (UID: \"80358407-ad1b-499f-868f-44e3388b0fac\") " pod="openshift-console/downloads-7954f5f757-tm764" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.660595 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-jt4v9" event={"ID":"3af890e6-9547-4a96-8719-a7599d1b1701","Type":"ContainerStarted","Data":"a216542fbb5e6883c5ea5f326e32538de556b3bd9ab6fad8f25875f42b06ce8b"} Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.660649 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-jt4v9" event={"ID":"3af890e6-9547-4a96-8719-a7599d1b1701","Type":"ContainerStarted","Data":"61294885fbd6e819e78ae84d309fe1c301eb30c1a003b84022a822dd7505b42e"} Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.662017 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-jt4v9" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.662848 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wpkpl\" (UniqueName: \"kubernetes.io/projected/4fdcd737-7ae0-40f3-9b0c-7e1cacafebdc-kube-api-access-wpkpl\") pod \"ingress-operator-5b745b69d9-rhsbf\" (UID: \"4fdcd737-7ae0-40f3-9b0c-7e1cacafebdc\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rhsbf" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.663167 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-7954f5f757-tm764" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.665262 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w5z5f\" (UniqueName: \"kubernetes.io/projected/1825f935-ad5f-4d85-aeef-e368841c5547-kube-api-access-w5z5f\") pod \"etcd-operator-b45778765-x5npd\" (UID: \"1825f935-ad5f-4d85-aeef-e368841c5547\") " pod="openshift-etcd-operator/etcd-operator-b45778765-x5npd" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.668970 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-957m4\" (UniqueName: \"kubernetes.io/projected/48976c66-6d66-4d5b-bb2c-84f7b0cb292f-kube-api-access-957m4\") pod \"migrator-59844c95c7-46xcf\" (UID: \"48976c66-6d66-4d5b-bb2c-84f7b0cb292f\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-46xcf" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.668980 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6x9zh\" (UniqueName: \"kubernetes.io/projected/63e15a9d-3476-43c1-93e0-6453f0fc9adb-kube-api-access-6x9zh\") pod \"console-f9d7485db-gqpfp\" (UID: \"63e15a9d-3476-43c1-93e0-6453f0fc9adb\") " pod="openshift-console/console-f9d7485db-gqpfp" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.671904 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fbl9z\" (UniqueName: \"kubernetes.io/projected/8796a7df-de04-490e-b5a9-c9fad5483d61-kube-api-access-fbl9z\") pod \"control-plane-machine-set-operator-78cbb6b69f-kq95z\" (UID: \"8796a7df-de04-490e-b5a9-c9fad5483d61\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-kq95z" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.671923 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g96n4\" (UniqueName: \"kubernetes.io/projected/d24a8b16-9687-49f3-bed0-888340007876-kube-api-access-g96n4\") pod \"apiserver-7bbb656c7d-khtrj\" (UID: \"d24a8b16-9687-49f3-bed0-888340007876\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-khtrj" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.676303 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mdcg5\" (UniqueName: \"kubernetes.io/projected/3bd845c3-ce08-43a5-ad61-3fa92c8604fa-kube-api-access-mdcg5\") pod \"service-ca-operator-777779d784-rng6t\" (UID: \"3bd845c3-ce08-43a5-ad61-3fa92c8604fa\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-rng6t" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.678154 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wnrrs\" (UniqueName: \"kubernetes.io/projected/193f8e22-42fc-444d-92b6-7b44fcdc8200-kube-api-access-wnrrs\") pod \"catalog-operator-68c6474976-mppr6\" (UID: \"193f8e22-42fc-444d-92b6-7b44fcdc8200\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mppr6" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.679865 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-77xhn" event={"ID":"2860560e-929a-4dbe-84c0-23326bf7cdf4","Type":"ContainerStarted","Data":"b8e1a0aa85a30560321c033c8b42d857ce3e99a9f0108ea61f2ae0d27797dace"} Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.680023 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sn4l5\" (UniqueName: 
\"kubernetes.io/projected/44595319-f5f2-4db3-9671-9c8680c2dfc7-kube-api-access-sn4l5\") pod \"route-controller-manager-6576b87f9c-tzlk9\" (UID: \"44595319-f5f2-4db3-9671-9c8680c2dfc7\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tzlk9" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.680536 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4a7dbd55-b911-42ff-a5e7-ceb61a071343-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-skfkj\" (UID: \"4a7dbd55-b911-42ff-a5e7-ceb61a071343\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-skfkj" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.691273 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-jt4v9" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.704523 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pkd78\" (UniqueName: \"kubernetes.io/projected/ae503948-5876-4b1e-ba9f-23ebb0e05b94-kube-api-access-pkd78\") pod \"collect-profiles-29484345-n7rrt\" (UID: \"ae503948-5876-4b1e-ba9f-23ebb0e05b94\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484345-n7rrt" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.704811 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9j8zq"] Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.707422 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-np4r6\" (UniqueName: \"kubernetes.io/projected/b2d54262-2b73-4ffa-80e6-beb88d8fa5b7-kube-api-access-np4r6\") pod \"dns-default-jhjlm\" (UID: \"b2d54262-2b73-4ffa-80e6-beb88d8fa5b7\") " pod="openshift-dns/dns-default-jhjlm" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.714496 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vtmlz\" (UniqueName: \"kubernetes.io/projected/5a8e383c-706b-43f4-ac19-2bc2e4e83115-kube-api-access-vtmlz\") pod \"machine-config-controller-84d6567774-pl49j\" (UID: \"5a8e383c-706b-43f4-ac19-2bc2e4e83115\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-pl49j" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.722385 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gck98\" (UniqueName: \"kubernetes.io/projected/5726f2e0-cd2a-4dd2-923d-8b27042e3223-kube-api-access-gck98\") pod \"machine-config-server-gzpgq\" (UID: \"5726f2e0-cd2a-4dd2-923d-8b27042e3223\") " pod="openshift-machine-config-operator/machine-config-server-gzpgq" Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.734051 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-vb2np"
Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.739700 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9hhh8\" (UniqueName: \"kubernetes.io/projected/4bbb457e-b621-4447-ab9b-c3337ff62905-kube-api-access-9hhh8\") pod \"dns-operator-744455d44c-tnl5g\" (UID: \"4bbb457e-b621-4447-ab9b-c3337ff62905\") " pod="openshift-dns-operator/dns-operator-744455d44c-tnl5g"
Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.758475 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mvb9n\" (UniqueName: \"kubernetes.io/projected/28216100-24cb-4a52-af53-0a6fe5b54e4e-kube-api-access-mvb9n\") pod \"kube-storage-version-migrator-operator-b67b599dd-lpvhh\" (UID: \"28216100-24cb-4a52-af53-0a6fe5b54e4e\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-lpvhh"
Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.760194 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-khtrj"
Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.772191 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tzlk9"
Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.777306 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r5wbm\" (UniqueName: \"kubernetes.io/projected/efc95799-4d8b-4adf-9d64-28717c1bdd76-kube-api-access-r5wbm\") pod \"package-server-manager-789f6589d5-q2rvb\" (UID: \"efc95799-4d8b-4adf-9d64-28717c1bdd76\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-q2rvb"
Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.782056 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-lpvhh"
Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.787410 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5xtgd\" (UniqueName: \"kubernetes.io/projected/3be04461-5ea1-4f2c-b87b-bf955dcd9bf1-kube-api-access-5xtgd\") pod \"ingress-canary-tm7xw\" (UID: \"3be04461-5ea1-4f2c-b87b-bf955dcd9bf1\") " pod="openshift-ingress-canary/ingress-canary-tm7xw"
Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.819782 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-kq95z"
Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.823276 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/4fdcd737-7ae0-40f3-9b0c-7e1cacafebdc-bound-sa-token\") pod \"ingress-operator-5b745b69d9-rhsbf\" (UID: \"4fdcd737-7ae0-40f3-9b0c-7e1cacafebdc\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rhsbf"
Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.830991 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mppr6"
Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.838573 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-46xcf"
Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.839669 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-gj98r"
Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.851998 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484345-n7rrt"
Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.856179 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q99gq\" (UniqueName: \"kubernetes.io/projected/2a0dd9a3-0af3-4ee5-bbad-7ee1126de8b7-kube-api-access-q99gq\") pod \"olm-operator-6b444d44fb-dr2tg\" (UID: \"2a0dd9a3-0af3-4ee5-bbad-7ee1126de8b7\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dr2tg"
Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.871435 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin"
Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.871703 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-q2rvb"
Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.876004 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dr2tg"
Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.879152 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert"
Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.879387 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-pl49j"
Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.879509 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-gqpfp"
Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.888633 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-tm7xw"
Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.894204 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt"
Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.898410 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-jhjlm"
Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.925039 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-tnl5g"
Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.925467 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-skfkj"
Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.925658 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-gzpgq"
Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.937384 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt"
Jan 22 05:48:20 crc kubenswrapper[4933]: E0122 05:48:20.944777 4933 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: failed to sync configmap cache: timed out waiting for the condition
Jan 22 05:48:20 crc kubenswrapper[4933]: E0122 05:48:20.944883 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-22 05:50:22.944860508 +0000 UTC m=+270.781985861 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : failed to sync configmap cache: timed out waiting for the condition
Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.953737 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret"
Jan 22 05:48:20 crc kubenswrapper[4933]: E0122 05:48:20.956001 4933 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: failed to sync configmap cache: timed out waiting for the condition
Jan 22 05:48:20 crc kubenswrapper[4933]: E0122 05:48:20.956141 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-22 05:50:22.956119941 +0000 UTC m=+270.793245294 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : failed to sync configmap cache: timed out waiting for the condition
Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.956227 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-x5npd"
Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.956452 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c"
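The pair of E0122 entries from projected.go and nestedpendingoperations.go above show why the two network-check pods stall: kubelet assembles kube-api-access-* projected volumes from its local ConfigMap informer cache, the cache had not synced within its wait window ("failed to sync configmap cache: timed out waiting for the condition"), and each failed mount is parked for 2m2s, which looks like the ceiling of the volume manager's exponential backoff in this run. A minimal client-go sketch of the same wait-for-cache-sync pattern, illustrative only and not kubelet's actual code, assuming in-cluster credentials and a 30s timeout chosen for the example:

    package main

    import (
        "fmt"
        "time"

        "k8s.io/client-go/informers"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/rest"
        "k8s.io/client-go/tools/cache"
    )

    func main() {
        cfg, err := rest.InClusterConfig() // assumes this runs inside the cluster
        if err != nil {
            panic(err)
        }
        client := kubernetes.NewForConfigOrDie(cfg)

        factory := informers.NewSharedInformerFactory(client, 0)
        cmInformer := factory.Core().V1().ConfigMaps().Informer()

        // Give the cache a bounded window to sync, as kubelet does before
        // building a projected volume; closing stopCh aborts the wait.
        stopCh := make(chan struct{})
        go func() {
            time.Sleep(30 * time.Second) // hypothetical timeout for the sketch
            close(stopCh)
        }()
        factory.Start(stopCh)

        if !cache.WaitForCacheSync(stopCh, cmInformer.HasSynced) {
            fmt.Println("failed to sync configmap cache: timed out waiting for the condition")
            return
        }
        fmt.Println("configmap cache synced")
    }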
Jan 22 05:48:20 crc kubenswrapper[4933]: I0122 05:48:20.968438 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-rng6t"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.069743 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/26b5f8af-bb33-40cc-8ef7-03b0c931896c-registry-tls\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.069784 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/beeaabac-5adb-4389-a41d-fcd84b8b7259-plugins-dir\") pod \"csi-hostpathplugin-48tlb\" (UID: \"beeaabac-5adb-4389-a41d-fcd84b8b7259\") " pod="hostpath-provisioner/csi-hostpathplugin-48tlb"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.069814 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/e1ccd71f-0b06-473b-a421-9e2744377453-signing-cabundle\") pod \"service-ca-9c57cc56f-94w5l\" (UID: \"e1ccd71f-0b06-473b-a421-9e2744377453\") " pod="openshift-service-ca/service-ca-9c57cc56f-94w5l"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.069828 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0aaa5850-80d2-4e5c-bf1e-8115fe8b19a2-apiservice-cert\") pod \"packageserver-d55dfcdfc-zxdg4\" (UID: \"0aaa5850-80d2-4e5c-bf1e-8115fe8b19a2\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zxdg4"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.069854 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/e1ccd71f-0b06-473b-a421-9e2744377453-signing-key\") pod \"service-ca-9c57cc56f-94w5l\" (UID: \"e1ccd71f-0b06-473b-a421-9e2744377453\") " pod="openshift-service-ca/service-ca-9c57cc56f-94w5l"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.069884 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.069900 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0aaa5850-80d2-4e5c-bf1e-8115fe8b19a2-webhook-cert\") pod \"packageserver-d55dfcdfc-zxdg4\" (UID: \"0aaa5850-80d2-4e5c-bf1e-8115fe8b19a2\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zxdg4"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.069951 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cb5qf\" (UniqueName: \"kubernetes.io/projected/beeaabac-5adb-4389-a41d-fcd84b8b7259-kube-api-access-cb5qf\") pod \"csi-hostpathplugin-48tlb\" (UID: \"beeaabac-5adb-4389-a41d-fcd84b8b7259\") " pod="hostpath-provisioner/csi-hostpathplugin-48tlb"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.069970 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/beeaabac-5adb-4389-a41d-fcd84b8b7259-socket-dir\") pod \"csi-hostpathplugin-48tlb\" (UID: \"beeaabac-5adb-4389-a41d-fcd84b8b7259\") " pod="hostpath-provisioner/csi-hostpathplugin-48tlb"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.070015 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/26b5f8af-bb33-40cc-8ef7-03b0c931896c-registry-certificates\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.070043 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f41c466d-c9ac-45c6-9bf4-fb0be3242267-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-t7f4w\" (UID: \"f41c466d-c9ac-45c6-9bf4-fb0be3242267\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-t7f4w"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.070059 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w7gfx\" (UniqueName: \"kubernetes.io/projected/0aaa5850-80d2-4e5c-bf1e-8115fe8b19a2-kube-api-access-w7gfx\") pod \"packageserver-d55dfcdfc-zxdg4\" (UID: \"0aaa5850-80d2-4e5c-bf1e-8115fe8b19a2\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zxdg4"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.070098 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9l844\" (UniqueName: \"kubernetes.io/projected/e1ccd71f-0b06-473b-a421-9e2744377453-kube-api-access-9l844\") pod \"service-ca-9c57cc56f-94w5l\" (UID: \"e1ccd71f-0b06-473b-a421-9e2744377453\") " pod="openshift-service-ca/service-ca-9c57cc56f-94w5l"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.070167 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/beeaabac-5adb-4389-a41d-fcd84b8b7259-csi-data-dir\") pod \"csi-hostpathplugin-48tlb\" (UID: \"beeaabac-5adb-4389-a41d-fcd84b8b7259\") " pod="hostpath-provisioner/csi-hostpathplugin-48tlb"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.070193 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/26b5f8af-bb33-40cc-8ef7-03b0c931896c-installation-pull-secrets\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.070207 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4gsr5\" (UniqueName: \"kubernetes.io/projected/26b5f8af-bb33-40cc-8ef7-03b0c931896c-kube-api-access-4gsr5\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.070221 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f41c466d-c9ac-45c6-9bf4-fb0be3242267-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-t7f4w\" (UID: \"f41c466d-c9ac-45c6-9bf4-fb0be3242267\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-t7f4w"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.070237 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/beeaabac-5adb-4389-a41d-fcd84b8b7259-mountpoint-dir\") pod \"csi-hostpathplugin-48tlb\" (UID: \"beeaabac-5adb-4389-a41d-fcd84b8b7259\") " pod="hostpath-provisioner/csi-hostpathplugin-48tlb"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.070252 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/beeaabac-5adb-4389-a41d-fcd84b8b7259-registration-dir\") pod \"csi-hostpathplugin-48tlb\" (UID: \"beeaabac-5adb-4389-a41d-fcd84b8b7259\") " pod="hostpath-provisioner/csi-hostpathplugin-48tlb"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.070278 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/26b5f8af-bb33-40cc-8ef7-03b0c931896c-ca-trust-extracted\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.070301 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f41c466d-c9ac-45c6-9bf4-fb0be3242267-config\") pod \"kube-apiserver-operator-766d6c64bb-t7f4w\" (UID: \"f41c466d-c9ac-45c6-9bf4-fb0be3242267\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-t7f4w"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.070324 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/26b5f8af-bb33-40cc-8ef7-03b0c931896c-trusted-ca\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.070336 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/0aaa5850-80d2-4e5c-bf1e-8115fe8b19a2-tmpfs\") pod \"packageserver-d55dfcdfc-zxdg4\" (UID: \"0aaa5850-80d2-4e5c-bf1e-8115fe8b19a2\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zxdg4"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.070371 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/26b5f8af-bb33-40cc-8ef7-03b0c931896c-bound-sa-token\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:21 crc kubenswrapper[4933]: E0122 05:48:21.071196 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:21.571179887 +0000 UTC m=+149.408305240 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.078354 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rhsbf"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.094169 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-74lmm"]
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.137534 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-jtr4k"]
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.137577 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-dgpsx"]
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.175703 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
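Every MountDevice and TearDown attempt on pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 fails with the same root cause: kubevirt.io.hostpath-provisioner is not yet in kubelet's list of registered CSI drivers. That list is populated only after the driver's node plugin registers over the kubelet plugin-registration socket, and the plugin pod itself (csi-hostpathplugin-48tlb, whose registration-dir and socket-dir volumes are being mounted in the entries above) is not running yet, so both the unmount of the old image-registry replica and the mount for the new one are gated on it. One way to watch registration complete from outside is to read the node's CSINode object, which mirrors kubelet's registry once registration succeeds; a sketch, assuming client-go, a kubeconfig in $KUBECONFIG, and the node name "crc" seen in this log:

    package main

    import (
        "context"
        "fmt"
        "os"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        cfg, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
        if err != nil {
            panic(err)
        }
        client := kubernetes.NewForConfigOrDie(cfg)

        // CSINode lists the drivers that completed node registration on "crc".
        csiNode, err := client.StorageV1().CSINodes().Get(context.Background(), "crc", metav1.GetOptions{})
        if err != nil {
            panic(err)
        }
        if len(csiNode.Spec.Drivers) == 0 {
            fmt.Println("no CSI drivers registered on node crc yet")
        }
        for _, d := range csiNode.Spec.Drivers {
            // kubevirt.io.hostpath-provisioner should appear here once
            // csi-hostpathplugin-48tlb is up and has registered itself.
            fmt.Printf("registered: %s (node ID %s)\n", d.Name, d.NodeID)
        }
    }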
Jan 22 05:48:21 crc kubenswrapper[4933]: E0122 05:48:21.175906 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:21.675882511 +0000 UTC m=+149.513007864 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.175944 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/26b5f8af-bb33-40cc-8ef7-03b0c931896c-bound-sa-token\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.176052 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/26b5f8af-bb33-40cc-8ef7-03b0c931896c-registry-tls\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.176104 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/beeaabac-5adb-4389-a41d-fcd84b8b7259-plugins-dir\") pod \"csi-hostpathplugin-48tlb\" (UID: \"beeaabac-5adb-4389-a41d-fcd84b8b7259\") " pod="hostpath-provisioner/csi-hostpathplugin-48tlb"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.176144 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/e1ccd71f-0b06-473b-a421-9e2744377453-signing-cabundle\") pod \"service-ca-9c57cc56f-94w5l\" (UID: \"e1ccd71f-0b06-473b-a421-9e2744377453\") " pod="openshift-service-ca/service-ca-9c57cc56f-94w5l"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.176162 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0aaa5850-80d2-4e5c-bf1e-8115fe8b19a2-apiservice-cert\") pod \"packageserver-d55dfcdfc-zxdg4\" (UID: \"0aaa5850-80d2-4e5c-bf1e-8115fe8b19a2\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zxdg4"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.176228 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/e1ccd71f-0b06-473b-a421-9e2744377453-signing-key\") pod \"service-ca-9c57cc56f-94w5l\" (UID: \"e1ccd71f-0b06-473b-a421-9e2744377453\") " pod="openshift-service-ca/service-ca-9c57cc56f-94w5l"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.176272 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.176290 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0aaa5850-80d2-4e5c-bf1e-8115fe8b19a2-webhook-cert\") pod \"packageserver-d55dfcdfc-zxdg4\" (UID: \"0aaa5850-80d2-4e5c-bf1e-8115fe8b19a2\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zxdg4"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.176326 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cb5qf\" (UniqueName: \"kubernetes.io/projected/beeaabac-5adb-4389-a41d-fcd84b8b7259-kube-api-access-cb5qf\") pod \"csi-hostpathplugin-48tlb\" (UID: \"beeaabac-5adb-4389-a41d-fcd84b8b7259\") " pod="hostpath-provisioner/csi-hostpathplugin-48tlb"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.176359 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/beeaabac-5adb-4389-a41d-fcd84b8b7259-socket-dir\") pod \"csi-hostpathplugin-48tlb\" (UID: \"beeaabac-5adb-4389-a41d-fcd84b8b7259\") " pod="hostpath-provisioner/csi-hostpathplugin-48tlb"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.176413 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/26b5f8af-bb33-40cc-8ef7-03b0c931896c-registry-certificates\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.176446 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f41c466d-c9ac-45c6-9bf4-fb0be3242267-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-t7f4w\" (UID: \"f41c466d-c9ac-45c6-9bf4-fb0be3242267\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-t7f4w"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.176464 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w7gfx\" (UniqueName: \"kubernetes.io/projected/0aaa5850-80d2-4e5c-bf1e-8115fe8b19a2-kube-api-access-w7gfx\") pod \"packageserver-d55dfcdfc-zxdg4\" (UID: \"0aaa5850-80d2-4e5c-bf1e-8115fe8b19a2\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zxdg4"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.176498 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9l844\" (UniqueName: \"kubernetes.io/projected/e1ccd71f-0b06-473b-a421-9e2744377453-kube-api-access-9l844\") pod \"service-ca-9c57cc56f-94w5l\" (UID: \"e1ccd71f-0b06-473b-a421-9e2744377453\") " pod="openshift-service-ca/service-ca-9c57cc56f-94w5l"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.176593 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/beeaabac-5adb-4389-a41d-fcd84b8b7259-csi-data-dir\") pod \"csi-hostpathplugin-48tlb\" (UID: \"beeaabac-5adb-4389-a41d-fcd84b8b7259\") " pod="hostpath-provisioner/csi-hostpathplugin-48tlb"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.176641 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/26b5f8af-bb33-40cc-8ef7-03b0c931896c-installation-pull-secrets\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.176661 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4gsr5\" (UniqueName: \"kubernetes.io/projected/26b5f8af-bb33-40cc-8ef7-03b0c931896c-kube-api-access-4gsr5\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.176686 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f41c466d-c9ac-45c6-9bf4-fb0be3242267-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-t7f4w\" (UID: \"f41c466d-c9ac-45c6-9bf4-fb0be3242267\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-t7f4w"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.176708 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/beeaabac-5adb-4389-a41d-fcd84b8b7259-mountpoint-dir\") pod \"csi-hostpathplugin-48tlb\" (UID: \"beeaabac-5adb-4389-a41d-fcd84b8b7259\") " pod="hostpath-provisioner/csi-hostpathplugin-48tlb"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.176745 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/beeaabac-5adb-4389-a41d-fcd84b8b7259-registration-dir\") pod \"csi-hostpathplugin-48tlb\" (UID: \"beeaabac-5adb-4389-a41d-fcd84b8b7259\") " pod="hostpath-provisioner/csi-hostpathplugin-48tlb"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.176791 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/26b5f8af-bb33-40cc-8ef7-03b0c931896c-ca-trust-extracted\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.176835 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f41c466d-c9ac-45c6-9bf4-fb0be3242267-config\") pod \"kube-apiserver-operator-766d6c64bb-t7f4w\" (UID: \"f41c466d-c9ac-45c6-9bf4-fb0be3242267\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-t7f4w"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.176861 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/26b5f8af-bb33-40cc-8ef7-03b0c931896c-trusted-ca\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.176874 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/0aaa5850-80d2-4e5c-bf1e-8115fe8b19a2-tmpfs\") pod \"packageserver-d55dfcdfc-zxdg4\" (UID: \"0aaa5850-80d2-4e5c-bf1e-8115fe8b19a2\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zxdg4"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.179473 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/beeaabac-5adb-4389-a41d-fcd84b8b7259-csi-data-dir\") pod \"csi-hostpathplugin-48tlb\" (UID: \"beeaabac-5adb-4389-a41d-fcd84b8b7259\") " pod="hostpath-provisioner/csi-hostpathplugin-48tlb"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.179722 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/beeaabac-5adb-4389-a41d-fcd84b8b7259-plugins-dir\") pod \"csi-hostpathplugin-48tlb\" (UID: \"beeaabac-5adb-4389-a41d-fcd84b8b7259\") " pod="hostpath-provisioner/csi-hostpathplugin-48tlb"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.180934 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/26b5f8af-bb33-40cc-8ef7-03b0c931896c-registry-certificates\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:21 crc kubenswrapper[4933]: E0122 05:48:21.182264 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:21.68225053 +0000 UTC m=+149.519375883 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.182326 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/beeaabac-5adb-4389-a41d-fcd84b8b7259-socket-dir\") pod \"csi-hostpathplugin-48tlb\" (UID: \"beeaabac-5adb-4389-a41d-fcd84b8b7259\") " pod="hostpath-provisioner/csi-hostpathplugin-48tlb"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.182597 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/beeaabac-5adb-4389-a41d-fcd84b8b7259-mountpoint-dir\") pod \"csi-hostpathplugin-48tlb\" (UID: \"beeaabac-5adb-4389-a41d-fcd84b8b7259\") " pod="hostpath-provisioner/csi-hostpathplugin-48tlb"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.183019 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/beeaabac-5adb-4389-a41d-fcd84b8b7259-registration-dir\") pod \"csi-hostpathplugin-48tlb\" (UID: \"beeaabac-5adb-4389-a41d-fcd84b8b7259\") " pod="hostpath-provisioner/csi-hostpathplugin-48tlb"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.183064 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/e1ccd71f-0b06-473b-a421-9e2744377453-signing-cabundle\") pod \"service-ca-9c57cc56f-94w5l\" (UID: \"e1ccd71f-0b06-473b-a421-9e2744377453\") " pod="openshift-service-ca/service-ca-9c57cc56f-94w5l"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.183452 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/26b5f8af-bb33-40cc-8ef7-03b0c931896c-ca-trust-extracted\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.183759 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/0aaa5850-80d2-4e5c-bf1e-8115fe8b19a2-tmpfs\") pod \"packageserver-d55dfcdfc-zxdg4\" (UID: \"0aaa5850-80d2-4e5c-bf1e-8115fe8b19a2\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zxdg4"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.183910 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f41c466d-c9ac-45c6-9bf4-fb0be3242267-config\") pod \"kube-apiserver-operator-766d6c64bb-t7f4w\" (UID: \"f41c466d-c9ac-45c6-9bf4-fb0be3242267\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-t7f4w"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.184595 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/26b5f8af-bb33-40cc-8ef7-03b0c931896c-trusted-ca\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.234655 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f41c466d-c9ac-45c6-9bf4-fb0be3242267-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-t7f4w\" (UID: \"f41c466d-c9ac-45c6-9bf4-fb0be3242267\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-t7f4w"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.235319 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/26b5f8af-bb33-40cc-8ef7-03b0c931896c-bound-sa-token\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.278952 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:48:21 crc kubenswrapper[4933]: E0122 05:48:21.279486 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:21.779462189 +0000 UTC m=+149.616587542 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.301961 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/e1ccd71f-0b06-473b-a421-9e2744377453-signing-key\") pod \"service-ca-9c57cc56f-94w5l\" (UID: \"e1ccd71f-0b06-473b-a421-9e2744377453\") " pod="openshift-service-ca/service-ca-9c57cc56f-94w5l"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.302423 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0aaa5850-80d2-4e5c-bf1e-8115fe8b19a2-apiservice-cert\") pod \"packageserver-d55dfcdfc-zxdg4\" (UID: \"0aaa5850-80d2-4e5c-bf1e-8115fe8b19a2\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zxdg4"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.302486 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cb5qf\" (UniqueName: \"kubernetes.io/projected/beeaabac-5adb-4389-a41d-fcd84b8b7259-kube-api-access-cb5qf\") pod \"csi-hostpathplugin-48tlb\" (UID: \"beeaabac-5adb-4389-a41d-fcd84b8b7259\") " pod="hostpath-provisioner/csi-hostpathplugin-48tlb"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.302551 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/26b5f8af-bb33-40cc-8ef7-03b0c931896c-registry-tls\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.303690 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0aaa5850-80d2-4e5c-bf1e-8115fe8b19a2-webhook-cert\") pod \"packageserver-d55dfcdfc-zxdg4\" (UID: \"0aaa5850-80d2-4e5c-bf1e-8115fe8b19a2\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zxdg4"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.307567 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/26b5f8af-bb33-40cc-8ef7-03b0c931896c-installation-pull-secrets\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.313513 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f41c466d-c9ac-45c6-9bf4-fb0be3242267-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-t7f4w\" (UID: \"f41c466d-c9ac-45c6-9bf4-fb0be3242267\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-t7f4w"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.335718 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4gsr5\" (UniqueName: \"kubernetes.io/projected/26b5f8af-bb33-40cc-8ef7-03b0c931896c-kube-api-access-4gsr5\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.336331 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9l844\" (UniqueName: \"kubernetes.io/projected/e1ccd71f-0b06-473b-a421-9e2744377453-kube-api-access-9l844\") pod \"service-ca-9c57cc56f-94w5l\" (UID: \"e1ccd71f-0b06-473b-a421-9e2744377453\") " pod="openshift-service-ca/service-ca-9c57cc56f-94w5l"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.336784 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w7gfx\" (UniqueName: \"kubernetes.io/projected/0aaa5850-80d2-4e5c-bf1e-8115fe8b19a2-kube-api-access-w7gfx\") pod \"packageserver-d55dfcdfc-zxdg4\" (UID: \"0aaa5850-80d2-4e5c-bf1e-8115fe8b19a2\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zxdg4"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.380789 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:21 crc kubenswrapper[4933]: E0122 05:48:21.381130 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:21.881106092 +0000 UTC m=+149.718231445 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:21 crc kubenswrapper[4933]: W0122 05:48:21.394009 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddbf1986a_8c34_4d34_bbf0_eaaf9d89eb8c.slice/crio-0500004d9d8eb6ee69571f20eb04c3098d50a4ccf083a8f567b142aca20ff4a4 WatchSource:0}: Error finding container 0500004d9d8eb6ee69571f20eb04c3098d50a4ccf083a8f567b142aca20ff4a4: Status 404 returned error can't find the container with id 0500004d9d8eb6ee69571f20eb04c3098d50a4ccf083a8f567b142aca20ff4a4
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.414598 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-t7f4w"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.415959 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zxdg4"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.455347 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-94w5l"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.486086 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:48:21 crc kubenswrapper[4933]: E0122 05:48:21.486727 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:21.986707847 +0000 UTC m=+149.823833200 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.534856 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-48tlb"
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.587319 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:21 crc kubenswrapper[4933]: E0122 05:48:21.587612 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:22.087600602 +0000 UTC m=+149.924725945 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.696844 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:48:21 crc kubenswrapper[4933]: E0122 05:48:21.697136 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:22.197120059 +0000 UTC m=+150.034245412 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.736970 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-gzpgq" event={"ID":"5726f2e0-cd2a-4dd2-923d-8b27042e3223","Type":"ContainerStarted","Data":"1e04543553a00ea15cd6ebee83ce4a0bc16ddd93c01c9065142f1c4b4c5bfaf9"}
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.738154 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cgnhf" event={"ID":"12969fb5-7977-4b64-baed-ad39d9524369","Type":"ContainerStarted","Data":"4140e481247c05219a2ae9785a797df052e47837b38b7d3ad6c8248cf72fa466"}
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.752226 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9j8zq" event={"ID":"6b02d39a-2e64-4035-abf2-99dcc7f32194","Type":"ContainerStarted","Data":"42fe9489368826b5ae2b92ea8e56bacb9b21238dbc22e68c494f1b2829122499"}
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.758286 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-jtr4k" event={"ID":"1ba473a8-8e29-49fe-a1b8-8cc7e422037f","Type":"ContainerStarted","Data":"b60bf709b698834864b8d0d701ab8fbbcd383892fdb9959f7be8fd106aa5063c"}
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.759104 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-dgpsx" event={"ID":"4c3867cf-bc74-4587-aa51-d30bf357f0f2","Type":"ContainerStarted","Data":"f27a59178dc4f42a71e9930d4265156e175e42d68d9b92adcca04e7d13312e05"}
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.759761 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-74lmm" event={"ID":"dbf1986a-8c34-4d34-bbf0-eaaf9d89eb8c","Type":"ContainerStarted","Data":"0500004d9d8eb6ee69571f20eb04c3098d50a4ccf083a8f567b142aca20ff4a4"}
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.760909 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-77xhn" event={"ID":"2860560e-929a-4dbe-84c0-23326bf7cdf4","Type":"ContainerStarted","Data":"d5fa29f071e5d8af4ae8fab49a82b8063a1db70369c4e01a30fb6a77c0dc7a24"}
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.797891 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:21 crc kubenswrapper[4933]: E0122 05:48:21.798330 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:22.298316451 +0000 UTC m=+150.135441804 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:21 crc kubenswrapper[4933]: I0122 05:48:21.910618 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:48:21 crc kubenswrapper[4933]: E0122 05:48:21.912681 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:22.412664831 +0000 UTC m=+150.249790184 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:22 crc kubenswrapper[4933]: I0122 05:48:22.019747 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:22 crc kubenswrapper[4933]: E0122 05:48:22.020065 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:22.520052037 +0000 UTC m=+150.357177400 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:22 crc kubenswrapper[4933]: I0122 05:48:22.121055 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:48:22 crc kubenswrapper[4933]: E0122 05:48:22.121405 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:22.621389043 +0000 UTC m=+150.458514396 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:22 crc kubenswrapper[4933]: I0122 05:48:22.127483 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj"
Jan 22 05:48:22 crc kubenswrapper[4933]: I0122 05:48:22.205154 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-77xhn"
Jan 22 05:48:22 crc kubenswrapper[4933]: I0122 05:48:22.228830 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
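From 05:48:21.07 onward the volume manager settles into a tight loop around this one hostpath PVC: MountVolume for the new image-registry pod (26b5f8af-…) and UnmountVolume for the old replica (8f668bae-…) each fail on the unregistered driver and are parked by nestedpendingoperations, so the reconciler/error pair repeats roughly twice a second. Note the two ends of the same backoff mechanism visible in this capture: the pvc operations here stay at the 500ms floor, while the kube-api-access operations earlier had already escalated to 2m2s. A compact sketch of the general pattern behind the "No retries permitted until … (durationBeforeRetry …)" messages, a per-volume gate with capped exponential backoff; this is a simplified model under the assumption that repeated failures of the same operation escalate the delay, not kubelet's actual implementation:

    package main

    import (
        "fmt"
        "time"
    )

    // entry tracks one volume operation's backoff state.
    type entry struct {
        notBefore time.Time
        backoff   time.Duration
    }

    // gate parks failed operations, doubling the delay up to a cap.
    type gate struct {
        initial time.Duration
        max     time.Duration
        entries map[string]*entry
    }

    func newGate(initial, max time.Duration) *gate {
        return &gate{initial: initial, max: max, entries: map[string]*entry{}}
    }

    // fail records a failed attempt and returns the delay before the next
    // retry is permitted ("durationBeforeRetry" in the log lines above).
    func (g *gate) fail(key string, now time.Time) time.Duration {
        e, ok := g.entries[key]
        if !ok {
            e = &entry{backoff: g.initial}
            g.entries[key] = e
        } else {
            e.backoff *= 2
            if e.backoff > g.max {
                e.backoff = g.max
            }
        }
        e.notBefore = now.Add(e.backoff)
        return e.backoff
    }

    // allowed reports whether a new attempt for key may start yet.
    func (g *gate) allowed(key string, now time.Time) bool {
        e, ok := g.entries[key]
        return !ok || now.After(e.notBefore)
    }

    func main() {
        // 500ms and 2m2s are the two delays observed in this log; treating
        // them as the initial value and the cap is an assumption.
        g := newGate(500*time.Millisecond, 2*time.Minute+2*time.Second)
        now := time.Now()
        for i := 1; i <= 6; i++ {
            d := g.fail("pvc-657094db", now) // every attempt fails: driver unregistered
            fmt.Printf("attempt %d failed, no retries permitted for %v\n", i, d)
            now = now.Add(d)
        }
        fmt.Println("may retry now:", g.allowed("pvc-657094db", now.Add(time.Millisecond)))
    }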
Jan 22 05:48:22 crc kubenswrapper[4933]: E0122 05:48:22.229745 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:22.729730592 +0000 UTC m=+150.566855945 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:22 crc kubenswrapper[4933]: I0122 05:48:22.332955 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:48:22 crc kubenswrapper[4933]: E0122 05:48:22.333418 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:22.833400092 +0000 UTC m=+150.670525445 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:22 crc kubenswrapper[4933]: I0122 05:48:22.435816 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:22 crc kubenswrapper[4933]: E0122 05:48:22.436142 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:22.93613039 +0000 UTC m=+150.773255733 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:22 crc kubenswrapper[4933]: I0122 05:48:22.556882 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:48:22 crc kubenswrapper[4933]: E0122 05:48:22.557167 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:23.057138535 +0000 UTC m=+150.894263888 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:22 crc kubenswrapper[4933]: I0122 05:48:22.557427 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:22 crc kubenswrapper[4933]: E0122 05:48:22.557738 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:23.057727069 +0000 UTC m=+150.894852422 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:22 crc kubenswrapper[4933]: I0122 05:48:22.657784 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:48:22 crc kubenswrapper[4933]: E0122 05:48:22.658023 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:23.157983599 +0000 UTC m=+150.995108982 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:22 crc kubenswrapper[4933]: I0122 05:48:22.707452 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-lpvhh"]
Jan 22 05:48:22 crc kubenswrapper[4933]: I0122 05:48:22.719535 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-fntp2"]
Jan 22 05:48:22 crc kubenswrapper[4933]: I0122 05:48:22.719580 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-2z9q7"]
Jan 22 05:48:22 crc kubenswrapper[4933]: I0122 05:48:22.769845 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:22 crc kubenswrapper[4933]: E0122 05:48:22.770221 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:23.270210109 +0000 UTC m=+151.107335462 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:22 crc kubenswrapper[4933]: I0122 05:48:22.780815 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-74lmm" event={"ID":"dbf1986a-8c34-4d34-bbf0-eaaf9d89eb8c","Type":"ContainerStarted","Data":"053a2fface2cab9572fb96bde948495d03d1aa60e2c2cd3611f0e4671491c07b"}
Jan 22 05:48:22 crc kubenswrapper[4933]: I0122 05:48:22.790248 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-gzpgq" event={"ID":"5726f2e0-cd2a-4dd2-923d-8b27042e3223","Type":"ContainerStarted","Data":"354c89921fd42f76e9b133e4efebbdbf2e1b4902e1ed039981b0b7a94a1a514a"}
Jan 22 05:48:22 crc kubenswrapper[4933]: I0122 05:48:22.807018 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cgnhf" event={"ID":"12969fb5-7977-4b64-baed-ad39d9524369","Type":"ContainerStarted","Data":"754c9d5a23b04800638c229d5a16c943daee1a282d2ce53b026cb15e9317a4ee"}
Jan 22 05:48:22 crc kubenswrapper[4933]: I0122 05:48:22.809396 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9j8zq" event={"ID":"6b02d39a-2e64-4035-abf2-99dcc7f32194","Type":"ContainerStarted","Data":"aab05ecaa26364b487e369a9587de68da3f1f462b33de1c2701da91852afa1b3"}
Jan 22 05:48:22 crc kubenswrapper[4933]: I0122 05:48:22.811130 4933 generic.go:334] "Generic (PLEG): container finished" podID="1ba473a8-8e29-49fe-a1b8-8cc7e422037f" containerID="ab020a8e0b1b2ad548b72a229bd1636ef75ff55b08befb69079718f66471a3cc" exitCode=0
Jan 22 05:48:22 crc kubenswrapper[4933]: I0122 05:48:22.811206 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-jtr4k" event={"ID":"1ba473a8-8e29-49fe-a1b8-8cc7e422037f","Type":"ContainerDied","Data":"ab020a8e0b1b2ad548b72a229bd1636ef75ff55b08befb69079718f66471a3cc"}
Jan 22 05:48:22 crc kubenswrapper[4933]: I0122 05:48:22.836565 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-dgpsx" event={"ID":"4c3867cf-bc74-4587-aa51-d30bf357f0f2","Type":"ContainerStarted","Data":"acbd0caf04595c326fc34670078c8b2daae0527a30b3b6693528d82e978e29c4"}
Jan 22 05:48:22 crc kubenswrapper[4933]: I0122 05:48:22.887573 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:48:22 crc kubenswrapper[4933]: E0122 05:48:22.887930 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed.
No retries permitted until 2026-01-22 05:48:23.387906187 +0000 UTC m=+151.225031540 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:48:22 crc kubenswrapper[4933]: I0122 05:48:22.888289 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd" Jan 22 05:48:22 crc kubenswrapper[4933]: E0122 05:48:22.890720 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:23.390700932 +0000 UTC m=+151.227826355 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:48:22 crc kubenswrapper[4933]: I0122 05:48:22.988676 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-77xhn" podStartSLOduration=131.988658008 podStartE2EDuration="2m11.988658008s" podCreationTimestamp="2026-01-22 05:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:22.922378022 +0000 UTC m=+150.759503375" watchObservedRunningTime="2026-01-22 05:48:22.988658008 +0000 UTC m=+150.825783361" Jan 22 05:48:22 crc kubenswrapper[4933]: I0122 05:48:22.990685 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-74lmm" podStartSLOduration=132.990678295 podStartE2EDuration="2m12.990678295s" podCreationTimestamp="2026-01-22 05:46:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:22.988176868 +0000 UTC m=+150.825302221" watchObservedRunningTime="2026-01-22 05:48:22.990678295 +0000 UTC m=+150.827803648" Jan 22 05:48:22 crc kubenswrapper[4933]: I0122 05:48:22.992008 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:48:22 crc kubenswrapper[4933]: E0122 05:48:22.992217 4933 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:23.492193341 +0000 UTC m=+151.329318694 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:48:22 crc kubenswrapper[4933]: I0122 05:48:22.992316 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd" Jan 22 05:48:22 crc kubenswrapper[4933]: E0122 05:48:22.992643 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:23.492635021 +0000 UTC m=+151.329760374 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:48:23 crc kubenswrapper[4933]: I0122 05:48:23.066947 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-gzpgq" podStartSLOduration=5.066927936 podStartE2EDuration="5.066927936s" podCreationTimestamp="2026-01-22 05:48:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:23.033052035 +0000 UTC m=+150.870177408" watchObservedRunningTime="2026-01-22 05:48:23.066927936 +0000 UTC m=+150.904053289" Jan 22 05:48:23 crc kubenswrapper[4933]: I0122 05:48:23.067174 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-jt4v9" podStartSLOduration=132.067166191 podStartE2EDuration="2m12.067166191s" podCreationTimestamp="2026-01-22 05:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:23.064841536 +0000 UTC m=+150.901966889" watchObservedRunningTime="2026-01-22 05:48:23.067166191 +0000 UTC m=+150.904291544" Jan 22 05:48:23 crc kubenswrapper[4933]: I0122 05:48:23.093159 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: 
\"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:48:23 crc kubenswrapper[4933]: E0122 05:48:23.093598 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:23.593583588 +0000 UTC m=+151.430708941 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:48:23 crc kubenswrapper[4933]: I0122 05:48:23.126604 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-dgpsx" podStartSLOduration=132.126585899 podStartE2EDuration="2m12.126585899s" podCreationTimestamp="2026-01-22 05:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:23.124282204 +0000 UTC m=+150.961407567" watchObservedRunningTime="2026-01-22 05:48:23.126585899 +0000 UTC m=+150.963711252" Jan 22 05:48:23 crc kubenswrapper[4933]: I0122 05:48:23.211144 4933 patch_prober.go:28] interesting pod/router-default-5444994796-77xhn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 22 05:48:23 crc kubenswrapper[4933]: [-]has-synced failed: reason withheld Jan 22 05:48:23 crc kubenswrapper[4933]: [+]process-running ok Jan 22 05:48:23 crc kubenswrapper[4933]: healthz check failed Jan 22 05:48:23 crc kubenswrapper[4933]: I0122 05:48:23.211197 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-77xhn" podUID="2860560e-929a-4dbe-84c0-23326bf7cdf4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 22 05:48:23 crc kubenswrapper[4933]: I0122 05:48:23.212236 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd" Jan 22 05:48:23 crc kubenswrapper[4933]: E0122 05:48:23.212555 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:23.712545625 +0000 UTC m=+151.549670978 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:48:23 crc kubenswrapper[4933]: I0122 05:48:23.318643 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:48:23 crc kubenswrapper[4933]: E0122 05:48:23.319312 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:23.819295266 +0000 UTC m=+151.656420619 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:48:23 crc kubenswrapper[4933]: I0122 05:48:23.419934 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd" Jan 22 05:48:23 crc kubenswrapper[4933]: E0122 05:48:23.420414 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:23.920396606 +0000 UTC m=+151.757521959 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:48:23 crc kubenswrapper[4933]: I0122 05:48:23.532900 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:48:23 crc kubenswrapper[4933]: E0122 05:48:23.533102 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:24.033049377 +0000 UTC m=+151.870174730 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:48:23 crc kubenswrapper[4933]: I0122 05:48:23.533281 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd" Jan 22 05:48:23 crc kubenswrapper[4933]: E0122 05:48:23.533598 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:24.033585769 +0000 UTC m=+151.870711122 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:48:23 crc kubenswrapper[4933]: I0122 05:48:23.626110 4933 patch_prober.go:28] interesting pod/router-default-5444994796-77xhn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 22 05:48:23 crc kubenswrapper[4933]: [-]has-synced failed: reason withheld Jan 22 05:48:23 crc kubenswrapper[4933]: [+]process-running ok Jan 22 05:48:23 crc kubenswrapper[4933]: healthz check failed Jan 22 05:48:23 crc kubenswrapper[4933]: I0122 05:48:23.626162 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-77xhn" podUID="2860560e-929a-4dbe-84c0-23326bf7cdf4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 22 05:48:23 crc kubenswrapper[4933]: I0122 05:48:23.634430 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:48:23 crc kubenswrapper[4933]: E0122 05:48:23.634604 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:24.134581206 +0000 UTC m=+151.971706579 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:48:23 crc kubenswrapper[4933]: I0122 05:48:23.634725 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd" Jan 22 05:48:23 crc kubenswrapper[4933]: E0122 05:48:23.634967 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:24.134959976 +0000 UTC m=+151.972085329 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:48:23 crc kubenswrapper[4933]: I0122 05:48:23.747912 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:48:23 crc kubenswrapper[4933]: E0122 05:48:23.748048 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:24.248030784 +0000 UTC m=+152.085156137 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:48:23 crc kubenswrapper[4933]: I0122 05:48:23.748242 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd" Jan 22 05:48:23 crc kubenswrapper[4933]: E0122 05:48:23.748541 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:24.248530167 +0000 UTC m=+152.085655520 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:48:23 crc kubenswrapper[4933]: I0122 05:48:23.850339 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:48:23 crc kubenswrapper[4933]: E0122 05:48:23.850861 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:24.350847235 +0000 UTC m=+152.187972578 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:48:23 crc kubenswrapper[4933]: I0122 05:48:23.850980 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9j8zq" event={"ID":"6b02d39a-2e64-4035-abf2-99dcc7f32194","Type":"ContainerStarted","Data":"006b20f7fe3d7c61c5d16fa03e0a1c037f22c967630c645d4b6f4fc27d2a5258"} Jan 22 05:48:23 crc kubenswrapper[4933]: I0122 05:48:23.853984 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-lpvhh" event={"ID":"28216100-24cb-4a52-af53-0a6fe5b54e4e","Type":"ContainerStarted","Data":"2a1105b1c35efee91ce1df776f3e8ed15aff5f8126056ad9a65303abc0fdbd7e"} Jan 22 05:48:23 crc kubenswrapper[4933]: I0122 05:48:23.854010 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-lpvhh" event={"ID":"28216100-24cb-4a52-af53-0a6fe5b54e4e","Type":"ContainerStarted","Data":"adf82cc179e2fc9931c08f9478b9d6cb6afd648737f7919eb1d7713831906cb0"} Jan 22 05:48:23 crc kubenswrapper[4933]: I0122 05:48:23.855056 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-fntp2" event={"ID":"99c9edc1-b1d6-459e-957f-aab91850d2e5","Type":"ContainerStarted","Data":"e23bccc127fe21422e5dcb829d99bda7b380193475879724cc65496d23621a1d"} Jan 22 05:48:23 crc kubenswrapper[4933]: I0122 05:48:23.855105 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-fntp2" event={"ID":"99c9edc1-b1d6-459e-957f-aab91850d2e5","Type":"ContainerStarted","Data":"c512881aa1e5b0eb7157e3cf9dcdf23a54ef97f37949fcf5a5187d9c7a3e560f"} Jan 22 05:48:23 crc kubenswrapper[4933]: I0122 05:48:23.855463 4933 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-fntp2" Jan 22 05:48:23 crc kubenswrapper[4933]: I0122 05:48:23.856465 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-2z9q7" event={"ID":"0f030cb7-e6bb-47bb-b4a4-821f4a21ad42","Type":"ContainerStarted","Data":"ee41ab6398d996fb0dbbaa97d8771f7a5b3effc3fbfa92d04bfafc6cf681cbd2"} Jan 22 05:48:23 crc kubenswrapper[4933]: I0122 05:48:23.856504 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-2z9q7" event={"ID":"0f030cb7-e6bb-47bb-b4a4-821f4a21ad42","Type":"ContainerStarted","Data":"d464454a03cfb28335a226b83dd7c15b746b9d1bca0a53523f3c7f4c165ccd14"} Jan 22 05:48:23 crc kubenswrapper[4933]: I0122 05:48:23.861426 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cgnhf" event={"ID":"12969fb5-7977-4b64-baed-ad39d9524369","Type":"ContainerStarted","Data":"f0c1f8abee08dbc8b465a1ec71fb4bb125c733057f8afb6d556985f4057d52ba"} Jan 22 05:48:23 crc kubenswrapper[4933]: I0122 05:48:23.868235 4933 patch_prober.go:28] interesting pod/console-operator-58897d9998-fntp2 container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.17:8443/readyz\": dial tcp 10.217.0.17:8443: connect: connection refused" start-of-body= Jan 22 05:48:23 crc kubenswrapper[4933]: I0122 05:48:23.868302 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-fntp2" podUID="99c9edc1-b1d6-459e-957f-aab91850d2e5" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.17:8443/readyz\": dial tcp 10.217.0.17:8443: connect: connection refused" Jan 22 05:48:23 crc kubenswrapper[4933]: I0122 05:48:23.872158 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-jtr4k" event={"ID":"1ba473a8-8e29-49fe-a1b8-8cc7e422037f","Type":"ContainerStarted","Data":"e8356f36013313d727904413e3dfeea3877d13aa5ff4475eca5cc6e49468282f"} Jan 22 05:48:23 crc kubenswrapper[4933]: I0122 05:48:23.891304 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9j8zq" podStartSLOduration=132.891283529 podStartE2EDuration="2m12.891283529s" podCreationTimestamp="2026-01-22 05:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:23.880381505 +0000 UTC m=+151.717506858" watchObservedRunningTime="2026-01-22 05:48:23.891283529 +0000 UTC m=+151.728408882" Jan 22 05:48:23 crc kubenswrapper[4933]: I0122 05:48:23.904277 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cgnhf" podStartSLOduration=133.904259962 podStartE2EDuration="2m13.904259962s" podCreationTimestamp="2026-01-22 05:46:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:23.901326244 +0000 UTC m=+151.738451597" watchObservedRunningTime="2026-01-22 05:48:23.904259962 +0000 UTC m=+151.741385315" Jan 22 05:48:23 crc kubenswrapper[4933]: I0122 05:48:23.919994 4933 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-jtr4k" podStartSLOduration=132.919981119 podStartE2EDuration="2m12.919981119s" podCreationTimestamp="2026-01-22 05:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:23.919380735 +0000 UTC m=+151.756506088" watchObservedRunningTime="2026-01-22 05:48:23.919981119 +0000 UTC m=+151.757106472" Jan 22 05:48:23 crc kubenswrapper[4933]: I0122 05:48:23.943786 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-lpvhh" podStartSLOduration=132.943771964 podStartE2EDuration="2m12.943771964s" podCreationTimestamp="2026-01-22 05:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:23.941352267 +0000 UTC m=+151.778477630" watchObservedRunningTime="2026-01-22 05:48:23.943771964 +0000 UTC m=+151.780897307" Jan 22 05:48:23 crc kubenswrapper[4933]: I0122 05:48:23.952702 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd" Jan 22 05:48:23 crc kubenswrapper[4933]: E0122 05:48:23.954580 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:24.454567506 +0000 UTC m=+152.291692859 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:48:23 crc kubenswrapper[4933]: I0122 05:48:23.989942 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-fntp2" podStartSLOduration=132.989921981 podStartE2EDuration="2m12.989921981s" podCreationTimestamp="2026-01-22 05:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:23.969835443 +0000 UTC m=+151.806960796" watchObservedRunningTime="2026-01-22 05:48:23.989921981 +0000 UTC m=+151.827047334" Jan 22 05:48:23 crc kubenswrapper[4933]: I0122 05:48:23.990462 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-2z9q7" podStartSLOduration=133.990456473 podStartE2EDuration="2m13.990456473s" podCreationTimestamp="2026-01-22 05:46:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:23.988855396 +0000 UTC m=+151.825980759" watchObservedRunningTime="2026-01-22 05:48:23.990456473 +0000 UTC m=+151.827581826" Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.000393 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-sk5qf"] Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.023253 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-6r979"] Jan 22 05:48:24 crc kubenswrapper[4933]: W0122 05:48:24.042378 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc800ab14_5d0a_4078_91f5_b47d05d15ccc.slice/crio-90577d59d55bb5ad0852e836d3ada3660c2532979aea1d167e10af1ec4c7ec3a WatchSource:0}: Error finding container 90577d59d55bb5ad0852e836d3ada3660c2532979aea1d167e10af1ec4c7ec3a: Status 404 returned error can't find the container with id 90577d59d55bb5ad0852e836d3ada3660c2532979aea1d167e10af1ec4c7ec3a Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.055886 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:48:24 crc kubenswrapper[4933]: E0122 05:48:24.056321 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:24.556306872 +0000 UTC m=+152.393432225 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.056350 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd" Jan 22 05:48:24 crc kubenswrapper[4933]: E0122 05:48:24.056715 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:24.556707671 +0000 UTC m=+152.393833014 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.114215 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8ctmc"] Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.158417 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:48:24 crc kubenswrapper[4933]: E0122 05:48:24.159045 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:24.659031059 +0000 UTC m=+152.496156412 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.208389 4933 patch_prober.go:28] interesting pod/router-default-5444994796-77xhn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 22 05:48:24 crc kubenswrapper[4933]: [-]has-synced failed: reason withheld Jan 22 05:48:24 crc kubenswrapper[4933]: [+]process-running ok Jan 22 05:48:24 crc kubenswrapper[4933]: healthz check failed Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.208440 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-77xhn" podUID="2860560e-929a-4dbe-84c0-23326bf7cdf4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.208768 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-t7f4w"] Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.224137 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-tm764"] Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.233102 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-kq95z"] Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.240367 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-46xcf"] Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.285464 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd" Jan 22 05:48:24 crc kubenswrapper[4933]: E0122 05:48:24.288017 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:24.787924578 +0000 UTC m=+152.625049931 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.289336 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-tzlk9"] Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.311907 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-khtrj"] Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.312061 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-wc9dx"] Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.387095 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:48:24 crc kubenswrapper[4933]: E0122 05:48:24.387540 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:24.887519923 +0000 UTC m=+152.724645276 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.395581 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-rng6t"] Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.412600 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-llbq4"] Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.413643 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-pl49j"] Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.423372 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zshgr"] Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.444192 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484345-n7rrt"] Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.494556 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd" Jan 22 05:48:24 crc kubenswrapper[4933]: E0122 05:48:24.494858 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:24.994846079 +0000 UTC m=+152.831971432 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.505275 4933 csr.go:261] certificate signing request csr-pn47d is approved, waiting to be issued Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.517597 4933 csr.go:257] certificate signing request csr-pn47d is issued Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.522911 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-tnl5g"] Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.543183 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-94w5l"] Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.548501 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zxdg4"] Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.578240 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-rhsbf"] Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.595555 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:48:24 crc kubenswrapper[4933]: E0122 05:48:24.595918 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:25.095903407 +0000 UTC m=+152.933028760 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.607629 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-vb2np"]
Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.607678 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-gqpfp"]
Jan 22 05:48:24 crc kubenswrapper[4933]: W0122 05:48:24.628652 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0aaa5850_80d2_4e5c_bf1e_8115fe8b19a2.slice/crio-8284ce761009b71211c4b4a9cd1c6dcd772aff1d452b78aea00b6c2d1af9a0c4 WatchSource:0}: Error finding container 8284ce761009b71211c4b4a9cd1c6dcd772aff1d452b78aea00b6c2d1af9a0c4: Status 404 returned error can't find the container with id 8284ce761009b71211c4b4a9cd1c6dcd772aff1d452b78aea00b6c2d1af9a0c4
Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.633014 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-jhjlm"]
Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.633059 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dr2tg"]
Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.634172 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-gj98r"]
Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.661958 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-tm7xw"]
Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.661999 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-48tlb"]
Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.696490 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:24 crc kubenswrapper[4933]: E0122 05:48:24.696766 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:25.196754902 +0000 UTC m=+153.033880255 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.704765 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-skfkj"]
Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.765793 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-x5npd"]
Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.766922 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mppr6"]
Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.799222 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-q2rvb"]
Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.800851 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:48:24 crc kubenswrapper[4933]: E0122 05:48:24.800901 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:25.300887552 +0000 UTC m=+153.138012895 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.803604 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:24 crc kubenswrapper[4933]: E0122 05:48:24.803973 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:25.303960655 +0000 UTC m=+153.141086008 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.904535 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:48:24 crc kubenswrapper[4933]: E0122 05:48:24.904829 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:25.404814209 +0000 UTC m=+153.241939562 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.943481 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-rng6t" event={"ID":"3bd845c3-ce08-43a5-ad61-3fa92c8604fa","Type":"ContainerStarted","Data":"d1e904e9593c954a2b0b1e7b5d1a82f7d7e58efe3dc70cd48863627f8ee3073d"}
Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.949170 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-48tlb" event={"ID":"beeaabac-5adb-4389-a41d-fcd84b8b7259","Type":"ContainerStarted","Data":"0f73e5f3bc8a9428a50f4840f7fa609e76f3fac05073c40b6c085e160f9ebeb6"}
Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.951808 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rhsbf" event={"ID":"4fdcd737-7ae0-40f3-9b0c-7e1cacafebdc","Type":"ContainerStarted","Data":"40882bd93eceb1719f6921b329e778d1973080d4ef6c8150a83928eeb886ab7e"}
Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.953315 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-skfkj" event={"ID":"4a7dbd55-b911-42ff-a5e7-ceb61a071343","Type":"ContainerStarted","Data":"a6709c2270e41ffd8361e02fec36a715c2d5718ec1448129464b024f6cf82bed"}
Jan 22 05:48:24 crc kubenswrapper[4933]: W0122 05:48:24.969294 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podefc95799_4d8b_4adf_9d64_28717c1bdd76.slice/crio-26a4d11a04897cc6a1c97ced0e779f1a8fc0aeae70a7d80451a1ebfffc3efa87 WatchSource:0}: Error finding container 26a4d11a04897cc6a1c97ced0e779f1a8fc0aeae70a7d80451a1ebfffc3efa87: Status 404 returned error can't find the container with id 26a4d11a04897cc6a1c97ced0e779f1a8fc0aeae70a7d80451a1ebfffc3efa87
Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.991915 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-tm764" event={"ID":"80358407-ad1b-499f-868f-44e3388b0fac","Type":"ContainerStarted","Data":"2e6ac5ecc91d1f837f4ec0e2c49af2102481db317d06534e072d3f39a1594196"}
Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.991952 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-tm764" event={"ID":"80358407-ad1b-499f-868f-44e3388b0fac","Type":"ContainerStarted","Data":"456b665f92ccbfe853ae6ad28d5229ddbbf2d2fc894f1101f06e6e4866f06103"}
Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.995918 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-tm764"
Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.996965 4933 patch_prober.go:28] interesting pod/downloads-7954f5f757-tm764 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.19:8080/\": dial tcp 10.217.0.19:8080: connect: connection refused" start-of-body=
Jan 22 05:48:24 crc kubenswrapper[4933]: I0122 05:48:24.997038 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-tm764" podUID="80358407-ad1b-499f-868f-44e3388b0fac" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.19:8080/\": dial tcp 10.217.0.19:8080: connect: connection refused"
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.004663 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-kq95z" event={"ID":"8796a7df-de04-490e-b5a9-c9fad5483d61","Type":"ContainerStarted","Data":"36e2e688f5fe114f74c096c262fa21cc42a956949d0e42c30e402a5c86eb6df0"}
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.004704 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-kq95z" event={"ID":"8796a7df-de04-490e-b5a9-c9fad5483d61","Type":"ContainerStarted","Data":"5a5a3f82c766604165df58752a8c3047919d685fd5f9dce647d4d6393af56389"}
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.005468 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:25 crc kubenswrapper[4933]: E0122 05:48:25.005712 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:25.505701434 +0000 UTC m=+153.342826787 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.016061 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-tm764" podStartSLOduration=134.016044595 podStartE2EDuration="2m14.016044595s" podCreationTimestamp="2026-01-22 05:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:25.014640672 +0000 UTC m=+152.851766035" watchObservedRunningTime="2026-01-22 05:48:25.016044595 +0000 UTC m=+152.853169938"
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.040887 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-gqpfp" event={"ID":"63e15a9d-3476-43c1-93e0-6453f0fc9adb","Type":"ContainerStarted","Data":"ccfc5285ff70506f1eaffb8a8c7cf6110ceff3040a35c9d5e1943feb1092ff66"}
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.062275 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-kq95z" podStartSLOduration=134.062257214 podStartE2EDuration="2m14.062257214s" podCreationTimestamp="2026-01-22 05:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:25.062015838 +0000 UTC m=+152.899141201" watchObservedRunningTime="2026-01-22 05:48:25.062257214 +0000 UTC m=+152.899382567"
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.107057 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:48:25 crc kubenswrapper[4933]: E0122 05:48:25.108123 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:25.608101214 +0000 UTC m=+153.445226617 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.158660 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-wc9dx" event={"ID":"b3240d3a-10ab-4df0-9d7b-69e3cfb1aaf8","Type":"ContainerStarted","Data":"a2b46a70dec57fa96e7b542f6649a1c4bdd8eba93a27b9ee0684bc7df1a3e7e3"}
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.164634 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-pl49j" event={"ID":"5a8e383c-706b-43f4-ac19-2bc2e4e83115","Type":"ContainerStarted","Data":"eac0d223264cd74efe8e8dc80aa00d54976b721399af1ecf25e0f51cc442f7ae"}
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.173434 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-6r979" event={"ID":"c800ab14-5d0a-4078-91f5-b47d05d15ccc","Type":"ContainerStarted","Data":"18beaf1fcc10460430d3ca33a4b21109d3e3e65429cbe961144d79ff028864bc"}
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.173480 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-6r979" event={"ID":"c800ab14-5d0a-4078-91f5-b47d05d15ccc","Type":"ContainerStarted","Data":"90577d59d55bb5ad0852e836d3ada3660c2532979aea1d167e10af1ec4c7ec3a"}
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.175776 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zshgr" event={"ID":"3c7d8ca9-574e-4b56-8ed6-b9155509a740","Type":"ContainerStarted","Data":"60f1df53471cd3d0e01db44b20a61d0f2f690a9ed8bbed72bbfd09d4b2f25430"}
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.176575 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-gj98r" event={"ID":"2c4864a0-5981-4eef-a0db-c33a535e02de","Type":"ContainerStarted","Data":"9afbe5332bbb521deaa995917ae79f51154ec419d0ef122d54e9879f4a01b79b"}
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.177320 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-tm7xw" event={"ID":"3be04461-5ea1-4f2c-b87b-bf955dcd9bf1","Type":"ContainerStarted","Data":"c84394266b71016f04d526d6dc29908d6c565a1aab5372f2e973394520e91fb7"}
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.179384 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-jhjlm" event={"ID":"b2d54262-2b73-4ffa-80e6-beb88d8fa5b7","Type":"ContainerStarted","Data":"785967afdf668f2342fc6ecaef7350bee6c8a8c3d516479a11dbc72fa820e0ac"}
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.180524 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-khtrj" event={"ID":"d24a8b16-9687-49f3-bed0-888340007876","Type":"ContainerStarted","Data":"6a3b85bb50eae63a90ffd9921c8be27406687bea94d7f9d71d96f101e262ea86"}
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.181220 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-vb2np" event={"ID":"245c05e3-0c9d-4b20-8bef-b16bb0b492c1","Type":"ContainerStarted","Data":"67252655a871b4a981865fcc0234ec1317250be407ebcd390e47bb87cf977ee5"}
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.187387 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dr2tg" event={"ID":"2a0dd9a3-0af3-4ee5-bbad-7ee1126de8b7","Type":"ContainerStarted","Data":"2b950bff820e804c31e953d966ea13795c98470806d3d96ac4d68c2eb141000c"}
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.224746 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:25 crc kubenswrapper[4933]: E0122 05:48:25.225706 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:25.725688039 +0000 UTC m=+153.562813432 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.259806 4933 patch_prober.go:28] interesting pod/router-default-5444994796-77xhn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 22 05:48:25 crc kubenswrapper[4933]: [-]has-synced failed: reason withheld
Jan 22 05:48:25 crc kubenswrapper[4933]: [+]process-running ok
Jan 22 05:48:25 crc kubenswrapper[4933]: healthz check failed
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.259860 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-77xhn" podUID="2860560e-929a-4dbe-84c0-23326bf7cdf4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.262365 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-llbq4" event={"ID":"8bdbecf3-1fbc-4184-b33b-94031b7e3845","Type":"ContainerStarted","Data":"3db14a7a10d48ed372dcf7ecfaaac7d10ce10d274852e430b6413467911caacd"}
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.319730 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8ctmc" event={"ID":"2edc4443-850c-48d0-a605-3debd5d38299","Type":"ContainerStarted","Data":"3178a5cebb0127a263b1f55b76dea284d25c2ac298d7880df1b0cc2f3e533ece"}
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.319974 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8ctmc" event={"ID":"2edc4443-850c-48d0-a605-3debd5d38299","Type":"ContainerStarted","Data":"f43da691706ffa53bdd2559a4c0664380b12db0b0e546c725cd118e5f9d2cb0a"}
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.322936 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-sk5qf" event={"ID":"5f757f81-9a44-488a-8a60-4814d2bc418d","Type":"ContainerStarted","Data":"98c9df1cf56501be045d8a87a6152776d8aaba6dca75bbe59a3318a587e9be4d"}
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.322960 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-sk5qf" event={"ID":"5f757f81-9a44-488a-8a60-4814d2bc418d","Type":"ContainerStarted","Data":"c43a27e908fe98ed1a01d18a4ec590bfaf75b7f47644ca030eeb9008ca511f46"}
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.325443 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:48:25 crc kubenswrapper[4933]: E0122 05:48:25.325572 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:25.82555433 +0000 UTC m=+153.662679683 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.325864 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:25 crc kubenswrapper[4933]: E0122 05:48:25.326227 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:25.826219466 +0000 UTC m=+153.663344819 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.326948 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tzlk9" event={"ID":"44595319-f5f2-4db3-9671-9c8680c2dfc7","Type":"ContainerStarted","Data":"6d32637900b1a1c19ced0771c46d6d20697304dbe8d93d0a7d36597627c65d6d"}
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.326983 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tzlk9"
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.329296 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484345-n7rrt" event={"ID":"ae503948-5876-4b1e-ba9f-23ebb0e05b94","Type":"ContainerStarted","Data":"a9be9efeb63748b0eb39b65860041f7e99005e81a6b9aa006a5eb88d57392450"}
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.352637 4933 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-tzlk9 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" start-of-body=
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.352677 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tzlk9" podUID="44595319-f5f2-4db3-9671-9c8680c2dfc7" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused"
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.400776 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-6r979" podStartSLOduration=134.400759716 podStartE2EDuration="2m14.400759716s" podCreationTimestamp="2026-01-22 05:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:25.236151633 +0000 UTC m=+153.073277006" watchObservedRunningTime="2026-01-22 05:48:25.400759716 +0000 UTC m=+153.237885069"
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.401740 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8ctmc" podStartSLOduration=134.401733918 podStartE2EDuration="2m14.401733918s" podCreationTimestamp="2026-01-22 05:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:25.400244203 +0000 UTC m=+153.237369556" watchObservedRunningTime="2026-01-22 05:48:25.401733918 +0000 UTC m=+153.238859271"
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.463332 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-94w5l" event={"ID":"e1ccd71f-0b06-473b-a421-9e2744377453","Type":"ContainerStarted","Data":"db2c60b915163bfe157be6b1f76a5c74317e7434cc9072c4cfa32a6f1f8dba78"}
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.463513 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:48:25 crc kubenswrapper[4933]: E0122 05:48:25.464199 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:25.964180806 +0000 UTC m=+153.801306159 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.464298 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29484345-n7rrt" podStartSLOduration=134.464287298 podStartE2EDuration="2m14.464287298s" podCreationTimestamp="2026-01-22 05:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:25.463690455 +0000 UTC m=+153.300815808" watchObservedRunningTime="2026-01-22 05:48:25.464287298 +0000 UTC m=+153.301412661"
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.521202 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2027-01-22 05:43:24 +0000 UTC, rotation deadline is 2026-10-13 11:56:50.294139139 +0000 UTC
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.521239 4933 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 6342h8m24.772902921s for next certificate rotation
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.544387 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-tnl5g" event={"ID":"4bbb457e-b621-4447-ab9b-c3337ff62905","Type":"ContainerStarted","Data":"2df8a90d013a4f91dfc98213c28441ecd43d4ee95f09a6e941a4b0ce39f1cd71"}
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.559442 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zxdg4" event={"ID":"0aaa5850-80d2-4e5c-bf1e-8115fe8b19a2","Type":"ContainerStarted","Data":"8284ce761009b71211c4b4a9cd1c6dcd772aff1d452b78aea00b6c2d1af9a0c4"}
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.562156 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-t7f4w" event={"ID":"f41c466d-c9ac-45c6-9bf4-fb0be3242267","Type":"ContainerStarted","Data":"49ec06868892a377ea0b3f6bd5addd94854f65899fe9067a502ebedfbdf6c99d"}
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.564816 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-46xcf" event={"ID":"48976c66-6d66-4d5b-bb2c-84f7b0cb292f","Type":"ContainerStarted","Data":"3bd910f1358231b9eb3630951d3ea3d6e243aa05d3b3510633b88e1d2d6c9b80"}
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.564839 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-46xcf" event={"ID":"48976c66-6d66-4d5b-bb2c-84f7b0cb292f","Type":"ContainerStarted","Data":"b35cf84758f7bed89e9bbba025f6771556c313befaba166e6353f009dec0abda"}
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.566516 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-jtr4k"
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.569084 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:25 crc kubenswrapper[4933]: E0122 05:48:25.570414 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:26.070399566 +0000 UTC m=+153.907524919 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.573545 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-fntp2"
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.634466 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tzlk9" podStartSLOduration=134.634450491 podStartE2EDuration="2m14.634450491s" podCreationTimestamp="2026-01-22 05:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:25.491931874 +0000 UTC m=+153.329057227" watchObservedRunningTime="2026-01-22 05:48:25.634450491 +0000 UTC m=+153.471575844"
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.676122 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:48:25 crc kubenswrapper[4933]: E0122 05:48:25.676440 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:26.176424911 +0000 UTC m=+154.013550264 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.681336 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-t7f4w" podStartSLOduration=134.681313804 podStartE2EDuration="2m14.681313804s" podCreationTimestamp="2026-01-22 05:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:25.634174374 +0000 UTC m=+153.471299727" watchObservedRunningTime="2026-01-22 05:48:25.681313804 +0000 UTC m=+153.518439157"
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.777372 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:25 crc kubenswrapper[4933]: E0122 05:48:25.778132 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:26.278107704 +0000 UTC m=+154.115233057 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:25 crc kubenswrapper[4933]: I0122 05:48:25.882389 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:48:25 crc kubenswrapper[4933]: E0122 05:48:25.882781 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:26.382763177 +0000 UTC m=+154.219888530 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:25.984291 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:26 crc kubenswrapper[4933]: E0122 05:48:25.984603 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:26.484591634 +0000 UTC m=+154.321716987 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.084788 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:48:26 crc kubenswrapper[4933]: E0122 05:48:26.085327 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:26.585309376 +0000 UTC m=+154.422434729 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.189181 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:26 crc kubenswrapper[4933]: E0122 05:48:26.189661 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:26.689648531 +0000 UTC m=+154.526773884 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.208418 4933 patch_prober.go:28] interesting pod/router-default-5444994796-77xhn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 22 05:48:26 crc kubenswrapper[4933]: [-]has-synced failed: reason withheld
Jan 22 05:48:26 crc kubenswrapper[4933]: [+]process-running ok
Jan 22 05:48:26 crc kubenswrapper[4933]: healthz check failed
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.208473 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-77xhn" podUID="2860560e-929a-4dbe-84c0-23326bf7cdf4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.213485 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-jtr4k"
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.306533 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:48:26 crc kubenswrapper[4933]: E0122 05:48:26.306965 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:26.806949369 +0000 UTC m=+154.644074722 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.422579 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:26 crc kubenswrapper[4933]: E0122 05:48:26.422870 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:26.922857005 +0000 UTC m=+154.759982358 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.540388 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:48:26 crc kubenswrapper[4933]: E0122 05:48:26.540994 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:27.040979362 +0000 UTC m=+154.878104715 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.653534 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:26 crc kubenswrapper[4933]: E0122 05:48:26.653884 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:27.153873288 +0000 UTC m=+154.990998631 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.667098 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-6r979" event={"ID":"c800ab14-5d0a-4078-91f5-b47d05d15ccc","Type":"ContainerStarted","Data":"633adbe99e93f2871479d797320c0aad95874b72b362ca17cc81c167184e4306"}
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.668593 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484345-n7rrt" event={"ID":"ae503948-5876-4b1e-ba9f-23ebb0e05b94","Type":"ContainerStarted","Data":"7eb8cf5f766b8084fb5d0a37be26d574a884da83588b699a614e4561c39f4d54"}
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.669975 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-jhjlm" event={"ID":"b2d54262-2b73-4ffa-80e6-beb88d8fa5b7","Type":"ContainerStarted","Data":"6d265e6c698fe0cd194207ea11669a730784bc5f7889f91d1535b10a0042d73a"}
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.711924 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-rng6t" event={"ID":"3bd845c3-ce08-43a5-ad61-3fa92c8604fa","Type":"ContainerStarted","Data":"6bb5513b08835363fb180c085d0aecc71b5b07931dff48a1725c3b2c97d4bffa"}
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.723627 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-q2rvb" event={"ID":"efc95799-4d8b-4adf-9d64-28717c1bdd76","Type":"ContainerStarted","Data":"26a4d11a04897cc6a1c97ced0e779f1a8fc0aeae70a7d80451a1ebfffc3efa87"}
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.755577 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:48:26 crc kubenswrapper[4933]: E0122 05:48:26.756873 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:27.256852881 +0000 UTC m=+155.093978244 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.768467 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-rng6t" podStartSLOduration=135.768449493 podStartE2EDuration="2m15.768449493s" podCreationTimestamp="2026-01-22 05:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:26.766835554 +0000 UTC m=+154.603960927" watchObservedRunningTime="2026-01-22 05:48:26.768449493 +0000 UTC m=+154.605574846"
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.834178 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zxdg4" event={"ID":"0aaa5850-80d2-4e5c-bf1e-8115fe8b19a2","Type":"ContainerStarted","Data":"43c18081e67500d6fe2bd969eacf5853913b86ac7dbaea7895398bc51462abaf"}
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.835227 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zxdg4"
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.841096 4933 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-zxdg4 container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.23:5443/healthz\": dial tcp 10.217.0.23:5443: connect: connection refused" start-of-body=
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.841138 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zxdg4" podUID="0aaa5850-80d2-4e5c-bf1e-8115fe8b19a2" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.23:5443/healthz\": dial tcp 10.217.0.23:5443: connect: connection refused"
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.856242 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-pl49j" event={"ID":"5a8e383c-706b-43f4-ac19-2bc2e4e83115","Type":"ContainerStarted","Data":"e964d0c26a8943311eb9f7a449ca852d3b6b57e9627e7f0fc2f1bb25fa9d5cdd"}
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.857104 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:26 crc kubenswrapper[4933]: E0122 05:48:26.857680 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:27.357661604 +0000 UTC m=+155.194787047 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.862303 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-t7f4w" event={"ID":"f41c466d-c9ac-45c6-9bf4-fb0be3242267","Type":"ContainerStarted","Data":"97774578c46e8bc677a7cf5b631a601c3e3344119ae60a1539d07219ec4f5147"}
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.863925 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-gj98r" event={"ID":"2c4864a0-5981-4eef-a0db-c33a535e02de","Type":"ContainerStarted","Data":"e6931cc142b2e1c5f3a3175149720060774695b264fe43588a371489467574bf"}
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.864531 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-gj98r"
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.867064 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rhsbf" event={"ID":"4fdcd737-7ae0-40f3-9b0c-7e1cacafebdc","Type":"ContainerStarted","Data":"dc51f3a07607cf1828c824a89f8b0ae86d121440e500634e02e380988d258ec0"}
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.875273 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-wc9dx" event={"ID":"b3240d3a-10ab-4df0-9d7b-69e3cfb1aaf8","Type":"ContainerStarted","Data":"08033b32b5952cea3d8cfdd0f7d9394b4d19e26baa8436ae155745ecae45cc48"}
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.876757 4933 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-gj98r container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.18:6443/healthz\": dial tcp 10.217.0.18:6443: connect: connection refused" start-of-body=
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.876817 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-gj98r" podUID="2c4864a0-5981-4eef-a0db-c33a535e02de" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.18:6443/healthz\": dial tcp 10.217.0.18:6443: connect: connection refused"
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.885434 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zxdg4" podStartSLOduration=135.885419183 podStartE2EDuration="2m15.885419183s" podCreationTimestamp="2026-01-22 05:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:26.884592894 +0000 UTC m=+154.721718257" watchObservedRunningTime="2026-01-22 05:48:26.885419183 +0000 UTC m=+154.722544536"
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.894052 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tzlk9" event={"ID":"44595319-f5f2-4db3-9671-9c8680c2dfc7","Type":"ContainerStarted","Data":"e30d8f80c85f5e587204ffacd864dfbbd2eaee05f711406e5fe051e7b167636b"}
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.941800 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-gj98r" podStartSLOduration=136.941778408 podStartE2EDuration="2m16.941778408s" podCreationTimestamp="2026-01-22 05:46:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:26.941376019 +0000 UTC m=+154.778501382" watchObservedRunningTime="2026-01-22 05:48:26.941778408 +0000 UTC m=+154.778903771"
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.952866 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-94w5l" event={"ID":"e1ccd71f-0b06-473b-a421-9e2744377453","Type":"ContainerStarted","Data":"61a19eafd66f666a53bc71ca74c50a25f8e22e0cf0d58d53c9bcf30e2ab710d0"}
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.967873 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-sk5qf" event={"ID":"5f757f81-9a44-488a-8a60-4814d2bc418d","Type":"ContainerStarted","Data":"0ead213edc8323e29dfa524b2716945264da44ff3d65f5603943df6b373b5165"}
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.971883 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:48:26 crc kubenswrapper[4933]: E0122 05:48:26.971962 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:27.471945373 +0000 UTC m=+155.309070726 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.972674 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.975220 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-x5npd" event={"ID":"1825f935-ad5f-4d85-aeef-e368841c5547","Type":"ContainerStarted","Data":"65bbb19872a1e43e9b111421a72cecbb3a0d6a66c3e55e02bdeaed4e60ddf349"}
Jan 22 05:48:26 crc kubenswrapper[4933]: E0122 05:48:26.976383 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:27.476368016 +0000 UTC m=+155.313493459 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.981646 4933 generic.go:334] "Generic (PLEG): container finished" podID="d24a8b16-9687-49f3-bed0-888340007876" containerID="7085bb4ff4ae071a2c45194842c049bc7b91784b248743677ffb731eafd5cafe" exitCode=0
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.981698 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-khtrj" event={"ID":"d24a8b16-9687-49f3-bed0-888340007876","Type":"ContainerDied","Data":"7085bb4ff4ae071a2c45194842c049bc7b91784b248743677ffb731eafd5cafe"}
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.984747 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mppr6" event={"ID":"193f8e22-42fc-444d-92b6-7b44fcdc8200","Type":"ContainerStarted","Data":"11131dda82c99ff405567e004f3491be8522fcba3ff0d849419a4a42c43467a5"}
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.985272 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mppr6"
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.986686 4933 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-mppr6 container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.24:8443/healthz\": dial tcp 10.217.0.24:8443: connect: connection refused" start-of-body=
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.986789 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mppr6" podUID="193f8e22-42fc-444d-92b6-7b44fcdc8200" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.24:8443/healthz\": dial tcp 10.217.0.24:8443: connect: connection refused"
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.987320 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dr2tg" event={"ID":"2a0dd9a3-0af3-4ee5-bbad-7ee1126de8b7","Type":"ContainerStarted","Data":"5ae3bead11cb9946e397b7730fd807a484ded118ba853bd63eafd2a66d85394f"}
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.988284 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dr2tg"
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.989524 4933 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-dr2tg container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.43:8443/healthz\": dial tcp 10.217.0.43:8443: connect: connection refused" start-of-body=
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.989551 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dr2tg" podUID="2a0dd9a3-0af3-4ee5-bbad-7ee1126de8b7" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.43:8443/healthz\": dial tcp 10.217.0.43:8443: connect: connection refused"
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.990699 4933 generic.go:334] "Generic (PLEG): container finished" podID="8bdbecf3-1fbc-4184-b33b-94031b7e3845" containerID="f5e0675536875180cdc9c675326d6525fb9d598eff4546cd232542b748da9c38" exitCode=0
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.991565 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-llbq4" event={"ID":"8bdbecf3-1fbc-4184-b33b-94031b7e3845","Type":"ContainerDied","Data":"f5e0675536875180cdc9c675326d6525fb9d598eff4546cd232542b748da9c38"}
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.995357 4933 patch_prober.go:28] interesting pod/downloads-7954f5f757-tm764 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.19:8080/\": dial tcp 10.217.0.19:8080: connect: connection refused" start-of-body=
Jan 22 05:48:26 crc kubenswrapper[4933]: I0122 05:48:26.995402 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-tm764" podUID="80358407-ad1b-499f-868f-44e3388b0fac" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.19:8080/\": dial tcp 10.217.0.19:8080: connect: connection refused"
Jan 22 05:48:27 crc kubenswrapper[4933]: I0122 05:48:27.073469 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:48:27 crc kubenswrapper[4933]: E0122 05:48:27.073831 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:27.573815001 +0000 UTC m=+155.410940354 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:27 crc kubenswrapper[4933]: I0122 05:48:27.074436 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:27 crc kubenswrapper[4933]: E0122 05:48:27.077171 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:27.577156509 +0000 UTC m=+155.414281862 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:27 crc kubenswrapper[4933]: I0122 05:48:27.077975 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-94w5l" podStartSLOduration=136.077958318 podStartE2EDuration="2m16.077958318s" podCreationTimestamp="2026-01-22 05:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:27.072707675 +0000 UTC m=+154.909833038" watchObservedRunningTime="2026-01-22 05:48:27.077958318 +0000 UTC m=+154.915083681"
Jan 22 05:48:27 crc kubenswrapper[4933]: I0122 05:48:27.167962 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dr2tg" podStartSLOduration=136.167946038 podStartE2EDuration="2m16.167946038s" podCreationTimestamp="2026-01-22 05:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:27.132296605 +0000 UTC m=+154.969421958" watchObservedRunningTime="2026-01-22 05:48:27.167946038 +0000 UTC m=+155.005071391"
Jan 22 05:48:27 crc kubenswrapper[4933]: I0122 05:48:27.176180 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: 
\"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:48:27 crc kubenswrapper[4933]: E0122 05:48:27.176703 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:27.676683162 +0000 UTC m=+155.513808515 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:48:27 crc kubenswrapper[4933]: I0122 05:48:27.209217 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-sk5qf" podStartSLOduration=136.209195661 podStartE2EDuration="2m16.209195661s" podCreationTimestamp="2026-01-22 05:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:27.208798182 +0000 UTC m=+155.045923535" watchObservedRunningTime="2026-01-22 05:48:27.209195661 +0000 UTC m=+155.046321014" Jan 22 05:48:27 crc kubenswrapper[4933]: I0122 05:48:27.213973 4933 patch_prober.go:28] interesting pod/router-default-5444994796-77xhn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 22 05:48:27 crc kubenswrapper[4933]: [-]has-synced failed: reason withheld Jan 22 05:48:27 crc kubenswrapper[4933]: [+]process-running ok Jan 22 05:48:27 crc kubenswrapper[4933]: healthz check failed Jan 22 05:48:27 crc kubenswrapper[4933]: I0122 05:48:27.214032 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-77xhn" podUID="2860560e-929a-4dbe-84c0-23326bf7cdf4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 22 05:48:27 crc kubenswrapper[4933]: I0122 05:48:27.280591 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd" Jan 22 05:48:27 crc kubenswrapper[4933]: E0122 05:48:27.280914 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:27.780899105 +0000 UTC m=+155.618024458 (durationBeforeRetry 500ms). 
Jan 22 05:48:27 crc kubenswrapper[4933]: I0122 05:48:27.353483 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mppr6" podStartSLOduration=136.353463848 podStartE2EDuration="2m16.353463848s" podCreationTimestamp="2026-01-22 05:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:27.352702201 +0000 UTC m=+155.189827554" watchObservedRunningTime="2026-01-22 05:48:27.353463848 +0000 UTC m=+155.190589211"
Jan 22 05:48:27 crc kubenswrapper[4933]: I0122 05:48:27.382174 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:48:27 crc kubenswrapper[4933]: E0122 05:48:27.382601 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:27.882583758 +0000 UTC m=+155.719709111 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:27 crc kubenswrapper[4933]: I0122 05:48:27.488573 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:27 crc kubenswrapper[4933]: E0122 05:48:27.488926 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:27.98891203 +0000 UTC m=+155.826037383 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:27 crc kubenswrapper[4933]: I0122 05:48:27.536090 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tzlk9"
Jan 22 05:48:27 crc kubenswrapper[4933]: I0122 05:48:27.601264 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:48:27 crc kubenswrapper[4933]: E0122 05:48:27.601775 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:28.101754385 +0000 UTC m=+155.938879738 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:27 crc kubenswrapper[4933]: I0122 05:48:27.702882 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:27 crc kubenswrapper[4933]: E0122 05:48:27.703442 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:28.203431108 +0000 UTC m=+156.040556451 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:27 crc kubenswrapper[4933]: I0122 05:48:27.808243 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:48:27 crc kubenswrapper[4933]: E0122 05:48:27.808624 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:28.308610283 +0000 UTC m=+156.145735636 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:27 crc kubenswrapper[4933]: I0122 05:48:27.930999 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:27 crc kubenswrapper[4933]: E0122 05:48:27.931382 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:28.431364119 +0000 UTC m=+156.268489472 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.018612 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-x5npd" event={"ID":"1825f935-ad5f-4d85-aeef-e368841c5547","Type":"ContainerStarted","Data":"a02ead44c9230b7bdb7ae847a3c8c637914f0bf24ce58dfadf07b92ae20cdbb8"}
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.021321 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zshgr" event={"ID":"3c7d8ca9-574e-4b56-8ed6-b9155509a740","Type":"ContainerStarted","Data":"79c2f5ca12d7745b648a21ba3805e81158c38fc1279e7648c4b0896ff8c42f38"}
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.023145 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-46xcf" event={"ID":"48976c66-6d66-4d5b-bb2c-84f7b0cb292f","Type":"ContainerStarted","Data":"d08c7b4b8a25793732c5090f6de1b2e287b3b758a1443a00fbe57c662ae9bdd9"}
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.031617 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:48:28 crc kubenswrapper[4933]: E0122 05:48:28.031784 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:28.531753083 +0000 UTC m=+156.368878446 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.031983 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:28 crc kubenswrapper[4933]: E0122 05:48:28.032307 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:28.532294725 +0000 UTC m=+156.369420078 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
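Each failed operation above is parked by nestedpendingoperations with a not-before deadline, visible as "No retries permitted until ... (durationBeforeRetry 500ms)". A hedged sketch of that gating follows, assuming a simple per-operation record; this is an illustration of the observable behavior, not the kubelet's actual implementation.

```go
// Hedged sketch of the retry gating logged above; the real logic lives in
// the kubelet's nestedpendingoperations, and the type below is made up
// purely to illustrate the "not before" computation.
package main

import (
	"fmt"
	"time"
)

type pendingOp struct {
	lastError           error
	durationBeforeRetry time.Duration
	notBefore           time.Time // earliest permitted retry
}

// markFailed records a failure and computes the earliest next attempt.
func (op *pendingOp) markFailed(err error, backoff time.Duration) {
	op.lastError = err
	op.durationBeforeRetry = backoff
	op.notBefore = time.Now().Add(backoff)
}

// mayRetry reports whether the backoff window has elapsed.
func (op *pendingOp) mayRetry(now time.Time) bool {
	return !now.Before(op.notBefore)
}

func main() {
	op := &pendingOp{}
	op.markFailed(fmt.Errorf("MountVolume.MountDevice failed"), 500*time.Millisecond)
	fmt.Printf("No retries permitted until %s (durationBeforeRetry %s)\n",
		op.notBefore.Format(time.RFC3339Nano), op.durationBeforeRetry)
	fmt.Println("retry now?", op.mayRetry(time.Now())) // false until the 500ms elapses
}
```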
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.033807 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-llbq4" event={"ID":"8bdbecf3-1fbc-4184-b33b-94031b7e3845","Type":"ContainerStarted","Data":"ede69d8919e703ebbd652ba32a1b07a3121b472e7ad784b0ce1ae3aaba0b685f"}
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.039355 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-48tlb" event={"ID":"beeaabac-5adb-4389-a41d-fcd84b8b7259","Type":"ContainerStarted","Data":"056f654d1f0abde8753db6fee6a7e283d8cf8cf6f608ef7f9904dd52eae1e196"}
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.071089 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-x5npd" podStartSLOduration=137.07105942 podStartE2EDuration="2m17.07105942s" podCreationTimestamp="2026-01-22 05:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:28.070262461 +0000 UTC m=+155.907387814" watchObservedRunningTime="2026-01-22 05:48:28.07105942 +0000 UTC m=+155.908184763"
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.072416 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mppr6" event={"ID":"193f8e22-42fc-444d-92b6-7b44fcdc8200","Type":"ContainerStarted","Data":"9f04515f2ae7cd070d11e79a73832ade92ef37c0dfd10a6455e1f15bd91718e8"}
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.082665 4933 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-mppr6 container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.24:8443/healthz\": dial tcp 10.217.0.24:8443: connect: connection refused" start-of-body=
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.082721 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mppr6" podUID="193f8e22-42fc-444d-92b6-7b44fcdc8200" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.24:8443/healthz\": dial tcp 10.217.0.24:8443: connect: connection refused"
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.113402 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-pl49j" event={"ID":"5a8e383c-706b-43f4-ac19-2bc2e4e83115","Type":"ContainerStarted","Data":"63677d80b312f326933019d9030a3dcc898bfe598d60e6f4105c385ec304cd47"}
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.132496 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-gqpfp" event={"ID":"63e15a9d-3476-43c1-93e0-6453f0fc9adb","Type":"ContainerStarted","Data":"703beb457ce4279f36298587f57df0a2043207bb7d5012efd72b2fdcc10f0ed0"}
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.153209 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-46xcf" podStartSLOduration=137.153193988 podStartE2EDuration="2m17.153193988s" podCreationTimestamp="2026-01-22 05:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:28.108827572 +0000 UTC m=+155.945952925" watchObservedRunningTime="2026-01-22 05:48:28.153193988 +0000 UTC m=+155.990319341"
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.153547 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.153924 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zshgr" podStartSLOduration=137.153917794 podStartE2EDuration="2m17.153917794s" podCreationTimestamp="2026-01-22 05:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:28.152349427 +0000 UTC m=+155.989474780" watchObservedRunningTime="2026-01-22 05:48:28.153917794 +0000 UTC m=+155.991043147"
Jan 22 05:48:28 crc kubenswrapper[4933]: E0122 05:48:28.164631 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:28.664605814 +0000 UTC m=+156.501731157 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.176775 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-pl49j" podStartSLOduration=137.176748557 podStartE2EDuration="2m17.176748557s" podCreationTimestamp="2026-01-22 05:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:28.174684289 +0000 UTC m=+156.011809642" watchObservedRunningTime="2026-01-22 05:48:28.176748557 +0000 UTC m=+156.013873910"
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.210866 4933 patch_prober.go:28] interesting pod/router-default-5444994796-77xhn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 22 05:48:28 crc kubenswrapper[4933]: [-]has-synced failed: reason withheld
Jan 22 05:48:28 crc kubenswrapper[4933]: [+]process-running ok
Jan 22 05:48:28 crc kubenswrapper[4933]: healthz check failed
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.210918 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-77xhn" podUID="2860560e-929a-4dbe-84c0-23326bf7cdf4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.215445 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-jhjlm" event={"ID":"b2d54262-2b73-4ffa-80e6-beb88d8fa5b7","Type":"ContainerStarted","Data":"ad12d9cb796b46e4224288363712562c081069e343e89c17c06512a2c1902114"}
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.215506 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-jhjlm"
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.218853 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-q2rvb" event={"ID":"efc95799-4d8b-4adf-9d64-28717c1bdd76","Type":"ContainerStarted","Data":"2947bdd2deb06f93c9fc10253bbe8643cad68acbd732a9e5790138a543b43edb"}
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.218970 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-q2rvb"
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.220684 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-wc9dx" event={"ID":"b3240d3a-10ab-4df0-9d7b-69e3cfb1aaf8","Type":"ContainerStarted","Data":"b68f10d8f2a401aecd9c3073085c7a10ffb28b823d41f3346307e7e07fefd0ee"}
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.222023 4933 generic.go:334] "Generic (PLEG): container finished" podID="ae503948-5876-4b1e-ba9f-23ebb0e05b94" containerID="7eb8cf5f766b8084fb5d0a37be26d574a884da83588b699a614e4561c39f4d54" exitCode=0
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.222067 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484345-n7rrt" event={"ID":"ae503948-5876-4b1e-ba9f-23ebb0e05b94","Type":"ContainerDied","Data":"7eb8cf5f766b8084fb5d0a37be26d574a884da83588b699a614e4561c39f4d54"}
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.231158 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-tm7xw" event={"ID":"3be04461-5ea1-4f2c-b87b-bf955dcd9bf1","Type":"ContainerStarted","Data":"7c87d98531cbdf03e0b7e0a327ea8ad983c7cb3e4a93bde36af0fb6705d09127"}
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.261764 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:28 crc kubenswrapper[4933]: E0122 05:48:28.263977 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:28.763965243 +0000 UTC m=+156.601090596 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.304294 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-skfkj" event={"ID":"4a7dbd55-b911-42ff-a5e7-ceb61a071343","Type":"ContainerStarted","Data":"26387ba4966482ed8061cf59b0ece5963902c1ce66f2c4321104349c8799a15d"}
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.310597 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-khtrj" event={"ID":"d24a8b16-9687-49f3-bed0-888340007876","Type":"ContainerStarted","Data":"1db6ea2b620c99e40b5c2d18da01b1cbe57874c6430b4604c1cdef92fd6cd007"}
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.310649 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-gqpfp" podStartSLOduration=137.310627532 podStartE2EDuration="2m17.310627532s" podCreationTimestamp="2026-01-22 05:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:28.21499996 +0000 UTC m=+156.052125313" watchObservedRunningTime="2026-01-22 05:48:28.310627532 +0000 UTC m=+156.147752885"
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.311876 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-jhjlm" podStartSLOduration=10.311869471 podStartE2EDuration="10.311869471s" podCreationTimestamp="2026-01-22 05:48:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:28.31051637 +0000 UTC m=+156.147641723" watchObservedRunningTime="2026-01-22 05:48:28.311869471 +0000 UTC m=+156.148994834"
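The pod_startup_latency_tracker lines report podStartSLOduration as the observed-running timestamp minus podCreationTimestamp (with the image-pull window zeroed out here): for dns-default-jhjlm, created 05:48:18 and observed running at 05:48:28.311869471, that is the logged 10.311869471s. A small worked check of that arithmetic, as an assumption-level reconstruction rather than the tracker's actual code:

```go
// Rough reconstruction of the duration arithmetic behind
// "Observed pod startup duration"; timestamps are taken from the
// dns-default-jhjlm entry in the log above.
package main

import (
	"fmt"
	"time"
)

func main() {
	created, _ := time.Parse(time.RFC3339, "2026-01-22T05:48:18Z")
	running, _ := time.Parse(time.RFC3339Nano, "2026-01-22T05:48:28.311869471Z")
	slo := running.Sub(created)
	// Prints 10.311869471, matching podStartSLOduration in the log.
	fmt.Printf("podStartSLOduration=%v\n", slo.Seconds())
}
```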
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.317859 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rhsbf" event={"ID":"4fdcd737-7ae0-40f3-9b0c-7e1cacafebdc","Type":"ContainerStarted","Data":"a55a5ee057342c4589c733e5031e010b28b35fa6ef2bbbe2d0cd379881f63b8e"}
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.363680 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:48:28 crc kubenswrapper[4933]: E0122 05:48:28.364710 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:28.864695215 +0000 UTC m=+156.701820568 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.365237 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-tnl5g" event={"ID":"4bbb457e-b621-4447-ab9b-c3337ff62905","Type":"ContainerStarted","Data":"db216368be9b59708d65ebce724e8cfdca451143a63b62f7eff2378952a8a55b"}
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.365292 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-tnl5g" event={"ID":"4bbb457e-b621-4447-ab9b-c3337ff62905","Type":"ContainerStarted","Data":"2405064668b296b8a990e96e88add740a48427b1c07c42a18d4ca12547c0d0e1"}
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.366769 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-wc9dx" podStartSLOduration=137.366752742 podStartE2EDuration="2m17.366752742s" podCreationTimestamp="2026-01-22 05:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:28.365918973 +0000 UTC m=+156.203044326" watchObservedRunningTime="2026-01-22 05:48:28.366752742 +0000 UTC m=+156.203878095"
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.394847 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-q2rvb" podStartSLOduration=137.394827778 podStartE2EDuration="2m17.394827778s" podCreationTimestamp="2026-01-22 05:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:28.39275089 +0000 UTC m=+156.229876243" watchObservedRunningTime="2026-01-22 05:48:28.394827778 +0000 UTC m=+156.231953131"
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.410327 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-vb2np" event={"ID":"245c05e3-0c9d-4b20-8bef-b16bb0b492c1","Type":"ContainerStarted","Data":"1967073f5c3232ad17533da893effca10e97bdb0639fd872b7ba9e0bc687b32b"}
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.410361 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-vb2np"
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.413354 4933 patch_prober.go:28] interesting pod/downloads-7954f5f757-tm764 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.19:8080/\": dial tcp 10.217.0.19:8080: connect: connection refused" start-of-body=
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.413396 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-tm764" podUID="80358407-ad1b-499f-868f-44e3388b0fac" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.19:8080/\": dial tcp 10.217.0.19:8080: connect: connection refused"
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.432129 4933 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-vb2np container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.27:8080/healthz\": dial tcp 10.217.0.27:8080: connect: connection refused" start-of-body=
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.432478 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-vb2np" podUID="245c05e3-0c9d-4b20-8bef-b16bb0b492c1" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.27:8080/healthz\": dial tcp 10.217.0.27:8080: connect: connection refused"
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.433152 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-skfkj" podStartSLOduration=137.433141883 podStartE2EDuration="2m17.433141883s" podCreationTimestamp="2026-01-22 05:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:28.432689971 +0000 UTC m=+156.269815324" watchObservedRunningTime="2026-01-22 05:48:28.433141883 +0000 UTC m=+156.270267236"
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.441870 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dr2tg"
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.465416 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:28 crc kubenswrapper[4933]: E0122 05:48:28.468863 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:28.968849266 +0000 UTC m=+156.805974619 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.523456 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-tm7xw" podStartSLOduration=11.52343523 podStartE2EDuration="11.52343523s" podCreationTimestamp="2026-01-22 05:48:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:28.522573299 +0000 UTC m=+156.359698652" watchObservedRunningTime="2026-01-22 05:48:28.52343523 +0000 UTC m=+156.360560583"
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.566858 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:48:28 crc kubenswrapper[4933]: E0122 05:48:28.567391 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:29.067376315 +0000 UTC m=+156.904501668 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.658626 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-vb2np" podStartSLOduration=137.658610856 podStartE2EDuration="2m17.658610856s" podCreationTimestamp="2026-01-22 05:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:28.657436658 +0000 UTC m=+156.494562011" watchObservedRunningTime="2026-01-22 05:48:28.658610856 +0000 UTC m=+156.495736209"
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.659694 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-tnl5g" podStartSLOduration=137.65969 podStartE2EDuration="2m17.65969s" podCreationTimestamp="2026-01-22 05:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:28.606800336 +0000 UTC m=+156.443925689" watchObservedRunningTime="2026-01-22 05:48:28.65969 +0000 UTC m=+156.496815353"
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.675815 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:28 crc kubenswrapper[4933]: E0122 05:48:28.676152 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:29.176138384 +0000 UTC m=+157.013263737 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
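The readiness failures above are plain HTTP GETs against each container's /healthz endpoint, refused until the process binds its port; once the endpoint answers, the kubelet flips the pod to status="ready" as the transitions in the log show. A rough sketch of such a check follows, simplified relative to the kubelet's prober (which also handles TLS, headers, and body capture):

```go
// Minimal sketch, under stated assumptions, of an HTTP readiness check that
// mirrors the observable behavior in the log: dial + GET, ready iff the
// endpoint answers with a 2xx status.
package main

import (
	"fmt"
	"net/http"
	"time"
)

func probeReady(url string) error {
	client := &http.Client{Timeout: 1 * time.Second}
	resp, err := client.Get(url)
	if err != nil {
		return err // e.g. "dial tcp ...: connect: connection refused"
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return fmt.Errorf("HTTP probe failed with statuscode: %d", resp.StatusCode)
	}
	return nil
}

func main() {
	// URL taken from the marketplace-operator probe entries above.
	if err := probeReady("http://10.217.0.27:8080/healthz"); err != nil {
		fmt.Println("Probe failed:", err)
	} else {
		fmt.Println("ready")
	}
}
```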
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.685210 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-khtrj" podStartSLOduration=137.685190656 podStartE2EDuration="2m17.685190656s" podCreationTimestamp="2026-01-22 05:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:28.683860795 +0000 UTC m=+156.520986148" watchObservedRunningTime="2026-01-22 05:48:28.685190656 +0000 UTC m=+156.522316009"
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.776645 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:48:28 crc kubenswrapper[4933]: E0122 05:48:28.776993 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:29.276975898 +0000 UTC m=+157.114101251 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.877673 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:28 crc kubenswrapper[4933]: E0122 05:48:28.878139 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:29.378127189 +0000 UTC m=+157.215252542 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:28 crc kubenswrapper[4933]: I0122 05:48:28.988394 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:48:28 crc kubenswrapper[4933]: E0122 05:48:28.988876 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:29.488855564 +0000 UTC m=+157.325980917 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:29 crc kubenswrapper[4933]: I0122 05:48:29.053489 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zxdg4"
Jan 22 05:48:29 crc kubenswrapper[4933]: I0122 05:48:29.089841 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:29 crc kubenswrapper[4933]: E0122 05:48:29.090419 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:29.590406035 +0000 UTC m=+157.427531388 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:29 crc kubenswrapper[4933]: I0122 05:48:29.113118 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-rhsbf" podStartSLOduration=138.113099125 podStartE2EDuration="2m18.113099125s" podCreationTimestamp="2026-01-22 05:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:28.71534836 +0000 UTC m=+156.552473713" watchObservedRunningTime="2026-01-22 05:48:29.113099125 +0000 UTC m=+156.950224478"
Jan 22 05:48:29 crc kubenswrapper[4933]: I0122 05:48:29.191593 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:48:29 crc kubenswrapper[4933]: E0122 05:48:29.192174 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:29.692154631 +0000 UTC m=+157.529279994 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:29 crc kubenswrapper[4933]: I0122 05:48:29.206257 4933 patch_prober.go:28] interesting pod/router-default-5444994796-77xhn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 22 05:48:29 crc kubenswrapper[4933]: [-]has-synced failed: reason withheld
Jan 22 05:48:29 crc kubenswrapper[4933]: [+]process-running ok
Jan 22 05:48:29 crc kubenswrapper[4933]: healthz check failed
Jan 22 05:48:29 crc kubenswrapper[4933]: I0122 05:48:29.206318 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-77xhn" podUID="2860560e-929a-4dbe-84c0-23326bf7cdf4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 22 05:48:29 crc kubenswrapper[4933]: I0122 05:48:29.271660 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-gj98r"
Jan 22 05:48:29 crc kubenswrapper[4933]: I0122 05:48:29.293348 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:29 crc kubenswrapper[4933]: E0122 05:48:29.293869 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:29.793851934 +0000 UTC m=+157.630977287 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:48:29 crc kubenswrapper[4933]: I0122 05:48:29.413631 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:48:29 crc kubenswrapper[4933]: E0122 05:48:29.413901 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:29.913887416 +0000 UTC m=+157.751012769 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
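The "SyncLoop (PLEG): event for pod" lines carry pod lifecycle events whose JSON shape (ID/Type/Data) the kubelet prints directly. A speculative reconstruction of that event record, for illustration only; the struct is not the kubelet's actual type, but the field names and values follow the log's own output:

```go
// Speculative sketch of the pod lifecycle event shape seen in
// "SyncLoop (PLEG): event for pod" lines; values are copied from the
// csi-hostpathplugin-48tlb entries in this log.
package main

import "fmt"

type PodLifecycleEvent struct {
	ID   string // pod UID
	Type string // e.g. "ContainerStarted", "ContainerDied"
	Data string // container or sandbox ID
}

func main() {
	ev := PodLifecycleEvent{
		ID:   "beeaabac-5adb-4389-a41d-fcd84b8b7259",
		Type: "ContainerStarted",
		Data: "ee0eaf8275fa98e38827e8126d8772e99bef6e7b5a754e7389c0990225e06dcc",
	}
	fmt.Printf("SyncLoop (PLEG): event for pod %s: %+v\n", ev.ID, ev)
}
```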
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:48:29 crc kubenswrapper[4933]: I0122 05:48:29.442452 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-q2rvb" event={"ID":"efc95799-4d8b-4adf-9d64-28717c1bdd76","Type":"ContainerStarted","Data":"5276781dbcaf94153ccafe5b7806ae1aff677acd91bd22ce49cef290383d5427"} Jan 22 05:48:29 crc kubenswrapper[4933]: I0122 05:48:29.444644 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-llbq4" event={"ID":"8bdbecf3-1fbc-4184-b33b-94031b7e3845","Type":"ContainerStarted","Data":"ff8eda6ba96519871ac3944050f9239c3694de62820d713c883b283f15eea08c"} Jan 22 05:48:29 crc kubenswrapper[4933]: I0122 05:48:29.448458 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-48tlb" event={"ID":"beeaabac-5adb-4389-a41d-fcd84b8b7259","Type":"ContainerStarted","Data":"ee0eaf8275fa98e38827e8126d8772e99bef6e7b5a754e7389c0990225e06dcc"} Jan 22 05:48:29 crc kubenswrapper[4933]: I0122 05:48:29.454445 4933 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-vb2np container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.27:8080/healthz\": dial tcp 10.217.0.27:8080: connect: connection refused" start-of-body= Jan 22 05:48:29 crc kubenswrapper[4933]: I0122 05:48:29.454487 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-vb2np" podUID="245c05e3-0c9d-4b20-8bef-b16bb0b492c1" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.27:8080/healthz\": dial tcp 10.217.0.27:8080: connect: connection refused" Jan 22 05:48:29 crc kubenswrapper[4933]: I0122 05:48:29.486211 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-mppr6" Jan 22 05:48:29 crc kubenswrapper[4933]: I0122 05:48:29.515558 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd" Jan 22 05:48:29 crc kubenswrapper[4933]: E0122 05:48:29.517609 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:30.017594237 +0000 UTC m=+157.854719590 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:48:29 crc kubenswrapper[4933]: I0122 05:48:29.528584 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-llbq4" podStartSLOduration=139.528570303 podStartE2EDuration="2m19.528570303s" podCreationTimestamp="2026-01-22 05:46:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:29.527215552 +0000 UTC m=+157.364340915" watchObservedRunningTime="2026-01-22 05:48:29.528570303 +0000 UTC m=+157.365695656" Jan 22 05:48:29 crc kubenswrapper[4933]: I0122 05:48:29.621172 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:48:29 crc kubenswrapper[4933]: E0122 05:48:29.621520 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:30.121503582 +0000 UTC m=+157.958628935 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:48:29 crc kubenswrapper[4933]: I0122 05:48:29.725785 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd" Jan 22 05:48:29 crc kubenswrapper[4933]: E0122 05:48:29.726124 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:30.226111895 +0000 UTC m=+158.063237248 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:48:29 crc kubenswrapper[4933]: I0122 05:48:29.826448 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:48:29 crc kubenswrapper[4933]: E0122 05:48:29.827057 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:30.327042341 +0000 UTC m=+158.164167694 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:48:29 crc kubenswrapper[4933]: I0122 05:48:29.928891 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd" Jan 22 05:48:29 crc kubenswrapper[4933]: E0122 05:48:29.929308 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:30.429293178 +0000 UTC m=+158.266418541 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.009133 4933 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.032565 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:48:30 crc kubenswrapper[4933]: E0122 05:48:30.032853 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:30.532839355 +0000 UTC m=+158.369964708 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.133841 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd" Jan 22 05:48:30 crc kubenswrapper[4933]: E0122 05:48:30.134166 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:30.6341549 +0000 UTC m=+158.471280253 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.211022 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-77xhn" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.219923 4933 patch_prober.go:28] interesting pod/router-default-5444994796-77xhn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 22 05:48:30 crc kubenswrapper[4933]: [-]has-synced failed: reason withheld Jan 22 05:48:30 crc kubenswrapper[4933]: [+]process-running ok Jan 22 05:48:30 crc kubenswrapper[4933]: healthz check failed Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.219980 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-77xhn" podUID="2860560e-929a-4dbe-84c0-23326bf7cdf4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.235634 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:48:30 crc kubenswrapper[4933]: E0122 05:48:30.236862 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:30.736843967 +0000 UTC m=+158.573969320 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.248873 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484345-n7rrt" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.336212 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ae503948-5876-4b1e-ba9f-23ebb0e05b94-config-volume\") pod \"ae503948-5876-4b1e-ba9f-23ebb0e05b94\" (UID: \"ae503948-5876-4b1e-ba9f-23ebb0e05b94\") " Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.336589 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ae503948-5876-4b1e-ba9f-23ebb0e05b94-secret-volume\") pod \"ae503948-5876-4b1e-ba9f-23ebb0e05b94\" (UID: \"ae503948-5876-4b1e-ba9f-23ebb0e05b94\") " Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.336690 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pkd78\" (UniqueName: \"kubernetes.io/projected/ae503948-5876-4b1e-ba9f-23ebb0e05b94-kube-api-access-pkd78\") pod \"ae503948-5876-4b1e-ba9f-23ebb0e05b94\" (UID: \"ae503948-5876-4b1e-ba9f-23ebb0e05b94\") " Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.336918 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.337098 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ae503948-5876-4b1e-ba9f-23ebb0e05b94-config-volume" (OuterVolumeSpecName: "config-volume") pod "ae503948-5876-4b1e-ba9f-23ebb0e05b94" (UID: "ae503948-5876-4b1e-ba9f-23ebb0e05b94"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:48:30 crc kubenswrapper[4933]: E0122 05:48:30.337245 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:30.83723394 +0000 UTC m=+158.674359293 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.337576 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-qxtfm"] Jan 22 05:48:30 crc kubenswrapper[4933]: E0122 05:48:30.337817 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae503948-5876-4b1e-ba9f-23ebb0e05b94" containerName="collect-profiles" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.337834 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae503948-5876-4b1e-ba9f-23ebb0e05b94" containerName="collect-profiles" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.337933 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae503948-5876-4b1e-ba9f-23ebb0e05b94" containerName="collect-profiles" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.338641 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qxtfm" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.342780 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.348814 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae503948-5876-4b1e-ba9f-23ebb0e05b94-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "ae503948-5876-4b1e-ba9f-23ebb0e05b94" (UID: "ae503948-5876-4b1e-ba9f-23ebb0e05b94"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.349124 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae503948-5876-4b1e-ba9f-23ebb0e05b94-kube-api-access-pkd78" (OuterVolumeSpecName: "kube-api-access-pkd78") pod "ae503948-5876-4b1e-ba9f-23ebb0e05b94" (UID: "ae503948-5876-4b1e-ba9f-23ebb0e05b94"). InnerVolumeSpecName "kube-api-access-pkd78". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.366540 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qxtfm"] Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.409436 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-llbq4" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.409490 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-llbq4" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.439018 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:48:30 crc kubenswrapper[4933]: E0122 05:48:30.439219 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:30.93917551 +0000 UTC m=+158.776300863 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.439395 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ec6f4762-c94a-4c73-a84f-469729ae7bae-catalog-content\") pod \"community-operators-qxtfm\" (UID: \"ec6f4762-c94a-4c73-a84f-469729ae7bae\") " pod="openshift-marketplace/community-operators-qxtfm" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.439464 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fpsgr\" (UniqueName: \"kubernetes.io/projected/ec6f4762-c94a-4c73-a84f-469729ae7bae-kube-api-access-fpsgr\") pod \"community-operators-qxtfm\" (UID: \"ec6f4762-c94a-4c73-a84f-469729ae7bae\") " pod="openshift-marketplace/community-operators-qxtfm" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.439506 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.439556 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ec6f4762-c94a-4c73-a84f-469729ae7bae-utilities\") pod \"community-operators-qxtfm\" (UID: \"ec6f4762-c94a-4c73-a84f-469729ae7bae\") " pod="openshift-marketplace/community-operators-qxtfm" Jan 22 
05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.439614 4933 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ae503948-5876-4b1e-ba9f-23ebb0e05b94-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.439629 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pkd78\" (UniqueName: \"kubernetes.io/projected/ae503948-5876-4b1e-ba9f-23ebb0e05b94-kube-api-access-pkd78\") on node \"crc\" DevicePath \"\"" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.439638 4933 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ae503948-5876-4b1e-ba9f-23ebb0e05b94-config-volume\") on node \"crc\" DevicePath \"\"" Jan 22 05:48:30 crc kubenswrapper[4933]: E0122 05:48:30.439872 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:30.939865827 +0000 UTC m=+158.776991180 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.475258 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-48tlb" event={"ID":"beeaabac-5adb-4389-a41d-fcd84b8b7259","Type":"ContainerStarted","Data":"6b7187d8e968d75da733c83893f78aded4dd099ec940351ef6bcfdf2e931ea81"} Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.475308 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-48tlb" event={"ID":"beeaabac-5adb-4389-a41d-fcd84b8b7259","Type":"ContainerStarted","Data":"ba390974aeaab008a259f949a05d91b06685e6946c6ea87d86be67293d291bf0"} Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.477381 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484345-n7rrt" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.483285 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484345-n7rrt" event={"ID":"ae503948-5876-4b1e-ba9f-23ebb0e05b94","Type":"ContainerDied","Data":"a9be9efeb63748b0eb39b65860041f7e99005e81a6b9aa006a5eb88d57392450"} Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.483409 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a9be9efeb63748b0eb39b65860041f7e99005e81a6b9aa006a5eb88d57392450" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.517165 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-48tlb" podStartSLOduration=12.51714244 podStartE2EDuration="12.51714244s" podCreationTimestamp="2026-01-22 05:48:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:30.510909204 +0000 UTC m=+158.348034557" watchObservedRunningTime="2026-01-22 05:48:30.51714244 +0000 UTC m=+158.354267813" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.540813 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:48:30 crc kubenswrapper[4933]: E0122 05:48:30.541187 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:48:31.041166471 +0000 UTC m=+158.878291824 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.541403 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ec6f4762-c94a-4c73-a84f-469729ae7bae-catalog-content\") pod \"community-operators-qxtfm\" (UID: \"ec6f4762-c94a-4c73-a84f-469729ae7bae\") " pod="openshift-marketplace/community-operators-qxtfm" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.541461 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fpsgr\" (UniqueName: \"kubernetes.io/projected/ec6f4762-c94a-4c73-a84f-469729ae7bae-kube-api-access-fpsgr\") pod \"community-operators-qxtfm\" (UID: \"ec6f4762-c94a-4c73-a84f-469729ae7bae\") " pod="openshift-marketplace/community-operators-qxtfm" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.541550 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.541709 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ec6f4762-c94a-4c73-a84f-469729ae7bae-utilities\") pod \"community-operators-qxtfm\" (UID: \"ec6f4762-c94a-4c73-a84f-469729ae7bae\") " pod="openshift-marketplace/community-operators-qxtfm" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.543444 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ec6f4762-c94a-4c73-a84f-469729ae7bae-catalog-content\") pod \"community-operators-qxtfm\" (UID: \"ec6f4762-c94a-4c73-a84f-469729ae7bae\") " pod="openshift-marketplace/community-operators-qxtfm" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.544848 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ec6f4762-c94a-4c73-a84f-469729ae7bae-utilities\") pod \"community-operators-qxtfm\" (UID: \"ec6f4762-c94a-4c73-a84f-469729ae7bae\") " pod="openshift-marketplace/community-operators-qxtfm" Jan 22 05:48:30 crc kubenswrapper[4933]: E0122 05:48:30.544951 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:48:31.044855237 +0000 UTC m=+158.881980580 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-4ptvd" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.556440 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-jcpdb"] Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.557414 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jcpdb" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.559467 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jcpdb"] Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.560313 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.588178 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fpsgr\" (UniqueName: \"kubernetes.io/projected/ec6f4762-c94a-4c73-a84f-469729ae7bae-kube-api-access-fpsgr\") pod \"community-operators-qxtfm\" (UID: \"ec6f4762-c94a-4c73-a84f-469729ae7bae\") " pod="openshift-marketplace/community-operators-qxtfm" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.642733 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.642956 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sgjnd\" (UniqueName: \"kubernetes.io/projected/4c343d48-14c7-4862-ab0a-7851d4e0e72a-kube-api-access-sgjnd\") pod \"certified-operators-jcpdb\" (UID: \"4c343d48-14c7-4862-ab0a-7851d4e0e72a\") " pod="openshift-marketplace/certified-operators-jcpdb" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.642984 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c343d48-14c7-4862-ab0a-7851d4e0e72a-catalog-content\") pod \"certified-operators-jcpdb\" (UID: \"4c343d48-14c7-4862-ab0a-7851d4e0e72a\") " pod="openshift-marketplace/certified-operators-jcpdb" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.643146 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c343d48-14c7-4862-ab0a-7851d4e0e72a-utilities\") pod \"certified-operators-jcpdb\" (UID: \"4c343d48-14c7-4862-ab0a-7851d4e0e72a\") " pod="openshift-marketplace/certified-operators-jcpdb" Jan 22 05:48:30 crc kubenswrapper[4933]: E0122 05:48:30.643246 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-22 05:48:31.143228164 +0000 UTC m=+158.980353507 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.663371 4933 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2026-01-22T05:48:30.009162662Z","Handler":null,"Name":""} Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.664645 4933 patch_prober.go:28] interesting pod/downloads-7954f5f757-tm764 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.19:8080/\": dial tcp 10.217.0.19:8080: connect: connection refused" start-of-body= Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.664777 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-tm764" podUID="80358407-ad1b-499f-868f-44e3388b0fac" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.19:8080/\": dial tcp 10.217.0.19:8080: connect: connection refused" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.664676 4933 patch_prober.go:28] interesting pod/downloads-7954f5f757-tm764 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.19:8080/\": dial tcp 10.217.0.19:8080: connect: connection refused" start-of-body= Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.665246 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-tm764" podUID="80358407-ad1b-499f-868f-44e3388b0fac" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.19:8080/\": dial tcp 10.217.0.19:8080: connect: connection refused" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.674688 4933 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.674735 4933 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.675294 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qxtfm" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.724521 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-jrc2t"] Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.726090 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-jrc2t" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.738213 4933 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-vb2np container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.27:8080/healthz\": dial tcp 10.217.0.27:8080: connect: connection refused" start-of-body= Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.738289 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-vb2np" podUID="245c05e3-0c9d-4b20-8bef-b16bb0b492c1" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.27:8080/healthz\": dial tcp 10.217.0.27:8080: connect: connection refused" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.738704 4933 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-vb2np container/marketplace-operator namespace/openshift-marketplace: Liveness probe status=failure output="Get \"http://10.217.0.27:8080/healthz\": dial tcp 10.217.0.27:8080: connect: connection refused" start-of-body= Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.738726 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/marketplace-operator-79b997595-vb2np" podUID="245c05e3-0c9d-4b20-8bef-b16bb0b492c1" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.27:8080/healthz\": dial tcp 10.217.0.27:8080: connect: connection refused" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.744423 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6skvb\" (UniqueName: \"kubernetes.io/projected/731fa9cf-182b-4086-8696-ec971095cb38-kube-api-access-6skvb\") pod \"community-operators-jrc2t\" (UID: \"731fa9cf-182b-4086-8696-ec971095cb38\") " pod="openshift-marketplace/community-operators-jrc2t" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.744609 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c343d48-14c7-4862-ab0a-7851d4e0e72a-utilities\") pod \"certified-operators-jcpdb\" (UID: \"4c343d48-14c7-4862-ab0a-7851d4e0e72a\") " pod="openshift-marketplace/certified-operators-jcpdb" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.744710 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/731fa9cf-182b-4086-8696-ec971095cb38-utilities\") pod \"community-operators-jrc2t\" (UID: \"731fa9cf-182b-4086-8696-ec971095cb38\") " pod="openshift-marketplace/community-operators-jrc2t" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.744831 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sgjnd\" (UniqueName: \"kubernetes.io/projected/4c343d48-14c7-4862-ab0a-7851d4e0e72a-kube-api-access-sgjnd\") pod \"certified-operators-jcpdb\" (UID: \"4c343d48-14c7-4862-ab0a-7851d4e0e72a\") " pod="openshift-marketplace/certified-operators-jcpdb" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.744927 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c343d48-14c7-4862-ab0a-7851d4e0e72a-catalog-content\") pod \"certified-operators-jcpdb\" (UID: \"4c343d48-14c7-4862-ab0a-7851d4e0e72a\") " 
pod="openshift-marketplace/certified-operators-jcpdb" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.748601 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/731fa9cf-182b-4086-8696-ec971095cb38-catalog-content\") pod \"community-operators-jrc2t\" (UID: \"731fa9cf-182b-4086-8696-ec971095cb38\") " pod="openshift-marketplace/community-operators-jrc2t" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.748775 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.748555 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jrc2t"] Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.745470 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c343d48-14c7-4862-ab0a-7851d4e0e72a-utilities\") pod \"certified-operators-jcpdb\" (UID: \"4c343d48-14c7-4862-ab0a-7851d4e0e72a\") " pod="openshift-marketplace/certified-operators-jcpdb" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.747530 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c343d48-14c7-4862-ab0a-7851d4e0e72a-catalog-content\") pod \"certified-operators-jcpdb\" (UID: \"4c343d48-14c7-4862-ab0a-7851d4e0e72a\") " pod="openshift-marketplace/certified-operators-jcpdb" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.763179 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-khtrj" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.763209 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-khtrj" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.765403 4933 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.765445 4933 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.771363 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sgjnd\" (UniqueName: \"kubernetes.io/projected/4c343d48-14c7-4862-ab0a-7851d4e0e72a-kube-api-access-sgjnd\") pod \"certified-operators-jcpdb\" (UID: \"4c343d48-14c7-4862-ab0a-7851d4e0e72a\") " pod="openshift-marketplace/certified-operators-jcpdb" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.776255 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-khtrj" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.842195 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-4ptvd\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.850415 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.850953 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6skvb\" (UniqueName: \"kubernetes.io/projected/731fa9cf-182b-4086-8696-ec971095cb38-kube-api-access-6skvb\") pod \"community-operators-jrc2t\" (UID: \"731fa9cf-182b-4086-8696-ec971095cb38\") " pod="openshift-marketplace/community-operators-jrc2t" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.851124 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/731fa9cf-182b-4086-8696-ec971095cb38-utilities\") pod \"community-operators-jrc2t\" (UID: \"731fa9cf-182b-4086-8696-ec971095cb38\") " pod="openshift-marketplace/community-operators-jrc2t" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.851283 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/731fa9cf-182b-4086-8696-ec971095cb38-catalog-content\") pod \"community-operators-jrc2t\" (UID: \"731fa9cf-182b-4086-8696-ec971095cb38\") " pod="openshift-marketplace/community-operators-jrc2t" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.852103 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/731fa9cf-182b-4086-8696-ec971095cb38-utilities\") pod \"community-operators-jrc2t\" (UID: 
\"731fa9cf-182b-4086-8696-ec971095cb38\") " pod="openshift-marketplace/community-operators-jrc2t" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.852326 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/731fa9cf-182b-4086-8696-ec971095cb38-catalog-content\") pod \"community-operators-jrc2t\" (UID: \"731fa9cf-182b-4086-8696-ec971095cb38\") " pod="openshift-marketplace/community-operators-jrc2t" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.861518 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.878663 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6skvb\" (UniqueName: \"kubernetes.io/projected/731fa9cf-182b-4086-8696-ec971095cb38-kube-api-access-6skvb\") pod \"community-operators-jrc2t\" (UID: \"731fa9cf-182b-4086-8696-ec971095cb38\") " pod="openshift-marketplace/community-operators-jrc2t" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.890222 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-gqpfp" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.891016 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-gqpfp" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.916196 4933 patch_prober.go:28] interesting pod/console-f9d7485db-gqpfp container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body= Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.916244 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-gqpfp" podUID="63e15a9d-3476-43c1-93e0-6453f0fc9adb" containerName="console" probeResult="failure" output="Get \"https://10.217.0.28:8443/health\": dial tcp 10.217.0.28:8443: connect: connection refused" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.927203 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-4748k"] Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.928054 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4748k" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.930263 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-jcpdb" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.951479 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4748k"] Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.953876 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb89501a-64c9-4b40-84cc-e18896cb53ec-utilities\") pod \"certified-operators-4748k\" (UID: \"fb89501a-64c9-4b40-84cc-e18896cb53ec\") " pod="openshift-marketplace/certified-operators-4748k" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.953955 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9cl67\" (UniqueName: \"kubernetes.io/projected/fb89501a-64c9-4b40-84cc-e18896cb53ec-kube-api-access-9cl67\") pod \"certified-operators-4748k\" (UID: \"fb89501a-64c9-4b40-84cc-e18896cb53ec\") " pod="openshift-marketplace/certified-operators-4748k" Jan 22 05:48:30 crc kubenswrapper[4933]: I0122 05:48:30.954113 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb89501a-64c9-4b40-84cc-e18896cb53ec-catalog-content\") pod \"certified-operators-4748k\" (UID: \"fb89501a-64c9-4b40-84cc-e18896cb53ec\") " pod="openshift-marketplace/certified-operators-4748k" Jan 22 05:48:31 crc kubenswrapper[4933]: I0122 05:48:31.025014 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd" Jan 22 05:48:31 crc kubenswrapper[4933]: I0122 05:48:31.046324 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-jrc2t" Jan 22 05:48:31 crc kubenswrapper[4933]: I0122 05:48:31.055697 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb89501a-64c9-4b40-84cc-e18896cb53ec-catalog-content\") pod \"certified-operators-4748k\" (UID: \"fb89501a-64c9-4b40-84cc-e18896cb53ec\") " pod="openshift-marketplace/certified-operators-4748k" Jan 22 05:48:31 crc kubenswrapper[4933]: I0122 05:48:31.055855 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb89501a-64c9-4b40-84cc-e18896cb53ec-utilities\") pod \"certified-operators-4748k\" (UID: \"fb89501a-64c9-4b40-84cc-e18896cb53ec\") " pod="openshift-marketplace/certified-operators-4748k" Jan 22 05:48:31 crc kubenswrapper[4933]: I0122 05:48:31.055899 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9cl67\" (UniqueName: \"kubernetes.io/projected/fb89501a-64c9-4b40-84cc-e18896cb53ec-kube-api-access-9cl67\") pod \"certified-operators-4748k\" (UID: \"fb89501a-64c9-4b40-84cc-e18896cb53ec\") " pod="openshift-marketplace/certified-operators-4748k" Jan 22 05:48:31 crc kubenswrapper[4933]: I0122 05:48:31.056156 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb89501a-64c9-4b40-84cc-e18896cb53ec-catalog-content\") pod \"certified-operators-4748k\" (UID: \"fb89501a-64c9-4b40-84cc-e18896cb53ec\") " pod="openshift-marketplace/certified-operators-4748k" Jan 22 05:48:31 crc kubenswrapper[4933]: I0122 05:48:31.056193 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb89501a-64c9-4b40-84cc-e18896cb53ec-utilities\") pod \"certified-operators-4748k\" (UID: \"fb89501a-64c9-4b40-84cc-e18896cb53ec\") " pod="openshift-marketplace/certified-operators-4748k" Jan 22 05:48:31 crc kubenswrapper[4933]: I0122 05:48:31.100404 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9cl67\" (UniqueName: \"kubernetes.io/projected/fb89501a-64c9-4b40-84cc-e18896cb53ec-kube-api-access-9cl67\") pod \"certified-operators-4748k\" (UID: \"fb89501a-64c9-4b40-84cc-e18896cb53ec\") " pod="openshift-marketplace/certified-operators-4748k" Jan 22 05:48:31 crc kubenswrapper[4933]: I0122 05:48:31.205895 4933 patch_prober.go:28] interesting pod/router-default-5444994796-77xhn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 22 05:48:31 crc kubenswrapper[4933]: [-]has-synced failed: reason withheld Jan 22 05:48:31 crc kubenswrapper[4933]: [+]process-running ok Jan 22 05:48:31 crc kubenswrapper[4933]: healthz check failed Jan 22 05:48:31 crc kubenswrapper[4933]: I0122 05:48:31.205935 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-77xhn" podUID="2860560e-929a-4dbe-84c0-23326bf7cdf4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 22 05:48:31 crc kubenswrapper[4933]: I0122 05:48:31.283723 4933 util.go:30] "No sandbox for pod can be found. 
Jan 22 05:48:31 crc kubenswrapper[4933]: I0122 05:48:31.283723 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4748k"
Jan 22 05:48:31 crc kubenswrapper[4933]: I0122 05:48:31.317786 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qxtfm"]
Jan 22 05:48:31 crc kubenswrapper[4933]: I0122 05:48:31.415734 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jcpdb"]
Jan 22 05:48:31 crc kubenswrapper[4933]: I0122 05:48:31.499005 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-4ptvd"]
Jan 22 05:48:31 crc kubenswrapper[4933]: I0122 05:48:31.523358 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd" event={"ID":"26b5f8af-bb33-40cc-8ef7-03b0c931896c","Type":"ContainerStarted","Data":"a6579b8b789ed946ec8b920e6700606151f70f6e0acea9805509b5caf9587243"}
Jan 22 05:48:31 crc kubenswrapper[4933]: I0122 05:48:31.539090 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qxtfm" event={"ID":"ec6f4762-c94a-4c73-a84f-469729ae7bae","Type":"ContainerStarted","Data":"7bde1b9339824c20e786404fb59e84045e564639a843fcd52557dd0f2b1e4528"}
Jan 22 05:48:31 crc kubenswrapper[4933]: I0122 05:48:31.539144 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qxtfm" event={"ID":"ec6f4762-c94a-4c73-a84f-469729ae7bae","Type":"ContainerStarted","Data":"9030a143aad277e7ed43f0a5bcaf04a3ba5f8b4c51ce39f7fc34face79482428"}
Jan 22 05:48:31 crc kubenswrapper[4933]: I0122 05:48:31.540328 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jcpdb" event={"ID":"4c343d48-14c7-4862-ab0a-7851d4e0e72a","Type":"ContainerStarted","Data":"e325e8f31faf9929e9c3eec0c5fa251af8ea7aa3dfe6b0cfea13a9868daa89bb"}
Jan 22 05:48:31 crc kubenswrapper[4933]: I0122 05:48:31.546124 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-khtrj"
Jan 22 05:48:31 crc kubenswrapper[4933]: I0122 05:48:31.580633 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jrc2t"]
Jan 22 05:48:31 crc kubenswrapper[4933]: I0122 05:48:31.850434 4933 patch_prober.go:28] interesting pod/apiserver-76f77b778f-llbq4 container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Jan 22 05:48:31 crc kubenswrapper[4933]: [+]log ok
Jan 22 05:48:31 crc kubenswrapper[4933]: [+]etcd ok
Jan 22 05:48:31 crc kubenswrapper[4933]: [+]poststarthook/start-apiserver-admission-initializer ok
Jan 22 05:48:31 crc kubenswrapper[4933]: [+]poststarthook/generic-apiserver-start-informers ok
Jan 22 05:48:31 crc kubenswrapper[4933]: [+]poststarthook/max-in-flight-filter ok
Jan 22 05:48:31 crc kubenswrapper[4933]: [+]poststarthook/storage-object-count-tracker-hook ok
Jan 22 05:48:31 crc kubenswrapper[4933]: [+]poststarthook/image.openshift.io-apiserver-caches ok
Jan 22 05:48:31 crc kubenswrapper[4933]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld
Jan 22 05:48:31 crc kubenswrapper[4933]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld
Jan 22 05:48:31 crc kubenswrapper[4933]: [+]poststarthook/project.openshift.io-projectcache ok
Jan 22 05:48:31 crc kubenswrapper[4933]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok
Jan 22 05:48:31 crc kubenswrapper[4933]: [+]poststarthook/openshift.io-startinformers ok
Jan 22 05:48:31 crc kubenswrapper[4933]: [+]poststarthook/openshift.io-restmapperupdater ok
Jan 22 05:48:31 crc kubenswrapper[4933]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Jan 22 05:48:31 crc kubenswrapper[4933]: livez check failed
Jan 22 05:48:31 crc kubenswrapper[4933]: I0122 05:48:31.850722 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-llbq4" podUID="8bdbecf3-1fbc-4184-b33b-94031b7e3845" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 22 05:48:31 crc kubenswrapper[4933]: I0122 05:48:31.871250 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4748k"]
Jan 22 05:48:31 crc kubenswrapper[4933]: W0122 05:48:31.875098 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfb89501a_64c9_4b40_84cc_e18896cb53ec.slice/crio-8fe0de50ecb8ce6366487e9841ad33408182892a47a72addfc5f91628323d037 WatchSource:0}: Error finding container 8fe0de50ecb8ce6366487e9841ad33408182892a47a72addfc5f91628323d037: Status 404 returned error can't find the container with id 8fe0de50ecb8ce6366487e9841ad33408182892a47a72addfc5f91628323d037
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.236258 4933 patch_prober.go:28] interesting pod/router-default-5444994796-77xhn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 22 05:48:32 crc kubenswrapper[4933]: [-]has-synced failed: reason withheld
Jan 22 05:48:32 crc kubenswrapper[4933]: [+]process-running ok
Jan 22 05:48:32 crc kubenswrapper[4933]: healthz check failed
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.236326 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-77xhn" podUID="2860560e-929a-4dbe-84c0-23326bf7cdf4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.328640 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-6cws8"]
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.329609 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6cws8"
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.332435 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.350918 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6cws8"]
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.387622 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0992ece5-d7dd-40c1-adc4-12711a7b3b69-catalog-content\") pod \"redhat-marketplace-6cws8\" (UID: \"0992ece5-d7dd-40c1-adc4-12711a7b3b69\") " pod="openshift-marketplace/redhat-marketplace-6cws8"
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.387730 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0992ece5-d7dd-40c1-adc4-12711a7b3b69-utilities\") pod \"redhat-marketplace-6cws8\" (UID: \"0992ece5-d7dd-40c1-adc4-12711a7b3b69\") " pod="openshift-marketplace/redhat-marketplace-6cws8"
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.387759 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v6fb4\" (UniqueName: \"kubernetes.io/projected/0992ece5-d7dd-40c1-adc4-12711a7b3b69-kube-api-access-v6fb4\") pod \"redhat-marketplace-6cws8\" (UID: \"0992ece5-d7dd-40c1-adc4-12711a7b3b69\") " pod="openshift-marketplace/redhat-marketplace-6cws8"
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.489122 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0992ece5-d7dd-40c1-adc4-12711a7b3b69-catalog-content\") pod \"redhat-marketplace-6cws8\" (UID: \"0992ece5-d7dd-40c1-adc4-12711a7b3b69\") " pod="openshift-marketplace/redhat-marketplace-6cws8"
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.489276 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0992ece5-d7dd-40c1-adc4-12711a7b3b69-utilities\") pod \"redhat-marketplace-6cws8\" (UID: \"0992ece5-d7dd-40c1-adc4-12711a7b3b69\") " pod="openshift-marketplace/redhat-marketplace-6cws8"
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.489318 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v6fb4\" (UniqueName: \"kubernetes.io/projected/0992ece5-d7dd-40c1-adc4-12711a7b3b69-kube-api-access-v6fb4\") pod \"redhat-marketplace-6cws8\" (UID: \"0992ece5-d7dd-40c1-adc4-12711a7b3b69\") " pod="openshift-marketplace/redhat-marketplace-6cws8"
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.489698 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0992ece5-d7dd-40c1-adc4-12711a7b3b69-utilities\") pod \"redhat-marketplace-6cws8\" (UID: \"0992ece5-d7dd-40c1-adc4-12711a7b3b69\") " pod="openshift-marketplace/redhat-marketplace-6cws8"
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.489675 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0992ece5-d7dd-40c1-adc4-12711a7b3b69-catalog-content\") pod \"redhat-marketplace-6cws8\" (UID: \"0992ece5-d7dd-40c1-adc4-12711a7b3b69\") " pod="openshift-marketplace/redhat-marketplace-6cws8"
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.501099 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes"
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.529798 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v6fb4\" (UniqueName: \"kubernetes.io/projected/0992ece5-d7dd-40c1-adc4-12711a7b3b69-kube-api-access-v6fb4\") pod \"redhat-marketplace-6cws8\" (UID: \"0992ece5-d7dd-40c1-adc4-12711a7b3b69\") " pod="openshift-marketplace/redhat-marketplace-6cws8"
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.551314 4933 generic.go:334] "Generic (PLEG): container finished" podID="4c343d48-14c7-4862-ab0a-7851d4e0e72a" containerID="cd289e74b2354873776c2812136e49480804104fbd74ed32096474a7c4834622" exitCode=0
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.551493 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jcpdb" event={"ID":"4c343d48-14c7-4862-ab0a-7851d4e0e72a","Type":"ContainerDied","Data":"cd289e74b2354873776c2812136e49480804104fbd74ed32096474a7c4834622"}
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.554480 4933 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.555240 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd" event={"ID":"26b5f8af-bb33-40cc-8ef7-03b0c931896c","Type":"ContainerStarted","Data":"e401e638793baaebb51ebc3a0056b236cf22b5ad4d2202f61b825f30983bdce5"}
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.555687 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd"
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.557420 4933 generic.go:334] "Generic (PLEG): container finished" podID="731fa9cf-182b-4086-8696-ec971095cb38" containerID="52157e67eabae2582e3276c15218656d53da92eccff79d8bc890fc4ee72a40c7" exitCode=0
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.557486 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jrc2t" event={"ID":"731fa9cf-182b-4086-8696-ec971095cb38","Type":"ContainerDied","Data":"52157e67eabae2582e3276c15218656d53da92eccff79d8bc890fc4ee72a40c7"}
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.557506 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jrc2t" event={"ID":"731fa9cf-182b-4086-8696-ec971095cb38","Type":"ContainerStarted","Data":"aeb8c7ea5b29fb9c1ba769c8a630b2311e7e47142ba30266823b13028c291c1a"}
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.583040 4933 generic.go:334] "Generic (PLEG): container finished" podID="ec6f4762-c94a-4c73-a84f-469729ae7bae" containerID="7bde1b9339824c20e786404fb59e84045e564639a843fcd52557dd0f2b1e4528" exitCode=0
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.583129 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qxtfm" event={"ID":"ec6f4762-c94a-4c73-a84f-469729ae7bae","Type":"ContainerDied","Data":"7bde1b9339824c20e786404fb59e84045e564639a843fcd52557dd0f2b1e4528"}
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.593743 4933 generic.go:334] "Generic (PLEG): container finished" podID="fb89501a-64c9-4b40-84cc-e18896cb53ec" containerID="4ae6836e240c6d4440c9a92c7c0e6996695758b46ae46eb70f0a757393723a54" exitCode=0
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.595029 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4748k" event={"ID":"fb89501a-64c9-4b40-84cc-e18896cb53ec","Type":"ContainerDied","Data":"4ae6836e240c6d4440c9a92c7c0e6996695758b46ae46eb70f0a757393723a54"}
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.595064 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4748k" event={"ID":"fb89501a-64c9-4b40-84cc-e18896cb53ec","Type":"ContainerStarted","Data":"8fe0de50ecb8ce6366487e9841ad33408182892a47a72addfc5f91628323d037"}
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.655408 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6cws8"
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.738848 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-xtxwj"]
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.739742 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xtxwj"
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.781943 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xtxwj"]
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.896774 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vd4m5\" (UniqueName: \"kubernetes.io/projected/a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f-kube-api-access-vd4m5\") pod \"redhat-marketplace-xtxwj\" (UID: \"a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f\") " pod="openshift-marketplace/redhat-marketplace-xtxwj"
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.896858 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f-catalog-content\") pod \"redhat-marketplace-xtxwj\" (UID: \"a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f\") " pod="openshift-marketplace/redhat-marketplace-xtxwj"
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.896922 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f-utilities\") pod \"redhat-marketplace-xtxwj\" (UID: \"a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f\") " pod="openshift-marketplace/redhat-marketplace-xtxwj"
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.934038 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd" podStartSLOduration=141.934020409 podStartE2EDuration="2m21.934020409s" podCreationTimestamp="2026-01-22 05:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:32.912154328 +0000 UTC m=+160.749279671" watchObservedRunningTime="2026-01-22 05:48:32.934020409 +0000 UTC m=+160.771145762"
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.951018 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.951702 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.955704 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n"
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.957224 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt"
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.964924 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Jan 22 05:48:32 crc kubenswrapper[4933]: I0122 05:48:32.999053 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f-utilities\") pod \"redhat-marketplace-xtxwj\" (UID: \"a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f\") " pod="openshift-marketplace/redhat-marketplace-xtxwj"
Jan 22 05:48:33 crc kubenswrapper[4933]: I0122 05:48:32.999475 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vd4m5\" (UniqueName: \"kubernetes.io/projected/a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f-kube-api-access-vd4m5\") pod \"redhat-marketplace-xtxwj\" (UID: \"a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f\") " pod="openshift-marketplace/redhat-marketplace-xtxwj"
Jan 22 05:48:33 crc kubenswrapper[4933]: I0122 05:48:32.999525 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f-catalog-content\") pod \"redhat-marketplace-xtxwj\" (UID: \"a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f\") " pod="openshift-marketplace/redhat-marketplace-xtxwj"
Jan 22 05:48:33 crc kubenswrapper[4933]: I0122 05:48:32.999608 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f-utilities\") pod \"redhat-marketplace-xtxwj\" (UID: \"a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f\") " pod="openshift-marketplace/redhat-marketplace-xtxwj"
Jan 22 05:48:33 crc kubenswrapper[4933]: I0122 05:48:32.999857 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f-catalog-content\") pod \"redhat-marketplace-xtxwj\" (UID: \"a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f\") " pod="openshift-marketplace/redhat-marketplace-xtxwj"
Jan 22 05:48:33 crc kubenswrapper[4933]: I0122 05:48:33.025469 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vd4m5\" (UniqueName: \"kubernetes.io/projected/a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f-kube-api-access-vd4m5\") pod \"redhat-marketplace-xtxwj\" (UID: \"a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f\") " pod="openshift-marketplace/redhat-marketplace-xtxwj"
Jan 22 05:48:33 crc kubenswrapper[4933]: I0122 05:48:33.081452 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xtxwj"
Jan 22 05:48:33 crc kubenswrapper[4933]: I0122 05:48:33.100716 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/636a6786-8509-4ee8-9f85-daef3d422ee1-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"636a6786-8509-4ee8-9f85-daef3d422ee1\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 22 05:48:33 crc kubenswrapper[4933]: I0122 05:48:33.100952 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/636a6786-8509-4ee8-9f85-daef3d422ee1-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"636a6786-8509-4ee8-9f85-daef3d422ee1\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 22 05:48:33 crc kubenswrapper[4933]: I0122 05:48:33.154525 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6cws8"]
Jan 22 05:48:33 crc kubenswrapper[4933]: I0122 05:48:33.202581 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/636a6786-8509-4ee8-9f85-daef3d422ee1-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"636a6786-8509-4ee8-9f85-daef3d422ee1\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 22 05:48:33 crc kubenswrapper[4933]: I0122 05:48:33.202631 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/636a6786-8509-4ee8-9f85-daef3d422ee1-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"636a6786-8509-4ee8-9f85-daef3d422ee1\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 22 05:48:33 crc kubenswrapper[4933]: I0122 05:48:33.202737 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/636a6786-8509-4ee8-9f85-daef3d422ee1-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"636a6786-8509-4ee8-9f85-daef3d422ee1\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 22 05:48:33 crc kubenswrapper[4933]: W0122 05:48:33.204184 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0992ece5_d7dd_40c1_adc4_12711a7b3b69.slice/crio-e8bc1bb5924f2fe5aaeaa451bfe67e1c84ecdd22205846ad32f15fb0926a8035 WatchSource:0}: Error finding container e8bc1bb5924f2fe5aaeaa451bfe67e1c84ecdd22205846ad32f15fb0926a8035: Status 404 returned error can't find the container with id e8bc1bb5924f2fe5aaeaa451bfe67e1c84ecdd22205846ad32f15fb0926a8035
Jan 22 05:48:33 crc kubenswrapper[4933]: I0122 05:48:33.213883 4933 patch_prober.go:28] interesting pod/router-default-5444994796-77xhn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 22 05:48:33 crc kubenswrapper[4933]: [-]has-synced failed: reason withheld
Jan 22 05:48:33 crc kubenswrapper[4933]: [+]process-running ok
Jan 22 05:48:33 crc kubenswrapper[4933]: healthz check failed
Jan 22 05:48:33 crc kubenswrapper[4933]: I0122 05:48:33.213942 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-77xhn" podUID="2860560e-929a-4dbe-84c0-23326bf7cdf4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 22 05:48:33 crc kubenswrapper[4933]: I0122 05:48:33.225957 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/636a6786-8509-4ee8-9f85-daef3d422ee1-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"636a6786-8509-4ee8-9f85-daef3d422ee1\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 22 05:48:33 crc kubenswrapper[4933]: I0122 05:48:33.291842 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 22 05:48:33 crc kubenswrapper[4933]: I0122 05:48:33.518130 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-xmhw6"]
Jan 22 05:48:33 crc kubenswrapper[4933]: I0122 05:48:33.519452 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xmhw6"
Jan 22 05:48:33 crc kubenswrapper[4933]: I0122 05:48:33.521326 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Jan 22 05:48:33 crc kubenswrapper[4933]: I0122 05:48:33.527979 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xmhw6"]
Jan 22 05:48:33 crc kubenswrapper[4933]: I0122 05:48:33.612134 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d-catalog-content\") pod \"redhat-operators-xmhw6\" (UID: \"480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d\") " pod="openshift-marketplace/redhat-operators-xmhw6"
Jan 22 05:48:33 crc kubenswrapper[4933]: I0122 05:48:33.612189 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d-utilities\") pod \"redhat-operators-xmhw6\" (UID: \"480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d\") " pod="openshift-marketplace/redhat-operators-xmhw6"
Jan 22 05:48:33 crc kubenswrapper[4933]: I0122 05:48:33.612262 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0902347a-c5e2-4891-812b-cfe6efc32261-metrics-certs\") pod \"network-metrics-daemon-t8rgm\" (UID: \"0902347a-c5e2-4891-812b-cfe6efc32261\") " pod="openshift-multus/network-metrics-daemon-t8rgm"
Jan 22 05:48:33 crc kubenswrapper[4933]: I0122 05:48:33.612300 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z7r9z\" (UniqueName: \"kubernetes.io/projected/480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d-kube-api-access-z7r9z\") pod \"redhat-operators-xmhw6\" (UID: \"480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d\") " pod="openshift-marketplace/redhat-operators-xmhw6"
Jan 22 05:48:33 crc kubenswrapper[4933]: I0122 05:48:33.613045 4933 generic.go:334] "Generic (PLEG): container finished" podID="0992ece5-d7dd-40c1-adc4-12711a7b3b69" containerID="947cfa8c81af705b5addf0892ac881baf528d00ac7c27a078325dea971669f52" exitCode=0
event={"ID":"0992ece5-d7dd-40c1-adc4-12711a7b3b69","Type":"ContainerDied","Data":"947cfa8c81af705b5addf0892ac881baf528d00ac7c27a078325dea971669f52"} Jan 22 05:48:33 crc kubenswrapper[4933]: I0122 05:48:33.613537 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6cws8" event={"ID":"0992ece5-d7dd-40c1-adc4-12711a7b3b69","Type":"ContainerStarted","Data":"e8bc1bb5924f2fe5aaeaa451bfe67e1c84ecdd22205846ad32f15fb0926a8035"} Jan 22 05:48:33 crc kubenswrapper[4933]: I0122 05:48:33.626658 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0902347a-c5e2-4891-812b-cfe6efc32261-metrics-certs\") pod \"network-metrics-daemon-t8rgm\" (UID: \"0902347a-c5e2-4891-812b-cfe6efc32261\") " pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:48:33 crc kubenswrapper[4933]: I0122 05:48:33.652291 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 22 05:48:33 crc kubenswrapper[4933]: I0122 05:48:33.659415 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t8rgm" Jan 22 05:48:33 crc kubenswrapper[4933]: W0122 05:48:33.665579 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod636a6786_8509_4ee8_9f85_daef3d422ee1.slice/crio-1ea10079e612b741900a9b87a42b09f59ffa36ab2df388c5b5f7b34a1f4c483e WatchSource:0}: Error finding container 1ea10079e612b741900a9b87a42b09f59ffa36ab2df388c5b5f7b34a1f4c483e: Status 404 returned error can't find the container with id 1ea10079e612b741900a9b87a42b09f59ffa36ab2df388c5b5f7b34a1f4c483e Jan 22 05:48:33 crc kubenswrapper[4933]: I0122 05:48:33.712976 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d-catalog-content\") pod \"redhat-operators-xmhw6\" (UID: \"480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d\") " pod="openshift-marketplace/redhat-operators-xmhw6" Jan 22 05:48:33 crc kubenswrapper[4933]: I0122 05:48:33.713050 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d-utilities\") pod \"redhat-operators-xmhw6\" (UID: \"480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d\") " pod="openshift-marketplace/redhat-operators-xmhw6" Jan 22 05:48:33 crc kubenswrapper[4933]: I0122 05:48:33.713168 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z7r9z\" (UniqueName: \"kubernetes.io/projected/480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d-kube-api-access-z7r9z\") pod \"redhat-operators-xmhw6\" (UID: \"480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d\") " pod="openshift-marketplace/redhat-operators-xmhw6" Jan 22 05:48:33 crc kubenswrapper[4933]: I0122 05:48:33.714263 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d-catalog-content\") pod \"redhat-operators-xmhw6\" (UID: \"480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d\") " pod="openshift-marketplace/redhat-operators-xmhw6" Jan 22 05:48:33 crc kubenswrapper[4933]: I0122 05:48:33.714692 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d-utilities\") pod 
\"redhat-operators-xmhw6\" (UID: \"480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d\") " pod="openshift-marketplace/redhat-operators-xmhw6" Jan 22 05:48:33 crc kubenswrapper[4933]: I0122 05:48:33.755729 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z7r9z\" (UniqueName: \"kubernetes.io/projected/480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d-kube-api-access-z7r9z\") pod \"redhat-operators-xmhw6\" (UID: \"480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d\") " pod="openshift-marketplace/redhat-operators-xmhw6" Jan 22 05:48:33 crc kubenswrapper[4933]: I0122 05:48:33.790891 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xtxwj"] Jan 22 05:48:33 crc kubenswrapper[4933]: I0122 05:48:33.909754 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xmhw6" Jan 22 05:48:33 crc kubenswrapper[4933]: I0122 05:48:33.913292 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wm42f"] Jan 22 05:48:33 crc kubenswrapper[4933]: I0122 05:48:33.914235 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wm42f" Jan 22 05:48:33 crc kubenswrapper[4933]: I0122 05:48:33.950290 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wm42f"] Jan 22 05:48:34 crc kubenswrapper[4933]: I0122 05:48:34.020588 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/50afe38c-f6b1-422d-9abd-b3a62bc7c24d-utilities\") pod \"redhat-operators-wm42f\" (UID: \"50afe38c-f6b1-422d-9abd-b3a62bc7c24d\") " pod="openshift-marketplace/redhat-operators-wm42f" Jan 22 05:48:34 crc kubenswrapper[4933]: I0122 05:48:34.020884 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/50afe38c-f6b1-422d-9abd-b3a62bc7c24d-catalog-content\") pod \"redhat-operators-wm42f\" (UID: \"50afe38c-f6b1-422d-9abd-b3a62bc7c24d\") " pod="openshift-marketplace/redhat-operators-wm42f" Jan 22 05:48:34 crc kubenswrapper[4933]: I0122 05:48:34.021001 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-krh9x\" (UniqueName: \"kubernetes.io/projected/50afe38c-f6b1-422d-9abd-b3a62bc7c24d-kube-api-access-krh9x\") pod \"redhat-operators-wm42f\" (UID: \"50afe38c-f6b1-422d-9abd-b3a62bc7c24d\") " pod="openshift-marketplace/redhat-operators-wm42f" Jan 22 05:48:34 crc kubenswrapper[4933]: I0122 05:48:34.057200 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-t8rgm"] Jan 22 05:48:34 crc kubenswrapper[4933]: W0122 05:48:34.100496 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0902347a_c5e2_4891_812b_cfe6efc32261.slice/crio-88344fff1c1d9294466371942d0a79b232912331b379c708eee15e0509e8aebe WatchSource:0}: Error finding container 88344fff1c1d9294466371942d0a79b232912331b379c708eee15e0509e8aebe: Status 404 returned error can't find the container with id 88344fff1c1d9294466371942d0a79b232912331b379c708eee15e0509e8aebe Jan 22 05:48:34 crc kubenswrapper[4933]: I0122 05:48:34.134600 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/50afe38c-f6b1-422d-9abd-b3a62bc7c24d-utilities\") pod \"redhat-operators-wm42f\" (UID: \"50afe38c-f6b1-422d-9abd-b3a62bc7c24d\") " pod="openshift-marketplace/redhat-operators-wm42f" Jan 22 05:48:34 crc kubenswrapper[4933]: I0122 05:48:34.134629 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/50afe38c-f6b1-422d-9abd-b3a62bc7c24d-catalog-content\") pod \"redhat-operators-wm42f\" (UID: \"50afe38c-f6b1-422d-9abd-b3a62bc7c24d\") " pod="openshift-marketplace/redhat-operators-wm42f" Jan 22 05:48:34 crc kubenswrapper[4933]: I0122 05:48:34.134666 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-krh9x\" (UniqueName: \"kubernetes.io/projected/50afe38c-f6b1-422d-9abd-b3a62bc7c24d-kube-api-access-krh9x\") pod \"redhat-operators-wm42f\" (UID: \"50afe38c-f6b1-422d-9abd-b3a62bc7c24d\") " pod="openshift-marketplace/redhat-operators-wm42f" Jan 22 05:48:34 crc kubenswrapper[4933]: I0122 05:48:34.136947 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/50afe38c-f6b1-422d-9abd-b3a62bc7c24d-catalog-content\") pod \"redhat-operators-wm42f\" (UID: \"50afe38c-f6b1-422d-9abd-b3a62bc7c24d\") " pod="openshift-marketplace/redhat-operators-wm42f" Jan 22 05:48:34 crc kubenswrapper[4933]: I0122 05:48:34.137024 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/50afe38c-f6b1-422d-9abd-b3a62bc7c24d-utilities\") pod \"redhat-operators-wm42f\" (UID: \"50afe38c-f6b1-422d-9abd-b3a62bc7c24d\") " pod="openshift-marketplace/redhat-operators-wm42f" Jan 22 05:48:34 crc kubenswrapper[4933]: I0122 05:48:34.159034 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-krh9x\" (UniqueName: \"kubernetes.io/projected/50afe38c-f6b1-422d-9abd-b3a62bc7c24d-kube-api-access-krh9x\") pod \"redhat-operators-wm42f\" (UID: \"50afe38c-f6b1-422d-9abd-b3a62bc7c24d\") " pod="openshift-marketplace/redhat-operators-wm42f" Jan 22 05:48:34 crc kubenswrapper[4933]: I0122 05:48:34.206218 4933 patch_prober.go:28] interesting pod/router-default-5444994796-77xhn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 22 05:48:34 crc kubenswrapper[4933]: [-]has-synced failed: reason withheld Jan 22 05:48:34 crc kubenswrapper[4933]: [+]process-running ok Jan 22 05:48:34 crc kubenswrapper[4933]: healthz check failed Jan 22 05:48:34 crc kubenswrapper[4933]: I0122 05:48:34.206274 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-77xhn" podUID="2860560e-929a-4dbe-84c0-23326bf7cdf4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 22 05:48:34 crc kubenswrapper[4933]: I0122 05:48:34.309846 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xmhw6"] Jan 22 05:48:34 crc kubenswrapper[4933]: W0122 05:48:34.315308 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod480c0b7c_fa2f_4a02_b29f_0fb1c05c2e2d.slice/crio-c28d8ebcd6eefb076f1eb828e3508c40964973525c8f928d92faebc43e21ef9b WatchSource:0}: Error finding container 
Jan 22 05:48:34 crc kubenswrapper[4933]: W0122 05:48:34.315308 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod480c0b7c_fa2f_4a02_b29f_0fb1c05c2e2d.slice/crio-c28d8ebcd6eefb076f1eb828e3508c40964973525c8f928d92faebc43e21ef9b WatchSource:0}: Error finding container c28d8ebcd6eefb076f1eb828e3508c40964973525c8f928d92faebc43e21ef9b: Status 404 returned error can't find the container with id c28d8ebcd6eefb076f1eb828e3508c40964973525c8f928d92faebc43e21ef9b
Jan 22 05:48:34 crc kubenswrapper[4933]: I0122 05:48:34.318768 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wm42f"
Jan 22 05:48:34 crc kubenswrapper[4933]: I0122 05:48:34.542367 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Jan 22 05:48:34 crc kubenswrapper[4933]: I0122 05:48:34.543749 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 22 05:48:34 crc kubenswrapper[4933]: I0122 05:48:34.545699 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Jan 22 05:48:34 crc kubenswrapper[4933]: I0122 05:48:34.550428 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Jan 22 05:48:34 crc kubenswrapper[4933]: I0122 05:48:34.556104 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Jan 22 05:48:34 crc kubenswrapper[4933]: I0122 05:48:34.629600 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-t8rgm" event={"ID":"0902347a-c5e2-4891-812b-cfe6efc32261","Type":"ContainerStarted","Data":"0457750b0082b0dd1260f67a30dba41dfd5f4d3adc7e9bd3ea0b00fb7fdf0079"}
Jan 22 05:48:34 crc kubenswrapper[4933]: I0122 05:48:34.629649 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-t8rgm" event={"ID":"0902347a-c5e2-4891-812b-cfe6efc32261","Type":"ContainerStarted","Data":"88344fff1c1d9294466371942d0a79b232912331b379c708eee15e0509e8aebe"}
Jan 22 05:48:34 crc kubenswrapper[4933]: I0122 05:48:34.643689 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xmhw6" event={"ID":"480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d","Type":"ContainerStarted","Data":"c28d8ebcd6eefb076f1eb828e3508c40964973525c8f928d92faebc43e21ef9b"}
Jan 22 05:48:34 crc kubenswrapper[4933]: I0122 05:48:34.644266 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e0a663e1-62c0-4e9b-a2c0-bbd6de45542b-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"e0a663e1-62c0-4e9b-a2c0-bbd6de45542b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 22 05:48:34 crc kubenswrapper[4933]: I0122 05:48:34.644419 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e0a663e1-62c0-4e9b-a2c0-bbd6de45542b-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"e0a663e1-62c0-4e9b-a2c0-bbd6de45542b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 22 05:48:34 crc kubenswrapper[4933]: I0122 05:48:34.645344 4933 generic.go:334] "Generic (PLEG): container finished" podID="a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f" containerID="63f33c516f448cfbf67d2fd9f379ecb07a4b62ce9f54e3a39db9602ed8fbd4af" exitCode=0
Jan 22 05:48:34 crc kubenswrapper[4933]: I0122 05:48:34.645410 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xtxwj" event={"ID":"a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f","Type":"ContainerDied","Data":"63f33c516f448cfbf67d2fd9f379ecb07a4b62ce9f54e3a39db9602ed8fbd4af"}
Jan 22 05:48:34 crc kubenswrapper[4933]: I0122 05:48:34.645432 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xtxwj" event={"ID":"a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f","Type":"ContainerStarted","Data":"32c22486d8b0317a2b00e11d0f89dbd8c031deb4d30ad86ace82290fe99b9a06"}
Jan 22 05:48:34 crc kubenswrapper[4933]: I0122 05:48:34.649668 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"636a6786-8509-4ee8-9f85-daef3d422ee1","Type":"ContainerStarted","Data":"74517f62acc69d76f69d5e7d80c0fd2658355b1c8e3239c53da5a3e7ff59bdba"}
Jan 22 05:48:34 crc kubenswrapper[4933]: I0122 05:48:34.649744 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"636a6786-8509-4ee8-9f85-daef3d422ee1","Type":"ContainerStarted","Data":"1ea10079e612b741900a9b87a42b09f59ffa36ab2df388c5b5f7b34a1f4c483e"}
Jan 22 05:48:34 crc kubenswrapper[4933]: I0122 05:48:34.748404 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e0a663e1-62c0-4e9b-a2c0-bbd6de45542b-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"e0a663e1-62c0-4e9b-a2c0-bbd6de45542b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 22 05:48:34 crc kubenswrapper[4933]: I0122 05:48:34.748506 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e0a663e1-62c0-4e9b-a2c0-bbd6de45542b-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"e0a663e1-62c0-4e9b-a2c0-bbd6de45542b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 22 05:48:34 crc kubenswrapper[4933]: I0122 05:48:34.750483 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e0a663e1-62c0-4e9b-a2c0-bbd6de45542b-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"e0a663e1-62c0-4e9b-a2c0-bbd6de45542b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 22 05:48:34 crc kubenswrapper[4933]: I0122 05:48:34.800391 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e0a663e1-62c0-4e9b-a2c0-bbd6de45542b-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"e0a663e1-62c0-4e9b-a2c0-bbd6de45542b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 22 05:48:34 crc kubenswrapper[4933]: I0122 05:48:34.887713 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 22 05:48:35 crc kubenswrapper[4933]: I0122 05:48:35.009938 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=3.009920378 podStartE2EDuration="3.009920378s" podCreationTimestamp="2026-01-22 05:48:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:34.68782964 +0000 UTC m=+162.524955003" watchObservedRunningTime="2026-01-22 05:48:35.009920378 +0000 UTC m=+162.847045731"
Jan 22 05:48:35 crc kubenswrapper[4933]: I0122 05:48:35.011962 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wm42f"]
Jan 22 05:48:35 crc kubenswrapper[4933]: I0122 05:48:35.207319 4933 patch_prober.go:28] interesting pod/router-default-5444994796-77xhn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 22 05:48:35 crc kubenswrapper[4933]: [-]has-synced failed: reason withheld
Jan 22 05:48:35 crc kubenswrapper[4933]: [+]process-running ok
Jan 22 05:48:35 crc kubenswrapper[4933]: healthz check failed
Jan 22 05:48:35 crc kubenswrapper[4933]: I0122 05:48:35.208284 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-77xhn" podUID="2860560e-929a-4dbe-84c0-23326bf7cdf4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 22 05:48:35 crc kubenswrapper[4933]: I0122 05:48:35.413250 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-llbq4"
Jan 22 05:48:35 crc kubenswrapper[4933]: I0122 05:48:35.418639 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-llbq4"
Jan 22 05:48:35 crc kubenswrapper[4933]: I0122 05:48:35.507207 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Jan 22 05:48:35 crc kubenswrapper[4933]: I0122 05:48:35.685825 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"e0a663e1-62c0-4e9b-a2c0-bbd6de45542b","Type":"ContainerStarted","Data":"ddb09ad795a8c790e890e231e1e617dfbfce01774ea17749c961999bb4f91378"}
Jan 22 05:48:35 crc kubenswrapper[4933]: I0122 05:48:35.688127 4933 generic.go:334] "Generic (PLEG): container finished" podID="50afe38c-f6b1-422d-9abd-b3a62bc7c24d" containerID="db255203f63d962c2cb73363896ab0267c2546a1e5b74b89b4b6bc28e60c8145" exitCode=0
Jan 22 05:48:35 crc kubenswrapper[4933]: I0122 05:48:35.688216 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wm42f" event={"ID":"50afe38c-f6b1-422d-9abd-b3a62bc7c24d","Type":"ContainerDied","Data":"db255203f63d962c2cb73363896ab0267c2546a1e5b74b89b4b6bc28e60c8145"}
Jan 22 05:48:35 crc kubenswrapper[4933]: I0122 05:48:35.688236 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wm42f" event={"ID":"50afe38c-f6b1-422d-9abd-b3a62bc7c24d","Type":"ContainerStarted","Data":"5a3cb1e62d9d35bb5507bb5712e6a7dc98881887f77af4464f63fd7550750c06"}
pod="openshift-multus/network-metrics-daemon-t8rgm" event={"ID":"0902347a-c5e2-4891-812b-cfe6efc32261","Type":"ContainerStarted","Data":"f73bf74dbb4d2e7f4f4581bd12a191f2ecf63835d87ee4af711528edbe7b3ded"} Jan 22 05:48:35 crc kubenswrapper[4933]: I0122 05:48:35.691572 4933 generic.go:334] "Generic (PLEG): container finished" podID="480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d" containerID="85e636ac8af1c85c8d8df702289b2ed04fbbaf9c26247491f57c57a32cfa66f8" exitCode=0 Jan 22 05:48:35 crc kubenswrapper[4933]: I0122 05:48:35.691697 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xmhw6" event={"ID":"480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d","Type":"ContainerDied","Data":"85e636ac8af1c85c8d8df702289b2ed04fbbaf9c26247491f57c57a32cfa66f8"} Jan 22 05:48:35 crc kubenswrapper[4933]: I0122 05:48:35.694506 4933 generic.go:334] "Generic (PLEG): container finished" podID="636a6786-8509-4ee8-9f85-daef3d422ee1" containerID="74517f62acc69d76f69d5e7d80c0fd2658355b1c8e3239c53da5a3e7ff59bdba" exitCode=0 Jan 22 05:48:35 crc kubenswrapper[4933]: I0122 05:48:35.694764 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"636a6786-8509-4ee8-9f85-daef3d422ee1","Type":"ContainerDied","Data":"74517f62acc69d76f69d5e7d80c0fd2658355b1c8e3239c53da5a3e7ff59bdba"} Jan 22 05:48:35 crc kubenswrapper[4933]: I0122 05:48:35.754637 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-t8rgm" podStartSLOduration=144.754611942 podStartE2EDuration="2m24.754611942s" podCreationTimestamp="2026-01-22 05:46:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:48:35.743205705 +0000 UTC m=+163.580331078" watchObservedRunningTime="2026-01-22 05:48:35.754611942 +0000 UTC m=+163.591737315" Jan 22 05:48:36 crc kubenswrapper[4933]: I0122 05:48:36.207255 4933 patch_prober.go:28] interesting pod/router-default-5444994796-77xhn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 22 05:48:36 crc kubenswrapper[4933]: [-]has-synced failed: reason withheld Jan 22 05:48:36 crc kubenswrapper[4933]: [+]process-running ok Jan 22 05:48:36 crc kubenswrapper[4933]: healthz check failed Jan 22 05:48:36 crc kubenswrapper[4933]: I0122 05:48:36.207614 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-77xhn" podUID="2860560e-929a-4dbe-84c0-23326bf7cdf4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 22 05:48:36 crc kubenswrapper[4933]: I0122 05:48:36.705260 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"e0a663e1-62c0-4e9b-a2c0-bbd6de45542b","Type":"ContainerStarted","Data":"c101fbde037b471798aa00291813489d646872ce14bfc093ede6701dad7df504"} Jan 22 05:48:36 crc kubenswrapper[4933]: I0122 05:48:36.720313 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=2.720296634 podStartE2EDuration="2.720296634s" podCreationTimestamp="2026-01-22 05:48:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 
05:48:36.719445435 +0000 UTC m=+164.556570788" watchObservedRunningTime="2026-01-22 05:48:36.720296634 +0000 UTC m=+164.557421987" Jan 22 05:48:37 crc kubenswrapper[4933]: I0122 05:48:37.206711 4933 patch_prober.go:28] interesting pod/router-default-5444994796-77xhn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 22 05:48:37 crc kubenswrapper[4933]: [-]has-synced failed: reason withheld Jan 22 05:48:37 crc kubenswrapper[4933]: [+]process-running ok Jan 22 05:48:37 crc kubenswrapper[4933]: healthz check failed Jan 22 05:48:37 crc kubenswrapper[4933]: I0122 05:48:37.207052 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-77xhn" podUID="2860560e-929a-4dbe-84c0-23326bf7cdf4" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 22 05:48:37 crc kubenswrapper[4933]: I0122 05:48:37.227815 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 22 05:48:37 crc kubenswrapper[4933]: I0122 05:48:37.419045 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/636a6786-8509-4ee8-9f85-daef3d422ee1-kube-api-access\") pod \"636a6786-8509-4ee8-9f85-daef3d422ee1\" (UID: \"636a6786-8509-4ee8-9f85-daef3d422ee1\") " Jan 22 05:48:37 crc kubenswrapper[4933]: I0122 05:48:37.419132 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/636a6786-8509-4ee8-9f85-daef3d422ee1-kubelet-dir\") pod \"636a6786-8509-4ee8-9f85-daef3d422ee1\" (UID: \"636a6786-8509-4ee8-9f85-daef3d422ee1\") " Jan 22 05:48:37 crc kubenswrapper[4933]: I0122 05:48:37.422872 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/636a6786-8509-4ee8-9f85-daef3d422ee1-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "636a6786-8509-4ee8-9f85-daef3d422ee1" (UID: "636a6786-8509-4ee8-9f85-daef3d422ee1"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:48:37 crc kubenswrapper[4933]: I0122 05:48:37.428262 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/636a6786-8509-4ee8-9f85-daef3d422ee1-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "636a6786-8509-4ee8-9f85-daef3d422ee1" (UID: "636a6786-8509-4ee8-9f85-daef3d422ee1"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:48:37 crc kubenswrapper[4933]: I0122 05:48:37.528195 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/636a6786-8509-4ee8-9f85-daef3d422ee1-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 22 05:48:37 crc kubenswrapper[4933]: I0122 05:48:37.528234 4933 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/636a6786-8509-4ee8-9f85-daef3d422ee1-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 22 05:48:37 crc kubenswrapper[4933]: I0122 05:48:37.756561 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"636a6786-8509-4ee8-9f85-daef3d422ee1","Type":"ContainerDied","Data":"1ea10079e612b741900a9b87a42b09f59ffa36ab2df388c5b5f7b34a1f4c483e"} Jan 22 05:48:37 crc kubenswrapper[4933]: I0122 05:48:37.756597 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1ea10079e612b741900a9b87a42b09f59ffa36ab2df388c5b5f7b34a1f4c483e" Jan 22 05:48:37 crc kubenswrapper[4933]: I0122 05:48:37.756700 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 22 05:48:37 crc kubenswrapper[4933]: I0122 05:48:37.761198 4933 generic.go:334] "Generic (PLEG): container finished" podID="e0a663e1-62c0-4e9b-a2c0-bbd6de45542b" containerID="c101fbde037b471798aa00291813489d646872ce14bfc093ede6701dad7df504" exitCode=0 Jan 22 05:48:37 crc kubenswrapper[4933]: I0122 05:48:37.761253 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"e0a663e1-62c0-4e9b-a2c0-bbd6de45542b","Type":"ContainerDied","Data":"c101fbde037b471798aa00291813489d646872ce14bfc093ede6701dad7df504"} Jan 22 05:48:38 crc kubenswrapper[4933]: I0122 05:48:38.215814 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-77xhn" Jan 22 05:48:38 crc kubenswrapper[4933]: I0122 05:48:38.218821 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-77xhn" Jan 22 05:48:38 crc kubenswrapper[4933]: I0122 05:48:38.902162 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-jhjlm" Jan 22 05:48:39 crc kubenswrapper[4933]: I0122 05:48:39.355912 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 22 05:48:39 crc kubenswrapper[4933]: I0122 05:48:39.474165 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e0a663e1-62c0-4e9b-a2c0-bbd6de45542b-kube-api-access\") pod \"e0a663e1-62c0-4e9b-a2c0-bbd6de45542b\" (UID: \"e0a663e1-62c0-4e9b-a2c0-bbd6de45542b\") " Jan 22 05:48:39 crc kubenswrapper[4933]: I0122 05:48:39.474250 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e0a663e1-62c0-4e9b-a2c0-bbd6de45542b-kubelet-dir\") pod \"e0a663e1-62c0-4e9b-a2c0-bbd6de45542b\" (UID: \"e0a663e1-62c0-4e9b-a2c0-bbd6de45542b\") " Jan 22 05:48:39 crc kubenswrapper[4933]: I0122 05:48:39.474393 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e0a663e1-62c0-4e9b-a2c0-bbd6de45542b-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "e0a663e1-62c0-4e9b-a2c0-bbd6de45542b" (UID: "e0a663e1-62c0-4e9b-a2c0-bbd6de45542b"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:48:39 crc kubenswrapper[4933]: I0122 05:48:39.474797 4933 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e0a663e1-62c0-4e9b-a2c0-bbd6de45542b-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 22 05:48:39 crc kubenswrapper[4933]: I0122 05:48:39.479770 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e0a663e1-62c0-4e9b-a2c0-bbd6de45542b-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e0a663e1-62c0-4e9b-a2c0-bbd6de45542b" (UID: "e0a663e1-62c0-4e9b-a2c0-bbd6de45542b"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:48:39 crc kubenswrapper[4933]: I0122 05:48:39.576635 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e0a663e1-62c0-4e9b-a2c0-bbd6de45542b-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 22 05:48:39 crc kubenswrapper[4933]: I0122 05:48:39.787668 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"e0a663e1-62c0-4e9b-a2c0-bbd6de45542b","Type":"ContainerDied","Data":"ddb09ad795a8c790e890e231e1e617dfbfce01774ea17749c961999bb4f91378"} Jan 22 05:48:39 crc kubenswrapper[4933]: I0122 05:48:39.787700 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ddb09ad795a8c790e890e231e1e617dfbfce01774ea17749c961999bb4f91378" Jan 22 05:48:39 crc kubenswrapper[4933]: I0122 05:48:39.787749 4933 util.go:48] "No ready sandbox for pod can be found. 
Jan 22 05:48:39 crc kubenswrapper[4933]: I0122 05:48:39.787749 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 22 05:48:40 crc kubenswrapper[4933]: I0122 05:48:40.664934 4933 patch_prober.go:28] interesting pod/downloads-7954f5f757-tm764 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.19:8080/\": dial tcp 10.217.0.19:8080: connect: connection refused" start-of-body=
Jan 22 05:48:40 crc kubenswrapper[4933]: I0122 05:48:40.665310 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-tm764" podUID="80358407-ad1b-499f-868f-44e3388b0fac" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.19:8080/\": dial tcp 10.217.0.19:8080: connect: connection refused"
Jan 22 05:48:40 crc kubenswrapper[4933]: I0122 05:48:40.664946 4933 patch_prober.go:28] interesting pod/downloads-7954f5f757-tm764 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.19:8080/\": dial tcp 10.217.0.19:8080: connect: connection refused" start-of-body=
Jan 22 05:48:40 crc kubenswrapper[4933]: I0122 05:48:40.665689 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-tm764" podUID="80358407-ad1b-499f-868f-44e3388b0fac" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.19:8080/\": dial tcp 10.217.0.19:8080: connect: connection refused"
Jan 22 05:48:40 crc kubenswrapper[4933]: I0122 05:48:40.740045 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-vb2np"
Jan 22 05:48:40 crc kubenswrapper[4933]: I0122 05:48:40.905548 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-gqpfp"
Jan 22 05:48:40 crc kubenswrapper[4933]: I0122 05:48:40.911677 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-gqpfp"
Jan 22 05:48:40 crc kubenswrapper[4933]: I0122 05:48:40.943966 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 05:48:40 crc kubenswrapper[4933]: I0122 05:48:40.944023 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 05:48:44 crc kubenswrapper[4933]: I0122 05:48:44.584717 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-jt4v9"]
Jan 22 05:48:44 crc kubenswrapper[4933]: I0122 05:48:44.585720 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-jt4v9" podUID="3af890e6-9547-4a96-8719-a7599d1b1701" containerName="controller-manager" containerID="cri-o://a216542fbb5e6883c5ea5f326e32538de556b3bd9ab6fad8f25875f42b06ce8b" gracePeriod=30
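"Killing container with a grace period" with gracePeriod=30 above is the standard two-step stop: ask the container to exit, wait up to the grace period, then force-kill. A generic sketch of that pattern against an ordinary process, a stand-in only, not CRI-O or kubelet code:

package main

import (
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

func main() {
	cmd := exec.Command("sleep", "300") // stand-in for a container's main process
	_ = cmd.Start()
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()

	// Polite stop first, analogous to the CRI stop request behind the log line.
	_ = cmd.Process.Signal(syscall.SIGTERM)
	select {
	case err := <-done:
		fmt.Println("exited within grace period:", err)
	case <-time.After(30 * time.Second): // gracePeriod=30 from the log
		_ = cmd.Process.Kill() // SIGKILL once the grace period lapses
		fmt.Println("force-killed after grace period")
	}
}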
pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-tzlk9"] Jan 22 05:48:44 crc kubenswrapper[4933]: I0122 05:48:44.608931 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tzlk9" podUID="44595319-f5f2-4db3-9671-9c8680c2dfc7" containerName="route-controller-manager" containerID="cri-o://e30d8f80c85f5e587204ffacd864dfbbd2eaee05f711406e5fe051e7b167636b" gracePeriod=30 Jan 22 05:48:44 crc kubenswrapper[4933]: I0122 05:48:44.864833 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-cluster-samples-operator_cluster-samples-operator-665b6dd947-9j8zq_6b02d39a-2e64-4035-abf2-99dcc7f32194/cluster-samples-operator/0.log" Jan 22 05:48:44 crc kubenswrapper[4933]: I0122 05:48:44.864877 4933 generic.go:334] "Generic (PLEG): container finished" podID="6b02d39a-2e64-4035-abf2-99dcc7f32194" containerID="aab05ecaa26364b487e369a9587de68da3f1f462b33de1c2701da91852afa1b3" exitCode=2 Jan 22 05:48:44 crc kubenswrapper[4933]: I0122 05:48:44.864902 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9j8zq" event={"ID":"6b02d39a-2e64-4035-abf2-99dcc7f32194","Type":"ContainerDied","Data":"aab05ecaa26364b487e369a9587de68da3f1f462b33de1c2701da91852afa1b3"} Jan 22 05:48:44 crc kubenswrapper[4933]: I0122 05:48:44.865327 4933 scope.go:117] "RemoveContainer" containerID="aab05ecaa26364b487e369a9587de68da3f1f462b33de1c2701da91852afa1b3" Jan 22 05:48:45 crc kubenswrapper[4933]: I0122 05:48:45.876883 4933 generic.go:334] "Generic (PLEG): container finished" podID="44595319-f5f2-4db3-9671-9c8680c2dfc7" containerID="e30d8f80c85f5e587204ffacd864dfbbd2eaee05f711406e5fe051e7b167636b" exitCode=0 Jan 22 05:48:45 crc kubenswrapper[4933]: I0122 05:48:45.876976 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tzlk9" event={"ID":"44595319-f5f2-4db3-9671-9c8680c2dfc7","Type":"ContainerDied","Data":"e30d8f80c85f5e587204ffacd864dfbbd2eaee05f711406e5fe051e7b167636b"} Jan 22 05:48:45 crc kubenswrapper[4933]: I0122 05:48:45.879371 4933 generic.go:334] "Generic (PLEG): container finished" podID="3af890e6-9547-4a96-8719-a7599d1b1701" containerID="a216542fbb5e6883c5ea5f326e32538de556b3bd9ab6fad8f25875f42b06ce8b" exitCode=0 Jan 22 05:48:45 crc kubenswrapper[4933]: I0122 05:48:45.879399 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-jt4v9" event={"ID":"3af890e6-9547-4a96-8719-a7599d1b1701","Type":"ContainerDied","Data":"a216542fbb5e6883c5ea5f326e32538de556b3bd9ab6fad8f25875f42b06ce8b"} Jan 22 05:48:49 crc kubenswrapper[4933]: I0122 05:48:49.499878 4933 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-jt4v9 container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body= Jan 22 05:48:49 crc kubenswrapper[4933]: I0122 05:48:49.500286 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-jt4v9" podUID="3af890e6-9547-4a96-8719-a7599d1b1701" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" Jan 22 05:48:50 crc kubenswrapper[4933]: I0122 
05:48:50.691658 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-tm764" Jan 22 05:48:50 crc kubenswrapper[4933]: I0122 05:48:50.774041 4933 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-tzlk9 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" start-of-body= Jan 22 05:48:50 crc kubenswrapper[4933]: I0122 05:48:50.774127 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tzlk9" podUID="44595319-f5f2-4db3-9671-9c8680c2dfc7" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" Jan 22 05:48:51 crc kubenswrapper[4933]: I0122 05:48:51.035496 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd" Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.471875 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-jt4v9" Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.500582 4933 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-jt4v9 container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: i/o timeout" start-of-body= Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.500666 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-jt4v9" podUID="3af890e6-9547-4a96-8719-a7599d1b1701" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: i/o timeout" Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.523826 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-579857c6c9-hc2bw"] Jan 22 05:49:00 crc kubenswrapper[4933]: E0122 05:49:00.524096 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3af890e6-9547-4a96-8719-a7599d1b1701" containerName="controller-manager" Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.524111 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="3af890e6-9547-4a96-8719-a7599d1b1701" containerName="controller-manager" Jan 22 05:49:00 crc kubenswrapper[4933]: E0122 05:49:00.524127 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0a663e1-62c0-4e9b-a2c0-bbd6de45542b" containerName="pruner" Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.524134 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0a663e1-62c0-4e9b-a2c0-bbd6de45542b" containerName="pruner" Jan 22 05:49:00 crc kubenswrapper[4933]: E0122 05:49:00.524142 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="636a6786-8509-4ee8-9f85-daef3d422ee1" containerName="pruner" Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.524148 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="636a6786-8509-4ee8-9f85-daef3d422ee1" containerName="pruner" Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.524363 4933 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="e0a663e1-62c0-4e9b-a2c0-bbd6de45542b" containerName="pruner" Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.524381 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="3af890e6-9547-4a96-8719-a7599d1b1701" containerName="controller-manager" Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.524390 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="636a6786-8509-4ee8-9f85-daef3d422ee1" containerName="pruner" Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.524795 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-579857c6c9-hc2bw" Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.536200 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-579857c6c9-hc2bw"] Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.623990 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hc95z\" (UniqueName: \"kubernetes.io/projected/3af890e6-9547-4a96-8719-a7599d1b1701-kube-api-access-hc95z\") pod \"3af890e6-9547-4a96-8719-a7599d1b1701\" (UID: \"3af890e6-9547-4a96-8719-a7599d1b1701\") " Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.624141 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3af890e6-9547-4a96-8719-a7599d1b1701-serving-cert\") pod \"3af890e6-9547-4a96-8719-a7599d1b1701\" (UID: \"3af890e6-9547-4a96-8719-a7599d1b1701\") " Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.624203 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3af890e6-9547-4a96-8719-a7599d1b1701-config\") pod \"3af890e6-9547-4a96-8719-a7599d1b1701\" (UID: \"3af890e6-9547-4a96-8719-a7599d1b1701\") " Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.624237 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3af890e6-9547-4a96-8719-a7599d1b1701-proxy-ca-bundles\") pod \"3af890e6-9547-4a96-8719-a7599d1b1701\" (UID: \"3af890e6-9547-4a96-8719-a7599d1b1701\") " Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.624277 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3af890e6-9547-4a96-8719-a7599d1b1701-client-ca\") pod \"3af890e6-9547-4a96-8719-a7599d1b1701\" (UID: \"3af890e6-9547-4a96-8719-a7599d1b1701\") " Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.624906 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3af890e6-9547-4a96-8719-a7599d1b1701-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "3af890e6-9547-4a96-8719-a7599d1b1701" (UID: "3af890e6-9547-4a96-8719-a7599d1b1701"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.624991 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3af890e6-9547-4a96-8719-a7599d1b1701-config" (OuterVolumeSpecName: "config") pod "3af890e6-9547-4a96-8719-a7599d1b1701" (UID: "3af890e6-9547-4a96-8719-a7599d1b1701"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.625187 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3af890e6-9547-4a96-8719-a7599d1b1701-client-ca" (OuterVolumeSpecName: "client-ca") pod "3af890e6-9547-4a96-8719-a7599d1b1701" (UID: "3af890e6-9547-4a96-8719-a7599d1b1701"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.633774 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3af890e6-9547-4a96-8719-a7599d1b1701-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "3af890e6-9547-4a96-8719-a7599d1b1701" (UID: "3af890e6-9547-4a96-8719-a7599d1b1701"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.634428 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3af890e6-9547-4a96-8719-a7599d1b1701-kube-api-access-hc95z" (OuterVolumeSpecName: "kube-api-access-hc95z") pod "3af890e6-9547-4a96-8719-a7599d1b1701" (UID: "3af890e6-9547-4a96-8719-a7599d1b1701"). InnerVolumeSpecName "kube-api-access-hc95z". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.726638 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82a649f3-2713-4ec8-aa69-6266ac6963d9-config\") pod \"controller-manager-579857c6c9-hc2bw\" (UID: \"82a649f3-2713-4ec8-aa69-6266ac6963d9\") " pod="openshift-controller-manager/controller-manager-579857c6c9-hc2bw" Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.726754 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/82a649f3-2713-4ec8-aa69-6266ac6963d9-proxy-ca-bundles\") pod \"controller-manager-579857c6c9-hc2bw\" (UID: \"82a649f3-2713-4ec8-aa69-6266ac6963d9\") " pod="openshift-controller-manager/controller-manager-579857c6c9-hc2bw" Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.728691 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/82a649f3-2713-4ec8-aa69-6266ac6963d9-client-ca\") pod \"controller-manager-579857c6c9-hc2bw\" (UID: \"82a649f3-2713-4ec8-aa69-6266ac6963d9\") " pod="openshift-controller-manager/controller-manager-579857c6c9-hc2bw" Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.728836 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l9d5n\" (UniqueName: \"kubernetes.io/projected/82a649f3-2713-4ec8-aa69-6266ac6963d9-kube-api-access-l9d5n\") pod \"controller-manager-579857c6c9-hc2bw\" (UID: \"82a649f3-2713-4ec8-aa69-6266ac6963d9\") " pod="openshift-controller-manager/controller-manager-579857c6c9-hc2bw" Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.728864 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/82a649f3-2713-4ec8-aa69-6266ac6963d9-serving-cert\") pod \"controller-manager-579857c6c9-hc2bw\" (UID: \"82a649f3-2713-4ec8-aa69-6266ac6963d9\") " 
pod="openshift-controller-manager/controller-manager-579857c6c9-hc2bw" Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.729008 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hc95z\" (UniqueName: \"kubernetes.io/projected/3af890e6-9547-4a96-8719-a7599d1b1701-kube-api-access-hc95z\") on node \"crc\" DevicePath \"\"" Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.729030 4933 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3af890e6-9547-4a96-8719-a7599d1b1701-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.729047 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3af890e6-9547-4a96-8719-a7599d1b1701-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.729057 4933 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3af890e6-9547-4a96-8719-a7599d1b1701-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.729067 4933 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3af890e6-9547-4a96-8719-a7599d1b1701-client-ca\") on node \"crc\" DevicePath \"\"" Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.773350 4933 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-tzlk9 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" start-of-body= Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.773439 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tzlk9" podUID="44595319-f5f2-4db3-9671-9c8680c2dfc7" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.830462 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/82a649f3-2713-4ec8-aa69-6266ac6963d9-client-ca\") pod \"controller-manager-579857c6c9-hc2bw\" (UID: \"82a649f3-2713-4ec8-aa69-6266ac6963d9\") " pod="openshift-controller-manager/controller-manager-579857c6c9-hc2bw" Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.830524 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l9d5n\" (UniqueName: \"kubernetes.io/projected/82a649f3-2713-4ec8-aa69-6266ac6963d9-kube-api-access-l9d5n\") pod \"controller-manager-579857c6c9-hc2bw\" (UID: \"82a649f3-2713-4ec8-aa69-6266ac6963d9\") " pod="openshift-controller-manager/controller-manager-579857c6c9-hc2bw" Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.830550 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/82a649f3-2713-4ec8-aa69-6266ac6963d9-serving-cert\") pod \"controller-manager-579857c6c9-hc2bw\" (UID: \"82a649f3-2713-4ec8-aa69-6266ac6963d9\") " pod="openshift-controller-manager/controller-manager-579857c6c9-hc2bw" Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.830581 4933 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82a649f3-2713-4ec8-aa69-6266ac6963d9-config\") pod \"controller-manager-579857c6c9-hc2bw\" (UID: \"82a649f3-2713-4ec8-aa69-6266ac6963d9\") " pod="openshift-controller-manager/controller-manager-579857c6c9-hc2bw" Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.830624 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/82a649f3-2713-4ec8-aa69-6266ac6963d9-proxy-ca-bundles\") pod \"controller-manager-579857c6c9-hc2bw\" (UID: \"82a649f3-2713-4ec8-aa69-6266ac6963d9\") " pod="openshift-controller-manager/controller-manager-579857c6c9-hc2bw" Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.832616 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/82a649f3-2713-4ec8-aa69-6266ac6963d9-proxy-ca-bundles\") pod \"controller-manager-579857c6c9-hc2bw\" (UID: \"82a649f3-2713-4ec8-aa69-6266ac6963d9\") " pod="openshift-controller-manager/controller-manager-579857c6c9-hc2bw" Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.832687 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/82a649f3-2713-4ec8-aa69-6266ac6963d9-client-ca\") pod \"controller-manager-579857c6c9-hc2bw\" (UID: \"82a649f3-2713-4ec8-aa69-6266ac6963d9\") " pod="openshift-controller-manager/controller-manager-579857c6c9-hc2bw" Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.833750 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82a649f3-2713-4ec8-aa69-6266ac6963d9-config\") pod \"controller-manager-579857c6c9-hc2bw\" (UID: \"82a649f3-2713-4ec8-aa69-6266ac6963d9\") " pod="openshift-controller-manager/controller-manager-579857c6c9-hc2bw" Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.837914 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/82a649f3-2713-4ec8-aa69-6266ac6963d9-serving-cert\") pod \"controller-manager-579857c6c9-hc2bw\" (UID: \"82a649f3-2713-4ec8-aa69-6266ac6963d9\") " pod="openshift-controller-manager/controller-manager-579857c6c9-hc2bw" Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.861877 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l9d5n\" (UniqueName: \"kubernetes.io/projected/82a649f3-2713-4ec8-aa69-6266ac6963d9-kube-api-access-l9d5n\") pod \"controller-manager-579857c6c9-hc2bw\" (UID: \"82a649f3-2713-4ec8-aa69-6266ac6963d9\") " pod="openshift-controller-manager/controller-manager-579857c6c9-hc2bw" Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.878206 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-q2rvb" Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.968228 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-jt4v9" event={"ID":"3af890e6-9547-4a96-8719-a7599d1b1701","Type":"ContainerDied","Data":"61294885fbd6e819e78ae84d309fe1c301eb30c1a003b84022a822dd7505b42e"} Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.968286 4933 scope.go:117] "RemoveContainer" containerID="a216542fbb5e6883c5ea5f326e32538de556b3bd9ab6fad8f25875f42b06ce8b" Jan 22 05:49:00 crc 
kubenswrapper[4933]: I0122 05:49:00.968301 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-jt4v9" Jan 22 05:49:00 crc kubenswrapper[4933]: I0122 05:49:00.996630 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-jt4v9"] Jan 22 05:49:01 crc kubenswrapper[4933]: I0122 05:49:01.001001 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-jt4v9"] Jan 22 05:49:01 crc kubenswrapper[4933]: I0122 05:49:01.148551 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-579857c6c9-hc2bw" Jan 22 05:49:02 crc kubenswrapper[4933]: I0122 05:49:02.500897 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3af890e6-9547-4a96-8719-a7599d1b1701" path="/var/lib/kubelet/pods/3af890e6-9547-4a96-8719-a7599d1b1701/volumes" Jan 22 05:49:04 crc kubenswrapper[4933]: I0122 05:49:04.522545 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-579857c6c9-hc2bw"] Jan 22 05:49:10 crc kubenswrapper[4933]: I0122 05:49:10.943906 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 05:49:10 crc kubenswrapper[4933]: I0122 05:49:10.944408 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 05:49:11 crc kubenswrapper[4933]: I0122 05:49:11.151228 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 22 05:49:11 crc kubenswrapper[4933]: I0122 05:49:11.152209 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 22 05:49:11 crc kubenswrapper[4933]: I0122 05:49:11.154588 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Jan 22 05:49:11 crc kubenswrapper[4933]: I0122 05:49:11.154902 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Jan 22 05:49:11 crc kubenswrapper[4933]: I0122 05:49:11.157054 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 22 05:49:11 crc kubenswrapper[4933]: I0122 05:49:11.191085 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/97d20e75-3505-42aa-af7e-3181eed5d8df-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"97d20e75-3505-42aa-af7e-3181eed5d8df\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 22 05:49:11 crc kubenswrapper[4933]: I0122 05:49:11.191157 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/97d20e75-3505-42aa-af7e-3181eed5d8df-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"97d20e75-3505-42aa-af7e-3181eed5d8df\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 22 05:49:11 crc kubenswrapper[4933]: E0122 05:49:11.281210 4933 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Jan 22 05:49:11 crc kubenswrapper[4933]: E0122 05:49:11.281371 4933 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-vd4m5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-xtxwj_openshift-marketplace(a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f): ErrImagePull: rpc error: code = Canceled desc = 
copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 22 05:49:11 crc kubenswrapper[4933]: E0122 05:49:11.282541 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-xtxwj" podUID="a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f" Jan 22 05:49:11 crc kubenswrapper[4933]: I0122 05:49:11.291986 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/97d20e75-3505-42aa-af7e-3181eed5d8df-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"97d20e75-3505-42aa-af7e-3181eed5d8df\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 22 05:49:11 crc kubenswrapper[4933]: I0122 05:49:11.292028 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/97d20e75-3505-42aa-af7e-3181eed5d8df-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"97d20e75-3505-42aa-af7e-3181eed5d8df\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 22 05:49:11 crc kubenswrapper[4933]: I0122 05:49:11.292146 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/97d20e75-3505-42aa-af7e-3181eed5d8df-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"97d20e75-3505-42aa-af7e-3181eed5d8df\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 22 05:49:11 crc kubenswrapper[4933]: I0122 05:49:11.310990 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/97d20e75-3505-42aa-af7e-3181eed5d8df-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"97d20e75-3505-42aa-af7e-3181eed5d8df\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 22 05:49:11 crc kubenswrapper[4933]: I0122 05:49:11.480815 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 22 05:49:11 crc kubenswrapper[4933]: I0122 05:49:11.773730 4933 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-tzlk9 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.9:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 22 05:49:11 crc kubenswrapper[4933]: I0122 05:49:11.773799 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tzlk9" podUID="44595319-f5f2-4db3-9671-9c8680c2dfc7" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.9:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 22 05:49:11 crc kubenswrapper[4933]: E0122 05:49:11.900396 4933 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Jan 22 05:49:11 crc kubenswrapper[4933]: E0122 05:49:11.900830 4933 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-krh9x,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-wm42f_openshift-marketplace(50afe38c-f6b1-422d-9abd-b3a62bc7c24d): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 22 05:49:11 crc kubenswrapper[4933]: E0122 05:49:11.902029 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-wm42f" 
podUID="50afe38c-f6b1-422d-9abd-b3a62bc7c24d" Jan 22 05:49:11 crc kubenswrapper[4933]: E0122 05:49:11.916373 4933 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Jan 22 05:49:11 crc kubenswrapper[4933]: E0122 05:49:11.916518 4933 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-v6fb4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-6cws8_openshift-marketplace(0992ece5-d7dd-40c1-adc4-12711a7b3b69): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 22 05:49:11 crc kubenswrapper[4933]: E0122 05:49:11.917680 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-6cws8" podUID="0992ece5-d7dd-40c1-adc4-12711a7b3b69" Jan 22 05:49:11 crc kubenswrapper[4933]: E0122 05:49:11.970951 4933 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Jan 22 05:49:11 crc kubenswrapper[4933]: E0122 05:49:11.971120 4933 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fpsgr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-qxtfm_openshift-marketplace(ec6f4762-c94a-4c73-a84f-469729ae7bae): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 22 05:49:11 crc kubenswrapper[4933]: E0122 05:49:11.972390 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-qxtfm" podUID="ec6f4762-c94a-4c73-a84f-469729ae7bae" Jan 22 05:49:13 crc kubenswrapper[4933]: E0122 05:49:13.441939 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-xtxwj" podUID="a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f" Jan 22 05:49:13 crc kubenswrapper[4933]: E0122 05:49:13.442406 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-qxtfm" podUID="ec6f4762-c94a-4c73-a84f-469729ae7bae" Jan 22 05:49:13 crc kubenswrapper[4933]: E0122 05:49:13.442413 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-6cws8" podUID="0992ece5-d7dd-40c1-adc4-12711a7b3b69" Jan 22 05:49:13 crc kubenswrapper[4933]: E0122 05:49:13.442410 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-wm42f" podUID="50afe38c-f6b1-422d-9abd-b3a62bc7c24d" Jan 22 05:49:13 
crc kubenswrapper[4933]: E0122 05:49:13.509726 4933 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Jan 22 05:49:13 crc kubenswrapper[4933]: E0122 05:49:13.509892 4933 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9cl67,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-4748k_openshift-marketplace(fb89501a-64c9-4b40-84cc-e18896cb53ec): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 22 05:49:13 crc kubenswrapper[4933]: E0122 05:49:13.514173 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-4748k" podUID="fb89501a-64c9-4b40-84cc-e18896cb53ec" Jan 22 05:49:13 crc kubenswrapper[4933]: E0122 05:49:13.524927 4933 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Jan 22 05:49:13 crc kubenswrapper[4933]: E0122 05:49:13.525112 4933 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-sgjnd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-jcpdb_openshift-marketplace(4c343d48-14c7-4862-ab0a-7851d4e0e72a): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 22 05:49:13 crc kubenswrapper[4933]: E0122 05:49:13.526286 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-jcpdb" podUID="4c343d48-14c7-4862-ab0a-7851d4e0e72a" Jan 22 05:49:13 crc kubenswrapper[4933]: E0122 05:49:13.538737 4933 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Jan 22 05:49:13 crc kubenswrapper[4933]: E0122 05:49:13.538893 4933 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6skvb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-jrc2t_openshift-marketplace(731fa9cf-182b-4086-8696-ec971095cb38): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 22 05:49:13 crc kubenswrapper[4933]: E0122 05:49:13.540049 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-jrc2t" podUID="731fa9cf-182b-4086-8696-ec971095cb38" Jan 22 05:49:13 crc kubenswrapper[4933]: I0122 05:49:13.578860 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tzlk9" Jan 22 05:49:13 crc kubenswrapper[4933]: I0122 05:49:13.620563 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7d4564854f-sxx4k"] Jan 22 05:49:13 crc kubenswrapper[4933]: E0122 05:49:13.621236 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44595319-f5f2-4db3-9671-9c8680c2dfc7" containerName="route-controller-manager" Jan 22 05:49:13 crc kubenswrapper[4933]: I0122 05:49:13.621252 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="44595319-f5f2-4db3-9671-9c8680c2dfc7" containerName="route-controller-manager" Jan 22 05:49:13 crc kubenswrapper[4933]: I0122 05:49:13.621348 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="44595319-f5f2-4db3-9671-9c8680c2dfc7" containerName="route-controller-manager" Jan 22 05:49:13 crc kubenswrapper[4933]: I0122 05:49:13.629201 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7d4564854f-sxx4k"] Jan 22 05:49:13 crc kubenswrapper[4933]: I0122 05:49:13.629291 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7d4564854f-sxx4k" Jan 22 05:49:13 crc kubenswrapper[4933]: I0122 05:49:13.720518 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/44595319-f5f2-4db3-9671-9c8680c2dfc7-serving-cert\") pod \"44595319-f5f2-4db3-9671-9c8680c2dfc7\" (UID: \"44595319-f5f2-4db3-9671-9c8680c2dfc7\") " Jan 22 05:49:13 crc kubenswrapper[4933]: I0122 05:49:13.720914 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/44595319-f5f2-4db3-9671-9c8680c2dfc7-client-ca\") pod \"44595319-f5f2-4db3-9671-9c8680c2dfc7\" (UID: \"44595319-f5f2-4db3-9671-9c8680c2dfc7\") " Jan 22 05:49:13 crc kubenswrapper[4933]: I0122 05:49:13.720949 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/44595319-f5f2-4db3-9671-9c8680c2dfc7-config\") pod \"44595319-f5f2-4db3-9671-9c8680c2dfc7\" (UID: \"44595319-f5f2-4db3-9671-9c8680c2dfc7\") " Jan 22 05:49:13 crc kubenswrapper[4933]: I0122 05:49:13.720991 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sn4l5\" (UniqueName: \"kubernetes.io/projected/44595319-f5f2-4db3-9671-9c8680c2dfc7-kube-api-access-sn4l5\") pod \"44595319-f5f2-4db3-9671-9c8680c2dfc7\" (UID: \"44595319-f5f2-4db3-9671-9c8680c2dfc7\") " Jan 22 05:49:13 crc kubenswrapper[4933]: I0122 05:49:13.721229 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/184eb57b-71b7-4d7a-a3bf-7866425be9ce-serving-cert\") pod \"route-controller-manager-7d4564854f-sxx4k\" (UID: \"184eb57b-71b7-4d7a-a3bf-7866425be9ce\") " pod="openshift-route-controller-manager/route-controller-manager-7d4564854f-sxx4k" Jan 22 05:49:13 crc kubenswrapper[4933]: I0122 05:49:13.721271 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/184eb57b-71b7-4d7a-a3bf-7866425be9ce-client-ca\") pod \"route-controller-manager-7d4564854f-sxx4k\" (UID: \"184eb57b-71b7-4d7a-a3bf-7866425be9ce\") " pod="openshift-route-controller-manager/route-controller-manager-7d4564854f-sxx4k" Jan 22 05:49:13 crc kubenswrapper[4933]: I0122 05:49:13.721378 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-29j8x\" (UniqueName: \"kubernetes.io/projected/184eb57b-71b7-4d7a-a3bf-7866425be9ce-kube-api-access-29j8x\") pod \"route-controller-manager-7d4564854f-sxx4k\" (UID: \"184eb57b-71b7-4d7a-a3bf-7866425be9ce\") " pod="openshift-route-controller-manager/route-controller-manager-7d4564854f-sxx4k" Jan 22 05:49:13 crc kubenswrapper[4933]: I0122 05:49:13.721443 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/184eb57b-71b7-4d7a-a3bf-7866425be9ce-config\") pod \"route-controller-manager-7d4564854f-sxx4k\" (UID: \"184eb57b-71b7-4d7a-a3bf-7866425be9ce\") " pod="openshift-route-controller-manager/route-controller-manager-7d4564854f-sxx4k" Jan 22 05:49:13 crc kubenswrapper[4933]: I0122 05:49:13.722066 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/44595319-f5f2-4db3-9671-9c8680c2dfc7-client-ca" 
(OuterVolumeSpecName: "client-ca") pod "44595319-f5f2-4db3-9671-9c8680c2dfc7" (UID: "44595319-f5f2-4db3-9671-9c8680c2dfc7"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:49:13 crc kubenswrapper[4933]: I0122 05:49:13.722168 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/44595319-f5f2-4db3-9671-9c8680c2dfc7-config" (OuterVolumeSpecName: "config") pod "44595319-f5f2-4db3-9671-9c8680c2dfc7" (UID: "44595319-f5f2-4db3-9671-9c8680c2dfc7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:49:13 crc kubenswrapper[4933]: I0122 05:49:13.733226 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44595319-f5f2-4db3-9671-9c8680c2dfc7-kube-api-access-sn4l5" (OuterVolumeSpecName: "kube-api-access-sn4l5") pod "44595319-f5f2-4db3-9671-9c8680c2dfc7" (UID: "44595319-f5f2-4db3-9671-9c8680c2dfc7"). InnerVolumeSpecName "kube-api-access-sn4l5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:49:13 crc kubenswrapper[4933]: I0122 05:49:13.733226 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44595319-f5f2-4db3-9671-9c8680c2dfc7-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "44595319-f5f2-4db3-9671-9c8680c2dfc7" (UID: "44595319-f5f2-4db3-9671-9c8680c2dfc7"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:49:13 crc kubenswrapper[4933]: I0122 05:49:13.795817 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 22 05:49:13 crc kubenswrapper[4933]: W0122 05:49:13.801807 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod97d20e75_3505_42aa_af7e_3181eed5d8df.slice/crio-70560fcb782d9afcf327dccd433691551d36f95d8f1710190173239cc962db7e WatchSource:0}: Error finding container 70560fcb782d9afcf327dccd433691551d36f95d8f1710190173239cc962db7e: Status 404 returned error can't find the container with id 70560fcb782d9afcf327dccd433691551d36f95d8f1710190173239cc962db7e Jan 22 05:49:13 crc kubenswrapper[4933]: I0122 05:49:13.822406 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-29j8x\" (UniqueName: \"kubernetes.io/projected/184eb57b-71b7-4d7a-a3bf-7866425be9ce-kube-api-access-29j8x\") pod \"route-controller-manager-7d4564854f-sxx4k\" (UID: \"184eb57b-71b7-4d7a-a3bf-7866425be9ce\") " pod="openshift-route-controller-manager/route-controller-manager-7d4564854f-sxx4k" Jan 22 05:49:13 crc kubenswrapper[4933]: I0122 05:49:13.822467 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/184eb57b-71b7-4d7a-a3bf-7866425be9ce-config\") pod \"route-controller-manager-7d4564854f-sxx4k\" (UID: \"184eb57b-71b7-4d7a-a3bf-7866425be9ce\") " pod="openshift-route-controller-manager/route-controller-manager-7d4564854f-sxx4k" Jan 22 05:49:13 crc kubenswrapper[4933]: I0122 05:49:13.822512 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/184eb57b-71b7-4d7a-a3bf-7866425be9ce-serving-cert\") pod \"route-controller-manager-7d4564854f-sxx4k\" (UID: \"184eb57b-71b7-4d7a-a3bf-7866425be9ce\") " pod="openshift-route-controller-manager/route-controller-manager-7d4564854f-sxx4k" Jan 22 05:49:13 crc kubenswrapper[4933]: 
I0122 05:49:13.822532 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/184eb57b-71b7-4d7a-a3bf-7866425be9ce-client-ca\") pod \"route-controller-manager-7d4564854f-sxx4k\" (UID: \"184eb57b-71b7-4d7a-a3bf-7866425be9ce\") " pod="openshift-route-controller-manager/route-controller-manager-7d4564854f-sxx4k" Jan 22 05:49:13 crc kubenswrapper[4933]: I0122 05:49:13.822611 4933 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/44595319-f5f2-4db3-9671-9c8680c2dfc7-client-ca\") on node \"crc\" DevicePath \"\"" Jan 22 05:49:13 crc kubenswrapper[4933]: I0122 05:49:13.822622 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/44595319-f5f2-4db3-9671-9c8680c2dfc7-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:49:13 crc kubenswrapper[4933]: I0122 05:49:13.822632 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sn4l5\" (UniqueName: \"kubernetes.io/projected/44595319-f5f2-4db3-9671-9c8680c2dfc7-kube-api-access-sn4l5\") on node \"crc\" DevicePath \"\"" Jan 22 05:49:13 crc kubenswrapper[4933]: I0122 05:49:13.823367 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/184eb57b-71b7-4d7a-a3bf-7866425be9ce-client-ca\") pod \"route-controller-manager-7d4564854f-sxx4k\" (UID: \"184eb57b-71b7-4d7a-a3bf-7866425be9ce\") " pod="openshift-route-controller-manager/route-controller-manager-7d4564854f-sxx4k" Jan 22 05:49:13 crc kubenswrapper[4933]: I0122 05:49:13.823428 4933 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/44595319-f5f2-4db3-9671-9c8680c2dfc7-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:49:13 crc kubenswrapper[4933]: I0122 05:49:13.823726 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/184eb57b-71b7-4d7a-a3bf-7866425be9ce-config\") pod \"route-controller-manager-7d4564854f-sxx4k\" (UID: \"184eb57b-71b7-4d7a-a3bf-7866425be9ce\") " pod="openshift-route-controller-manager/route-controller-manager-7d4564854f-sxx4k" Jan 22 05:49:13 crc kubenswrapper[4933]: I0122 05:49:13.826050 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/184eb57b-71b7-4d7a-a3bf-7866425be9ce-serving-cert\") pod \"route-controller-manager-7d4564854f-sxx4k\" (UID: \"184eb57b-71b7-4d7a-a3bf-7866425be9ce\") " pod="openshift-route-controller-manager/route-controller-manager-7d4564854f-sxx4k" Jan 22 05:49:13 crc kubenswrapper[4933]: I0122 05:49:13.837244 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-29j8x\" (UniqueName: \"kubernetes.io/projected/184eb57b-71b7-4d7a-a3bf-7866425be9ce-kube-api-access-29j8x\") pod \"route-controller-manager-7d4564854f-sxx4k\" (UID: \"184eb57b-71b7-4d7a-a3bf-7866425be9ce\") " pod="openshift-route-controller-manager/route-controller-manager-7d4564854f-sxx4k" Jan 22 05:49:13 crc kubenswrapper[4933]: I0122 05:49:13.917159 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-579857c6c9-hc2bw"] Jan 22 05:49:13 crc kubenswrapper[4933]: W0122 05:49:13.924821 4933 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod82a649f3_2713_4ec8_aa69_6266ac6963d9.slice/crio-ace6f9a5ead90d15865161464dfdd3433a3d024f12565cd7e0c4e03b1d67a4bf WatchSource:0}: Error finding container ace6f9a5ead90d15865161464dfdd3433a3d024f12565cd7e0c4e03b1d67a4bf: Status 404 returned error can't find the container with id ace6f9a5ead90d15865161464dfdd3433a3d024f12565cd7e0c4e03b1d67a4bf Jan 22 05:49:14 crc kubenswrapper[4933]: I0122 05:49:14.005355 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7d4564854f-sxx4k" Jan 22 05:49:14 crc kubenswrapper[4933]: I0122 05:49:14.040637 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xmhw6" event={"ID":"480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d","Type":"ContainerStarted","Data":"eda1389c004388f74d26f6548a9539440547b1a26f39d0f4dc8f2c7356c2d42a"} Jan 22 05:49:14 crc kubenswrapper[4933]: I0122 05:49:14.046470 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-cluster-samples-operator_cluster-samples-operator-665b6dd947-9j8zq_6b02d39a-2e64-4035-abf2-99dcc7f32194/cluster-samples-operator/0.log" Jan 22 05:49:14 crc kubenswrapper[4933]: I0122 05:49:14.046558 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9j8zq" event={"ID":"6b02d39a-2e64-4035-abf2-99dcc7f32194","Type":"ContainerStarted","Data":"f980c769ee8e9754fd34d87ecbc4533f764c1ca1874dc76ca2ea984ea562cc7f"} Jan 22 05:49:14 crc kubenswrapper[4933]: I0122 05:49:14.060029 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"97d20e75-3505-42aa-af7e-3181eed5d8df","Type":"ContainerStarted","Data":"70560fcb782d9afcf327dccd433691551d36f95d8f1710190173239cc962db7e"} Jan 22 05:49:14 crc kubenswrapper[4933]: I0122 05:49:14.063310 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-579857c6c9-hc2bw" event={"ID":"82a649f3-2713-4ec8-aa69-6266ac6963d9","Type":"ContainerStarted","Data":"ace6f9a5ead90d15865161464dfdd3433a3d024f12565cd7e0c4e03b1d67a4bf"} Jan 22 05:49:14 crc kubenswrapper[4933]: I0122 05:49:14.065448 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tzlk9" event={"ID":"44595319-f5f2-4db3-9671-9c8680c2dfc7","Type":"ContainerDied","Data":"6d32637900b1a1c19ced0771c46d6d20697304dbe8d93d0a7d36597627c65d6d"} Jan 22 05:49:14 crc kubenswrapper[4933]: I0122 05:49:14.065486 4933 scope.go:117] "RemoveContainer" containerID="e30d8f80c85f5e587204ffacd864dfbbd2eaee05f711406e5fe051e7b167636b" Jan 22 05:49:14 crc kubenswrapper[4933]: I0122 05:49:14.065773 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tzlk9" Jan 22 05:49:14 crc kubenswrapper[4933]: E0122 05:49:14.075595 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-4748k" podUID="fb89501a-64c9-4b40-84cc-e18896cb53ec" Jan 22 05:49:14 crc kubenswrapper[4933]: E0122 05:49:14.075595 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-jcpdb" podUID="4c343d48-14c7-4862-ab0a-7851d4e0e72a" Jan 22 05:49:14 crc kubenswrapper[4933]: E0122 05:49:14.075783 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-jrc2t" podUID="731fa9cf-182b-4086-8696-ec971095cb38" Jan 22 05:49:14 crc kubenswrapper[4933]: I0122 05:49:14.163296 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-tzlk9"] Jan 22 05:49:14 crc kubenswrapper[4933]: I0122 05:49:14.168833 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-tzlk9"] Jan 22 05:49:14 crc kubenswrapper[4933]: I0122 05:49:14.424304 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7d4564854f-sxx4k"] Jan 22 05:49:14 crc kubenswrapper[4933]: I0122 05:49:14.497558 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44595319-f5f2-4db3-9671-9c8680c2dfc7" path="/var/lib/kubelet/pods/44595319-f5f2-4db3-9671-9c8680c2dfc7/volumes" Jan 22 05:49:15 crc kubenswrapper[4933]: I0122 05:49:15.073746 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7d4564854f-sxx4k" event={"ID":"184eb57b-71b7-4d7a-a3bf-7866425be9ce","Type":"ContainerStarted","Data":"02f569da839993006f7c87e1ba712c93c8734ff40c310b7b626741863619736a"} Jan 22 05:49:15 crc kubenswrapper[4933]: I0122 05:49:15.074324 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-7d4564854f-sxx4k" Jan 22 05:49:15 crc kubenswrapper[4933]: I0122 05:49:15.074355 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7d4564854f-sxx4k" event={"ID":"184eb57b-71b7-4d7a-a3bf-7866425be9ce","Type":"ContainerStarted","Data":"f27d6793df71d620b2ee7a821516d4e8dcc4e3b5f0bbf274d2d8d9d4228e895b"} Jan 22 05:49:15 crc kubenswrapper[4933]: I0122 05:49:15.075297 4933 generic.go:334] "Generic (PLEG): container finished" podID="97d20e75-3505-42aa-af7e-3181eed5d8df" containerID="111ab42e3197357f98d8fa8394f4789ef2e1124d3e7aebc8ada148677f8676fd" exitCode=0 Jan 22 05:49:15 crc kubenswrapper[4933]: I0122 05:49:15.075371 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" 
event={"ID":"97d20e75-3505-42aa-af7e-3181eed5d8df","Type":"ContainerDied","Data":"111ab42e3197357f98d8fa8394f4789ef2e1124d3e7aebc8ada148677f8676fd"} Jan 22 05:49:15 crc kubenswrapper[4933]: I0122 05:49:15.077639 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-579857c6c9-hc2bw" event={"ID":"82a649f3-2713-4ec8-aa69-6266ac6963d9","Type":"ContainerStarted","Data":"2771222ddaf40dd91c508f15a135dae908853d73999e9c998cba33a538cd998b"} Jan 22 05:49:15 crc kubenswrapper[4933]: I0122 05:49:15.078320 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-579857c6c9-hc2bw" podUID="82a649f3-2713-4ec8-aa69-6266ac6963d9" containerName="controller-manager" containerID="cri-o://2771222ddaf40dd91c508f15a135dae908853d73999e9c998cba33a538cd998b" gracePeriod=30 Jan 22 05:49:15 crc kubenswrapper[4933]: I0122 05:49:15.081422 4933 generic.go:334] "Generic (PLEG): container finished" podID="480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d" containerID="eda1389c004388f74d26f6548a9539440547b1a26f39d0f4dc8f2c7356c2d42a" exitCode=0 Jan 22 05:49:15 crc kubenswrapper[4933]: I0122 05:49:15.081476 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xmhw6" event={"ID":"480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d","Type":"ContainerDied","Data":"eda1389c004388f74d26f6548a9539440547b1a26f39d0f4dc8f2c7356c2d42a"} Jan 22 05:49:15 crc kubenswrapper[4933]: I0122 05:49:15.096771 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-7d4564854f-sxx4k" podStartSLOduration=11.096755865 podStartE2EDuration="11.096755865s" podCreationTimestamp="2026-01-22 05:49:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:49:15.095252352 +0000 UTC m=+202.932377745" watchObservedRunningTime="2026-01-22 05:49:15.096755865 +0000 UTC m=+202.933881218" Jan 22 05:49:15 crc kubenswrapper[4933]: I0122 05:49:15.097675 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-7d4564854f-sxx4k" Jan 22 05:49:15 crc kubenswrapper[4933]: I0122 05:49:15.155280 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-579857c6c9-hc2bw" podStartSLOduration=31.155259385 podStartE2EDuration="31.155259385s" podCreationTimestamp="2026-01-22 05:48:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:49:15.151572486 +0000 UTC m=+202.988697839" watchObservedRunningTime="2026-01-22 05:49:15.155259385 +0000 UTC m=+202.992384738" Jan 22 05:49:15 crc kubenswrapper[4933]: I0122 05:49:15.444119 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-579857c6c9-hc2bw" Jan 22 05:49:15 crc kubenswrapper[4933]: I0122 05:49:15.544099 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/82a649f3-2713-4ec8-aa69-6266ac6963d9-client-ca\") pod \"82a649f3-2713-4ec8-aa69-6266ac6963d9\" (UID: \"82a649f3-2713-4ec8-aa69-6266ac6963d9\") " Jan 22 05:49:15 crc kubenswrapper[4933]: I0122 05:49:15.544333 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82a649f3-2713-4ec8-aa69-6266ac6963d9-config\") pod \"82a649f3-2713-4ec8-aa69-6266ac6963d9\" (UID: \"82a649f3-2713-4ec8-aa69-6266ac6963d9\") " Jan 22 05:49:15 crc kubenswrapper[4933]: I0122 05:49:15.544408 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l9d5n\" (UniqueName: \"kubernetes.io/projected/82a649f3-2713-4ec8-aa69-6266ac6963d9-kube-api-access-l9d5n\") pod \"82a649f3-2713-4ec8-aa69-6266ac6963d9\" (UID: \"82a649f3-2713-4ec8-aa69-6266ac6963d9\") " Jan 22 05:49:15 crc kubenswrapper[4933]: I0122 05:49:15.544449 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/82a649f3-2713-4ec8-aa69-6266ac6963d9-serving-cert\") pod \"82a649f3-2713-4ec8-aa69-6266ac6963d9\" (UID: \"82a649f3-2713-4ec8-aa69-6266ac6963d9\") " Jan 22 05:49:15 crc kubenswrapper[4933]: I0122 05:49:15.544480 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/82a649f3-2713-4ec8-aa69-6266ac6963d9-proxy-ca-bundles\") pod \"82a649f3-2713-4ec8-aa69-6266ac6963d9\" (UID: \"82a649f3-2713-4ec8-aa69-6266ac6963d9\") " Jan 22 05:49:15 crc kubenswrapper[4933]: I0122 05:49:15.544899 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/82a649f3-2713-4ec8-aa69-6266ac6963d9-client-ca" (OuterVolumeSpecName: "client-ca") pod "82a649f3-2713-4ec8-aa69-6266ac6963d9" (UID: "82a649f3-2713-4ec8-aa69-6266ac6963d9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:49:15 crc kubenswrapper[4933]: I0122 05:49:15.545270 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/82a649f3-2713-4ec8-aa69-6266ac6963d9-config" (OuterVolumeSpecName: "config") pod "82a649f3-2713-4ec8-aa69-6266ac6963d9" (UID: "82a649f3-2713-4ec8-aa69-6266ac6963d9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:49:15 crc kubenswrapper[4933]: I0122 05:49:15.545368 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/82a649f3-2713-4ec8-aa69-6266ac6963d9-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "82a649f3-2713-4ec8-aa69-6266ac6963d9" (UID: "82a649f3-2713-4ec8-aa69-6266ac6963d9"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:49:15 crc kubenswrapper[4933]: I0122 05:49:15.550241 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82a649f3-2713-4ec8-aa69-6266ac6963d9-kube-api-access-l9d5n" (OuterVolumeSpecName: "kube-api-access-l9d5n") pod "82a649f3-2713-4ec8-aa69-6266ac6963d9" (UID: "82a649f3-2713-4ec8-aa69-6266ac6963d9"). InnerVolumeSpecName "kube-api-access-l9d5n". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:49:15 crc kubenswrapper[4933]: I0122 05:49:15.552191 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82a649f3-2713-4ec8-aa69-6266ac6963d9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "82a649f3-2713-4ec8-aa69-6266ac6963d9" (UID: "82a649f3-2713-4ec8-aa69-6266ac6963d9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:49:15 crc kubenswrapper[4933]: I0122 05:49:15.646721 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l9d5n\" (UniqueName: \"kubernetes.io/projected/82a649f3-2713-4ec8-aa69-6266ac6963d9-kube-api-access-l9d5n\") on node \"crc\" DevicePath \"\"" Jan 22 05:49:15 crc kubenswrapper[4933]: I0122 05:49:15.646786 4933 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/82a649f3-2713-4ec8-aa69-6266ac6963d9-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:49:15 crc kubenswrapper[4933]: I0122 05:49:15.646801 4933 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/82a649f3-2713-4ec8-aa69-6266ac6963d9-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 22 05:49:15 crc kubenswrapper[4933]: I0122 05:49:15.646814 4933 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/82a649f3-2713-4ec8-aa69-6266ac6963d9-client-ca\") on node \"crc\" DevicePath \"\"" Jan 22 05:49:15 crc kubenswrapper[4933]: I0122 05:49:15.646832 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82a649f3-2713-4ec8-aa69-6266ac6963d9-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:49:15 crc kubenswrapper[4933]: I0122 05:49:15.990275 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-79df4745c9-vdhq9"] Jan 22 05:49:15 crc kubenswrapper[4933]: E0122 05:49:15.990852 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82a649f3-2713-4ec8-aa69-6266ac6963d9" containerName="controller-manager" Jan 22 05:49:15 crc kubenswrapper[4933]: I0122 05:49:15.990869 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="82a649f3-2713-4ec8-aa69-6266ac6963d9" containerName="controller-manager" Jan 22 05:49:15 crc kubenswrapper[4933]: I0122 05:49:15.990983 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="82a649f3-2713-4ec8-aa69-6266ac6963d9" containerName="controller-manager" Jan 22 05:49:15 crc kubenswrapper[4933]: I0122 05:49:15.991426 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-79df4745c9-vdhq9" Jan 22 05:49:16 crc kubenswrapper[4933]: I0122 05:49:16.003800 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-79df4745c9-vdhq9"] Jan 22 05:49:16 crc kubenswrapper[4933]: I0122 05:49:16.091302 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xmhw6" event={"ID":"480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d","Type":"ContainerStarted","Data":"7e176891d2476c96df95c5ff737fa32f2b5e3eb921918aad3e10b42fedaf5029"} Jan 22 05:49:16 crc kubenswrapper[4933]: I0122 05:49:16.092809 4933 generic.go:334] "Generic (PLEG): container finished" podID="82a649f3-2713-4ec8-aa69-6266ac6963d9" containerID="2771222ddaf40dd91c508f15a135dae908853d73999e9c998cba33a538cd998b" exitCode=0 Jan 22 05:49:16 crc kubenswrapper[4933]: I0122 05:49:16.092912 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-579857c6c9-hc2bw" event={"ID":"82a649f3-2713-4ec8-aa69-6266ac6963d9","Type":"ContainerDied","Data":"2771222ddaf40dd91c508f15a135dae908853d73999e9c998cba33a538cd998b"} Jan 22 05:49:16 crc kubenswrapper[4933]: I0122 05:49:16.092911 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-579857c6c9-hc2bw" Jan 22 05:49:16 crc kubenswrapper[4933]: I0122 05:49:16.092982 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-579857c6c9-hc2bw" event={"ID":"82a649f3-2713-4ec8-aa69-6266ac6963d9","Type":"ContainerDied","Data":"ace6f9a5ead90d15865161464dfdd3433a3d024f12565cd7e0c4e03b1d67a4bf"} Jan 22 05:49:16 crc kubenswrapper[4933]: I0122 05:49:16.093030 4933 scope.go:117] "RemoveContainer" containerID="2771222ddaf40dd91c508f15a135dae908853d73999e9c998cba33a538cd998b" Jan 22 05:49:16 crc kubenswrapper[4933]: I0122 05:49:16.110867 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-xmhw6" podStartSLOduration=3.310362633 podStartE2EDuration="43.110851194s" podCreationTimestamp="2026-01-22 05:48:33 +0000 UTC" firstStartedPulling="2026-01-22 05:48:35.711712841 +0000 UTC m=+163.548838194" lastFinishedPulling="2026-01-22 05:49:15.512201392 +0000 UTC m=+203.349326755" observedRunningTime="2026-01-22 05:49:16.105926118 +0000 UTC m=+203.943051471" watchObservedRunningTime="2026-01-22 05:49:16.110851194 +0000 UTC m=+203.947976547" Jan 22 05:49:16 crc kubenswrapper[4933]: I0122 05:49:16.114108 4933 scope.go:117] "RemoveContainer" containerID="2771222ddaf40dd91c508f15a135dae908853d73999e9c998cba33a538cd998b" Jan 22 05:49:16 crc kubenswrapper[4933]: E0122 05:49:16.117757 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2771222ddaf40dd91c508f15a135dae908853d73999e9c998cba33a538cd998b\": container with ID starting with 2771222ddaf40dd91c508f15a135dae908853d73999e9c998cba33a538cd998b not found: ID does not exist" containerID="2771222ddaf40dd91c508f15a135dae908853d73999e9c998cba33a538cd998b" Jan 22 05:49:16 crc kubenswrapper[4933]: I0122 05:49:16.117797 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2771222ddaf40dd91c508f15a135dae908853d73999e9c998cba33a538cd998b"} err="failed to get container status \"2771222ddaf40dd91c508f15a135dae908853d73999e9c998cba33a538cd998b\": rpc error: 
code = NotFound desc = could not find container \"2771222ddaf40dd91c508f15a135dae908853d73999e9c998cba33a538cd998b\": container with ID starting with 2771222ddaf40dd91c508f15a135dae908853d73999e9c998cba33a538cd998b not found: ID does not exist" Jan 22 05:49:16 crc kubenswrapper[4933]: I0122 05:49:16.127653 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-579857c6c9-hc2bw"] Jan 22 05:49:16 crc kubenswrapper[4933]: I0122 05:49:16.131006 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-579857c6c9-hc2bw"] Jan 22 05:49:16 crc kubenswrapper[4933]: I0122 05:49:16.153837 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a93d1ec5-c71f-4a9b-994e-f3b8de60f12a-client-ca\") pod \"controller-manager-79df4745c9-vdhq9\" (UID: \"a93d1ec5-c71f-4a9b-994e-f3b8de60f12a\") " pod="openshift-controller-manager/controller-manager-79df4745c9-vdhq9" Jan 22 05:49:16 crc kubenswrapper[4933]: I0122 05:49:16.153902 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a93d1ec5-c71f-4a9b-994e-f3b8de60f12a-serving-cert\") pod \"controller-manager-79df4745c9-vdhq9\" (UID: \"a93d1ec5-c71f-4a9b-994e-f3b8de60f12a\") " pod="openshift-controller-manager/controller-manager-79df4745c9-vdhq9" Jan 22 05:49:16 crc kubenswrapper[4933]: I0122 05:49:16.153975 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a93d1ec5-c71f-4a9b-994e-f3b8de60f12a-config\") pod \"controller-manager-79df4745c9-vdhq9\" (UID: \"a93d1ec5-c71f-4a9b-994e-f3b8de60f12a\") " pod="openshift-controller-manager/controller-manager-79df4745c9-vdhq9" Jan 22 05:49:16 crc kubenswrapper[4933]: I0122 05:49:16.154084 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a93d1ec5-c71f-4a9b-994e-f3b8de60f12a-proxy-ca-bundles\") pod \"controller-manager-79df4745c9-vdhq9\" (UID: \"a93d1ec5-c71f-4a9b-994e-f3b8de60f12a\") " pod="openshift-controller-manager/controller-manager-79df4745c9-vdhq9" Jan 22 05:49:16 crc kubenswrapper[4933]: I0122 05:49:16.154165 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gfjkw\" (UniqueName: \"kubernetes.io/projected/a93d1ec5-c71f-4a9b-994e-f3b8de60f12a-kube-api-access-gfjkw\") pod \"controller-manager-79df4745c9-vdhq9\" (UID: \"a93d1ec5-c71f-4a9b-994e-f3b8de60f12a\") " pod="openshift-controller-manager/controller-manager-79df4745c9-vdhq9" Jan 22 05:49:16 crc kubenswrapper[4933]: I0122 05:49:16.254959 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a93d1ec5-c71f-4a9b-994e-f3b8de60f12a-config\") pod \"controller-manager-79df4745c9-vdhq9\" (UID: \"a93d1ec5-c71f-4a9b-994e-f3b8de60f12a\") " pod="openshift-controller-manager/controller-manager-79df4745c9-vdhq9" Jan 22 05:49:16 crc kubenswrapper[4933]: I0122 05:49:16.255010 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a93d1ec5-c71f-4a9b-994e-f3b8de60f12a-proxy-ca-bundles\") pod \"controller-manager-79df4745c9-vdhq9\" (UID: 
\"a93d1ec5-c71f-4a9b-994e-f3b8de60f12a\") " pod="openshift-controller-manager/controller-manager-79df4745c9-vdhq9" Jan 22 05:49:16 crc kubenswrapper[4933]: I0122 05:49:16.255043 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gfjkw\" (UniqueName: \"kubernetes.io/projected/a93d1ec5-c71f-4a9b-994e-f3b8de60f12a-kube-api-access-gfjkw\") pod \"controller-manager-79df4745c9-vdhq9\" (UID: \"a93d1ec5-c71f-4a9b-994e-f3b8de60f12a\") " pod="openshift-controller-manager/controller-manager-79df4745c9-vdhq9" Jan 22 05:49:16 crc kubenswrapper[4933]: I0122 05:49:16.255138 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a93d1ec5-c71f-4a9b-994e-f3b8de60f12a-client-ca\") pod \"controller-manager-79df4745c9-vdhq9\" (UID: \"a93d1ec5-c71f-4a9b-994e-f3b8de60f12a\") " pod="openshift-controller-manager/controller-manager-79df4745c9-vdhq9" Jan 22 05:49:16 crc kubenswrapper[4933]: I0122 05:49:16.255161 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a93d1ec5-c71f-4a9b-994e-f3b8de60f12a-serving-cert\") pod \"controller-manager-79df4745c9-vdhq9\" (UID: \"a93d1ec5-c71f-4a9b-994e-f3b8de60f12a\") " pod="openshift-controller-manager/controller-manager-79df4745c9-vdhq9" Jan 22 05:49:16 crc kubenswrapper[4933]: I0122 05:49:16.257322 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a93d1ec5-c71f-4a9b-994e-f3b8de60f12a-config\") pod \"controller-manager-79df4745c9-vdhq9\" (UID: \"a93d1ec5-c71f-4a9b-994e-f3b8de60f12a\") " pod="openshift-controller-manager/controller-manager-79df4745c9-vdhq9" Jan 22 05:49:16 crc kubenswrapper[4933]: I0122 05:49:16.257412 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a93d1ec5-c71f-4a9b-994e-f3b8de60f12a-proxy-ca-bundles\") pod \"controller-manager-79df4745c9-vdhq9\" (UID: \"a93d1ec5-c71f-4a9b-994e-f3b8de60f12a\") " pod="openshift-controller-manager/controller-manager-79df4745c9-vdhq9" Jan 22 05:49:16 crc kubenswrapper[4933]: I0122 05:49:16.257591 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a93d1ec5-c71f-4a9b-994e-f3b8de60f12a-client-ca\") pod \"controller-manager-79df4745c9-vdhq9\" (UID: \"a93d1ec5-c71f-4a9b-994e-f3b8de60f12a\") " pod="openshift-controller-manager/controller-manager-79df4745c9-vdhq9" Jan 22 05:49:16 crc kubenswrapper[4933]: I0122 05:49:16.276705 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gfjkw\" (UniqueName: \"kubernetes.io/projected/a93d1ec5-c71f-4a9b-994e-f3b8de60f12a-kube-api-access-gfjkw\") pod \"controller-manager-79df4745c9-vdhq9\" (UID: \"a93d1ec5-c71f-4a9b-994e-f3b8de60f12a\") " pod="openshift-controller-manager/controller-manager-79df4745c9-vdhq9" Jan 22 05:49:16 crc kubenswrapper[4933]: I0122 05:49:16.279401 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a93d1ec5-c71f-4a9b-994e-f3b8de60f12a-serving-cert\") pod \"controller-manager-79df4745c9-vdhq9\" (UID: \"a93d1ec5-c71f-4a9b-994e-f3b8de60f12a\") " pod="openshift-controller-manager/controller-manager-79df4745c9-vdhq9" Jan 22 05:49:16 crc kubenswrapper[4933]: I0122 05:49:16.321115 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-79df4745c9-vdhq9" Jan 22 05:49:16 crc kubenswrapper[4933]: I0122 05:49:16.324082 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 22 05:49:16 crc kubenswrapper[4933]: I0122 05:49:16.458138 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/97d20e75-3505-42aa-af7e-3181eed5d8df-kube-api-access\") pod \"97d20e75-3505-42aa-af7e-3181eed5d8df\" (UID: \"97d20e75-3505-42aa-af7e-3181eed5d8df\") " Jan 22 05:49:16 crc kubenswrapper[4933]: I0122 05:49:16.458221 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/97d20e75-3505-42aa-af7e-3181eed5d8df-kubelet-dir\") pod \"97d20e75-3505-42aa-af7e-3181eed5d8df\" (UID: \"97d20e75-3505-42aa-af7e-3181eed5d8df\") " Jan 22 05:49:16 crc kubenswrapper[4933]: I0122 05:49:16.458623 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/97d20e75-3505-42aa-af7e-3181eed5d8df-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "97d20e75-3505-42aa-af7e-3181eed5d8df" (UID: "97d20e75-3505-42aa-af7e-3181eed5d8df"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:49:16 crc kubenswrapper[4933]: I0122 05:49:16.464413 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/97d20e75-3505-42aa-af7e-3181eed5d8df-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "97d20e75-3505-42aa-af7e-3181eed5d8df" (UID: "97d20e75-3505-42aa-af7e-3181eed5d8df"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:49:16 crc kubenswrapper[4933]: I0122 05:49:16.502665 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="82a649f3-2713-4ec8-aa69-6266ac6963d9" path="/var/lib/kubelet/pods/82a649f3-2713-4ec8-aa69-6266ac6963d9/volumes" Jan 22 05:49:16 crc kubenswrapper[4933]: I0122 05:49:16.509532 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-79df4745c9-vdhq9"] Jan 22 05:49:16 crc kubenswrapper[4933]: I0122 05:49:16.560184 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/97d20e75-3505-42aa-af7e-3181eed5d8df-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 22 05:49:16 crc kubenswrapper[4933]: I0122 05:49:16.560497 4933 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/97d20e75-3505-42aa-af7e-3181eed5d8df-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 22 05:49:17 crc kubenswrapper[4933]: I0122 05:49:17.100988 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"97d20e75-3505-42aa-af7e-3181eed5d8df","Type":"ContainerDied","Data":"70560fcb782d9afcf327dccd433691551d36f95d8f1710190173239cc962db7e"} Jan 22 05:49:17 crc kubenswrapper[4933]: I0122 05:49:17.101534 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="70560fcb782d9afcf327dccd433691551d36f95d8f1710190173239cc962db7e" Jan 22 05:49:17 crc kubenswrapper[4933]: I0122 05:49:17.101198 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 22 05:49:17 crc kubenswrapper[4933]: I0122 05:49:17.102268 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-79df4745c9-vdhq9" event={"ID":"a93d1ec5-c71f-4a9b-994e-f3b8de60f12a","Type":"ContainerStarted","Data":"a7647b42d6a1caaf92d46216fa89085113b26a5f6107e4a8677e9fcf61761e39"} Jan 22 05:49:17 crc kubenswrapper[4933]: I0122 05:49:17.102322 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-79df4745c9-vdhq9" event={"ID":"a93d1ec5-c71f-4a9b-994e-f3b8de60f12a","Type":"ContainerStarted","Data":"25e346971e1e795f7ca6bed81eebf16435a979d1824cfb3cdc7ce4ab697fe597"} Jan 22 05:49:17 crc kubenswrapper[4933]: I0122 05:49:17.125384 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-79df4745c9-vdhq9" podStartSLOduration=13.125364402 podStartE2EDuration="13.125364402s" podCreationTimestamp="2026-01-22 05:49:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:49:17.12250878 +0000 UTC m=+204.959634143" watchObservedRunningTime="2026-01-22 05:49:17.125364402 +0000 UTC m=+204.962489755" Jan 22 05:49:17 crc kubenswrapper[4933]: I0122 05:49:17.137755 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 22 05:49:17 crc kubenswrapper[4933]: E0122 05:49:17.137986 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97d20e75-3505-42aa-af7e-3181eed5d8df" containerName="pruner" Jan 22 05:49:17 crc kubenswrapper[4933]: I0122 05:49:17.138003 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="97d20e75-3505-42aa-af7e-3181eed5d8df" containerName="pruner" Jan 22 05:49:17 crc kubenswrapper[4933]: I0122 05:49:17.138149 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="97d20e75-3505-42aa-af7e-3181eed5d8df" containerName="pruner" Jan 22 05:49:17 crc kubenswrapper[4933]: I0122 05:49:17.138498 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 22 05:49:17 crc kubenswrapper[4933]: I0122 05:49:17.143175 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Jan 22 05:49:17 crc kubenswrapper[4933]: I0122 05:49:17.143188 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Jan 22 05:49:17 crc kubenswrapper[4933]: I0122 05:49:17.151345 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 22 05:49:17 crc kubenswrapper[4933]: I0122 05:49:17.271787 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/520b2813-3aaf-46c7-9592-f8a8be0f9348-kubelet-dir\") pod \"installer-9-crc\" (UID: \"520b2813-3aaf-46c7-9592-f8a8be0f9348\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 22 05:49:17 crc kubenswrapper[4933]: I0122 05:49:17.271856 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/520b2813-3aaf-46c7-9592-f8a8be0f9348-kube-api-access\") pod \"installer-9-crc\" (UID: \"520b2813-3aaf-46c7-9592-f8a8be0f9348\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 22 05:49:17 crc kubenswrapper[4933]: I0122 05:49:17.271999 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/520b2813-3aaf-46c7-9592-f8a8be0f9348-var-lock\") pod \"installer-9-crc\" (UID: \"520b2813-3aaf-46c7-9592-f8a8be0f9348\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 22 05:49:17 crc kubenswrapper[4933]: I0122 05:49:17.373407 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/520b2813-3aaf-46c7-9592-f8a8be0f9348-kubelet-dir\") pod \"installer-9-crc\" (UID: \"520b2813-3aaf-46c7-9592-f8a8be0f9348\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 22 05:49:17 crc kubenswrapper[4933]: I0122 05:49:17.373462 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/520b2813-3aaf-46c7-9592-f8a8be0f9348-kubelet-dir\") pod \"installer-9-crc\" (UID: \"520b2813-3aaf-46c7-9592-f8a8be0f9348\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 22 05:49:17 crc kubenswrapper[4933]: I0122 05:49:17.373489 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/520b2813-3aaf-46c7-9592-f8a8be0f9348-kube-api-access\") pod \"installer-9-crc\" (UID: \"520b2813-3aaf-46c7-9592-f8a8be0f9348\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 22 05:49:17 crc kubenswrapper[4933]: I0122 05:49:17.373627 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/520b2813-3aaf-46c7-9592-f8a8be0f9348-var-lock\") pod \"installer-9-crc\" (UID: \"520b2813-3aaf-46c7-9592-f8a8be0f9348\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 22 05:49:17 crc kubenswrapper[4933]: I0122 05:49:17.373793 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/520b2813-3aaf-46c7-9592-f8a8be0f9348-var-lock\") pod \"installer-9-crc\" (UID: 
\"520b2813-3aaf-46c7-9592-f8a8be0f9348\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 22 05:49:17 crc kubenswrapper[4933]: I0122 05:49:17.392096 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/520b2813-3aaf-46c7-9592-f8a8be0f9348-kube-api-access\") pod \"installer-9-crc\" (UID: \"520b2813-3aaf-46c7-9592-f8a8be0f9348\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 22 05:49:17 crc kubenswrapper[4933]: I0122 05:49:17.455387 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 22 05:49:17 crc kubenswrapper[4933]: I0122 05:49:17.879057 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 22 05:49:17 crc kubenswrapper[4933]: W0122 05:49:17.897665 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod520b2813_3aaf_46c7_9592_f8a8be0f9348.slice/crio-b42b6bf777c6b4f4420e7a693f18b8d4e6b2000ef07802fe62b727b59198555c WatchSource:0}: Error finding container b42b6bf777c6b4f4420e7a693f18b8d4e6b2000ef07802fe62b727b59198555c: Status 404 returned error can't find the container with id b42b6bf777c6b4f4420e7a693f18b8d4e6b2000ef07802fe62b727b59198555c Jan 22 05:49:18 crc kubenswrapper[4933]: I0122 05:49:18.107347 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"520b2813-3aaf-46c7-9592-f8a8be0f9348","Type":"ContainerStarted","Data":"b42b6bf777c6b4f4420e7a693f18b8d4e6b2000ef07802fe62b727b59198555c"} Jan 22 05:49:18 crc kubenswrapper[4933]: I0122 05:49:18.107590 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-79df4745c9-vdhq9" Jan 22 05:49:18 crc kubenswrapper[4933]: I0122 05:49:18.114596 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-79df4745c9-vdhq9" Jan 22 05:49:19 crc kubenswrapper[4933]: I0122 05:49:19.114223 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"520b2813-3aaf-46c7-9592-f8a8be0f9348","Type":"ContainerStarted","Data":"235bfc9f43ca57736b6d0f8b326f71afe935d8e82f1ec74f6f22f78ac099f3e8"} Jan 22 05:49:19 crc kubenswrapper[4933]: I0122 05:49:19.129136 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=2.129116254 podStartE2EDuration="2.129116254s" podCreationTimestamp="2026-01-22 05:49:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:49:19.128496881 +0000 UTC m=+206.965622234" watchObservedRunningTime="2026-01-22 05:49:19.129116254 +0000 UTC m=+206.966241617" Jan 22 05:49:23 crc kubenswrapper[4933]: I0122 05:49:23.910171 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-xmhw6" Jan 22 05:49:23 crc kubenswrapper[4933]: I0122 05:49:23.910508 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-xmhw6" Jan 22 05:49:23 crc kubenswrapper[4933]: I0122 05:49:23.990503 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-xmhw6" Jan 22 05:49:24 crc kubenswrapper[4933]: I0122 
05:49:24.214378 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-xmhw6" Jan 22 05:49:30 crc kubenswrapper[4933]: I0122 05:49:30.184216 4933 generic.go:334] "Generic (PLEG): container finished" podID="50afe38c-f6b1-422d-9abd-b3a62bc7c24d" containerID="c9031d798e82a5fa3599fc702b438c2e8b1ffd3d3d8d8db2a3afe44acd20edc1" exitCode=0 Jan 22 05:49:30 crc kubenswrapper[4933]: I0122 05:49:30.184366 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wm42f" event={"ID":"50afe38c-f6b1-422d-9abd-b3a62bc7c24d","Type":"ContainerDied","Data":"c9031d798e82a5fa3599fc702b438c2e8b1ffd3d3d8d8db2a3afe44acd20edc1"} Jan 22 05:49:30 crc kubenswrapper[4933]: I0122 05:49:30.191419 4933 generic.go:334] "Generic (PLEG): container finished" podID="0992ece5-d7dd-40c1-adc4-12711a7b3b69" containerID="567daa808d7de70d9ba86adefc69c207e7ad743f7887e8965ef3eda716462bf3" exitCode=0 Jan 22 05:49:30 crc kubenswrapper[4933]: I0122 05:49:30.191510 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6cws8" event={"ID":"0992ece5-d7dd-40c1-adc4-12711a7b3b69","Type":"ContainerDied","Data":"567daa808d7de70d9ba86adefc69c207e7ad743f7887e8965ef3eda716462bf3"} Jan 22 05:49:30 crc kubenswrapper[4933]: I0122 05:49:30.195058 4933 generic.go:334] "Generic (PLEG): container finished" podID="ec6f4762-c94a-4c73-a84f-469729ae7bae" containerID="b8c7dcef9a346edddb43b8a85eae7c2faadaa6d5aea7bdcdffb0e5c145835f3f" exitCode=0 Jan 22 05:49:30 crc kubenswrapper[4933]: I0122 05:49:30.195452 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qxtfm" event={"ID":"ec6f4762-c94a-4c73-a84f-469729ae7bae","Type":"ContainerDied","Data":"b8c7dcef9a346edddb43b8a85eae7c2faadaa6d5aea7bdcdffb0e5c145835f3f"} Jan 22 05:49:30 crc kubenswrapper[4933]: I0122 05:49:30.198330 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jcpdb" event={"ID":"4c343d48-14c7-4862-ab0a-7851d4e0e72a","Type":"ContainerStarted","Data":"a4a6a7dbe6e4a3fd1f1a9286a2ffe87c9e796865e88e22307b69863ef9e258ef"} Jan 22 05:49:30 crc kubenswrapper[4933]: I0122 05:49:30.202934 4933 generic.go:334] "Generic (PLEG): container finished" podID="a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f" containerID="715aa553a2abde76fe5858b77b9b86abc29ca28ab4f25da9e8dc7a1f079a1dd8" exitCode=0 Jan 22 05:49:30 crc kubenswrapper[4933]: I0122 05:49:30.202957 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xtxwj" event={"ID":"a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f","Type":"ContainerDied","Data":"715aa553a2abde76fe5858b77b9b86abc29ca28ab4f25da9e8dc7a1f079a1dd8"} Jan 22 05:49:31 crc kubenswrapper[4933]: I0122 05:49:31.210107 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xtxwj" event={"ID":"a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f","Type":"ContainerStarted","Data":"c3514de8978030c12e5592fa0edefbcf4fabf23e091cd3dfb8a0d3830195e9fc"} Jan 22 05:49:31 crc kubenswrapper[4933]: I0122 05:49:31.212911 4933 generic.go:334] "Generic (PLEG): container finished" podID="731fa9cf-182b-4086-8696-ec971095cb38" containerID="b2f47ae385e1cc4aecf7a2abe8e44a6e27e442b38d44110ab854dd2030619395" exitCode=0 Jan 22 05:49:31 crc kubenswrapper[4933]: I0122 05:49:31.213119 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jrc2t" 
event={"ID":"731fa9cf-182b-4086-8696-ec971095cb38","Type":"ContainerDied","Data":"b2f47ae385e1cc4aecf7a2abe8e44a6e27e442b38d44110ab854dd2030619395"} Jan 22 05:49:31 crc kubenswrapper[4933]: I0122 05:49:31.217067 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wm42f" event={"ID":"50afe38c-f6b1-422d-9abd-b3a62bc7c24d","Type":"ContainerStarted","Data":"28d7966e9e748d44d12e3c1e22d24651fe34c7d9f4237da2fc42be30652415e6"} Jan 22 05:49:31 crc kubenswrapper[4933]: I0122 05:49:31.220300 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6cws8" event={"ID":"0992ece5-d7dd-40c1-adc4-12711a7b3b69","Type":"ContainerStarted","Data":"4851553e5c57ce9097722f4c14d6ea50a54af0d57ca603b5f5b44985a51fa584"} Jan 22 05:49:31 crc kubenswrapper[4933]: I0122 05:49:31.222361 4933 generic.go:334] "Generic (PLEG): container finished" podID="fb89501a-64c9-4b40-84cc-e18896cb53ec" containerID="87d30d4c36286b302611a006df0aa2cb5bfc2a61353d30d26b5d40bd09bdd931" exitCode=0 Jan 22 05:49:31 crc kubenswrapper[4933]: I0122 05:49:31.222412 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4748k" event={"ID":"fb89501a-64c9-4b40-84cc-e18896cb53ec","Type":"ContainerDied","Data":"87d30d4c36286b302611a006df0aa2cb5bfc2a61353d30d26b5d40bd09bdd931"} Jan 22 05:49:31 crc kubenswrapper[4933]: I0122 05:49:31.229124 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qxtfm" event={"ID":"ec6f4762-c94a-4c73-a84f-469729ae7bae","Type":"ContainerStarted","Data":"af68a8ac02eef998264dc15be2dc378a128bbf6fb1cbb9ebe692f30ccb8f7bb1"} Jan 22 05:49:31 crc kubenswrapper[4933]: I0122 05:49:31.230866 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-xtxwj" podStartSLOduration=3.305745214 podStartE2EDuration="59.230846301s" podCreationTimestamp="2026-01-22 05:48:32 +0000 UTC" firstStartedPulling="2026-01-22 05:48:34.691746771 +0000 UTC m=+162.528872134" lastFinishedPulling="2026-01-22 05:49:30.616847868 +0000 UTC m=+218.453973221" observedRunningTime="2026-01-22 05:49:31.228670504 +0000 UTC m=+219.065795857" watchObservedRunningTime="2026-01-22 05:49:31.230846301 +0000 UTC m=+219.067971654" Jan 22 05:49:31 crc kubenswrapper[4933]: I0122 05:49:31.233991 4933 generic.go:334] "Generic (PLEG): container finished" podID="4c343d48-14c7-4862-ab0a-7851d4e0e72a" containerID="a4a6a7dbe6e4a3fd1f1a9286a2ffe87c9e796865e88e22307b69863ef9e258ef" exitCode=0 Jan 22 05:49:31 crc kubenswrapper[4933]: I0122 05:49:31.234022 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jcpdb" event={"ID":"4c343d48-14c7-4862-ab0a-7851d4e0e72a","Type":"ContainerDied","Data":"a4a6a7dbe6e4a3fd1f1a9286a2ffe87c9e796865e88e22307b69863ef9e258ef"} Jan 22 05:49:31 crc kubenswrapper[4933]: I0122 05:49:31.234048 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jcpdb" event={"ID":"4c343d48-14c7-4862-ab0a-7851d4e0e72a","Type":"ContainerStarted","Data":"21079466bf7b8a1df4fe564f4f8e3cb054f0591b1723e899a0eddcee7e88c730"} Jan 22 05:49:31 crc kubenswrapper[4933]: I0122 05:49:31.270645 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-6cws8" podStartSLOduration=2.184101153 podStartE2EDuration="59.270624727s" podCreationTimestamp="2026-01-22 05:48:32 +0000 UTC" 
firstStartedPulling="2026-01-22 05:48:33.615782074 +0000 UTC m=+161.452907427" lastFinishedPulling="2026-01-22 05:49:30.702305648 +0000 UTC m=+218.539431001" observedRunningTime="2026-01-22 05:49:31.250674188 +0000 UTC m=+219.087799541" watchObservedRunningTime="2026-01-22 05:49:31.270624727 +0000 UTC m=+219.107750080" Jan 22 05:49:31 crc kubenswrapper[4933]: I0122 05:49:31.291035 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wm42f" podStartSLOduration=3.238714002 podStartE2EDuration="58.291019907s" podCreationTimestamp="2026-01-22 05:48:33 +0000 UTC" firstStartedPulling="2026-01-22 05:48:35.690593858 +0000 UTC m=+163.527719211" lastFinishedPulling="2026-01-22 05:49:30.742899763 +0000 UTC m=+218.580025116" observedRunningTime="2026-01-22 05:49:31.289185677 +0000 UTC m=+219.126311040" watchObservedRunningTime="2026-01-22 05:49:31.291019907 +0000 UTC m=+219.128145260" Jan 22 05:49:31 crc kubenswrapper[4933]: I0122 05:49:31.328422 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-qxtfm" podStartSLOduration=3.241776469 podStartE2EDuration="1m1.328406452s" podCreationTimestamp="2026-01-22 05:48:30 +0000 UTC" firstStartedPulling="2026-01-22 05:48:32.586322433 +0000 UTC m=+160.423447786" lastFinishedPulling="2026-01-22 05:49:30.672952416 +0000 UTC m=+218.510077769" observedRunningTime="2026-01-22 05:49:31.327346379 +0000 UTC m=+219.164471732" watchObservedRunningTime="2026-01-22 05:49:31.328406452 +0000 UTC m=+219.165531805" Jan 22 05:49:32 crc kubenswrapper[4933]: I0122 05:49:32.241391 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4748k" event={"ID":"fb89501a-64c9-4b40-84cc-e18896cb53ec","Type":"ContainerStarted","Data":"3dccfe84321f2e56719d65746e4198e9a2b41a0d48cb1e3b5fc193c49de4943b"} Jan 22 05:49:32 crc kubenswrapper[4933]: I0122 05:49:32.243625 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jrc2t" event={"ID":"731fa9cf-182b-4086-8696-ec971095cb38","Type":"ContainerStarted","Data":"28b73509fabe11a2c0e49a768463c846ebc5d5a14e82a1a07c1ce57d6cc8fc2b"} Jan 22 05:49:32 crc kubenswrapper[4933]: I0122 05:49:32.264371 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-jcpdb" podStartSLOduration=4.181534859 podStartE2EDuration="1m2.264356618s" podCreationTimestamp="2026-01-22 05:48:30 +0000 UTC" firstStartedPulling="2026-01-22 05:48:32.554189443 +0000 UTC m=+160.391314796" lastFinishedPulling="2026-01-22 05:49:30.637011202 +0000 UTC m=+218.474136555" observedRunningTime="2026-01-22 05:49:31.34734635 +0000 UTC m=+219.184471723" watchObservedRunningTime="2026-01-22 05:49:32.264356618 +0000 UTC m=+220.101481971" Jan 22 05:49:32 crc kubenswrapper[4933]: I0122 05:49:32.266277 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-4748k" podStartSLOduration=3.236743535 podStartE2EDuration="1m2.266268859s" podCreationTimestamp="2026-01-22 05:48:30 +0000 UTC" firstStartedPulling="2026-01-22 05:48:32.600429002 +0000 UTC m=+160.437554355" lastFinishedPulling="2026-01-22 05:49:31.629954316 +0000 UTC m=+219.467079679" observedRunningTime="2026-01-22 05:49:32.263483589 +0000 UTC m=+220.100608942" watchObservedRunningTime="2026-01-22 05:49:32.266268859 +0000 UTC m=+220.103394212" Jan 22 05:49:32 crc kubenswrapper[4933]: I0122 
05:49:32.283417 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-jrc2t" podStartSLOduration=3.208534759 podStartE2EDuration="1m2.283384637s" podCreationTimestamp="2026-01-22 05:48:30 +0000 UTC" firstStartedPulling="2026-01-22 05:48:32.562820724 +0000 UTC m=+160.399946067" lastFinishedPulling="2026-01-22 05:49:31.637670582 +0000 UTC m=+219.474795945" observedRunningTime="2026-01-22 05:49:32.276623493 +0000 UTC m=+220.113748846" watchObservedRunningTime="2026-01-22 05:49:32.283384637 +0000 UTC m=+220.120510030" Jan 22 05:49:32 crc kubenswrapper[4933]: I0122 05:49:32.658718 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-6cws8" Jan 22 05:49:32 crc kubenswrapper[4933]: I0122 05:49:32.659284 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-6cws8" Jan 22 05:49:33 crc kubenswrapper[4933]: I0122 05:49:33.082681 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-xtxwj" Jan 22 05:49:33 crc kubenswrapper[4933]: I0122 05:49:33.082762 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-xtxwj" Jan 22 05:49:33 crc kubenswrapper[4933]: I0122 05:49:33.121262 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-xtxwj" Jan 22 05:49:33 crc kubenswrapper[4933]: I0122 05:49:33.714804 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-6cws8" podUID="0992ece5-d7dd-40c1-adc4-12711a7b3b69" containerName="registry-server" probeResult="failure" output=< Jan 22 05:49:33 crc kubenswrapper[4933]: timeout: failed to connect service ":50051" within 1s Jan 22 05:49:33 crc kubenswrapper[4933]: > Jan 22 05:49:34 crc kubenswrapper[4933]: I0122 05:49:34.319543 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wm42f" Jan 22 05:49:34 crc kubenswrapper[4933]: I0122 05:49:34.319587 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-wm42f" Jan 22 05:49:35 crc kubenswrapper[4933]: I0122 05:49:35.356103 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wm42f" podUID="50afe38c-f6b1-422d-9abd-b3a62bc7c24d" containerName="registry-server" probeResult="failure" output=< Jan 22 05:49:35 crc kubenswrapper[4933]: timeout: failed to connect service ":50051" within 1s Jan 22 05:49:35 crc kubenswrapper[4933]: > Jan 22 05:49:40 crc kubenswrapper[4933]: I0122 05:49:40.676367 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-qxtfm" Jan 22 05:49:40 crc kubenswrapper[4933]: I0122 05:49:40.676410 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-qxtfm" Jan 22 05:49:40 crc kubenswrapper[4933]: I0122 05:49:40.743850 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-qxtfm" Jan 22 05:49:40 crc kubenswrapper[4933]: I0122 05:49:40.931153 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-jcpdb" Jan 22 05:49:40 crc kubenswrapper[4933]: I0122 
05:49:40.931278 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-jcpdb" Jan 22 05:49:40 crc kubenswrapper[4933]: I0122 05:49:40.943494 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 05:49:40 crc kubenswrapper[4933]: I0122 05:49:40.943550 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 05:49:40 crc kubenswrapper[4933]: I0122 05:49:40.943599 4933 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" Jan 22 05:49:40 crc kubenswrapper[4933]: I0122 05:49:40.944240 4933 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"99fe3d10aa7ee2f1de1f5f23ed8c4c7ac9efe69ce9e366f02e2e8efca2c017e0"} pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 05:49:40 crc kubenswrapper[4933]: I0122 05:49:40.944287 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" containerID="cri-o://99fe3d10aa7ee2f1de1f5f23ed8c4c7ac9efe69ce9e366f02e2e8efca2c017e0" gracePeriod=600 Jan 22 05:49:40 crc kubenswrapper[4933]: I0122 05:49:40.971650 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-jcpdb" Jan 22 05:49:41 crc kubenswrapper[4933]: I0122 05:49:41.047312 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-jrc2t" Jan 22 05:49:41 crc kubenswrapper[4933]: I0122 05:49:41.047363 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-jrc2t" Jan 22 05:49:41 crc kubenswrapper[4933]: I0122 05:49:41.119732 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-jrc2t" Jan 22 05:49:41 crc kubenswrapper[4933]: I0122 05:49:41.284046 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-4748k" Jan 22 05:49:41 crc kubenswrapper[4933]: I0122 05:49:41.284311 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-4748k" Jan 22 05:49:41 crc kubenswrapper[4933]: I0122 05:49:41.328595 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-4748k" Jan 22 05:49:41 crc kubenswrapper[4933]: I0122 05:49:41.358539 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-qxtfm" Jan 22 05:49:41 crc kubenswrapper[4933]: I0122 05:49:41.364139 4933 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openshift-marketplace/community-operators-jrc2t" Jan 22 05:49:41 crc kubenswrapper[4933]: I0122 05:49:41.366582 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-jcpdb" Jan 22 05:49:42 crc kubenswrapper[4933]: I0122 05:49:42.377517 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-4748k" Jan 22 05:49:42 crc kubenswrapper[4933]: I0122 05:49:42.574746 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jrc2t"] Jan 22 05:49:42 crc kubenswrapper[4933]: I0122 05:49:42.703389 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-6cws8" Jan 22 05:49:42 crc kubenswrapper[4933]: I0122 05:49:42.763518 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-6cws8" Jan 22 05:49:43 crc kubenswrapper[4933]: I0122 05:49:43.136947 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-xtxwj" Jan 22 05:49:43 crc kubenswrapper[4933]: I0122 05:49:43.320923 4933 generic.go:334] "Generic (PLEG): container finished" podID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerID="99fe3d10aa7ee2f1de1f5f23ed8c4c7ac9efe69ce9e366f02e2e8efca2c017e0" exitCode=0 Jan 22 05:49:43 crc kubenswrapper[4933]: I0122 05:49:43.321164 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerDied","Data":"99fe3d10aa7ee2f1de1f5f23ed8c4c7ac9efe69ce9e366f02e2e8efca2c017e0"} Jan 22 05:49:43 crc kubenswrapper[4933]: I0122 05:49:43.321243 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerStarted","Data":"f2ae10bf6f7d37b27e86b0e464e488c6851c87577378f41c92c7954310583b11"} Jan 22 05:49:43 crc kubenswrapper[4933]: I0122 05:49:43.321364 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-jrc2t" podUID="731fa9cf-182b-4086-8696-ec971095cb38" containerName="registry-server" containerID="cri-o://28b73509fabe11a2c0e49a768463c846ebc5d5a14e82a1a07c1ce57d6cc8fc2b" gracePeriod=2 Jan 22 05:49:43 crc kubenswrapper[4933]: I0122 05:49:43.573846 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4748k"] Jan 22 05:49:43 crc kubenswrapper[4933]: I0122 05:49:43.866631 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-jrc2t" Jan 22 05:49:44 crc kubenswrapper[4933]: I0122 05:49:44.067563 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/731fa9cf-182b-4086-8696-ec971095cb38-utilities\") pod \"731fa9cf-182b-4086-8696-ec971095cb38\" (UID: \"731fa9cf-182b-4086-8696-ec971095cb38\") " Jan 22 05:49:44 crc kubenswrapper[4933]: I0122 05:49:44.067629 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/731fa9cf-182b-4086-8696-ec971095cb38-catalog-content\") pod \"731fa9cf-182b-4086-8696-ec971095cb38\" (UID: \"731fa9cf-182b-4086-8696-ec971095cb38\") " Jan 22 05:49:44 crc kubenswrapper[4933]: I0122 05:49:44.067661 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6skvb\" (UniqueName: \"kubernetes.io/projected/731fa9cf-182b-4086-8696-ec971095cb38-kube-api-access-6skvb\") pod \"731fa9cf-182b-4086-8696-ec971095cb38\" (UID: \"731fa9cf-182b-4086-8696-ec971095cb38\") " Jan 22 05:49:44 crc kubenswrapper[4933]: I0122 05:49:44.069699 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/731fa9cf-182b-4086-8696-ec971095cb38-utilities" (OuterVolumeSpecName: "utilities") pod "731fa9cf-182b-4086-8696-ec971095cb38" (UID: "731fa9cf-182b-4086-8696-ec971095cb38"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:49:44 crc kubenswrapper[4933]: I0122 05:49:44.076738 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/731fa9cf-182b-4086-8696-ec971095cb38-kube-api-access-6skvb" (OuterVolumeSpecName: "kube-api-access-6skvb") pod "731fa9cf-182b-4086-8696-ec971095cb38" (UID: "731fa9cf-182b-4086-8696-ec971095cb38"). InnerVolumeSpecName "kube-api-access-6skvb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:49:44 crc kubenswrapper[4933]: I0122 05:49:44.132092 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/731fa9cf-182b-4086-8696-ec971095cb38-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "731fa9cf-182b-4086-8696-ec971095cb38" (UID: "731fa9cf-182b-4086-8696-ec971095cb38"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:49:44 crc kubenswrapper[4933]: I0122 05:49:44.169415 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/731fa9cf-182b-4086-8696-ec971095cb38-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 05:49:44 crc kubenswrapper[4933]: I0122 05:49:44.169466 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/731fa9cf-182b-4086-8696-ec971095cb38-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 05:49:44 crc kubenswrapper[4933]: I0122 05:49:44.169481 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6skvb\" (UniqueName: \"kubernetes.io/projected/731fa9cf-182b-4086-8696-ec971095cb38-kube-api-access-6skvb\") on node \"crc\" DevicePath \"\"" Jan 22 05:49:44 crc kubenswrapper[4933]: I0122 05:49:44.332815 4933 generic.go:334] "Generic (PLEG): container finished" podID="731fa9cf-182b-4086-8696-ec971095cb38" containerID="28b73509fabe11a2c0e49a768463c846ebc5d5a14e82a1a07c1ce57d6cc8fc2b" exitCode=0 Jan 22 05:49:44 crc kubenswrapper[4933]: I0122 05:49:44.332940 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jrc2t" Jan 22 05:49:44 crc kubenswrapper[4933]: I0122 05:49:44.332936 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jrc2t" event={"ID":"731fa9cf-182b-4086-8696-ec971095cb38","Type":"ContainerDied","Data":"28b73509fabe11a2c0e49a768463c846ebc5d5a14e82a1a07c1ce57d6cc8fc2b"} Jan 22 05:49:44 crc kubenswrapper[4933]: I0122 05:49:44.333015 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jrc2t" event={"ID":"731fa9cf-182b-4086-8696-ec971095cb38","Type":"ContainerDied","Data":"aeb8c7ea5b29fb9c1ba769c8a630b2311e7e47142ba30266823b13028c291c1a"} Jan 22 05:49:44 crc kubenswrapper[4933]: I0122 05:49:44.333045 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-4748k" podUID="fb89501a-64c9-4b40-84cc-e18896cb53ec" containerName="registry-server" containerID="cri-o://3dccfe84321f2e56719d65746e4198e9a2b41a0d48cb1e3b5fc193c49de4943b" gracePeriod=2 Jan 22 05:49:44 crc kubenswrapper[4933]: I0122 05:49:44.333056 4933 scope.go:117] "RemoveContainer" containerID="28b73509fabe11a2c0e49a768463c846ebc5d5a14e82a1a07c1ce57d6cc8fc2b" Jan 22 05:49:44 crc kubenswrapper[4933]: I0122 05:49:44.362001 4933 scope.go:117] "RemoveContainer" containerID="b2f47ae385e1cc4aecf7a2abe8e44a6e27e442b38d44110ab854dd2030619395" Jan 22 05:49:44 crc kubenswrapper[4933]: I0122 05:49:44.370590 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jrc2t"] Jan 22 05:49:44 crc kubenswrapper[4933]: I0122 05:49:44.373635 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-jrc2t"] Jan 22 05:49:44 crc kubenswrapper[4933]: I0122 05:49:44.393536 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wm42f" Jan 22 05:49:44 crc kubenswrapper[4933]: I0122 05:49:44.398367 4933 scope.go:117] "RemoveContainer" containerID="52157e67eabae2582e3276c15218656d53da92eccff79d8bc890fc4ee72a40c7" Jan 22 05:49:44 crc kubenswrapper[4933]: I0122 05:49:44.413738 4933 scope.go:117] "RemoveContainer" 
containerID="28b73509fabe11a2c0e49a768463c846ebc5d5a14e82a1a07c1ce57d6cc8fc2b" Jan 22 05:49:44 crc kubenswrapper[4933]: E0122 05:49:44.414135 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"28b73509fabe11a2c0e49a768463c846ebc5d5a14e82a1a07c1ce57d6cc8fc2b\": container with ID starting with 28b73509fabe11a2c0e49a768463c846ebc5d5a14e82a1a07c1ce57d6cc8fc2b not found: ID does not exist" containerID="28b73509fabe11a2c0e49a768463c846ebc5d5a14e82a1a07c1ce57d6cc8fc2b" Jan 22 05:49:44 crc kubenswrapper[4933]: I0122 05:49:44.414181 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28b73509fabe11a2c0e49a768463c846ebc5d5a14e82a1a07c1ce57d6cc8fc2b"} err="failed to get container status \"28b73509fabe11a2c0e49a768463c846ebc5d5a14e82a1a07c1ce57d6cc8fc2b\": rpc error: code = NotFound desc = could not find container \"28b73509fabe11a2c0e49a768463c846ebc5d5a14e82a1a07c1ce57d6cc8fc2b\": container with ID starting with 28b73509fabe11a2c0e49a768463c846ebc5d5a14e82a1a07c1ce57d6cc8fc2b not found: ID does not exist" Jan 22 05:49:44 crc kubenswrapper[4933]: I0122 05:49:44.414220 4933 scope.go:117] "RemoveContainer" containerID="b2f47ae385e1cc4aecf7a2abe8e44a6e27e442b38d44110ab854dd2030619395" Jan 22 05:49:44 crc kubenswrapper[4933]: E0122 05:49:44.415771 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b2f47ae385e1cc4aecf7a2abe8e44a6e27e442b38d44110ab854dd2030619395\": container with ID starting with b2f47ae385e1cc4aecf7a2abe8e44a6e27e442b38d44110ab854dd2030619395 not found: ID does not exist" containerID="b2f47ae385e1cc4aecf7a2abe8e44a6e27e442b38d44110ab854dd2030619395" Jan 22 05:49:44 crc kubenswrapper[4933]: I0122 05:49:44.415810 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b2f47ae385e1cc4aecf7a2abe8e44a6e27e442b38d44110ab854dd2030619395"} err="failed to get container status \"b2f47ae385e1cc4aecf7a2abe8e44a6e27e442b38d44110ab854dd2030619395\": rpc error: code = NotFound desc = could not find container \"b2f47ae385e1cc4aecf7a2abe8e44a6e27e442b38d44110ab854dd2030619395\": container with ID starting with b2f47ae385e1cc4aecf7a2abe8e44a6e27e442b38d44110ab854dd2030619395 not found: ID does not exist" Jan 22 05:49:44 crc kubenswrapper[4933]: I0122 05:49:44.415840 4933 scope.go:117] "RemoveContainer" containerID="52157e67eabae2582e3276c15218656d53da92eccff79d8bc890fc4ee72a40c7" Jan 22 05:49:44 crc kubenswrapper[4933]: E0122 05:49:44.416207 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"52157e67eabae2582e3276c15218656d53da92eccff79d8bc890fc4ee72a40c7\": container with ID starting with 52157e67eabae2582e3276c15218656d53da92eccff79d8bc890fc4ee72a40c7 not found: ID does not exist" containerID="52157e67eabae2582e3276c15218656d53da92eccff79d8bc890fc4ee72a40c7" Jan 22 05:49:44 crc kubenswrapper[4933]: I0122 05:49:44.416250 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"52157e67eabae2582e3276c15218656d53da92eccff79d8bc890fc4ee72a40c7"} err="failed to get container status \"52157e67eabae2582e3276c15218656d53da92eccff79d8bc890fc4ee72a40c7\": rpc error: code = NotFound desc = could not find container \"52157e67eabae2582e3276c15218656d53da92eccff79d8bc890fc4ee72a40c7\": container with ID starting with 
52157e67eabae2582e3276c15218656d53da92eccff79d8bc890fc4ee72a40c7 not found: ID does not exist" Jan 22 05:49:44 crc kubenswrapper[4933]: I0122 05:49:44.452588 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-wm42f" Jan 22 05:49:44 crc kubenswrapper[4933]: I0122 05:49:44.498141 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="731fa9cf-182b-4086-8696-ec971095cb38" path="/var/lib/kubelet/pods/731fa9cf-182b-4086-8696-ec971095cb38/volumes" Jan 22 05:49:44 crc kubenswrapper[4933]: I0122 05:49:44.528832 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-79df4745c9-vdhq9"] Jan 22 05:49:44 crc kubenswrapper[4933]: I0122 05:49:44.529092 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-79df4745c9-vdhq9" podUID="a93d1ec5-c71f-4a9b-994e-f3b8de60f12a" containerName="controller-manager" containerID="cri-o://a7647b42d6a1caaf92d46216fa89085113b26a5f6107e4a8677e9fcf61761e39" gracePeriod=30 Jan 22 05:49:44 crc kubenswrapper[4933]: I0122 05:49:44.631700 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7d4564854f-sxx4k"] Jan 22 05:49:44 crc kubenswrapper[4933]: I0122 05:49:44.632017 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-7d4564854f-sxx4k" podUID="184eb57b-71b7-4d7a-a3bf-7866425be9ce" containerName="route-controller-manager" containerID="cri-o://02f569da839993006f7c87e1ba712c93c8734ff40c310b7b626741863619736a" gracePeriod=30 Jan 22 05:49:44 crc kubenswrapper[4933]: E0122 05:49:44.664542 4933 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda93d1ec5_c71f_4a9b_994e_f3b8de60f12a.slice/crio-a7647b42d6a1caaf92d46216fa89085113b26a5f6107e4a8677e9fcf61761e39.scope\": RecentStats: unable to find data in memory cache]" Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.334631 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-4748k" Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.338181 4933 generic.go:334] "Generic (PLEG): container finished" podID="a93d1ec5-c71f-4a9b-994e-f3b8de60f12a" containerID="a7647b42d6a1caaf92d46216fa89085113b26a5f6107e4a8677e9fcf61761e39" exitCode=0 Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.338238 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-79df4745c9-vdhq9" event={"ID":"a93d1ec5-c71f-4a9b-994e-f3b8de60f12a","Type":"ContainerDied","Data":"a7647b42d6a1caaf92d46216fa89085113b26a5f6107e4a8677e9fcf61761e39"} Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.344090 4933 generic.go:334] "Generic (PLEG): container finished" podID="fb89501a-64c9-4b40-84cc-e18896cb53ec" containerID="3dccfe84321f2e56719d65746e4198e9a2b41a0d48cb1e3b5fc193c49de4943b" exitCode=0 Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.344138 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4748k" event={"ID":"fb89501a-64c9-4b40-84cc-e18896cb53ec","Type":"ContainerDied","Data":"3dccfe84321f2e56719d65746e4198e9a2b41a0d48cb1e3b5fc193c49de4943b"} Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.344165 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4748k" Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.344202 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4748k" event={"ID":"fb89501a-64c9-4b40-84cc-e18896cb53ec","Type":"ContainerDied","Data":"8fe0de50ecb8ce6366487e9841ad33408182892a47a72addfc5f91628323d037"} Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.344227 4933 scope.go:117] "RemoveContainer" containerID="3dccfe84321f2e56719d65746e4198e9a2b41a0d48cb1e3b5fc193c49de4943b" Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.345921 4933 generic.go:334] "Generic (PLEG): container finished" podID="184eb57b-71b7-4d7a-a3bf-7866425be9ce" containerID="02f569da839993006f7c87e1ba712c93c8734ff40c310b7b626741863619736a" exitCode=0 Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.345965 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7d4564854f-sxx4k" event={"ID":"184eb57b-71b7-4d7a-a3bf-7866425be9ce","Type":"ContainerDied","Data":"02f569da839993006f7c87e1ba712c93c8734ff40c310b7b626741863619736a"} Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.368038 4933 scope.go:117] "RemoveContainer" containerID="87d30d4c36286b302611a006df0aa2cb5bfc2a61353d30d26b5d40bd09bdd931" Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.422836 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb89501a-64c9-4b40-84cc-e18896cb53ec-catalog-content\") pod \"fb89501a-64c9-4b40-84cc-e18896cb53ec\" (UID: \"fb89501a-64c9-4b40-84cc-e18896cb53ec\") " Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.422888 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb89501a-64c9-4b40-84cc-e18896cb53ec-utilities\") pod \"fb89501a-64c9-4b40-84cc-e18896cb53ec\" (UID: \"fb89501a-64c9-4b40-84cc-e18896cb53ec\") " Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.423030 4933 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-9cl67\" (UniqueName: \"kubernetes.io/projected/fb89501a-64c9-4b40-84cc-e18896cb53ec-kube-api-access-9cl67\") pod \"fb89501a-64c9-4b40-84cc-e18896cb53ec\" (UID: \"fb89501a-64c9-4b40-84cc-e18896cb53ec\") " Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.424786 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fb89501a-64c9-4b40-84cc-e18896cb53ec-utilities" (OuterVolumeSpecName: "utilities") pod "fb89501a-64c9-4b40-84cc-e18896cb53ec" (UID: "fb89501a-64c9-4b40-84cc-e18896cb53ec"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.426227 4933 scope.go:117] "RemoveContainer" containerID="4ae6836e240c6d4440c9a92c7c0e6996695758b46ae46eb70f0a757393723a54" Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.436187 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb89501a-64c9-4b40-84cc-e18896cb53ec-kube-api-access-9cl67" (OuterVolumeSpecName: "kube-api-access-9cl67") pod "fb89501a-64c9-4b40-84cc-e18896cb53ec" (UID: "fb89501a-64c9-4b40-84cc-e18896cb53ec"). InnerVolumeSpecName "kube-api-access-9cl67". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.466121 4933 scope.go:117] "RemoveContainer" containerID="3dccfe84321f2e56719d65746e4198e9a2b41a0d48cb1e3b5fc193c49de4943b" Jan 22 05:49:45 crc kubenswrapper[4933]: E0122 05:49:45.466805 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3dccfe84321f2e56719d65746e4198e9a2b41a0d48cb1e3b5fc193c49de4943b\": container with ID starting with 3dccfe84321f2e56719d65746e4198e9a2b41a0d48cb1e3b5fc193c49de4943b not found: ID does not exist" containerID="3dccfe84321f2e56719d65746e4198e9a2b41a0d48cb1e3b5fc193c49de4943b" Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.466839 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3dccfe84321f2e56719d65746e4198e9a2b41a0d48cb1e3b5fc193c49de4943b"} err="failed to get container status \"3dccfe84321f2e56719d65746e4198e9a2b41a0d48cb1e3b5fc193c49de4943b\": rpc error: code = NotFound desc = could not find container \"3dccfe84321f2e56719d65746e4198e9a2b41a0d48cb1e3b5fc193c49de4943b\": container with ID starting with 3dccfe84321f2e56719d65746e4198e9a2b41a0d48cb1e3b5fc193c49de4943b not found: ID does not exist" Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.466868 4933 scope.go:117] "RemoveContainer" containerID="87d30d4c36286b302611a006df0aa2cb5bfc2a61353d30d26b5d40bd09bdd931" Jan 22 05:49:45 crc kubenswrapper[4933]: E0122 05:49:45.467410 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"87d30d4c36286b302611a006df0aa2cb5bfc2a61353d30d26b5d40bd09bdd931\": container with ID starting with 87d30d4c36286b302611a006df0aa2cb5bfc2a61353d30d26b5d40bd09bdd931 not found: ID does not exist" containerID="87d30d4c36286b302611a006df0aa2cb5bfc2a61353d30d26b5d40bd09bdd931" Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.467454 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"87d30d4c36286b302611a006df0aa2cb5bfc2a61353d30d26b5d40bd09bdd931"} err="failed to get container status 
\"87d30d4c36286b302611a006df0aa2cb5bfc2a61353d30d26b5d40bd09bdd931\": rpc error: code = NotFound desc = could not find container \"87d30d4c36286b302611a006df0aa2cb5bfc2a61353d30d26b5d40bd09bdd931\": container with ID starting with 87d30d4c36286b302611a006df0aa2cb5bfc2a61353d30d26b5d40bd09bdd931 not found: ID does not exist" Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.467483 4933 scope.go:117] "RemoveContainer" containerID="4ae6836e240c6d4440c9a92c7c0e6996695758b46ae46eb70f0a757393723a54" Jan 22 05:49:45 crc kubenswrapper[4933]: E0122 05:49:45.467850 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ae6836e240c6d4440c9a92c7c0e6996695758b46ae46eb70f0a757393723a54\": container with ID starting with 4ae6836e240c6d4440c9a92c7c0e6996695758b46ae46eb70f0a757393723a54 not found: ID does not exist" containerID="4ae6836e240c6d4440c9a92c7c0e6996695758b46ae46eb70f0a757393723a54" Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.467869 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ae6836e240c6d4440c9a92c7c0e6996695758b46ae46eb70f0a757393723a54"} err="failed to get container status \"4ae6836e240c6d4440c9a92c7c0e6996695758b46ae46eb70f0a757393723a54\": rpc error: code = NotFound desc = could not find container \"4ae6836e240c6d4440c9a92c7c0e6996695758b46ae46eb70f0a757393723a54\": container with ID starting with 4ae6836e240c6d4440c9a92c7c0e6996695758b46ae46eb70f0a757393723a54 not found: ID does not exist" Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.475647 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fb89501a-64c9-4b40-84cc-e18896cb53ec-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fb89501a-64c9-4b40-84cc-e18896cb53ec" (UID: "fb89501a-64c9-4b40-84cc-e18896cb53ec"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.524907 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9cl67\" (UniqueName: \"kubernetes.io/projected/fb89501a-64c9-4b40-84cc-e18896cb53ec-kube-api-access-9cl67\") on node \"crc\" DevicePath \"\"" Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.524994 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb89501a-64c9-4b40-84cc-e18896cb53ec-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.525008 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb89501a-64c9-4b40-84cc-e18896cb53ec-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.536748 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7d4564854f-sxx4k" Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.569606 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-79df4745c9-vdhq9" Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.626068 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a93d1ec5-c71f-4a9b-994e-f3b8de60f12a-proxy-ca-bundles\") pod \"a93d1ec5-c71f-4a9b-994e-f3b8de60f12a\" (UID: \"a93d1ec5-c71f-4a9b-994e-f3b8de60f12a\") " Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.627087 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a93d1ec5-c71f-4a9b-994e-f3b8de60f12a-serving-cert\") pod \"a93d1ec5-c71f-4a9b-994e-f3b8de60f12a\" (UID: \"a93d1ec5-c71f-4a9b-994e-f3b8de60f12a\") " Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.627633 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/184eb57b-71b7-4d7a-a3bf-7866425be9ce-config\") pod \"184eb57b-71b7-4d7a-a3bf-7866425be9ce\" (UID: \"184eb57b-71b7-4d7a-a3bf-7866425be9ce\") " Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.627763 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a93d1ec5-c71f-4a9b-994e-f3b8de60f12a-config\") pod \"a93d1ec5-c71f-4a9b-994e-f3b8de60f12a\" (UID: \"a93d1ec5-c71f-4a9b-994e-f3b8de60f12a\") " Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.627863 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gfjkw\" (UniqueName: \"kubernetes.io/projected/a93d1ec5-c71f-4a9b-994e-f3b8de60f12a-kube-api-access-gfjkw\") pod \"a93d1ec5-c71f-4a9b-994e-f3b8de60f12a\" (UID: \"a93d1ec5-c71f-4a9b-994e-f3b8de60f12a\") " Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.626996 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a93d1ec5-c71f-4a9b-994e-f3b8de60f12a-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "a93d1ec5-c71f-4a9b-994e-f3b8de60f12a" (UID: "a93d1ec5-c71f-4a9b-994e-f3b8de60f12a"). InnerVolumeSpecName "proxy-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.628501 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-29j8x\" (UniqueName: \"kubernetes.io/projected/184eb57b-71b7-4d7a-a3bf-7866425be9ce-kube-api-access-29j8x\") pod \"184eb57b-71b7-4d7a-a3bf-7866425be9ce\" (UID: \"184eb57b-71b7-4d7a-a3bf-7866425be9ce\") " Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.628619 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/184eb57b-71b7-4d7a-a3bf-7866425be9ce-client-ca\") pod \"184eb57b-71b7-4d7a-a3bf-7866425be9ce\" (UID: \"184eb57b-71b7-4d7a-a3bf-7866425be9ce\") " Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.628796 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/184eb57b-71b7-4d7a-a3bf-7866425be9ce-serving-cert\") pod \"184eb57b-71b7-4d7a-a3bf-7866425be9ce\" (UID: \"184eb57b-71b7-4d7a-a3bf-7866425be9ce\") " Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.628987 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a93d1ec5-c71f-4a9b-994e-f3b8de60f12a-client-ca\") pod \"a93d1ec5-c71f-4a9b-994e-f3b8de60f12a\" (UID: \"a93d1ec5-c71f-4a9b-994e-f3b8de60f12a\") " Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.628614 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a93d1ec5-c71f-4a9b-994e-f3b8de60f12a-config" (OuterVolumeSpecName: "config") pod "a93d1ec5-c71f-4a9b-994e-f3b8de60f12a" (UID: "a93d1ec5-c71f-4a9b-994e-f3b8de60f12a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.628724 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/184eb57b-71b7-4d7a-a3bf-7866425be9ce-config" (OuterVolumeSpecName: "config") pod "184eb57b-71b7-4d7a-a3bf-7866425be9ce" (UID: "184eb57b-71b7-4d7a-a3bf-7866425be9ce"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.629323 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/184eb57b-71b7-4d7a-a3bf-7866425be9ce-client-ca" (OuterVolumeSpecName: "client-ca") pod "184eb57b-71b7-4d7a-a3bf-7866425be9ce" (UID: "184eb57b-71b7-4d7a-a3bf-7866425be9ce"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.629720 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a93d1ec5-c71f-4a9b-994e-f3b8de60f12a-client-ca" (OuterVolumeSpecName: "client-ca") pod "a93d1ec5-c71f-4a9b-994e-f3b8de60f12a" (UID: "a93d1ec5-c71f-4a9b-994e-f3b8de60f12a"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.629853 4933 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a93d1ec5-c71f-4a9b-994e-f3b8de60f12a-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.629948 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/184eb57b-71b7-4d7a-a3bf-7866425be9ce-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.630022 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a93d1ec5-c71f-4a9b-994e-f3b8de60f12a-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.630125 4933 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/184eb57b-71b7-4d7a-a3bf-7866425be9ce-client-ca\") on node \"crc\" DevicePath \"\"" Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.631467 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/184eb57b-71b7-4d7a-a3bf-7866425be9ce-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "184eb57b-71b7-4d7a-a3bf-7866425be9ce" (UID: "184eb57b-71b7-4d7a-a3bf-7866425be9ce"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.631716 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a93d1ec5-c71f-4a9b-994e-f3b8de60f12a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "a93d1ec5-c71f-4a9b-994e-f3b8de60f12a" (UID: "a93d1ec5-c71f-4a9b-994e-f3b8de60f12a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.631714 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a93d1ec5-c71f-4a9b-994e-f3b8de60f12a-kube-api-access-gfjkw" (OuterVolumeSpecName: "kube-api-access-gfjkw") pod "a93d1ec5-c71f-4a9b-994e-f3b8de60f12a" (UID: "a93d1ec5-c71f-4a9b-994e-f3b8de60f12a"). InnerVolumeSpecName "kube-api-access-gfjkw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.632241 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/184eb57b-71b7-4d7a-a3bf-7866425be9ce-kube-api-access-29j8x" (OuterVolumeSpecName: "kube-api-access-29j8x") pod "184eb57b-71b7-4d7a-a3bf-7866425be9ce" (UID: "184eb57b-71b7-4d7a-a3bf-7866425be9ce"). InnerVolumeSpecName "kube-api-access-29j8x". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.669471 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4748k"] Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.672409 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-4748k"] Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.731524 4933 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a93d1ec5-c71f-4a9b-994e-f3b8de60f12a-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.731852 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gfjkw\" (UniqueName: \"kubernetes.io/projected/a93d1ec5-c71f-4a9b-994e-f3b8de60f12a-kube-api-access-gfjkw\") on node \"crc\" DevicePath \"\"" Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.731952 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-29j8x\" (UniqueName: \"kubernetes.io/projected/184eb57b-71b7-4d7a-a3bf-7866425be9ce-kube-api-access-29j8x\") on node \"crc\" DevicePath \"\"" Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.732042 4933 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/184eb57b-71b7-4d7a-a3bf-7866425be9ce-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.732139 4933 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a93d1ec5-c71f-4a9b-994e-f3b8de60f12a-client-ca\") on node \"crc\" DevicePath \"\"" Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.971929 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xtxwj"] Jan 22 05:49:45 crc kubenswrapper[4933]: I0122 05:49:45.972667 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-xtxwj" podUID="a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f" containerName="registry-server" containerID="cri-o://c3514de8978030c12e5592fa0edefbcf4fabf23e091cd3dfb8a0d3830195e9fc" gracePeriod=2 Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.010834 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-977898fdc-8bmw7"] Jan 22 05:49:46 crc kubenswrapper[4933]: E0122 05:49:46.011257 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb89501a-64c9-4b40-84cc-e18896cb53ec" containerName="extract-utilities" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.011328 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb89501a-64c9-4b40-84cc-e18896cb53ec" containerName="extract-utilities" Jan 22 05:49:46 crc kubenswrapper[4933]: E0122 05:49:46.011419 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="731fa9cf-182b-4086-8696-ec971095cb38" containerName="extract-content" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.011485 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="731fa9cf-182b-4086-8696-ec971095cb38" containerName="extract-content" Jan 22 05:49:46 crc kubenswrapper[4933]: E0122 05:49:46.011542 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="184eb57b-71b7-4d7a-a3bf-7866425be9ce" containerName="route-controller-manager" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.011593 4933 
state_mem.go:107] "Deleted CPUSet assignment" podUID="184eb57b-71b7-4d7a-a3bf-7866425be9ce" containerName="route-controller-manager" Jan 22 05:49:46 crc kubenswrapper[4933]: E0122 05:49:46.011663 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="731fa9cf-182b-4086-8696-ec971095cb38" containerName="extract-utilities" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.011717 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="731fa9cf-182b-4086-8696-ec971095cb38" containerName="extract-utilities" Jan 22 05:49:46 crc kubenswrapper[4933]: E0122 05:49:46.011779 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb89501a-64c9-4b40-84cc-e18896cb53ec" containerName="extract-content" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.011835 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb89501a-64c9-4b40-84cc-e18896cb53ec" containerName="extract-content" Jan 22 05:49:46 crc kubenswrapper[4933]: E0122 05:49:46.011886 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb89501a-64c9-4b40-84cc-e18896cb53ec" containerName="registry-server" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.011937 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb89501a-64c9-4b40-84cc-e18896cb53ec" containerName="registry-server" Jan 22 05:49:46 crc kubenswrapper[4933]: E0122 05:49:46.011990 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="731fa9cf-182b-4086-8696-ec971095cb38" containerName="registry-server" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.012040 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="731fa9cf-182b-4086-8696-ec971095cb38" containerName="registry-server" Jan 22 05:49:46 crc kubenswrapper[4933]: E0122 05:49:46.012119 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a93d1ec5-c71f-4a9b-994e-f3b8de60f12a" containerName="controller-manager" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.012182 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="a93d1ec5-c71f-4a9b-994e-f3b8de60f12a" containerName="controller-manager" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.012340 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="a93d1ec5-c71f-4a9b-994e-f3b8de60f12a" containerName="controller-manager" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.012405 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="731fa9cf-182b-4086-8696-ec971095cb38" containerName="registry-server" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.012464 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="184eb57b-71b7-4d7a-a3bf-7866425be9ce" containerName="route-controller-manager" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.012526 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb89501a-64c9-4b40-84cc-e18896cb53ec" containerName="registry-server" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.012922 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-977898fdc-8bmw7" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.016654 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6b5dfcd9f-f8jd2"] Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.017367 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6b5dfcd9f-f8jd2" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.020614 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-977898fdc-8bmw7"] Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.024906 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6b5dfcd9f-f8jd2"] Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.035734 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/30297fb1-7354-44f3-a4a6-a3729e48c214-serving-cert\") pod \"controller-manager-977898fdc-8bmw7\" (UID: \"30297fb1-7354-44f3-a4a6-a3729e48c214\") " pod="openshift-controller-manager/controller-manager-977898fdc-8bmw7" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.035980 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/984d8f16-0ff7-46a6-8099-f5a87ef0d5ed-config\") pod \"route-controller-manager-6b5dfcd9f-f8jd2\" (UID: \"984d8f16-0ff7-46a6-8099-f5a87ef0d5ed\") " pod="openshift-route-controller-manager/route-controller-manager-6b5dfcd9f-f8jd2" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.036123 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mt7qz\" (UniqueName: \"kubernetes.io/projected/984d8f16-0ff7-46a6-8099-f5a87ef0d5ed-kube-api-access-mt7qz\") pod \"route-controller-manager-6b5dfcd9f-f8jd2\" (UID: \"984d8f16-0ff7-46a6-8099-f5a87ef0d5ed\") " pod="openshift-route-controller-manager/route-controller-manager-6b5dfcd9f-f8jd2" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.036212 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/30297fb1-7354-44f3-a4a6-a3729e48c214-proxy-ca-bundles\") pod \"controller-manager-977898fdc-8bmw7\" (UID: \"30297fb1-7354-44f3-a4a6-a3729e48c214\") " pod="openshift-controller-manager/controller-manager-977898fdc-8bmw7" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.036296 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/984d8f16-0ff7-46a6-8099-f5a87ef0d5ed-serving-cert\") pod \"route-controller-manager-6b5dfcd9f-f8jd2\" (UID: \"984d8f16-0ff7-46a6-8099-f5a87ef0d5ed\") " pod="openshift-route-controller-manager/route-controller-manager-6b5dfcd9f-f8jd2" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.036395 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/984d8f16-0ff7-46a6-8099-f5a87ef0d5ed-client-ca\") pod \"route-controller-manager-6b5dfcd9f-f8jd2\" (UID: \"984d8f16-0ff7-46a6-8099-f5a87ef0d5ed\") " pod="openshift-route-controller-manager/route-controller-manager-6b5dfcd9f-f8jd2" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.036502 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/30297fb1-7354-44f3-a4a6-a3729e48c214-config\") pod \"controller-manager-977898fdc-8bmw7\" (UID: \"30297fb1-7354-44f3-a4a6-a3729e48c214\") " 
pod="openshift-controller-manager/controller-manager-977898fdc-8bmw7" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.036597 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/30297fb1-7354-44f3-a4a6-a3729e48c214-client-ca\") pod \"controller-manager-977898fdc-8bmw7\" (UID: \"30297fb1-7354-44f3-a4a6-a3729e48c214\") " pod="openshift-controller-manager/controller-manager-977898fdc-8bmw7" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.036671 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7qbwl\" (UniqueName: \"kubernetes.io/projected/30297fb1-7354-44f3-a4a6-a3729e48c214-kube-api-access-7qbwl\") pod \"controller-manager-977898fdc-8bmw7\" (UID: \"30297fb1-7354-44f3-a4a6-a3729e48c214\") " pod="openshift-controller-manager/controller-manager-977898fdc-8bmw7" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.140639 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/30297fb1-7354-44f3-a4a6-a3729e48c214-serving-cert\") pod \"controller-manager-977898fdc-8bmw7\" (UID: \"30297fb1-7354-44f3-a4a6-a3729e48c214\") " pod="openshift-controller-manager/controller-manager-977898fdc-8bmw7" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.140688 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/984d8f16-0ff7-46a6-8099-f5a87ef0d5ed-config\") pod \"route-controller-manager-6b5dfcd9f-f8jd2\" (UID: \"984d8f16-0ff7-46a6-8099-f5a87ef0d5ed\") " pod="openshift-route-controller-manager/route-controller-manager-6b5dfcd9f-f8jd2" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.140716 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mt7qz\" (UniqueName: \"kubernetes.io/projected/984d8f16-0ff7-46a6-8099-f5a87ef0d5ed-kube-api-access-mt7qz\") pod \"route-controller-manager-6b5dfcd9f-f8jd2\" (UID: \"984d8f16-0ff7-46a6-8099-f5a87ef0d5ed\") " pod="openshift-route-controller-manager/route-controller-manager-6b5dfcd9f-f8jd2" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.140736 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/30297fb1-7354-44f3-a4a6-a3729e48c214-proxy-ca-bundles\") pod \"controller-manager-977898fdc-8bmw7\" (UID: \"30297fb1-7354-44f3-a4a6-a3729e48c214\") " pod="openshift-controller-manager/controller-manager-977898fdc-8bmw7" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.140753 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/984d8f16-0ff7-46a6-8099-f5a87ef0d5ed-serving-cert\") pod \"route-controller-manager-6b5dfcd9f-f8jd2\" (UID: \"984d8f16-0ff7-46a6-8099-f5a87ef0d5ed\") " pod="openshift-route-controller-manager/route-controller-manager-6b5dfcd9f-f8jd2" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.140783 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/984d8f16-0ff7-46a6-8099-f5a87ef0d5ed-client-ca\") pod \"route-controller-manager-6b5dfcd9f-f8jd2\" (UID: \"984d8f16-0ff7-46a6-8099-f5a87ef0d5ed\") " pod="openshift-route-controller-manager/route-controller-manager-6b5dfcd9f-f8jd2" Jan 22 05:49:46 crc 
kubenswrapper[4933]: I0122 05:49:46.140814 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/30297fb1-7354-44f3-a4a6-a3729e48c214-config\") pod \"controller-manager-977898fdc-8bmw7\" (UID: \"30297fb1-7354-44f3-a4a6-a3729e48c214\") " pod="openshift-controller-manager/controller-manager-977898fdc-8bmw7" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.140843 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/30297fb1-7354-44f3-a4a6-a3729e48c214-client-ca\") pod \"controller-manager-977898fdc-8bmw7\" (UID: \"30297fb1-7354-44f3-a4a6-a3729e48c214\") " pod="openshift-controller-manager/controller-manager-977898fdc-8bmw7" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.140859 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7qbwl\" (UniqueName: \"kubernetes.io/projected/30297fb1-7354-44f3-a4a6-a3729e48c214-kube-api-access-7qbwl\") pod \"controller-manager-977898fdc-8bmw7\" (UID: \"30297fb1-7354-44f3-a4a6-a3729e48c214\") " pod="openshift-controller-manager/controller-manager-977898fdc-8bmw7" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.141850 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/984d8f16-0ff7-46a6-8099-f5a87ef0d5ed-config\") pod \"route-controller-manager-6b5dfcd9f-f8jd2\" (UID: \"984d8f16-0ff7-46a6-8099-f5a87ef0d5ed\") " pod="openshift-route-controller-manager/route-controller-manager-6b5dfcd9f-f8jd2" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.142196 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/30297fb1-7354-44f3-a4a6-a3729e48c214-client-ca\") pod \"controller-manager-977898fdc-8bmw7\" (UID: \"30297fb1-7354-44f3-a4a6-a3729e48c214\") " pod="openshift-controller-manager/controller-manager-977898fdc-8bmw7" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.142281 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/984d8f16-0ff7-46a6-8099-f5a87ef0d5ed-client-ca\") pod \"route-controller-manager-6b5dfcd9f-f8jd2\" (UID: \"984d8f16-0ff7-46a6-8099-f5a87ef0d5ed\") " pod="openshift-route-controller-manager/route-controller-manager-6b5dfcd9f-f8jd2" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.142604 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/30297fb1-7354-44f3-a4a6-a3729e48c214-config\") pod \"controller-manager-977898fdc-8bmw7\" (UID: \"30297fb1-7354-44f3-a4a6-a3729e48c214\") " pod="openshift-controller-manager/controller-manager-977898fdc-8bmw7" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.142780 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/30297fb1-7354-44f3-a4a6-a3729e48c214-proxy-ca-bundles\") pod \"controller-manager-977898fdc-8bmw7\" (UID: \"30297fb1-7354-44f3-a4a6-a3729e48c214\") " pod="openshift-controller-manager/controller-manager-977898fdc-8bmw7" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.146926 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/30297fb1-7354-44f3-a4a6-a3729e48c214-serving-cert\") pod 
\"controller-manager-977898fdc-8bmw7\" (UID: \"30297fb1-7354-44f3-a4a6-a3729e48c214\") " pod="openshift-controller-manager/controller-manager-977898fdc-8bmw7" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.146934 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/984d8f16-0ff7-46a6-8099-f5a87ef0d5ed-serving-cert\") pod \"route-controller-manager-6b5dfcd9f-f8jd2\" (UID: \"984d8f16-0ff7-46a6-8099-f5a87ef0d5ed\") " pod="openshift-route-controller-manager/route-controller-manager-6b5dfcd9f-f8jd2" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.159895 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mt7qz\" (UniqueName: \"kubernetes.io/projected/984d8f16-0ff7-46a6-8099-f5a87ef0d5ed-kube-api-access-mt7qz\") pod \"route-controller-manager-6b5dfcd9f-f8jd2\" (UID: \"984d8f16-0ff7-46a6-8099-f5a87ef0d5ed\") " pod="openshift-route-controller-manager/route-controller-manager-6b5dfcd9f-f8jd2" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.164289 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7qbwl\" (UniqueName: \"kubernetes.io/projected/30297fb1-7354-44f3-a4a6-a3729e48c214-kube-api-access-7qbwl\") pod \"controller-manager-977898fdc-8bmw7\" (UID: \"30297fb1-7354-44f3-a4a6-a3729e48c214\") " pod="openshift-controller-manager/controller-manager-977898fdc-8bmw7" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.321097 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xtxwj" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.343252 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vd4m5\" (UniqueName: \"kubernetes.io/projected/a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f-kube-api-access-vd4m5\") pod \"a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f\" (UID: \"a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f\") " Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.343309 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f-catalog-content\") pod \"a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f\" (UID: \"a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f\") " Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.343359 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f-utilities\") pod \"a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f\" (UID: \"a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f\") " Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.344328 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f-utilities" (OuterVolumeSpecName: "utilities") pod "a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f" (UID: "a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.347717 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f-kube-api-access-vd4m5" (OuterVolumeSpecName: "kube-api-access-vd4m5") pod "a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f" (UID: "a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f"). InnerVolumeSpecName "kube-api-access-vd4m5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.353606 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-79df4745c9-vdhq9" event={"ID":"a93d1ec5-c71f-4a9b-994e-f3b8de60f12a","Type":"ContainerDied","Data":"25e346971e1e795f7ca6bed81eebf16435a979d1824cfb3cdc7ce4ab697fe597"} Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.353630 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-79df4745c9-vdhq9" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.353650 4933 scope.go:117] "RemoveContainer" containerID="a7647b42d6a1caaf92d46216fa89085113b26a5f6107e4a8677e9fcf61761e39" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.359249 4933 generic.go:334] "Generic (PLEG): container finished" podID="a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f" containerID="c3514de8978030c12e5592fa0edefbcf4fabf23e091cd3dfb8a0d3830195e9fc" exitCode=0 Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.359333 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xtxwj" event={"ID":"a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f","Type":"ContainerDied","Data":"c3514de8978030c12e5592fa0edefbcf4fabf23e091cd3dfb8a0d3830195e9fc"} Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.359365 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xtxwj" event={"ID":"a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f","Type":"ContainerDied","Data":"32c22486d8b0317a2b00e11d0f89dbd8c031deb4d30ad86ace82290fe99b9a06"} Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.359378 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xtxwj" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.361224 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7d4564854f-sxx4k" event={"ID":"184eb57b-71b7-4d7a-a3bf-7866425be9ce","Type":"ContainerDied","Data":"f27d6793df71d620b2ee7a821516d4e8dcc4e3b5f0bbf274d2d8d9d4228e895b"} Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.361318 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7d4564854f-sxx4k" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.372946 4933 scope.go:117] "RemoveContainer" containerID="c3514de8978030c12e5592fa0edefbcf4fabf23e091cd3dfb8a0d3830195e9fc" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.385070 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-79df4745c9-vdhq9"] Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.387166 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-79df4745c9-vdhq9"] Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.389461 4933 scope.go:117] "RemoveContainer" containerID="715aa553a2abde76fe5858b77b9b86abc29ca28ab4f25da9e8dc7a1f079a1dd8" Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.389755 4933 util.go:30] "No sandbox for pod can be found. 
Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.390662 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f" (UID: "a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.397219 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6b5dfcd9f-f8jd2"
Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.398516 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7d4564854f-sxx4k"]
Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.401876 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7d4564854f-sxx4k"]
Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.418850 4933 scope.go:117] "RemoveContainer" containerID="63f33c516f448cfbf67d2fd9f379ecb07a4b62ce9f54e3a39db9602ed8fbd4af"
Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.439151 4933 scope.go:117] "RemoveContainer" containerID="c3514de8978030c12e5592fa0edefbcf4fabf23e091cd3dfb8a0d3830195e9fc"
Jan 22 05:49:46 crc kubenswrapper[4933]: E0122 05:49:46.441552 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c3514de8978030c12e5592fa0edefbcf4fabf23e091cd3dfb8a0d3830195e9fc\": container with ID starting with c3514de8978030c12e5592fa0edefbcf4fabf23e091cd3dfb8a0d3830195e9fc not found: ID does not exist" containerID="c3514de8978030c12e5592fa0edefbcf4fabf23e091cd3dfb8a0d3830195e9fc"
Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.441597 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c3514de8978030c12e5592fa0edefbcf4fabf23e091cd3dfb8a0d3830195e9fc"} err="failed to get container status \"c3514de8978030c12e5592fa0edefbcf4fabf23e091cd3dfb8a0d3830195e9fc\": rpc error: code = NotFound desc = could not find container \"c3514de8978030c12e5592fa0edefbcf4fabf23e091cd3dfb8a0d3830195e9fc\": container with ID starting with c3514de8978030c12e5592fa0edefbcf4fabf23e091cd3dfb8a0d3830195e9fc not found: ID does not exist"
Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.441632 4933 scope.go:117] "RemoveContainer" containerID="715aa553a2abde76fe5858b77b9b86abc29ca28ab4f25da9e8dc7a1f079a1dd8"
Jan 22 05:49:46 crc kubenswrapper[4933]: E0122 05:49:46.442423 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"715aa553a2abde76fe5858b77b9b86abc29ca28ab4f25da9e8dc7a1f079a1dd8\": container with ID starting with 715aa553a2abde76fe5858b77b9b86abc29ca28ab4f25da9e8dc7a1f079a1dd8 not found: ID does not exist" containerID="715aa553a2abde76fe5858b77b9b86abc29ca28ab4f25da9e8dc7a1f079a1dd8"
Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.442457 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"715aa553a2abde76fe5858b77b9b86abc29ca28ab4f25da9e8dc7a1f079a1dd8"} err="failed to get container status \"715aa553a2abde76fe5858b77b9b86abc29ca28ab4f25da9e8dc7a1f079a1dd8\": rpc error: code = NotFound desc = could not find container \"715aa553a2abde76fe5858b77b9b86abc29ca28ab4f25da9e8dc7a1f079a1dd8\": container with ID starting with 715aa553a2abde76fe5858b77b9b86abc29ca28ab4f25da9e8dc7a1f079a1dd8 not found: ID does not exist"
Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.442482 4933 scope.go:117] "RemoveContainer" containerID="63f33c516f448cfbf67d2fd9f379ecb07a4b62ce9f54e3a39db9602ed8fbd4af"
Jan 22 05:49:46 crc kubenswrapper[4933]: E0122 05:49:46.443053 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"63f33c516f448cfbf67d2fd9f379ecb07a4b62ce9f54e3a39db9602ed8fbd4af\": container with ID starting with 63f33c516f448cfbf67d2fd9f379ecb07a4b62ce9f54e3a39db9602ed8fbd4af not found: ID does not exist" containerID="63f33c516f448cfbf67d2fd9f379ecb07a4b62ce9f54e3a39db9602ed8fbd4af"
Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.443154 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"63f33c516f448cfbf67d2fd9f379ecb07a4b62ce9f54e3a39db9602ed8fbd4af"} err="failed to get container status \"63f33c516f448cfbf67d2fd9f379ecb07a4b62ce9f54e3a39db9602ed8fbd4af\": rpc error: code = NotFound desc = could not find container \"63f33c516f448cfbf67d2fd9f379ecb07a4b62ce9f54e3a39db9602ed8fbd4af\": container with ID starting with 63f33c516f448cfbf67d2fd9f379ecb07a4b62ce9f54e3a39db9602ed8fbd4af not found: ID does not exist"
Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.443222 4933 scope.go:117] "RemoveContainer" containerID="02f569da839993006f7c87e1ba712c93c8734ff40c310b7b626741863619736a"
Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.445232 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vd4m5\" (UniqueName: \"kubernetes.io/projected/a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f-kube-api-access-vd4m5\") on node \"crc\" DevicePath \"\""
Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.445250 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.445261 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f-utilities\") on node \"crc\" DevicePath \"\""
Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.505665 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="184eb57b-71b7-4d7a-a3bf-7866425be9ce" path="/var/lib/kubelet/pods/184eb57b-71b7-4d7a-a3bf-7866425be9ce/volumes"
Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.506236 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a93d1ec5-c71f-4a9b-994e-f3b8de60f12a" path="/var/lib/kubelet/pods/a93d1ec5-c71f-4a9b-994e-f3b8de60f12a/volumes"
Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.506876 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fb89501a-64c9-4b40-84cc-e18896cb53ec" path="/var/lib/kubelet/pods/fb89501a-64c9-4b40-84cc-e18896cb53ec/volumes"
Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.673329 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xtxwj"]
Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.676745 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-xtxwj"]
Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.815841 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-977898fdc-8bmw7"]
Jan 22 05:49:46 crc kubenswrapper[4933]: W0122 05:49:46.823054 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod30297fb1_7354_44f3_a4a6_a3729e48c214.slice/crio-5c8906dfc01fd6c1c43d989d9f049421c1e4a64314aa9970f4d3c03bc86747a6 WatchSource:0}: Error finding container 5c8906dfc01fd6c1c43d989d9f049421c1e4a64314aa9970f4d3c03bc86747a6: Status 404 returned error can't find the container with id 5c8906dfc01fd6c1c43d989d9f049421c1e4a64314aa9970f4d3c03bc86747a6
Jan 22 05:49:46 crc kubenswrapper[4933]: I0122 05:49:46.866715 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6b5dfcd9f-f8jd2"]
Jan 22 05:49:47 crc kubenswrapper[4933]: I0122 05:49:47.368909 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6b5dfcd9f-f8jd2" event={"ID":"984d8f16-0ff7-46a6-8099-f5a87ef0d5ed","Type":"ContainerStarted","Data":"fdb515748f4c39f1b9554d1f768bd8aa9f00bbfe979a2e9529798d691adfed0c"}
Jan 22 05:49:47 crc kubenswrapper[4933]: I0122 05:49:47.368957 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6b5dfcd9f-f8jd2" event={"ID":"984d8f16-0ff7-46a6-8099-f5a87ef0d5ed","Type":"ContainerStarted","Data":"96a6453fc187e4acb73afdb2aba1df22fcaf5e561b9adc1bb3c18edeb3312d26"}
Jan 22 05:49:47 crc kubenswrapper[4933]: I0122 05:49:47.369130 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6b5dfcd9f-f8jd2"
Jan 22 05:49:47 crc kubenswrapper[4933]: I0122 05:49:47.373440 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-977898fdc-8bmw7" event={"ID":"30297fb1-7354-44f3-a4a6-a3729e48c214","Type":"ContainerStarted","Data":"5e1b2813cc52dfb2b45b851545517adc40866378da4ee5a8389e3961c63fee61"}
Jan 22 05:49:47 crc kubenswrapper[4933]: I0122 05:49:47.373485 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-977898fdc-8bmw7" event={"ID":"30297fb1-7354-44f3-a4a6-a3729e48c214","Type":"ContainerStarted","Data":"5c8906dfc01fd6c1c43d989d9f049421c1e4a64314aa9970f4d3c03bc86747a6"}
Jan 22 05:49:47 crc kubenswrapper[4933]: I0122 05:49:47.373591 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-977898fdc-8bmw7"
Jan 22 05:49:47 crc kubenswrapper[4933]: I0122 05:49:47.380170 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-977898fdc-8bmw7"
Jan 22 05:49:47 crc kubenswrapper[4933]: I0122 05:49:47.395585 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6b5dfcd9f-f8jd2" podStartSLOduration=3.395564012 podStartE2EDuration="3.395564012s" podCreationTimestamp="2026-01-22 05:49:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:49:47.391736629 +0000 UTC m=+235.228861982" watchObservedRunningTime="2026-01-22 05:49:47.395564012 +0000 UTC m=+235.232689365"
Jan 22 05:49:47 crc kubenswrapper[4933]: I0122 05:49:47.417384 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-977898fdc-8bmw7" podStartSLOduration=3.417365971 podStartE2EDuration="3.417365971s" podCreationTimestamp="2026-01-22 05:49:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:49:47.413424666 +0000 UTC m=+235.250550039" watchObservedRunningTime="2026-01-22 05:49:47.417365971 +0000 UTC m=+235.254491324"
Jan 22 05:49:47 crc kubenswrapper[4933]: I0122 05:49:47.583240 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6b5dfcd9f-f8jd2"
Jan 22 05:49:48 crc kubenswrapper[4933]: I0122 05:49:48.374692 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wm42f"]
Jan 22 05:49:48 crc kubenswrapper[4933]: I0122 05:49:48.377519 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-wm42f" podUID="50afe38c-f6b1-422d-9abd-b3a62bc7c24d" containerName="registry-server" containerID="cri-o://28d7966e9e748d44d12e3c1e22d24651fe34c7d9f4237da2fc42be30652415e6" gracePeriod=2
Jan 22 05:49:48 crc kubenswrapper[4933]: I0122 05:49:48.498850 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f" path="/var/lib/kubelet/pods/a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f/volumes"
Jan 22 05:49:48 crc kubenswrapper[4933]: I0122 05:49:48.761160 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wm42f"
Jan 22 05:49:48 crc kubenswrapper[4933]: I0122 05:49:48.874584 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/50afe38c-f6b1-422d-9abd-b3a62bc7c24d-catalog-content\") pod \"50afe38c-f6b1-422d-9abd-b3a62bc7c24d\" (UID: \"50afe38c-f6b1-422d-9abd-b3a62bc7c24d\") "
Jan 22 05:49:48 crc kubenswrapper[4933]: I0122 05:49:48.874706 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-krh9x\" (UniqueName: \"kubernetes.io/projected/50afe38c-f6b1-422d-9abd-b3a62bc7c24d-kube-api-access-krh9x\") pod \"50afe38c-f6b1-422d-9abd-b3a62bc7c24d\" (UID: \"50afe38c-f6b1-422d-9abd-b3a62bc7c24d\") "
Jan 22 05:49:48 crc kubenswrapper[4933]: I0122 05:49:48.874757 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/50afe38c-f6b1-422d-9abd-b3a62bc7c24d-utilities\") pod \"50afe38c-f6b1-422d-9abd-b3a62bc7c24d\" (UID: \"50afe38c-f6b1-422d-9abd-b3a62bc7c24d\") "
Jan 22 05:49:48 crc kubenswrapper[4933]: I0122 05:49:48.875824 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/50afe38c-f6b1-422d-9abd-b3a62bc7c24d-utilities" (OuterVolumeSpecName: "utilities") pod "50afe38c-f6b1-422d-9abd-b3a62bc7c24d" (UID: "50afe38c-f6b1-422d-9abd-b3a62bc7c24d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
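
The pod_startup_latency_tracker entries above encode simple arithmetic: both pulling timestamps are the zero time (no image pull happened), so podStartE2EDuration is just watchObservedRunningTime minus podCreationTimestamp. A standard-library check of the 3.395564012s figure; the relationship is read off the printed fields of this entry, not taken from kubelet source:

package main

import (
	"fmt"
	"time"
)

// layout matches Go's time.Time String() form used in these log fields;
// the fractional seconds are optional, so one layout parses both values.
const layout = "2006-01-02 15:04:05.999999999 -0700 MST"

func main() {
	created, err := time.Parse(layout, "2026-01-22 05:49:44 +0000 UTC")
	if err != nil {
		panic(err)
	}
	observed, err := time.Parse(layout, "2026-01-22 05:49:47.395564012 +0000 UTC")
	if err != nil {
		panic(err)
	}
	fmt.Println(observed.Sub(created)) // prints 3.395564012s, matching podStartE2EDuration
}
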
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:49:48 crc kubenswrapper[4933]: I0122 05:49:48.882273 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/50afe38c-f6b1-422d-9abd-b3a62bc7c24d-kube-api-access-krh9x" (OuterVolumeSpecName: "kube-api-access-krh9x") pod "50afe38c-f6b1-422d-9abd-b3a62bc7c24d" (UID: "50afe38c-f6b1-422d-9abd-b3a62bc7c24d"). InnerVolumeSpecName "kube-api-access-krh9x". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:49:48 crc kubenswrapper[4933]: I0122 05:49:48.975906 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-krh9x\" (UniqueName: \"kubernetes.io/projected/50afe38c-f6b1-422d-9abd-b3a62bc7c24d-kube-api-access-krh9x\") on node \"crc\" DevicePath \"\"" Jan 22 05:49:48 crc kubenswrapper[4933]: I0122 05:49:48.975939 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/50afe38c-f6b1-422d-9abd-b3a62bc7c24d-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 05:49:49 crc kubenswrapper[4933]: I0122 05:49:49.023749 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/50afe38c-f6b1-422d-9abd-b3a62bc7c24d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "50afe38c-f6b1-422d-9abd-b3a62bc7c24d" (UID: "50afe38c-f6b1-422d-9abd-b3a62bc7c24d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:49:49 crc kubenswrapper[4933]: I0122 05:49:49.077666 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/50afe38c-f6b1-422d-9abd-b3a62bc7c24d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 05:49:49 crc kubenswrapper[4933]: I0122 05:49:49.120331 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-gj98r"] Jan 22 05:49:49 crc kubenswrapper[4933]: I0122 05:49:49.385829 4933 generic.go:334] "Generic (PLEG): container finished" podID="50afe38c-f6b1-422d-9abd-b3a62bc7c24d" containerID="28d7966e9e748d44d12e3c1e22d24651fe34c7d9f4237da2fc42be30652415e6" exitCode=0 Jan 22 05:49:49 crc kubenswrapper[4933]: I0122 05:49:49.385885 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wm42f" Jan 22 05:49:49 crc kubenswrapper[4933]: I0122 05:49:49.385894 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wm42f" event={"ID":"50afe38c-f6b1-422d-9abd-b3a62bc7c24d","Type":"ContainerDied","Data":"28d7966e9e748d44d12e3c1e22d24651fe34c7d9f4237da2fc42be30652415e6"} Jan 22 05:49:49 crc kubenswrapper[4933]: I0122 05:49:49.385960 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wm42f" event={"ID":"50afe38c-f6b1-422d-9abd-b3a62bc7c24d","Type":"ContainerDied","Data":"5a3cb1e62d9d35bb5507bb5712e6a7dc98881887f77af4464f63fd7550750c06"} Jan 22 05:49:49 crc kubenswrapper[4933]: I0122 05:49:49.385979 4933 scope.go:117] "RemoveContainer" containerID="28d7966e9e748d44d12e3c1e22d24651fe34c7d9f4237da2fc42be30652415e6" Jan 22 05:49:49 crc kubenswrapper[4933]: I0122 05:49:49.406008 4933 scope.go:117] "RemoveContainer" containerID="c9031d798e82a5fa3599fc702b438c2e8b1ffd3d3d8d8db2a3afe44acd20edc1" Jan 22 05:49:49 crc kubenswrapper[4933]: I0122 05:49:49.411992 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wm42f"] Jan 22 05:49:49 crc kubenswrapper[4933]: I0122 05:49:49.415159 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-wm42f"] Jan 22 05:49:49 crc kubenswrapper[4933]: I0122 05:49:49.432361 4933 scope.go:117] "RemoveContainer" containerID="db255203f63d962c2cb73363896ab0267c2546a1e5b74b89b4b6bc28e60c8145" Jan 22 05:49:49 crc kubenswrapper[4933]: I0122 05:49:49.453406 4933 scope.go:117] "RemoveContainer" containerID="28d7966e9e748d44d12e3c1e22d24651fe34c7d9f4237da2fc42be30652415e6" Jan 22 05:49:49 crc kubenswrapper[4933]: E0122 05:49:49.454347 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"28d7966e9e748d44d12e3c1e22d24651fe34c7d9f4237da2fc42be30652415e6\": container with ID starting with 28d7966e9e748d44d12e3c1e22d24651fe34c7d9f4237da2fc42be30652415e6 not found: ID does not exist" containerID="28d7966e9e748d44d12e3c1e22d24651fe34c7d9f4237da2fc42be30652415e6" Jan 22 05:49:49 crc kubenswrapper[4933]: I0122 05:49:49.454406 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28d7966e9e748d44d12e3c1e22d24651fe34c7d9f4237da2fc42be30652415e6"} err="failed to get container status \"28d7966e9e748d44d12e3c1e22d24651fe34c7d9f4237da2fc42be30652415e6\": rpc error: code = NotFound desc = could not find container \"28d7966e9e748d44d12e3c1e22d24651fe34c7d9f4237da2fc42be30652415e6\": container with ID starting with 28d7966e9e748d44d12e3c1e22d24651fe34c7d9f4237da2fc42be30652415e6 not found: ID does not exist" Jan 22 05:49:49 crc kubenswrapper[4933]: I0122 05:49:49.454432 4933 scope.go:117] "RemoveContainer" containerID="c9031d798e82a5fa3599fc702b438c2e8b1ffd3d3d8d8db2a3afe44acd20edc1" Jan 22 05:49:49 crc kubenswrapper[4933]: E0122 05:49:49.457407 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c9031d798e82a5fa3599fc702b438c2e8b1ffd3d3d8d8db2a3afe44acd20edc1\": container with ID starting with c9031d798e82a5fa3599fc702b438c2e8b1ffd3d3d8d8db2a3afe44acd20edc1 not found: ID does not exist" containerID="c9031d798e82a5fa3599fc702b438c2e8b1ffd3d3d8d8db2a3afe44acd20edc1" Jan 22 05:49:49 crc kubenswrapper[4933]: I0122 05:49:49.457453 4933 
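
The "Killing container with a grace period" entry above (gracePeriod=2) followed by a ContainerDied event with exitCode=0 about a second later shows a clean shutdown inside the grace window. A sketch of the TERM-then-KILL sequence behind that pattern, with an ordinary child process standing in for the container; in reality CRI-O, not the kubelet, delivers the signals:

package main

import (
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

// killWithGrace asks the process to stop, waits up to grace, then force-kills.
func killWithGrace(cmd *exec.Cmd, grace time.Duration) {
	_ = cmd.Process.Signal(syscall.SIGTERM) // the polite request
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()
	select {
	case <-done:
		fmt.Println("exited within the grace period")
	case <-time.After(grace):
		_ = cmd.Process.Kill() // grace expired: SIGKILL
		<-done
		fmt.Println("force-killed after the grace period")
	}
}

func main() {
	// Assumes a Unix-like system with a sleep binary on PATH.
	cmd := exec.Command("sleep", "30")
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	killWithGrace(cmd, 2*time.Second) // gracePeriod=2, as in the log
}
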
Jan 22 05:49:49 crc kubenswrapper[4933]: I0122 05:49:49.457481 4933 scope.go:117] "RemoveContainer" containerID="db255203f63d962c2cb73363896ab0267c2546a1e5b74b89b4b6bc28e60c8145"
Jan 22 05:49:49 crc kubenswrapper[4933]: E0122 05:49:49.459674 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"db255203f63d962c2cb73363896ab0267c2546a1e5b74b89b4b6bc28e60c8145\": container with ID starting with db255203f63d962c2cb73363896ab0267c2546a1e5b74b89b4b6bc28e60c8145 not found: ID does not exist" containerID="db255203f63d962c2cb73363896ab0267c2546a1e5b74b89b4b6bc28e60c8145"
Jan 22 05:49:49 crc kubenswrapper[4933]: I0122 05:49:49.459724 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db255203f63d962c2cb73363896ab0267c2546a1e5b74b89b4b6bc28e60c8145"} err="failed to get container status \"db255203f63d962c2cb73363896ab0267c2546a1e5b74b89b4b6bc28e60c8145\": rpc error: code = NotFound desc = could not find container \"db255203f63d962c2cb73363896ab0267c2546a1e5b74b89b4b6bc28e60c8145\": container with ID starting with db255203f63d962c2cb73363896ab0267c2546a1e5b74b89b4b6bc28e60c8145 not found: ID does not exist"
Jan 22 05:49:50 crc kubenswrapper[4933]: I0122 05:49:50.502721 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="50afe38c-f6b1-422d-9abd-b3a62bc7c24d" path="/var/lib/kubelet/pods/50afe38c-f6b1-422d-9abd-b3a62bc7c24d/volumes"
Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.813091 4933 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Jan 22 05:49:55 crc kubenswrapper[4933]: E0122 05:49:55.813879 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50afe38c-f6b1-422d-9abd-b3a62bc7c24d" containerName="extract-content"
Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.813896 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="50afe38c-f6b1-422d-9abd-b3a62bc7c24d" containerName="extract-content"
Jan 22 05:49:55 crc kubenswrapper[4933]: E0122 05:49:55.813909 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f" containerName="extract-content"
Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.813920 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f" containerName="extract-content"
Jan 22 05:49:55 crc kubenswrapper[4933]: E0122 05:49:55.813940 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f" containerName="registry-server"
Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.813949 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f" containerName="registry-server"
Jan 22 05:49:55 crc kubenswrapper[4933]: E0122 05:49:55.813961 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50afe38c-f6b1-422d-9abd-b3a62bc7c24d" containerName="registry-server"
Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.813968 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="50afe38c-f6b1-422d-9abd-b3a62bc7c24d" containerName="registry-server"
Jan 22 05:49:55 crc kubenswrapper[4933]: E0122 05:49:55.813981 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f" containerName="extract-utilities"
Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.813990 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f" containerName="extract-utilities"
Jan 22 05:49:55 crc kubenswrapper[4933]: E0122 05:49:55.814005 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50afe38c-f6b1-422d-9abd-b3a62bc7c24d" containerName="extract-utilities"
Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.814013 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="50afe38c-f6b1-422d-9abd-b3a62bc7c24d" containerName="extract-utilities"
Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.814154 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="a3b2d717-b4ae-4cc1-ba4b-48e4d426b60f" containerName="registry-server"
Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.814167 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="50afe38c-f6b1-422d-9abd-b3a62bc7c24d" containerName="registry-server"
Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.815656 4933 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.815924 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.816089 4933 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.816113 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb" gracePeriod=15
Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.816167 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9" gracePeriod=15
Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.816253 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164" gracePeriod=15
Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.816298 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f" gracePeriod=15
Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.816307 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336" gracePeriod=15
Jan 22 05:49:55 crc kubenswrapper[4933]: E0122 05:49:55.817118 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver"
Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.817138 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver"
Jan 22 05:49:55 crc kubenswrapper[4933]: E0122 05:49:55.817151 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup"
Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.817159 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup"
Jan 22 05:49:55 crc kubenswrapper[4933]: E0122 05:49:55.817171 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz"
Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.817210 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz"
Jan 22 05:49:55 crc kubenswrapper[4933]: E0122 05:49:55.817228 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer"
Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.817236 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer"
Jan 22 05:49:55 crc kubenswrapper[4933]: E0122 05:49:55.817254 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller"
Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.817292 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller"
Jan 22 05:49:55 crc kubenswrapper[4933]: E0122 05:49:55.817305 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.817315 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Jan 22 05:49:55 crc kubenswrapper[4933]: E0122 05:49:55.817329 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.817338 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.817537 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.817554 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller"
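
The cpu_manager/state_mem/memory_manager burst above is stale-state cleanup: resource assignments keyed by pod UID and container name are dropped once the owning pods (the deleted catalog pods and the old kube-apiserver static pod) no longer exist. A toy version of that reconciliation, assuming a flat map; the real managers keep CPUSets and NUMA affinity hints behind the same idea:

package main

import "fmt"

type key struct{ podUID, container string }

// removeStale drops every assignment whose pod is no longer active.
func removeStale(assignments map[key]string, activePods map[string]bool) {
	for k := range assignments {
		if !activePods[k.podUID] {
			fmt.Printf("RemoveStaleState: podUID=%q container=%q\n", k.podUID, k.container)
			delete(assignments, k) // deleting while ranging is safe in Go
		}
	}
}

func main() {
	a := map[key]string{
		{"50afe38c", "registry-server"}: "cpus 2-3",
		{"71bb4a3a", "kube-apiserver"}:  "cpus 0-1",
	}
	removeStale(a, map[string]bool{"71bb4a3a": true})
	fmt.Println(len(a), "assignment(s) left")
}
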
removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.817563 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.817575 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.817588 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.817631 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.832815 4933 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="f4b27818a5e8e43d0dc095d08835c792" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.863183 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.863312 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.863347 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.863440 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.863494 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.863539 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod 
\"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.863569 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.863605 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 05:49:55 crc kubenswrapper[4933]: E0122 05:49:55.887269 4933 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.163:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.964712 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.964777 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.964809 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.964824 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.964859 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.964899 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: 
\"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.964933 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.964950 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.965013 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.965050 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.965085 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.965108 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.965130 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.965151 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 05:49:55 crc kubenswrapper[4933]: I0122 05:49:55.965170 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 05:49:55 crc 
Jan 22 05:49:56 crc kubenswrapper[4933]: I0122 05:49:56.188476 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 22 05:49:56 crc kubenswrapper[4933]: E0122 05:49:56.218030 4933 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.163:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.188cf79a697dceac openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-22 05:49:56.216639148 +0000 UTC m=+244.053764501,LastTimestamp:2026-01-22 05:49:56.216639148 +0000 UTC m=+244.053764501,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 22 05:49:56 crc kubenswrapper[4933]: I0122 05:49:56.424315 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"dc1b2e82ef0aaf56ef5739bdaab72414ea11cccac1ef605bb000b33869479066"}
Jan 22 05:49:56 crc kubenswrapper[4933]: I0122 05:49:56.426783 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Jan 22 05:49:56 crc kubenswrapper[4933]: I0122 05:49:56.428025 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log"
Jan 22 05:49:56 crc kubenswrapper[4933]: I0122 05:49:56.428811 4933 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f" exitCode=0
Jan 22 05:49:56 crc kubenswrapper[4933]: I0122 05:49:56.428858 4933 scope.go:117] "RemoveContainer" containerID="73eca79133314de8b49710fc09b1c2d1e2ef89e908557146061dd96f37fd4b28"
Jan 22 05:49:56 crc kubenswrapper[4933]: I0122 05:49:56.428869 4933 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9" exitCode=0
Jan 22 05:49:56 crc kubenswrapper[4933]: I0122 05:49:56.428880 4933 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164" exitCode=0
Jan 22 05:49:56 crc kubenswrapper[4933]: I0122 05:49:56.428890 4933 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336" exitCode=2
Jan 22 05:49:56 crc kubenswrapper[4933]: I0122 05:49:56.430991 4933 generic.go:334] "Generic (PLEG): container finished" podID="520b2813-3aaf-46c7-9592-f8a8be0f9348" containerID="235bfc9f43ca57736b6d0f8b326f71afe935d8e82f1ec74f6f22f78ac099f3e8" exitCode=0
Jan 22 05:49:56 crc kubenswrapper[4933]: I0122 05:49:56.431039 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"520b2813-3aaf-46c7-9592-f8a8be0f9348","Type":"ContainerDied","Data":"235bfc9f43ca57736b6d0f8b326f71afe935d8e82f1ec74f6f22f78ac099f3e8"}
Jan 22 05:49:56 crc kubenswrapper[4933]: I0122 05:49:56.431857 4933 status_manager.go:851] "Failed to get status for pod" podUID="520b2813-3aaf-46c7-9592-f8a8be0f9348" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.163:6443: connect: connection refused"
Jan 22 05:49:57 crc kubenswrapper[4933]: I0122 05:49:57.438929 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"316e39fa653004d010cfb005d9eb456c2d844660f5a292f6bba64a8d5e75b4b6"}
Jan 22 05:49:57 crc kubenswrapper[4933]: I0122 05:49:57.439830 4933 status_manager.go:851] "Failed to get status for pod" podUID="520b2813-3aaf-46c7-9592-f8a8be0f9348" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.163:6443: connect: connection refused"
Jan 22 05:49:57 crc kubenswrapper[4933]: E0122 05:49:57.440016 4933 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.163:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 22 05:49:57 crc kubenswrapper[4933]: I0122 05:49:57.444952 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log"
Jan 22 05:49:57 crc kubenswrapper[4933]: I0122 05:49:57.818552 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
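
From here on, every "connection refused" against api-int.crc.testing:6443 has the same root cause: the static kube-apiserver pod is being rolled, so status updates, mirror-pod creation, and event writes all fail until it is back, and each caller simply retries on its next sync. A sketch of the retry-with-backoff shape such callers rely on; post is a hypothetical stand-in for the API call, not client-go:

package main

import (
	"errors"
	"fmt"
	"time"
)

// withBackoff retries post with exponentially growing delays, then gives up
// and leaves the work to a later sync pass, as the kubelet does.
func withBackoff(attempts int, initial time.Duration, post func() error) error {
	delay := initial
	for i := 1; i <= attempts; i++ {
		if err := post(); err == nil {
			return nil
		} else {
			fmt.Printf("attempt %d: %v (retrying in %v)\n", i, err, delay)
		}
		time.Sleep(delay)
		delay *= 2
	}
	return errors.New("gave up; a later sync will try again")
}

func main() {
	refused := func() error {
		return errors.New("dial tcp 38.102.83.163:6443: connect: connection refused")
	}
	_ = withBackoff(3, 100*time.Millisecond, refused)
}
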
Jan 22 05:49:57 crc kubenswrapper[4933]: I0122 05:49:57.819460 4933 status_manager.go:851] "Failed to get status for pod" podUID="520b2813-3aaf-46c7-9592-f8a8be0f9348" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.163:6443: connect: connection refused"
Jan 22 05:49:57 crc kubenswrapper[4933]: I0122 05:49:57.996067 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/520b2813-3aaf-46c7-9592-f8a8be0f9348-var-lock\") pod \"520b2813-3aaf-46c7-9592-f8a8be0f9348\" (UID: \"520b2813-3aaf-46c7-9592-f8a8be0f9348\") "
Jan 22 05:49:57 crc kubenswrapper[4933]: I0122 05:49:57.996242 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/520b2813-3aaf-46c7-9592-f8a8be0f9348-var-lock" (OuterVolumeSpecName: "var-lock") pod "520b2813-3aaf-46c7-9592-f8a8be0f9348" (UID: "520b2813-3aaf-46c7-9592-f8a8be0f9348"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 22 05:49:57 crc kubenswrapper[4933]: I0122 05:49:57.996517 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/520b2813-3aaf-46c7-9592-f8a8be0f9348-kube-api-access\") pod \"520b2813-3aaf-46c7-9592-f8a8be0f9348\" (UID: \"520b2813-3aaf-46c7-9592-f8a8be0f9348\") "
Jan 22 05:49:57 crc kubenswrapper[4933]: I0122 05:49:57.996662 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/520b2813-3aaf-46c7-9592-f8a8be0f9348-kubelet-dir\") pod \"520b2813-3aaf-46c7-9592-f8a8be0f9348\" (UID: \"520b2813-3aaf-46c7-9592-f8a8be0f9348\") "
Jan 22 05:49:57 crc kubenswrapper[4933]: I0122 05:49:57.996840 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/520b2813-3aaf-46c7-9592-f8a8be0f9348-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "520b2813-3aaf-46c7-9592-f8a8be0f9348" (UID: "520b2813-3aaf-46c7-9592-f8a8be0f9348"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 22 05:49:57 crc kubenswrapper[4933]: I0122 05:49:57.997270 4933 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/520b2813-3aaf-46c7-9592-f8a8be0f9348-var-lock\") on node \"crc\" DevicePath \"\""
Jan 22 05:49:57 crc kubenswrapper[4933]: I0122 05:49:57.997296 4933 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/520b2813-3aaf-46c7-9592-f8a8be0f9348-kubelet-dir\") on node \"crc\" DevicePath \"\""
Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.005384 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/520b2813-3aaf-46c7-9592-f8a8be0f9348-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "520b2813-3aaf-46c7-9592-f8a8be0f9348" (UID: "520b2813-3aaf-46c7-9592-f8a8be0f9348"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.098610 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/520b2813-3aaf-46c7-9592-f8a8be0f9348-kube-api-access\") on node \"crc\" DevicePath \"\""
Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.184087 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log"
Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.185292 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.186029 4933 status_manager.go:851] "Failed to get status for pod" podUID="520b2813-3aaf-46c7-9592-f8a8be0f9348" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.163:6443: connect: connection refused"
Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.186718 4933 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.163:6443: connect: connection refused"
Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.301780 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") "
Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.301901 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") "
Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.301970 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.301981 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.302019 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.302042 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.302315 4933 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.302333 4933 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.302345 4933 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.455798 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.456670 4933 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb" exitCode=0 Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.456768 4933 scope.go:117] "RemoveContainer" containerID="bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f" Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.456790 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.459295 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"520b2813-3aaf-46c7-9592-f8a8be0f9348","Type":"ContainerDied","Data":"b42b6bf777c6b4f4420e7a693f18b8d4e6b2000ef07802fe62b727b59198555c"} Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.459358 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b42b6bf777c6b4f4420e7a693f18b8d4e6b2000ef07802fe62b727b59198555c" Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.459505 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 22 05:49:58 crc kubenswrapper[4933]: E0122 05:49:58.460421 4933 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.163:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.476351 4933 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.163:6443: connect: connection refused" Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.476792 4933 status_manager.go:851] "Failed to get status for pod" podUID="520b2813-3aaf-46c7-9592-f8a8be0f9348" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.163:6443: connect: connection refused" Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.477925 4933 scope.go:117] "RemoveContainer" containerID="d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9" Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.484836 4933 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.163:6443: connect: connection refused" Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.485158 4933 status_manager.go:851] "Failed to get status for pod" podUID="520b2813-3aaf-46c7-9592-f8a8be0f9348" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.163:6443: connect: connection refused" Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.490253 4933 scope.go:117] "RemoveContainer" containerID="9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164" Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.498956 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.506172 4933 scope.go:117] "RemoveContainer" containerID="237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336" Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.520744 4933 scope.go:117] "RemoveContainer" containerID="d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb" Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.537538 4933 scope.go:117] "RemoveContainer" containerID="fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d" Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.550828 4933 scope.go:117] "RemoveContainer" containerID="bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f" Jan 22 05:49:58 crc kubenswrapper[4933]: E0122 05:49:58.551651 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f\": container with ID starting 
Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.551681 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f"} err="failed to get container status \"bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f\": rpc error: code = NotFound desc = could not find container \"bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f\": container with ID starting with bcce185dd1fd75905aa111698d6ed3136b30b3a0b30ac04ac1111971fe0e244f not found: ID does not exist"
Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.551702 4933 scope.go:117] "RemoveContainer" containerID="d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9"
Jan 22 05:49:58 crc kubenswrapper[4933]: E0122 05:49:58.552070 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9\": container with ID starting with d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9 not found: ID does not exist" containerID="d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9"
Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.552148 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9"} err="failed to get container status \"d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9\": rpc error: code = NotFound desc = could not find container \"d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9\": container with ID starting with d1687c5b61e3ff586591b865ebd418e3fe9a80588d37ae42ab10fc0162b0ece9 not found: ID does not exist"
Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.552194 4933 scope.go:117] "RemoveContainer" containerID="9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164"
Jan 22 05:49:58 crc kubenswrapper[4933]: E0122 05:49:58.552763 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164\": container with ID starting with 9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164 not found: ID does not exist" containerID="9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164"
Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.552790 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164"} err="failed to get container status \"9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164\": rpc error: code = NotFound desc = could not find container \"9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164\": container with ID starting with 9ee790c15def6381ecd9357f0a99ec8cf92dd6f307f147858b1f716310e77164 not found: ID does not exist"
Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.552806 4933 scope.go:117] "RemoveContainer" containerID="237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336"
Jan 22 05:49:58 crc kubenswrapper[4933]: E0122 05:49:58.553099 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336\": container with ID starting with 237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336 not found: ID does not exist" containerID="237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336"
Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.553120 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336"} err="failed to get container status \"237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336\": rpc error: code = NotFound desc = could not find container \"237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336\": container with ID starting with 237d9efe96b03428ceb1f9f5f9b75e5a6a526d120cc3a57e14a315c5411f3336 not found: ID does not exist"
Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.553133 4933 scope.go:117] "RemoveContainer" containerID="d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb"
Jan 22 05:49:58 crc kubenswrapper[4933]: E0122 05:49:58.553380 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb\": container with ID starting with d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb not found: ID does not exist" containerID="d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb"
Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.553414 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb"} err="failed to get container status \"d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb\": rpc error: code = NotFound desc = could not find container \"d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb\": container with ID starting with d989d858ac90be4f934c9fb9736c06706602589ed2d5e6623013d137719033cb not found: ID does not exist"
Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.553432 4933 scope.go:117] "RemoveContainer" containerID="fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d"
Jan 22 05:49:58 crc kubenswrapper[4933]: E0122 05:49:58.553683 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\": container with ID starting with fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d not found: ID does not exist" containerID="fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d"
Jan 22 05:49:58 crc kubenswrapper[4933]: I0122 05:49:58.553719 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d"} err="failed to get container status \"fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\": rpc error: code = NotFound desc = could not find container \"fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d\": container with ID starting with fac7eb61a19729df0d7e0bf2652c59fee669fba82d3a78aa55de3e9f7cd8483d not found: ID does not exist"
Jan 22 05:50:02 crc kubenswrapper[4933]: I0122 05:50:02.518373 4933 status_manager.go:851] "Failed to get status for pod" podUID="520b2813-3aaf-46c7-9592-f8a8be0f9348" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.163:6443: connect: connection refused"
podUID="520b2813-3aaf-46c7-9592-f8a8be0f9348" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.163:6443: connect: connection refused" Jan 22 05:50:04 crc kubenswrapper[4933]: E0122 05:50:04.512019 4933 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.163:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.188cf79a697dceac openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-22 05:49:56.216639148 +0000 UTC m=+244.053764501,LastTimestamp:2026-01-22 05:49:56.216639148 +0000 UTC m=+244.053764501,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 22 05:50:05 crc kubenswrapper[4933]: E0122 05:50:05.518449 4933 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.163:6443: connect: connection refused" Jan 22 05:50:05 crc kubenswrapper[4933]: E0122 05:50:05.519142 4933 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.163:6443: connect: connection refused" Jan 22 05:50:05 crc kubenswrapper[4933]: E0122 05:50:05.519699 4933 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.163:6443: connect: connection refused" Jan 22 05:50:05 crc kubenswrapper[4933]: E0122 05:50:05.520122 4933 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.163:6443: connect: connection refused" Jan 22 05:50:05 crc kubenswrapper[4933]: E0122 05:50:05.520514 4933 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.163:6443: connect: connection refused" Jan 22 05:50:05 crc kubenswrapper[4933]: I0122 05:50:05.520560 4933 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Jan 22 05:50:05 crc kubenswrapper[4933]: E0122 05:50:05.520897 4933 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.163:6443: connect: connection refused" interval="200ms" Jan 22 05:50:05 crc kubenswrapper[4933]: E0122 05:50:05.722166 4933 
controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.163:6443: connect: connection refused" interval="400ms" Jan 22 05:50:06 crc kubenswrapper[4933]: E0122 05:50:06.122689 4933 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.163:6443: connect: connection refused" interval="800ms" Jan 22 05:50:06 crc kubenswrapper[4933]: E0122 05:50:06.929716 4933 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.163:6443: connect: connection refused" interval="1.6s" Jan 22 05:50:07 crc kubenswrapper[4933]: I0122 05:50:07.490188 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:50:07 crc kubenswrapper[4933]: I0122 05:50:07.491133 4933 status_manager.go:851] "Failed to get status for pod" podUID="520b2813-3aaf-46c7-9592-f8a8be0f9348" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.163:6443: connect: connection refused" Jan 22 05:50:07 crc kubenswrapper[4933]: I0122 05:50:07.510361 4933 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd" Jan 22 05:50:07 crc kubenswrapper[4933]: I0122 05:50:07.510398 4933 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd" Jan 22 05:50:07 crc kubenswrapper[4933]: E0122 05:50:07.510851 4933 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.163:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:50:07 crc kubenswrapper[4933]: I0122 05:50:07.511491 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:50:07 crc kubenswrapper[4933]: W0122 05:50:07.531317 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-0f604c7138894681567aeb193dabba398e13d2499536e1476510eccb0674cb24 WatchSource:0}: Error finding container 0f604c7138894681567aeb193dabba398e13d2499536e1476510eccb0674cb24: Status 404 returned error can't find the container with id 0f604c7138894681567aeb193dabba398e13d2499536e1476510eccb0674cb24 Jan 22 05:50:07 crc kubenswrapper[4933]: I0122 05:50:07.549836 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"0f604c7138894681567aeb193dabba398e13d2499536e1476510eccb0674cb24"} Jan 22 05:50:08 crc kubenswrapper[4933]: E0122 05:50:08.531169 4933 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.163:6443: connect: connection refused" interval="3.2s" Jan 22 05:50:08 crc kubenswrapper[4933]: I0122 05:50:08.558895 4933 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="9a766f6544b604880114e84c6667250cad0779aedaa24d6b973fc115c9809d21" exitCode=0 Jan 22 05:50:08 crc kubenswrapper[4933]: I0122 05:50:08.559023 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"9a766f6544b604880114e84c6667250cad0779aedaa24d6b973fc115c9809d21"} Jan 22 05:50:08 crc kubenswrapper[4933]: I0122 05:50:08.559455 4933 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd" Jan 22 05:50:08 crc kubenswrapper[4933]: I0122 05:50:08.559501 4933 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd" Jan 22 05:50:08 crc kubenswrapper[4933]: E0122 05:50:08.560414 4933 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.163:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:50:08 crc kubenswrapper[4933]: I0122 05:50:08.562687 4933 status_manager.go:851] "Failed to get status for pod" podUID="520b2813-3aaf-46c7-9592-f8a8be0f9348" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.163:6443: connect: connection refused" Jan 22 05:50:08 crc kubenswrapper[4933]: I0122 05:50:08.564120 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 22 05:50:08 crc kubenswrapper[4933]: I0122 05:50:08.564232 4933 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="9ad10cfd00a2bd6dab0704baa3988eab6b7b97812ef8a6074b9bc2c2a03df731" exitCode=1 Jan 22 05:50:08 crc kubenswrapper[4933]: I0122 05:50:08.564280 4933 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"9ad10cfd00a2bd6dab0704baa3988eab6b7b97812ef8a6074b9bc2c2a03df731"} Jan 22 05:50:08 crc kubenswrapper[4933]: I0122 05:50:08.565597 4933 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.163:6443: connect: connection refused" Jan 22 05:50:08 crc kubenswrapper[4933]: I0122 05:50:08.566375 4933 status_manager.go:851] "Failed to get status for pod" podUID="520b2813-3aaf-46c7-9592-f8a8be0f9348" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.163:6443: connect: connection refused" Jan 22 05:50:08 crc kubenswrapper[4933]: I0122 05:50:08.566901 4933 scope.go:117] "RemoveContainer" containerID="9ad10cfd00a2bd6dab0704baa3988eab6b7b97812ef8a6074b9bc2c2a03df731" Jan 22 05:50:09 crc kubenswrapper[4933]: I0122 05:50:09.576317 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 22 05:50:09 crc kubenswrapper[4933]: I0122 05:50:09.576897 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"2016ada3abe65d0d2e2b43d0dc4a7e0adee88a44813328c7a96da4572936508d"} Jan 22 05:50:09 crc kubenswrapper[4933]: I0122 05:50:09.582554 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"08d509c3c8fb7e241bcd51bc10b992fcd3921842ccaf99622468a6cda86bfa65"} Jan 22 05:50:09 crc kubenswrapper[4933]: I0122 05:50:09.582594 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"555769388dcf8f69efe94bb3fe4298e5172b8b9edf6918ebbcb8cddb9fbf666d"} Jan 22 05:50:09 crc kubenswrapper[4933]: I0122 05:50:09.582603 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"996c382a58d69e77afb65a8df634c0d075eee1fcc6b33c2fc52587de7a8d2e09"} Jan 22 05:50:09 crc kubenswrapper[4933]: I0122 05:50:09.582612 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"2c26facef6eaad7e4249f8e67e4693e94fb46e8edcb43b5dc2a4145f32b171af"} Jan 22 05:50:10 crc kubenswrapper[4933]: I0122 05:50:10.590800 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"94c94fda20f5c273b13d4eab4a200c07f75c218b0f8e161f740dd4df9ea54314"} Jan 22 05:50:10 crc kubenswrapper[4933]: I0122 05:50:10.591036 4933 kubelet.go:1909] "Trying to delete pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd" Jan 22 05:50:10 crc kubenswrapper[4933]: I0122 05:50:10.591051 4933 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd" Jan 22 05:50:10 crc kubenswrapper[4933]: I0122 05:50:10.591242 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:50:12 crc kubenswrapper[4933]: I0122 05:50:12.511769 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:50:12 crc kubenswrapper[4933]: I0122 05:50:12.512278 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:50:12 crc kubenswrapper[4933]: I0122 05:50:12.521211 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.144173 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-gj98r" podUID="2c4864a0-5981-4eef-a0db-c33a535e02de" containerName="oauth-openshift" containerID="cri-o://e6931cc142b2e1c5f3a3175149720060774695b264fe43588a371489467574bf" gracePeriod=15 Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.607675 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-gj98r" Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.614024 4933 generic.go:334] "Generic (PLEG): container finished" podID="2c4864a0-5981-4eef-a0db-c33a535e02de" containerID="e6931cc142b2e1c5f3a3175149720060774695b264fe43588a371489467574bf" exitCode=0 Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.614053 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-gj98r" event={"ID":"2c4864a0-5981-4eef-a0db-c33a535e02de","Type":"ContainerDied","Data":"e6931cc142b2e1c5f3a3175149720060774695b264fe43588a371489467574bf"} Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.614089 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-gj98r" event={"ID":"2c4864a0-5981-4eef-a0db-c33a535e02de","Type":"ContainerDied","Data":"9afbe5332bbb521deaa995917ae79f51154ec419d0ef122d54e9879f4a01b79b"} Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.614104 4933 scope.go:117] "RemoveContainer" containerID="e6931cc142b2e1c5f3a3175149720060774695b264fe43588a371489467574bf" Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.614177 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-gj98r" Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.630106 4933 scope.go:117] "RemoveContainer" containerID="e6931cc142b2e1c5f3a3175149720060774695b264fe43588a371489467574bf" Jan 22 05:50:14 crc kubenswrapper[4933]: E0122 05:50:14.630508 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e6931cc142b2e1c5f3a3175149720060774695b264fe43588a371489467574bf\": container with ID starting with e6931cc142b2e1c5f3a3175149720060774695b264fe43588a371489467574bf not found: ID does not exist" containerID="e6931cc142b2e1c5f3a3175149720060774695b264fe43588a371489467574bf" Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.630565 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e6931cc142b2e1c5f3a3175149720060774695b264fe43588a371489467574bf"} err="failed to get container status \"e6931cc142b2e1c5f3a3175149720060774695b264fe43588a371489467574bf\": rpc error: code = NotFound desc = could not find container \"e6931cc142b2e1c5f3a3175149720060774695b264fe43588a371489467574bf\": container with ID starting with e6931cc142b2e1c5f3a3175149720060774695b264fe43588a371489467574bf not found: ID does not exist" Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.631932 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k9c2p\" (UniqueName: \"kubernetes.io/projected/2c4864a0-5981-4eef-a0db-c33a535e02de-kube-api-access-k9c2p\") pod \"2c4864a0-5981-4eef-a0db-c33a535e02de\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.631982 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-user-idp-0-file-data\") pod \"2c4864a0-5981-4eef-a0db-c33a535e02de\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.632006 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/2c4864a0-5981-4eef-a0db-c33a535e02de-audit-dir\") pod \"2c4864a0-5981-4eef-a0db-c33a535e02de\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.632033 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-session\") pod \"2c4864a0-5981-4eef-a0db-c33a535e02de\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.632130 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-trusted-ca-bundle\") pod \"2c4864a0-5981-4eef-a0db-c33a535e02de\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.632174 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/2c4864a0-5981-4eef-a0db-c33a535e02de-audit-policies\") pod \"2c4864a0-5981-4eef-a0db-c33a535e02de\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " Jan 
22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.632204 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-user-template-error\") pod \"2c4864a0-5981-4eef-a0db-c33a535e02de\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.632257 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-service-ca\") pod \"2c4864a0-5981-4eef-a0db-c33a535e02de\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.632296 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-ocp-branding-template\") pod \"2c4864a0-5981-4eef-a0db-c33a535e02de\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.632329 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-serving-cert\") pod \"2c4864a0-5981-4eef-a0db-c33a535e02de\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.632366 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-cliconfig\") pod \"2c4864a0-5981-4eef-a0db-c33a535e02de\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.632397 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-router-certs\") pod \"2c4864a0-5981-4eef-a0db-c33a535e02de\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.632424 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-user-template-provider-selection\") pod \"2c4864a0-5981-4eef-a0db-c33a535e02de\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.632462 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-user-template-login\") pod \"2c4864a0-5981-4eef-a0db-c33a535e02de\" (UID: \"2c4864a0-5981-4eef-a0db-c33a535e02de\") " Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.633514 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2c4864a0-5981-4eef-a0db-c33a535e02de-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "2c4864a0-5981-4eef-a0db-c33a535e02de" (UID: "2c4864a0-5981-4eef-a0db-c33a535e02de"). InnerVolumeSpecName "audit-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.633529 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "2c4864a0-5981-4eef-a0db-c33a535e02de" (UID: "2c4864a0-5981-4eef-a0db-c33a535e02de"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.633668 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "2c4864a0-5981-4eef-a0db-c33a535e02de" (UID: "2c4864a0-5981-4eef-a0db-c33a535e02de"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.633735 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c4864a0-5981-4eef-a0db-c33a535e02de-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "2c4864a0-5981-4eef-a0db-c33a535e02de" (UID: "2c4864a0-5981-4eef-a0db-c33a535e02de"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.633760 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "2c4864a0-5981-4eef-a0db-c33a535e02de" (UID: "2c4864a0-5981-4eef-a0db-c33a535e02de"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.638508 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "2c4864a0-5981-4eef-a0db-c33a535e02de" (UID: "2c4864a0-5981-4eef-a0db-c33a535e02de"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.643299 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "2c4864a0-5981-4eef-a0db-c33a535e02de" (UID: "2c4864a0-5981-4eef-a0db-c33a535e02de"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.643822 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "2c4864a0-5981-4eef-a0db-c33a535e02de" (UID: "2c4864a0-5981-4eef-a0db-c33a535e02de"). InnerVolumeSpecName "v4-0-config-system-router-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.643927 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c4864a0-5981-4eef-a0db-c33a535e02de-kube-api-access-k9c2p" (OuterVolumeSpecName: "kube-api-access-k9c2p") pod "2c4864a0-5981-4eef-a0db-c33a535e02de" (UID: "2c4864a0-5981-4eef-a0db-c33a535e02de"). InnerVolumeSpecName "kube-api-access-k9c2p". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.643971 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "2c4864a0-5981-4eef-a0db-c33a535e02de" (UID: "2c4864a0-5981-4eef-a0db-c33a535e02de"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.644118 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "2c4864a0-5981-4eef-a0db-c33a535e02de" (UID: "2c4864a0-5981-4eef-a0db-c33a535e02de"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.644499 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "2c4864a0-5981-4eef-a0db-c33a535e02de" (UID: "2c4864a0-5981-4eef-a0db-c33a535e02de"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.644643 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "2c4864a0-5981-4eef-a0db-c33a535e02de" (UID: "2c4864a0-5981-4eef-a0db-c33a535e02de"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.644817 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "2c4864a0-5981-4eef-a0db-c33a535e02de" (UID: "2c4864a0-5981-4eef-a0db-c33a535e02de"). InnerVolumeSpecName "v4-0-config-system-session". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.741009 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k9c2p\" (UniqueName: \"kubernetes.io/projected/2c4864a0-5981-4eef-a0db-c33a535e02de-kube-api-access-k9c2p\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.741050 4933 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.741064 4933 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.741103 4933 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/2c4864a0-5981-4eef-a0db-c33a535e02de-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.741120 4933 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.741133 4933 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/2c4864a0-5981-4eef-a0db-c33a535e02de-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.741145 4933 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.741159 4933 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.741172 4933 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.741185 4933 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.741197 4933 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.741209 4933 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-system-router-certs\") on node \"crc\" 
DevicePath \"\"" Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.741221 4933 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:14 crc kubenswrapper[4933]: I0122 05:50:14.741234 4933 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/2c4864a0-5981-4eef-a0db-c33a535e02de-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:15 crc kubenswrapper[4933]: E0122 05:50:15.041176 4933 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2c4864a0_5981_4eef_a0db_c33a535e02de.slice/crio-9afbe5332bbb521deaa995917ae79f51154ec419d0ef122d54e9879f4a01b79b\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2c4864a0_5981_4eef_a0db_c33a535e02de.slice\": RecentStats: unable to find data in memory cache]" Jan 22 05:50:15 crc kubenswrapper[4933]: I0122 05:50:15.598297 4933 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:50:15 crc kubenswrapper[4933]: I0122 05:50:15.621801 4933 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd" Jan 22 05:50:15 crc kubenswrapper[4933]: I0122 05:50:15.621834 4933 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd" Jan 22 05:50:15 crc kubenswrapper[4933]: I0122 05:50:15.626177 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:50:15 crc kubenswrapper[4933]: I0122 05:50:15.628036 4933 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="6f50e5a0-0dab-4c43-afaf-e70236b8e986" Jan 22 05:50:16 crc kubenswrapper[4933]: I0122 05:50:16.626123 4933 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd" Jan 22 05:50:16 crc kubenswrapper[4933]: I0122 05:50:16.626456 4933 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd" Jan 22 05:50:17 crc kubenswrapper[4933]: I0122 05:50:17.064140 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 05:50:17 crc kubenswrapper[4933]: I0122 05:50:17.631412 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 05:50:17 crc kubenswrapper[4933]: I0122 05:50:17.638450 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 05:50:18 crc kubenswrapper[4933]: I0122 05:50:18.649035 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 05:50:22 crc 
kubenswrapper[4933]: E0122 05:50:22.519590 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-cqllr], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:50:22 crc kubenswrapper[4933]: I0122 05:50:22.533353 4933 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="6f50e5a0-0dab-4c43-afaf-e70236b8e986" Jan 22 05:50:22 crc kubenswrapper[4933]: E0122 05:50:22.544308 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[networking-console-plugin-cert nginx-conf], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:50:22 crc kubenswrapper[4933]: E0122 05:50:22.553417 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-s2dwl], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:50:22 crc kubenswrapper[4933]: I0122 05:50:22.643660 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:50:22 crc kubenswrapper[4933]: I0122 05:50:22.643746 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:50:22 crc kubenswrapper[4933]: I0122 05:50:22.645114 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:50:22 crc kubenswrapper[4933]: I0122 05:50:22.667470 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:50:22 crc kubenswrapper[4933]: I0122 05:50:22.947506 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:50:22 crc kubenswrapper[4933]: I0122 05:50:22.954793 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:50:23 crc kubenswrapper[4933]: I0122 05:50:23.048436 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:50:23 crc kubenswrapper[4933]: I0122 05:50:23.052679 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:50:25 crc kubenswrapper[4933]: I0122 05:50:25.643910 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Jan 22 05:50:25 crc kubenswrapper[4933]: I0122 05:50:25.946580 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Jan 22 05:50:26 crc kubenswrapper[4933]: I0122 05:50:26.275051 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Jan 22 05:50:26 crc kubenswrapper[4933]: I0122 05:50:26.716287 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 22 05:50:26 crc kubenswrapper[4933]: I0122 05:50:26.870842 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Jan 22 05:50:26 crc kubenswrapper[4933]: I0122 05:50:26.970247 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Jan 22 05:50:27 crc kubenswrapper[4933]: I0122 05:50:27.321190 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 22 05:50:27 crc kubenswrapper[4933]: I0122 05:50:27.360987 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Jan 22 05:50:27 crc kubenswrapper[4933]: I0122 05:50:27.582790 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Jan 22 05:50:27 crc kubenswrapper[4933]: I0122 05:50:27.712630 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 22 05:50:27 crc kubenswrapper[4933]: I0122 05:50:27.716669 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Jan 22 05:50:27 crc kubenswrapper[4933]: I0122 05:50:27.828721 4933 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-ingress"/"kube-root-ca.crt" Jan 22 05:50:27 crc kubenswrapper[4933]: I0122 05:50:27.853230 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Jan 22 05:50:28 crc kubenswrapper[4933]: I0122 05:50:28.313060 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Jan 22 05:50:28 crc kubenswrapper[4933]: I0122 05:50:28.370066 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Jan 22 05:50:28 crc kubenswrapper[4933]: I0122 05:50:28.389798 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Jan 22 05:50:28 crc kubenswrapper[4933]: I0122 05:50:28.421325 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Jan 22 05:50:28 crc kubenswrapper[4933]: I0122 05:50:28.457278 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Jan 22 05:50:28 crc kubenswrapper[4933]: I0122 05:50:28.473318 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 22 05:50:28 crc kubenswrapper[4933]: I0122 05:50:28.579915 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Jan 22 05:50:28 crc kubenswrapper[4933]: I0122 05:50:28.586531 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Jan 22 05:50:28 crc kubenswrapper[4933]: I0122 05:50:28.925581 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Jan 22 05:50:28 crc kubenswrapper[4933]: I0122 05:50:28.945122 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 22 05:50:28 crc kubenswrapper[4933]: I0122 05:50:28.946663 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Jan 22 05:50:29 crc kubenswrapper[4933]: I0122 05:50:29.269933 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Jan 22 05:50:29 crc kubenswrapper[4933]: I0122 05:50:29.293819 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Jan 22 05:50:29 crc kubenswrapper[4933]: I0122 05:50:29.375983 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Jan 22 05:50:29 crc kubenswrapper[4933]: I0122 05:50:29.522217 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 22 05:50:29 crc kubenswrapper[4933]: I0122 05:50:29.568966 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Jan 22 05:50:29 crc kubenswrapper[4933]: I0122 05:50:29.584487 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Jan 22 05:50:29 crc kubenswrapper[4933]: I0122 05:50:29.635139 4933 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt"
Jan 22 05:50:29 crc kubenswrapper[4933]: I0122 05:50:29.661999 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4"
Jan 22 05:50:29 crc kubenswrapper[4933]: I0122 05:50:29.713944 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Jan 22 05:50:29 crc kubenswrapper[4933]: I0122 05:50:29.980361 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default"
Jan 22 05:50:29 crc kubenswrapper[4933]: I0122 05:50:29.982719 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert"
Jan 22 05:50:30 crc kubenswrapper[4933]: I0122 05:50:30.017546 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Jan 22 05:50:30 crc kubenswrapper[4933]: I0122 05:50:30.055570 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt"
Jan 22 05:50:30 crc kubenswrapper[4933]: I0122 05:50:30.101144 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt"
Jan 22 05:50:30 crc kubenswrapper[4933]: I0122 05:50:30.124606 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt"
Jan 22 05:50:30 crc kubenswrapper[4933]: I0122 05:50:30.148019 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert"
Jan 22 05:50:30 crc kubenswrapper[4933]: I0122 05:50:30.268030 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert"
Jan 22 05:50:30 crc kubenswrapper[4933]: I0122 05:50:30.285457 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client"
Jan 22 05:50:30 crc kubenswrapper[4933]: I0122 05:50:30.316044 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config"
Jan 22 05:50:30 crc kubenswrapper[4933]: I0122 05:50:30.407300 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt"
Jan 22 05:50:30 crc kubenswrapper[4933]: I0122 05:50:30.441817 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca"
Jan 22 05:50:30 crc kubenswrapper[4933]: I0122 05:50:30.591741 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w"
Jan 22 05:50:30 crc kubenswrapper[4933]: I0122 05:50:30.645616 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default"
Jan 22 05:50:30 crc kubenswrapper[4933]: I0122 05:50:30.764162 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt"
Jan 22 05:50:30 crc kubenswrapper[4933]: I0122 05:50:30.843116 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt"
Jan 22 05:50:30 crc kubenswrapper[4933]: I0122 05:50:30.899643 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config"
Jan 22 05:50:31 crc kubenswrapper[4933]: I0122 05:50:31.023580 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt"
Jan 22 05:50:31 crc kubenswrapper[4933]: I0122 05:50:31.095052 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt"
Jan 22 05:50:31 crc kubenswrapper[4933]: I0122 05:50:31.143569 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt"
Jan 22 05:50:31 crc kubenswrapper[4933]: I0122 05:50:31.310740 4933 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Jan 22 05:50:31 crc kubenswrapper[4933]: I0122 05:50:31.323553 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Jan 22 05:50:31 crc kubenswrapper[4933]: I0122 05:50:31.403989 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Jan 22 05:50:31 crc kubenswrapper[4933]: I0122 05:50:31.640721 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config"
Jan 22 05:50:31 crc kubenswrapper[4933]: I0122 05:50:31.849731 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt"
Jan 22 05:50:32 crc kubenswrapper[4933]: I0122 05:50:32.096974 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert"
Jan 22 05:50:32 crc kubenswrapper[4933]: I0122 05:50:32.137018 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx"
Jan 22 05:50:32 crc kubenswrapper[4933]: I0122 05:50:32.148623 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c"
Jan 22 05:50:32 crc kubenswrapper[4933]: I0122 05:50:32.296508 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz"
Jan 22 05:50:32 crc kubenswrapper[4933]: I0122 05:50:32.350212 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert"
Jan 22 05:50:32 crc kubenswrapper[4933]: I0122 05:50:32.389779 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle"
Jan 22 05:50:32 crc kubenswrapper[4933]: I0122 05:50:32.399437 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin"
Jan 22 05:50:32 crc kubenswrapper[4933]: I0122 05:50:32.509530 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle"
Jan 22 05:50:32 crc kubenswrapper[4933]: I0122 05:50:32.533303 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert"
Jan 22 05:50:32 crc kubenswrapper[4933]: I0122 05:50:32.609980 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images"
Jan 22 05:50:32 crc kubenswrapper[4933]: I0122 05:50:32.642930 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls"
Jan 22 05:50:32 crc kubenswrapper[4933]: I0122 05:50:32.677303 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls"
Jan 22 05:50:32 crc kubenswrapper[4933]: I0122 05:50:32.741348 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images"
Jan 22 05:50:32 crc kubenswrapper[4933]: I0122 05:50:32.851692 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt"
Jan 22 05:50:32 crc kubenswrapper[4933]: I0122 05:50:32.885102 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config"
Jan 22 05:50:33 crc kubenswrapper[4933]: I0122 05:50:33.026587 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert"
Jan 22 05:50:33 crc kubenswrapper[4933]: I0122 05:50:33.031216 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf"
Jan 22 05:50:33 crc kubenswrapper[4933]: I0122 05:50:33.073095 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert"
Jan 22 05:50:33 crc kubenswrapper[4933]: I0122 05:50:33.084279 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Jan 22 05:50:33 crc kubenswrapper[4933]: I0122 05:50:33.088695 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr"
Jan 22 05:50:33 crc kubenswrapper[4933]: I0122 05:50:33.101756 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt"
Jan 22 05:50:33 crc kubenswrapper[4933]: I0122 05:50:33.141238 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert"
Jan 22 05:50:33 crc kubenswrapper[4933]: I0122 05:50:33.156797 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt"
Jan 22 05:50:33 crc kubenswrapper[4933]: I0122 05:50:33.257686 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config"
Jan 22 05:50:33 crc kubenswrapper[4933]: I0122 05:50:33.272384 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Jan 22 05:50:33 crc kubenswrapper[4933]: I0122 05:50:33.317522 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv"
Jan 22 05:50:33 crc kubenswrapper[4933]: I0122 05:50:33.387565 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c"
Jan 22 05:50:33 crc kubenswrapper[4933]: I0122 05:50:33.482608 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle"
Jan 22 05:50:33 crc kubenswrapper[4933]: I0122 05:50:33.505101 4933 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Jan 22 05:50:33 crc kubenswrapper[4933]: I0122 05:50:33.536903 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert"
Jan 22 05:50:33 crc kubenswrapper[4933]: I0122 05:50:33.539977 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt"
Jan 22 05:50:33 crc kubenswrapper[4933]: I0122 05:50:33.671966 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls"
Jan 22 05:50:33 crc kubenswrapper[4933]: I0122 05:50:33.757464 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config"
Jan 22 05:50:33 crc kubenswrapper[4933]: I0122 05:50:33.777628 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr"
Jan 22 05:50:33 crc kubenswrapper[4933]: I0122 05:50:33.782175 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt"
Jan 22 05:50:33 crc kubenswrapper[4933]: I0122 05:50:33.833113 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt"
Jan 22 05:50:33 crc kubenswrapper[4933]: I0122 05:50:33.919155 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt"
Jan 22 05:50:34 crc kubenswrapper[4933]: I0122 05:50:34.194792 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca"
Jan 22 05:50:34 crc kubenswrapper[4933]: I0122 05:50:34.273339 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert"
Jan 22 05:50:34 crc kubenswrapper[4933]: I0122 05:50:34.301408 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt"
Jan 22 05:50:34 crc kubenswrapper[4933]: I0122 05:50:34.309424 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq"
Jan 22 05:50:34 crc kubenswrapper[4933]: I0122 05:50:34.398650 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls"
Jan 22 05:50:34 crc kubenswrapper[4933]: I0122 05:50:34.489898 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 22 05:50:34 crc kubenswrapper[4933]: I0122 05:50:34.490370 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 22 05:50:34 crc kubenswrapper[4933]: I0122 05:50:34.520093 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config"
Jan 22 05:50:34 crc kubenswrapper[4933]: I0122 05:50:34.536692 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config"
Jan 22 05:50:34 crc kubenswrapper[4933]: I0122 05:50:34.549404 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt"
Jan 22 05:50:34 crc kubenswrapper[4933]: I0122 05:50:34.551287 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle"
Jan 22 05:50:34 crc kubenswrapper[4933]: I0122 05:50:34.559454 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg"
Jan 22 05:50:34 crc kubenswrapper[4933]: I0122 05:50:34.604415 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client"
Jan 22 05:50:34 crc kubenswrapper[4933]: I0122 05:50:34.695795 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7"
Jan 22 05:50:34 crc kubenswrapper[4933]: I0122 05:50:34.705796 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt"
Jan 22 05:50:34 crc kubenswrapper[4933]: I0122 05:50:34.729135 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt"
Jan 22 05:50:34 crc kubenswrapper[4933]: I0122 05:50:34.734135 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key"
Jan 22 05:50:34 crc kubenswrapper[4933]: I0122 05:50:34.745262 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"1efad7ba624b3324d525e596ec6549a5f19a2e98848862198ac9a4ddbc01cc9b"}
Jan 22 05:50:34 crc kubenswrapper[4933]: I0122 05:50:34.772287 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt"
Jan 22 05:50:34 crc kubenswrapper[4933]: I0122 05:50:34.817973 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Jan 22 05:50:34 crc kubenswrapper[4933]: I0122 05:50:34.824095 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls"
Jan 22 05:50:34 crc kubenswrapper[4933]: I0122 05:50:34.996961 4933 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k"
Jan 22 05:50:35 crc kubenswrapper[4933]: I0122 05:50:35.204335 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle"
Jan 22 05:50:35 crc kubenswrapper[4933]: I0122 05:50:35.242480 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt"
Jan 22 05:50:35 crc kubenswrapper[4933]: I0122 05:50:35.278528 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert"
Jan 22 05:50:35 crc kubenswrapper[4933]: I0122 05:50:35.365953 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt"
Jan 22 05:50:35 crc kubenswrapper[4933]: I0122 05:50:35.374651 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt"
Jan 22 05:50:35 crc kubenswrapper[4933]: I0122 05:50:35.411507 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Jan 22 05:50:35 crc kubenswrapper[4933]: I0122 05:50:35.459745 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt"
Jan 22 05:50:35 crc kubenswrapper[4933]: I0122 05:50:35.477607 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt"
Jan 22 05:50:35 crc kubenswrapper[4933]: I0122 05:50:35.490224 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 22 05:50:35 crc kubenswrapper[4933]: I0122 05:50:35.490856 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 22 05:50:35 crc kubenswrapper[4933]: I0122 05:50:35.509597 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret"
Jan 22 05:50:35 crc kubenswrapper[4933]: I0122 05:50:35.511588 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Jan 22 05:50:35 crc kubenswrapper[4933]: I0122 05:50:35.520528 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources"
Jan 22 05:50:35 crc kubenswrapper[4933]: I0122 05:50:35.669217 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Jan 22 05:50:35 crc kubenswrapper[4933]: I0122 05:50:35.754573 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"9a98dd6ebb98e05c8b78ae6519970f18704858727935cfc3b4ce23d0698dcb18"}
Jan 22 05:50:35 crc kubenswrapper[4933]: I0122 05:50:35.756708 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"546faaf7f691eaafd7edcf6a350e78009290739f13534849822d30a2d6237e60"}
Jan 22 05:50:35 crc kubenswrapper[4933]: I0122 05:50:35.796758 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Jan 22 05:50:35 crc kubenswrapper[4933]: I0122 05:50:35.798695 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt"
Jan 22 05:50:35 crc kubenswrapper[4933]: I0122 05:50:35.839955 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt"
Jan 22 05:50:35 crc kubenswrapper[4933]: I0122 05:50:35.841275 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt"
Jan 22 05:50:35 crc kubenswrapper[4933]: I0122 05:50:35.852954 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd"
Jan 22 05:50:35 crc kubenswrapper[4933]: I0122 05:50:35.920633 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt"
Jan 22 05:50:36 crc kubenswrapper[4933]: I0122 05:50:36.041515 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt"
Jan 22 05:50:36 crc kubenswrapper[4933]: I0122 05:50:36.303382 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert"
Jan 22 05:50:36 crc kubenswrapper[4933]: I0122 05:50:36.421635 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg"
Jan 22 05:50:36 crc kubenswrapper[4933]: I0122 05:50:36.449160 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Jan 22 05:50:36 crc kubenswrapper[4933]: I0122 05:50:36.466232 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86"
Jan 22 05:50:36 crc kubenswrapper[4933]: I0122 05:50:36.488894 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt"
Jan 22 05:50:36 crc kubenswrapper[4933]: I0122 05:50:36.489429 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt"
Jan 22 05:50:36 crc kubenswrapper[4933]: I0122 05:50:36.526466 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert"
Jan 22 05:50:36 crc kubenswrapper[4933]: I0122 05:50:36.534626 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt"
Jan 22 05:50:36 crc kubenswrapper[4933]: I0122 05:50:36.576299 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx"
Jan 22 05:50:36 crc kubenswrapper[4933]: I0122 05:50:36.607264 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls"
Jan 22 05:50:36 crc kubenswrapper[4933]: I0122 05:50:36.656186 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy"
Jan 22 05:50:36 crc kubenswrapper[4933]: I0122 05:50:36.708096 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides"
Jan 22 05:50:36 crc kubenswrapper[4933]: I0122 05:50:36.766724 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-network-diagnostics_network-check-source-55646444c4-trplf_9d751cbb-f2e2-430d-9754-c882a5e924a5/check-endpoints/0.log"
Jan 22 05:50:36 crc kubenswrapper[4933]: I0122 05:50:36.766783 4933 generic.go:334] "Generic (PLEG): container finished" podID="9d751cbb-f2e2-430d-9754-c882a5e924a5" containerID="9a98dd6ebb98e05c8b78ae6519970f18704858727935cfc3b4ce23d0698dcb18" exitCode=255
Jan 22 05:50:36 crc kubenswrapper[4933]: I0122 05:50:36.766856 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerDied","Data":"9a98dd6ebb98e05c8b78ae6519970f18704858727935cfc3b4ce23d0698dcb18"}
Jan 22 05:50:36 crc kubenswrapper[4933]: I0122 05:50:36.767456 4933 scope.go:117] "RemoveContainer" containerID="9a98dd6ebb98e05c8b78ae6519970f18704858727935cfc3b4ce23d0698dcb18"
Jan 22 05:50:36 crc kubenswrapper[4933]: I0122 05:50:36.772686 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"14627fa1c9dc79b2df25f829d3f44f1cafa0c8f332b5d1d538f871207722ece5"}
Jan 22 05:50:36 crc kubenswrapper[4933]: I0122 05:50:36.772876 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 22 05:50:36 crc kubenswrapper[4933]: I0122 05:50:36.824934 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Jan 22 05:50:36 crc kubenswrapper[4933]: I0122 05:50:36.827443 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt"
Jan 22 05:50:36 crc kubenswrapper[4933]: I0122 05:50:36.854713 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls"
Jan 22 05:50:37 crc kubenswrapper[4933]: I0122 05:50:37.039924 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh"
Jan 22 05:50:37 crc kubenswrapper[4933]: I0122 05:50:37.081839 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls"
Jan 22 05:50:37 crc kubenswrapper[4933]: I0122 05:50:37.171951 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt"
Jan 22 05:50:37 crc kubenswrapper[4933]: I0122 05:50:37.222518 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt"
Jan 22 05:50:37 crc kubenswrapper[4933]: I0122 05:50:37.271957 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt"
Jan 22 05:50:37 crc kubenswrapper[4933]: I0122 05:50:37.292535 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates"
Jan 22 05:50:37 crc kubenswrapper[4933]: I0122 05:50:37.299028 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt"
Jan 22 05:50:37 crc kubenswrapper[4933]: I0122 05:50:37.368357 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw"
Jan 22 05:50:37 crc kubenswrapper[4933]: I0122 05:50:37.402965 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert"
Jan 22 05:50:37 crc kubenswrapper[4933]: I0122 05:50:37.443487 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p"
Jan 22 05:50:37 crc kubenswrapper[4933]: I0122 05:50:37.470829 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn"
Jan 22 05:50:37 crc kubenswrapper[4933]: I0122 05:50:37.490181 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 22 05:50:37 crc kubenswrapper[4933]: I0122 05:50:37.490887 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 22 05:50:37 crc kubenswrapper[4933]: I0122 05:50:37.499745 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r"
Jan 22 05:50:37 crc kubenswrapper[4933]: I0122 05:50:37.519292 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt"
Jan 22 05:50:37 crc kubenswrapper[4933]: I0122 05:50:37.571847 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert"
Jan 22 05:50:37 crc kubenswrapper[4933]: I0122 05:50:37.652728 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib"
Jan 22 05:50:37 crc kubenswrapper[4933]: I0122 05:50:37.740317 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config"
Jan 22 05:50:37 crc kubenswrapper[4933]: I0122 05:50:37.777738 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-network-diagnostics_network-check-source-55646444c4-trplf_9d751cbb-f2e2-430d-9754-c882a5e924a5/check-endpoints/0.log"
Jan 22 05:50:37 crc kubenswrapper[4933]: I0122 05:50:37.778414 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"687a1118f6ad54eba25b2e3a38454c6e89bbdedd3af69fa624361085be5e8c8a"}
Jan 22 05:50:37 crc kubenswrapper[4933]: I0122 05:50:37.780238 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert"
Jan 22 05:50:37 crc kubenswrapper[4933]: I0122 05:50:37.838184 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Jan 22 05:50:37 crc kubenswrapper[4933]: I0122 05:50:37.908945 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj"
Jan 22 05:50:37 crc kubenswrapper[4933]: I0122 05:50:37.909661 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert"
Jan 22 05:50:37 crc kubenswrapper[4933]: I0122 05:50:37.920515 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Jan 22 05:50:37 crc kubenswrapper[4933]: I0122 05:50:37.926944 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Jan 22 05:50:37 crc kubenswrapper[4933]: W0122 05:50:37.942025 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5fe485a1_e14f_4c09_b5b9_f252bc42b7e8.slice/crio-0fa5678817898b5be0188cd4bd429bf4241c0938f432ac246d36b8146c7c99c9 WatchSource:0}: Error finding container 0fa5678817898b5be0188cd4bd429bf4241c0938f432ac246d36b8146c7c99c9: Status 404 returned error can't find the container with id 0fa5678817898b5be0188cd4bd429bf4241c0938f432ac246d36b8146c7c99c9
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.018416 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert"
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.061995 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt"
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.083722 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config"
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.101612 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd"
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.196832 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config"
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.204343 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy"
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.340247 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt"
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.637432 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt"
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.676395 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert"
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.692156 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw"
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.786927 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-network-diagnostics_network-check-source-55646444c4-trplf_9d751cbb-f2e2-430d-9754-c882a5e924a5/check-endpoints/1.log"
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.787605 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-network-diagnostics_network-check-source-55646444c4-trplf_9d751cbb-f2e2-430d-9754-c882a5e924a5/check-endpoints/0.log"
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.787663 4933 generic.go:334] "Generic (PLEG): container finished" podID="9d751cbb-f2e2-430d-9754-c882a5e924a5" containerID="687a1118f6ad54eba25b2e3a38454c6e89bbdedd3af69fa624361085be5e8c8a" exitCode=255
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.787707 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerDied","Data":"687a1118f6ad54eba25b2e3a38454c6e89bbdedd3af69fa624361085be5e8c8a"}
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.787794 4933 scope.go:117] "RemoveContainer" containerID="9a98dd6ebb98e05c8b78ae6519970f18704858727935cfc3b4ce23d0698dcb18"
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.788427 4933 scope.go:117] "RemoveContainer" containerID="687a1118f6ad54eba25b2e3a38454c6e89bbdedd3af69fa624361085be5e8c8a"
Jan 22 05:50:38 crc kubenswrapper[4933]: E0122 05:50:38.789029 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=check-endpoints pod=network-check-source-55646444c4-trplf_openshift-network-diagnostics(9d751cbb-f2e2-430d-9754-c882a5e924a5)\"" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.791558 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret"
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.791930 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"1e160bc9954b19af9769ba69b971f0d898aea5ea4e7ac5782f5de0c5c2aca878"}
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.791972 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"0fa5678817898b5be0188cd4bd429bf4241c0938f432ac246d36b8146c7c99c9"}
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.916992 4933 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.924752 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-authentication/oauth-openshift-558db77b4-gj98r"]
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.924843 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"]
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.925305 4933 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd"
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.925327 4933 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="e8d24e56-7c7e-49cb-90b1-f79b0a6a63dd"
Jan 22 05:50:38 crc kubenswrapper[4933]: E0122 05:50:38.926247 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="520b2813-3aaf-46c7-9592-f8a8be0f9348" containerName="installer"
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.926278 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="520b2813-3aaf-46c7-9592-f8a8be0f9348" containerName="installer"
Jan 22 05:50:38 crc kubenswrapper[4933]: E0122 05:50:38.926299 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c4864a0-5981-4eef-a0db-c33a535e02de" containerName="oauth-openshift"
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.926312 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c4864a0-5981-4eef-a0db-c33a535e02de" containerName="oauth-openshift"
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.926490 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="520b2813-3aaf-46c7-9592-f8a8be0f9348" containerName="installer"
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.926512 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c4864a0-5981-4eef-a0db-c33a535e02de" containerName="oauth-openshift"
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.937587 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.937873 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.942137 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt"
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.946362 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session"
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.946469 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error"
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.946562 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt"
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.947393 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs"
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.947408 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit"
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.947668 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data"
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.947699 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection"
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.947827 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca"
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.948028 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig"
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.948107 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc"
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.948111 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert"
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.956606 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login"
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.963930 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template"
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.966247 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle"
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.966574 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt"
Jan 22 05:50:38 crc kubenswrapper[4933]: I0122 05:50:38.988474 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.004723 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=24.004704577 podStartE2EDuration="24.004704577s" podCreationTimestamp="2026-01-22 05:50:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:50:38.978440754 +0000 UTC m=+286.815566127" watchObservedRunningTime="2026-01-22 05:50:39.004704577 +0000 UTC m=+286.841829920"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.067412 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-audit-dir\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.067501 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-v4-0-config-system-router-certs\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.067536 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-v4-0-config-system-serving-cert\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.067569 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-v4-0-config-system-service-ca\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.067594 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7c66v\" (UniqueName: \"kubernetes.io/projected/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-kube-api-access-7c66v\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.067680 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-audit-policies\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.067813 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.067839 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-v4-0-config-user-template-error\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.067866 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-v4-0-config-system-cliconfig\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.067914 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.067959 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.067976 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-v4-0-config-user-template-login\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.068036 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-v4-0-config-system-session\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.068065 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.168884 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.168944 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-v4-0-config-user-template-error\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.168981 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-v4-0-config-system-cliconfig\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.169023 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.169094 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.169131 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-v4-0-config-user-template-login\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.169175 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-v4-0-config-system-session\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.169202 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.169223 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-audit-dir\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.169251 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-v4-0-config-system-router-certs\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.169279 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-v4-0-config-system-serving-cert\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.169357 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-v4-0-config-system-service-ca\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.169383 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7c66v\" (UniqueName: \"kubernetes.io/projected/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-kube-api-access-7c66v\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.169403 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-audit-policies\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.169862 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.170554 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-audit-policies\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.170789 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-audit-dir\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.170906 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.172329 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-v4-0-config-system-cliconfig\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.172999 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-v4-0-config-system-service-ca\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.174605 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-v4-0-config-system-serving-cert\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.174777 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.175471 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-v4-0-config-system-session\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.175865 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-v4-0-config-user-template-error\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.176305 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.178352 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-v4-0-config-system-router-certs\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.179562 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-v4-0-config-user-template-login\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.181208 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.189220 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7c66v\" (UniqueName: \"kubernetes.io/projected/a0c8f3a5-05e3-49da-bce3-7b71bebb3d59-kube-api-access-7c66v\") pod \"oauth-openshift-76b8488fcc-7n6zb\" (UID: \"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59\") " pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.209687 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.270670 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.273347 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.285770 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.305461 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.352422 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.462679 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"]
Jan 22 05:50:39 crc kubenswrapper[4933]: W0122 05:50:39.469487 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda0c8f3a5_05e3_49da_bce3_7b71bebb3d59.slice/crio-d975bf2495065044ea97d65dc80f3aabdcacbddecebc0df7200adf0d3e9f63af WatchSource:0}: Error finding container d975bf2495065044ea97d65dc80f3aabdcacbddecebc0df7200adf0d3e9f63af: Status 404 returned error can't find the container with id d975bf2495065044ea97d65dc80f3aabdcacbddecebc0df7200adf0d3e9f63af
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.470788 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.665926 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.690131 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.799668 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-network-diagnostics_network-check-source-55646444c4-trplf_9d751cbb-f2e2-430d-9754-c882a5e924a5/check-endpoints/1.log"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.800470 4933 scope.go:117] "RemoveContainer" containerID="687a1118f6ad54eba25b2e3a38454c6e89bbdedd3af69fa624361085be5e8c8a"
Jan 22 05:50:39 crc kubenswrapper[4933]: E0122 05:50:39.800788 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=check-endpoints pod=network-check-source-55646444c4-trplf_openshift-network-diagnostics(9d751cbb-f2e2-430d-9754-c882a5e924a5)\"" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.802749 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb" event={"ID":"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59","Type":"ContainerStarted","Data":"0a7ac79681b86bb1885d38d4164e7ef6d35cdfa8ce12ba1d17c6ad4e64f8a1a3"}
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.802801 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb" event={"ID":"a0c8f3a5-05e3-49da-bce3-7b71bebb3d59","Type":"ContainerStarted","Data":"d975bf2495065044ea97d65dc80f3aabdcacbddecebc0df7200adf0d3e9f63af"}
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.802822 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.814026 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.844206 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb" podStartSLOduration=50.844183442 podStartE2EDuration="50.844183442s" podCreationTimestamp="2026-01-22 05:49:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:50:39.839387418 +0000 UTC m=+287.676512781" watchObservedRunningTime="2026-01-22 05:50:39.844183442 +0000 UTC m=+287.681308795"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.914693 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1"
Jan 22 05:50:39 crc kubenswrapper[4933]: I0122 05:50:39.932783 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7"
Jan 22 05:50:40 crc kubenswrapper[4933]: I0122 05:50:40.142450 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt"
Jan 22 05:50:40 crc kubenswrapper[4933]: I0122 05:50:40.177973 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls"
Jan 22 05:50:40 crc kubenswrapper[4933]: I0122 05:50:40.254779 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-76b8488fcc-7n6zb"
Jan 22 05:50:40 crc kubenswrapper[4933]: I0122 05:50:40.286898 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt"
Jan 22 05:50:40 crc kubenswrapper[4933]: I0122 05:50:40.287930 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca"
Jan 22 05:50:40 crc kubenswrapper[4933]: I0122 05:50:40.291661 4933 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Jan 22 05:50:40 crc kubenswrapper[4933]: I0122 05:50:40.371443 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt"
Jan 22 05:50:40 crc kubenswrapper[4933]: I0122 05:50:40.384053 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl"
Jan 22 05:50:40 crc kubenswrapper[4933]: I0122 05:50:40.435634 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt"
Jan 22 05:50:40 crc kubenswrapper[4933]: I0122 05:50:40.498962 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c4864a0-5981-4eef-a0db-c33a535e02de" path="/var/lib/kubelet/pods/2c4864a0-5981-4eef-a0db-c33a535e02de/volumes"
Jan 22 05:50:40 crc kubenswrapper[4933]: I0122 05:50:40.538904 4933 reflector.go:368] Caches populated for
*v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Jan 22 05:50:40 crc kubenswrapper[4933]: I0122 05:50:40.691508 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 22 05:50:40 crc kubenswrapper[4933]: I0122 05:50:40.703143 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 22 05:50:40 crc kubenswrapper[4933]: I0122 05:50:40.749183 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 22 05:50:41 crc kubenswrapper[4933]: I0122 05:50:41.084973 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Jan 22 05:50:41 crc kubenswrapper[4933]: I0122 05:50:41.314983 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Jan 22 05:50:41 crc kubenswrapper[4933]: I0122 05:50:41.387334 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 22 05:50:41 crc kubenswrapper[4933]: I0122 05:50:41.387969 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Jan 22 05:50:41 crc kubenswrapper[4933]: I0122 05:50:41.423250 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 22 05:50:41 crc kubenswrapper[4933]: I0122 05:50:41.518608 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Jan 22 05:50:41 crc kubenswrapper[4933]: I0122 05:50:41.758025 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Jan 22 05:50:41 crc kubenswrapper[4933]: I0122 05:50:41.945961 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Jan 22 05:50:41 crc kubenswrapper[4933]: I0122 05:50:41.948460 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Jan 22 05:50:41 crc kubenswrapper[4933]: I0122 05:50:41.949486 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Jan 22 05:50:41 crc kubenswrapper[4933]: I0122 05:50:41.955061 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 22 05:50:41 crc kubenswrapper[4933]: I0122 05:50:41.992481 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 22 05:50:42 crc kubenswrapper[4933]: I0122 05:50:42.020540 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Jan 22 05:50:42 crc kubenswrapper[4933]: I0122 05:50:42.089279 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Jan 22 05:50:42 crc kubenswrapper[4933]: I0122 05:50:42.144997 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Jan 22 05:50:42 
crc kubenswrapper[4933]: I0122 05:50:42.382485 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Jan 22 05:50:42 crc kubenswrapper[4933]: I0122 05:50:42.407725 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Jan 22 05:50:42 crc kubenswrapper[4933]: I0122 05:50:42.657322 4933 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Jan 22 05:50:42 crc kubenswrapper[4933]: I0122 05:50:42.659285 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 22 05:50:42 crc kubenswrapper[4933]: I0122 05:50:42.721890 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Jan 22 05:50:42 crc kubenswrapper[4933]: I0122 05:50:42.980709 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 22 05:50:43 crc kubenswrapper[4933]: I0122 05:50:43.087263 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Jan 22 05:50:43 crc kubenswrapper[4933]: I0122 05:50:43.480123 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 22 05:50:43 crc kubenswrapper[4933]: I0122 05:50:43.578924 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Jan 22 05:50:43 crc kubenswrapper[4933]: I0122 05:50:43.626377 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Jan 22 05:50:43 crc kubenswrapper[4933]: I0122 05:50:43.661581 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 22 05:50:44 crc kubenswrapper[4933]: I0122 05:50:44.081601 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 22 05:50:44 crc kubenswrapper[4933]: I0122 05:50:44.120193 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 22 05:50:44 crc kubenswrapper[4933]: I0122 05:50:44.547496 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-977898fdc-8bmw7"] Jan 22 05:50:44 crc kubenswrapper[4933]: I0122 05:50:44.547790 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-977898fdc-8bmw7" podUID="30297fb1-7354-44f3-a4a6-a3729e48c214" containerName="controller-manager" containerID="cri-o://5e1b2813cc52dfb2b45b851545517adc40866378da4ee5a8389e3961c63fee61" gracePeriod=30 Jan 22 05:50:44 crc kubenswrapper[4933]: I0122 05:50:44.651023 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6b5dfcd9f-f8jd2"] Jan 22 05:50:44 crc kubenswrapper[4933]: I0122 05:50:44.651371 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6b5dfcd9f-f8jd2" podUID="984d8f16-0ff7-46a6-8099-f5a87ef0d5ed" containerName="route-controller-manager" containerID="cri-o://fdb515748f4c39f1b9554d1f768bd8aa9f00bbfe979a2e9529798d691adfed0c" 
gracePeriod=30 Jan 22 05:50:44 crc kubenswrapper[4933]: I0122 05:50:44.836556 4933 generic.go:334] "Generic (PLEG): container finished" podID="984d8f16-0ff7-46a6-8099-f5a87ef0d5ed" containerID="fdb515748f4c39f1b9554d1f768bd8aa9f00bbfe979a2e9529798d691adfed0c" exitCode=0 Jan 22 05:50:44 crc kubenswrapper[4933]: I0122 05:50:44.836634 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6b5dfcd9f-f8jd2" event={"ID":"984d8f16-0ff7-46a6-8099-f5a87ef0d5ed","Type":"ContainerDied","Data":"fdb515748f4c39f1b9554d1f768bd8aa9f00bbfe979a2e9529798d691adfed0c"} Jan 22 05:50:44 crc kubenswrapper[4933]: I0122 05:50:44.842735 4933 generic.go:334] "Generic (PLEG): container finished" podID="30297fb1-7354-44f3-a4a6-a3729e48c214" containerID="5e1b2813cc52dfb2b45b851545517adc40866378da4ee5a8389e3961c63fee61" exitCode=0 Jan 22 05:50:44 crc kubenswrapper[4933]: I0122 05:50:44.842792 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-977898fdc-8bmw7" event={"ID":"30297fb1-7354-44f3-a4a6-a3729e48c214","Type":"ContainerDied","Data":"5e1b2813cc52dfb2b45b851545517adc40866378da4ee5a8389e3961c63fee61"} Jan 22 05:50:44 crc kubenswrapper[4933]: I0122 05:50:44.888410 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Jan 22 05:50:45 crc kubenswrapper[4933]: I0122 05:50:45.023809 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-977898fdc-8bmw7" Jan 22 05:50:45 crc kubenswrapper[4933]: I0122 05:50:45.031347 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6b5dfcd9f-f8jd2" Jan 22 05:50:45 crc kubenswrapper[4933]: I0122 05:50:45.154996 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mt7qz\" (UniqueName: \"kubernetes.io/projected/984d8f16-0ff7-46a6-8099-f5a87ef0d5ed-kube-api-access-mt7qz\") pod \"984d8f16-0ff7-46a6-8099-f5a87ef0d5ed\" (UID: \"984d8f16-0ff7-46a6-8099-f5a87ef0d5ed\") " Jan 22 05:50:45 crc kubenswrapper[4933]: I0122 05:50:45.155040 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/30297fb1-7354-44f3-a4a6-a3729e48c214-config\") pod \"30297fb1-7354-44f3-a4a6-a3729e48c214\" (UID: \"30297fb1-7354-44f3-a4a6-a3729e48c214\") " Jan 22 05:50:45 crc kubenswrapper[4933]: I0122 05:50:45.155056 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/30297fb1-7354-44f3-a4a6-a3729e48c214-client-ca\") pod \"30297fb1-7354-44f3-a4a6-a3729e48c214\" (UID: \"30297fb1-7354-44f3-a4a6-a3729e48c214\") " Jan 22 05:50:45 crc kubenswrapper[4933]: I0122 05:50:45.155109 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/984d8f16-0ff7-46a6-8099-f5a87ef0d5ed-serving-cert\") pod \"984d8f16-0ff7-46a6-8099-f5a87ef0d5ed\" (UID: \"984d8f16-0ff7-46a6-8099-f5a87ef0d5ed\") " Jan 22 05:50:45 crc kubenswrapper[4933]: I0122 05:50:45.155143 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/984d8f16-0ff7-46a6-8099-f5a87ef0d5ed-client-ca\") pod \"984d8f16-0ff7-46a6-8099-f5a87ef0d5ed\" (UID: 
\"984d8f16-0ff7-46a6-8099-f5a87ef0d5ed\") " Jan 22 05:50:45 crc kubenswrapper[4933]: I0122 05:50:45.155190 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7qbwl\" (UniqueName: \"kubernetes.io/projected/30297fb1-7354-44f3-a4a6-a3729e48c214-kube-api-access-7qbwl\") pod \"30297fb1-7354-44f3-a4a6-a3729e48c214\" (UID: \"30297fb1-7354-44f3-a4a6-a3729e48c214\") " Jan 22 05:50:45 crc kubenswrapper[4933]: I0122 05:50:45.155215 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/984d8f16-0ff7-46a6-8099-f5a87ef0d5ed-config\") pod \"984d8f16-0ff7-46a6-8099-f5a87ef0d5ed\" (UID: \"984d8f16-0ff7-46a6-8099-f5a87ef0d5ed\") " Jan 22 05:50:45 crc kubenswrapper[4933]: I0122 05:50:45.155254 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/30297fb1-7354-44f3-a4a6-a3729e48c214-proxy-ca-bundles\") pod \"30297fb1-7354-44f3-a4a6-a3729e48c214\" (UID: \"30297fb1-7354-44f3-a4a6-a3729e48c214\") " Jan 22 05:50:45 crc kubenswrapper[4933]: I0122 05:50:45.155279 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/30297fb1-7354-44f3-a4a6-a3729e48c214-serving-cert\") pod \"30297fb1-7354-44f3-a4a6-a3729e48c214\" (UID: \"30297fb1-7354-44f3-a4a6-a3729e48c214\") " Jan 22 05:50:45 crc kubenswrapper[4933]: I0122 05:50:45.155878 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/30297fb1-7354-44f3-a4a6-a3729e48c214-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "30297fb1-7354-44f3-a4a6-a3729e48c214" (UID: "30297fb1-7354-44f3-a4a6-a3729e48c214"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:50:45 crc kubenswrapper[4933]: I0122 05:50:45.155905 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/30297fb1-7354-44f3-a4a6-a3729e48c214-client-ca" (OuterVolumeSpecName: "client-ca") pod "30297fb1-7354-44f3-a4a6-a3729e48c214" (UID: "30297fb1-7354-44f3-a4a6-a3729e48c214"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:50:45 crc kubenswrapper[4933]: I0122 05:50:45.156064 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/984d8f16-0ff7-46a6-8099-f5a87ef0d5ed-client-ca" (OuterVolumeSpecName: "client-ca") pod "984d8f16-0ff7-46a6-8099-f5a87ef0d5ed" (UID: "984d8f16-0ff7-46a6-8099-f5a87ef0d5ed"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:50:45 crc kubenswrapper[4933]: I0122 05:50:45.156187 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/984d8f16-0ff7-46a6-8099-f5a87ef0d5ed-config" (OuterVolumeSpecName: "config") pod "984d8f16-0ff7-46a6-8099-f5a87ef0d5ed" (UID: "984d8f16-0ff7-46a6-8099-f5a87ef0d5ed"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:50:45 crc kubenswrapper[4933]: I0122 05:50:45.157203 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/30297fb1-7354-44f3-a4a6-a3729e48c214-config" (OuterVolumeSpecName: "config") pod "30297fb1-7354-44f3-a4a6-a3729e48c214" (UID: "30297fb1-7354-44f3-a4a6-a3729e48c214"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:50:45 crc kubenswrapper[4933]: I0122 05:50:45.160835 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/30297fb1-7354-44f3-a4a6-a3729e48c214-kube-api-access-7qbwl" (OuterVolumeSpecName: "kube-api-access-7qbwl") pod "30297fb1-7354-44f3-a4a6-a3729e48c214" (UID: "30297fb1-7354-44f3-a4a6-a3729e48c214"). InnerVolumeSpecName "kube-api-access-7qbwl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:50:45 crc kubenswrapper[4933]: I0122 05:50:45.160928 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/984d8f16-0ff7-46a6-8099-f5a87ef0d5ed-kube-api-access-mt7qz" (OuterVolumeSpecName: "kube-api-access-mt7qz") pod "984d8f16-0ff7-46a6-8099-f5a87ef0d5ed" (UID: "984d8f16-0ff7-46a6-8099-f5a87ef0d5ed"). InnerVolumeSpecName "kube-api-access-mt7qz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:50:45 crc kubenswrapper[4933]: I0122 05:50:45.161538 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/984d8f16-0ff7-46a6-8099-f5a87ef0d5ed-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "984d8f16-0ff7-46a6-8099-f5a87ef0d5ed" (UID: "984d8f16-0ff7-46a6-8099-f5a87ef0d5ed"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:50:45 crc kubenswrapper[4933]: I0122 05:50:45.161679 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/30297fb1-7354-44f3-a4a6-a3729e48c214-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "30297fb1-7354-44f3-a4a6-a3729e48c214" (UID: "30297fb1-7354-44f3-a4a6-a3729e48c214"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:50:45 crc kubenswrapper[4933]: I0122 05:50:45.256782 4933 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/984d8f16-0ff7-46a6-8099-f5a87ef0d5ed-client-ca\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:45 crc kubenswrapper[4933]: I0122 05:50:45.256848 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7qbwl\" (UniqueName: \"kubernetes.io/projected/30297fb1-7354-44f3-a4a6-a3729e48c214-kube-api-access-7qbwl\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:45 crc kubenswrapper[4933]: I0122 05:50:45.256870 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/984d8f16-0ff7-46a6-8099-f5a87ef0d5ed-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:45 crc kubenswrapper[4933]: I0122 05:50:45.256890 4933 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/30297fb1-7354-44f3-a4a6-a3729e48c214-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:45 crc kubenswrapper[4933]: I0122 05:50:45.256907 4933 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/30297fb1-7354-44f3-a4a6-a3729e48c214-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:45 crc kubenswrapper[4933]: I0122 05:50:45.256924 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/30297fb1-7354-44f3-a4a6-a3729e48c214-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:45 crc kubenswrapper[4933]: I0122 05:50:45.256943 4933 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/30297fb1-7354-44f3-a4a6-a3729e48c214-client-ca\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:45 crc kubenswrapper[4933]: I0122 05:50:45.256968 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mt7qz\" (UniqueName: \"kubernetes.io/projected/984d8f16-0ff7-46a6-8099-f5a87ef0d5ed-kube-api-access-mt7qz\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:45 crc kubenswrapper[4933]: I0122 05:50:45.256990 4933 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/984d8f16-0ff7-46a6-8099-f5a87ef0d5ed-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:45 crc kubenswrapper[4933]: I0122 05:50:45.852466 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6b5dfcd9f-f8jd2" event={"ID":"984d8f16-0ff7-46a6-8099-f5a87ef0d5ed","Type":"ContainerDied","Data":"96a6453fc187e4acb73afdb2aba1df22fcaf5e561b9adc1bb3c18edeb3312d26"} Jan 22 05:50:45 crc kubenswrapper[4933]: I0122 05:50:45.852529 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6b5dfcd9f-f8jd2" Jan 22 05:50:45 crc kubenswrapper[4933]: I0122 05:50:45.852553 4933 scope.go:117] "RemoveContainer" containerID="fdb515748f4c39f1b9554d1f768bd8aa9f00bbfe979a2e9529798d691adfed0c" Jan 22 05:50:45 crc kubenswrapper[4933]: I0122 05:50:45.854635 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-977898fdc-8bmw7" event={"ID":"30297fb1-7354-44f3-a4a6-a3729e48c214","Type":"ContainerDied","Data":"5c8906dfc01fd6c1c43d989d9f049421c1e4a64314aa9970f4d3c03bc86747a6"} Jan 22 05:50:45 crc kubenswrapper[4933]: I0122 05:50:45.854762 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-977898fdc-8bmw7" Jan 22 05:50:45 crc kubenswrapper[4933]: I0122 05:50:45.880029 4933 scope.go:117] "RemoveContainer" containerID="5e1b2813cc52dfb2b45b851545517adc40866378da4ee5a8389e3961c63fee61" Jan 22 05:50:45 crc kubenswrapper[4933]: I0122 05:50:45.909663 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-977898fdc-8bmw7"] Jan 22 05:50:45 crc kubenswrapper[4933]: I0122 05:50:45.918809 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-977898fdc-8bmw7"] Jan 22 05:50:45 crc kubenswrapper[4933]: I0122 05:50:45.926284 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6b5dfcd9f-f8jd2"] Jan 22 05:50:45 crc kubenswrapper[4933]: I0122 05:50:45.931752 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6b5dfcd9f-f8jd2"] Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.063320 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7695d8c949-8kjv5"] Jan 22 05:50:46 crc kubenswrapper[4933]: E0122 05:50:46.063686 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="984d8f16-0ff7-46a6-8099-f5a87ef0d5ed" containerName="route-controller-manager" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.063706 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="984d8f16-0ff7-46a6-8099-f5a87ef0d5ed" containerName="route-controller-manager" Jan 22 05:50:46 crc kubenswrapper[4933]: E0122 05:50:46.063725 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30297fb1-7354-44f3-a4a6-a3729e48c214" containerName="controller-manager" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.063735 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="30297fb1-7354-44f3-a4a6-a3729e48c214" containerName="controller-manager" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.063861 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="984d8f16-0ff7-46a6-8099-f5a87ef0d5ed" containerName="route-controller-manager" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.063876 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="30297fb1-7354-44f3-a4a6-a3729e48c214" containerName="controller-manager" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.064623 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-8kjv5" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.069777 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.070061 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.070252 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.070380 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.070546 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.070672 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.075218 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-7f5f69b8b5-ldwt8"] Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.077916 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7f5f69b8b5-ldwt8" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.082684 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.083051 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.083246 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.083307 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.084037 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.085234 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.096382 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.100207 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7695d8c949-8kjv5"] Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.109167 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7f5f69b8b5-ldwt8"] Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.174161 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sndg2\" (UniqueName: 
\"kubernetes.io/projected/6a21b4bc-ac49-4926-9fd8-82597f52b529-kube-api-access-sndg2\") pod \"route-controller-manager-7695d8c949-8kjv5\" (UID: \"6a21b4bc-ac49-4926-9fd8-82597f52b529\") " pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-8kjv5" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.174258 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6a21b4bc-ac49-4926-9fd8-82597f52b529-serving-cert\") pod \"route-controller-manager-7695d8c949-8kjv5\" (UID: \"6a21b4bc-ac49-4926-9fd8-82597f52b529\") " pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-8kjv5" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.174305 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x7z97\" (UniqueName: \"kubernetes.io/projected/9532dcde-8f1b-44b7-8e10-e7e76d9548fc-kube-api-access-x7z97\") pod \"controller-manager-7f5f69b8b5-ldwt8\" (UID: \"9532dcde-8f1b-44b7-8e10-e7e76d9548fc\") " pod="openshift-controller-manager/controller-manager-7f5f69b8b5-ldwt8" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.174330 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9532dcde-8f1b-44b7-8e10-e7e76d9548fc-proxy-ca-bundles\") pod \"controller-manager-7f5f69b8b5-ldwt8\" (UID: \"9532dcde-8f1b-44b7-8e10-e7e76d9548fc\") " pod="openshift-controller-manager/controller-manager-7f5f69b8b5-ldwt8" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.174359 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a21b4bc-ac49-4926-9fd8-82597f52b529-config\") pod \"route-controller-manager-7695d8c949-8kjv5\" (UID: \"6a21b4bc-ac49-4926-9fd8-82597f52b529\") " pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-8kjv5" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.174491 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9532dcde-8f1b-44b7-8e10-e7e76d9548fc-config\") pod \"controller-manager-7f5f69b8b5-ldwt8\" (UID: \"9532dcde-8f1b-44b7-8e10-e7e76d9548fc\") " pod="openshift-controller-manager/controller-manager-7f5f69b8b5-ldwt8" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.174589 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6a21b4bc-ac49-4926-9fd8-82597f52b529-client-ca\") pod \"route-controller-manager-7695d8c949-8kjv5\" (UID: \"6a21b4bc-ac49-4926-9fd8-82597f52b529\") " pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-8kjv5" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.174657 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9532dcde-8f1b-44b7-8e10-e7e76d9548fc-serving-cert\") pod \"controller-manager-7f5f69b8b5-ldwt8\" (UID: \"9532dcde-8f1b-44b7-8e10-e7e76d9548fc\") " pod="openshift-controller-manager/controller-manager-7f5f69b8b5-ldwt8" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.174707 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" 
(UniqueName: \"kubernetes.io/configmap/9532dcde-8f1b-44b7-8e10-e7e76d9548fc-client-ca\") pod \"controller-manager-7f5f69b8b5-ldwt8\" (UID: \"9532dcde-8f1b-44b7-8e10-e7e76d9548fc\") " pod="openshift-controller-manager/controller-manager-7f5f69b8b5-ldwt8" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.276543 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sndg2\" (UniqueName: \"kubernetes.io/projected/6a21b4bc-ac49-4926-9fd8-82597f52b529-kube-api-access-sndg2\") pod \"route-controller-manager-7695d8c949-8kjv5\" (UID: \"6a21b4bc-ac49-4926-9fd8-82597f52b529\") " pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-8kjv5" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.276631 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6a21b4bc-ac49-4926-9fd8-82597f52b529-serving-cert\") pod \"route-controller-manager-7695d8c949-8kjv5\" (UID: \"6a21b4bc-ac49-4926-9fd8-82597f52b529\") " pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-8kjv5" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.276675 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x7z97\" (UniqueName: \"kubernetes.io/projected/9532dcde-8f1b-44b7-8e10-e7e76d9548fc-kube-api-access-x7z97\") pod \"controller-manager-7f5f69b8b5-ldwt8\" (UID: \"9532dcde-8f1b-44b7-8e10-e7e76d9548fc\") " pod="openshift-controller-manager/controller-manager-7f5f69b8b5-ldwt8" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.276705 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a21b4bc-ac49-4926-9fd8-82597f52b529-config\") pod \"route-controller-manager-7695d8c949-8kjv5\" (UID: \"6a21b4bc-ac49-4926-9fd8-82597f52b529\") " pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-8kjv5" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.276738 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9532dcde-8f1b-44b7-8e10-e7e76d9548fc-proxy-ca-bundles\") pod \"controller-manager-7f5f69b8b5-ldwt8\" (UID: \"9532dcde-8f1b-44b7-8e10-e7e76d9548fc\") " pod="openshift-controller-manager/controller-manager-7f5f69b8b5-ldwt8" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.276784 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9532dcde-8f1b-44b7-8e10-e7e76d9548fc-config\") pod \"controller-manager-7f5f69b8b5-ldwt8\" (UID: \"9532dcde-8f1b-44b7-8e10-e7e76d9548fc\") " pod="openshift-controller-manager/controller-manager-7f5f69b8b5-ldwt8" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.276828 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6a21b4bc-ac49-4926-9fd8-82597f52b529-client-ca\") pod \"route-controller-manager-7695d8c949-8kjv5\" (UID: \"6a21b4bc-ac49-4926-9fd8-82597f52b529\") " pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-8kjv5" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.276870 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9532dcde-8f1b-44b7-8e10-e7e76d9548fc-serving-cert\") pod 
\"controller-manager-7f5f69b8b5-ldwt8\" (UID: \"9532dcde-8f1b-44b7-8e10-e7e76d9548fc\") " pod="openshift-controller-manager/controller-manager-7f5f69b8b5-ldwt8" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.276907 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9532dcde-8f1b-44b7-8e10-e7e76d9548fc-client-ca\") pod \"controller-manager-7f5f69b8b5-ldwt8\" (UID: \"9532dcde-8f1b-44b7-8e10-e7e76d9548fc\") " pod="openshift-controller-manager/controller-manager-7f5f69b8b5-ldwt8" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.278180 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6a21b4bc-ac49-4926-9fd8-82597f52b529-client-ca\") pod \"route-controller-manager-7695d8c949-8kjv5\" (UID: \"6a21b4bc-ac49-4926-9fd8-82597f52b529\") " pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-8kjv5" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.278259 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9532dcde-8f1b-44b7-8e10-e7e76d9548fc-client-ca\") pod \"controller-manager-7f5f69b8b5-ldwt8\" (UID: \"9532dcde-8f1b-44b7-8e10-e7e76d9548fc\") " pod="openshift-controller-manager/controller-manager-7f5f69b8b5-ldwt8" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.278487 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9532dcde-8f1b-44b7-8e10-e7e76d9548fc-config\") pod \"controller-manager-7f5f69b8b5-ldwt8\" (UID: \"9532dcde-8f1b-44b7-8e10-e7e76d9548fc\") " pod="openshift-controller-manager/controller-manager-7f5f69b8b5-ldwt8" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.279116 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a21b4bc-ac49-4926-9fd8-82597f52b529-config\") pod \"route-controller-manager-7695d8c949-8kjv5\" (UID: \"6a21b4bc-ac49-4926-9fd8-82597f52b529\") " pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-8kjv5" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.279960 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9532dcde-8f1b-44b7-8e10-e7e76d9548fc-proxy-ca-bundles\") pod \"controller-manager-7f5f69b8b5-ldwt8\" (UID: \"9532dcde-8f1b-44b7-8e10-e7e76d9548fc\") " pod="openshift-controller-manager/controller-manager-7f5f69b8b5-ldwt8" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.285725 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6a21b4bc-ac49-4926-9fd8-82597f52b529-serving-cert\") pod \"route-controller-manager-7695d8c949-8kjv5\" (UID: \"6a21b4bc-ac49-4926-9fd8-82597f52b529\") " pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-8kjv5" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.299756 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sndg2\" (UniqueName: \"kubernetes.io/projected/6a21b4bc-ac49-4926-9fd8-82597f52b529-kube-api-access-sndg2\") pod \"route-controller-manager-7695d8c949-8kjv5\" (UID: \"6a21b4bc-ac49-4926-9fd8-82597f52b529\") " pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-8kjv5" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 
05:50:46.300600 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9532dcde-8f1b-44b7-8e10-e7e76d9548fc-serving-cert\") pod \"controller-manager-7f5f69b8b5-ldwt8\" (UID: \"9532dcde-8f1b-44b7-8e10-e7e76d9548fc\") " pod="openshift-controller-manager/controller-manager-7f5f69b8b5-ldwt8" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.308661 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x7z97\" (UniqueName: \"kubernetes.io/projected/9532dcde-8f1b-44b7-8e10-e7e76d9548fc-kube-api-access-x7z97\") pod \"controller-manager-7f5f69b8b5-ldwt8\" (UID: \"9532dcde-8f1b-44b7-8e10-e7e76d9548fc\") " pod="openshift-controller-manager/controller-manager-7f5f69b8b5-ldwt8" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.399805 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-8kjv5" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.409692 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7f5f69b8b5-ldwt8" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.502559 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="30297fb1-7354-44f3-a4a6-a3729e48c214" path="/var/lib/kubelet/pods/30297fb1-7354-44f3-a4a6-a3729e48c214/volumes" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.504055 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="984d8f16-0ff7-46a6-8099-f5a87ef0d5ed" path="/var/lib/kubelet/pods/984d8f16-0ff7-46a6-8099-f5a87ef0d5ed/volumes" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.594005 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7695d8c949-8kjv5"] Jan 22 05:50:46 crc kubenswrapper[4933]: W0122 05:50:46.601702 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6a21b4bc_ac49_4926_9fd8_82597f52b529.slice/crio-7f2c311b0f3e9649b4da53475c539979dc2fe34a2b69988ada2100d105f38091 WatchSource:0}: Error finding container 7f2c311b0f3e9649b4da53475c539979dc2fe34a2b69988ada2100d105f38091: Status 404 returned error can't find the container with id 7f2c311b0f3e9649b4da53475c539979dc2fe34a2b69988ada2100d105f38091 Jan 22 05:50:46 crc kubenswrapper[4933]: W0122 05:50:46.677523 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9532dcde_8f1b_44b7_8e10_e7e76d9548fc.slice/crio-345f0eb6aeb1a3daf36a10cd3b136724ea0c1709f66ec257a9fd4e3d5dcc7476 WatchSource:0}: Error finding container 345f0eb6aeb1a3daf36a10cd3b136724ea0c1709f66ec257a9fd4e3d5dcc7476: Status 404 returned error can't find the container with id 345f0eb6aeb1a3daf36a10cd3b136724ea0c1709f66ec257a9fd4e3d5dcc7476 Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.680229 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7f5f69b8b5-ldwt8"] Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.863037 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-8kjv5" event={"ID":"6a21b4bc-ac49-4926-9fd8-82597f52b529","Type":"ContainerStarted","Data":"7376e01f4d920fb2398c39cb3f4b003c733a2368ef4cfb146d74b822af081bc8"} Jan 22 
05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.863448 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-8kjv5" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.863466 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-8kjv5" event={"ID":"6a21b4bc-ac49-4926-9fd8-82597f52b529","Type":"ContainerStarted","Data":"7f2c311b0f3e9649b4da53475c539979dc2fe34a2b69988ada2100d105f38091"} Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.865703 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7f5f69b8b5-ldwt8" event={"ID":"9532dcde-8f1b-44b7-8e10-e7e76d9548fc","Type":"ContainerStarted","Data":"3d89c0c6fdb787cbd092aac13876e43dc86babfda2df94ada483418ad8ef32a1"} Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.865745 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7f5f69b8b5-ldwt8" event={"ID":"9532dcde-8f1b-44b7-8e10-e7e76d9548fc","Type":"ContainerStarted","Data":"345f0eb6aeb1a3daf36a10cd3b136724ea0c1709f66ec257a9fd4e3d5dcc7476"} Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.866576 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-7f5f69b8b5-ldwt8" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.868130 4933 patch_prober.go:28] interesting pod/controller-manager-7f5f69b8b5-ldwt8 container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.64:8443/healthz\": dial tcp 10.217.0.64:8443: connect: connection refused" start-of-body= Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.868165 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-7f5f69b8b5-ldwt8" podUID="9532dcde-8f1b-44b7-8e10-e7e76d9548fc" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.64:8443/healthz\": dial tcp 10.217.0.64:8443: connect: connection refused" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.883239 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-8kjv5" podStartSLOduration=2.883224665 podStartE2EDuration="2.883224665s" podCreationTimestamp="2026-01-22 05:50:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:50:46.880947585 +0000 UTC m=+294.718072948" watchObservedRunningTime="2026-01-22 05:50:46.883224665 +0000 UTC m=+294.720350018" Jan 22 05:50:46 crc kubenswrapper[4933]: I0122 05:50:46.899851 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-7f5f69b8b5-ldwt8" podStartSLOduration=2.899836997 podStartE2EDuration="2.899836997s" podCreationTimestamp="2026-01-22 05:50:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:50:46.897814723 +0000 UTC m=+294.734940076" watchObservedRunningTime="2026-01-22 05:50:46.899836997 +0000 UTC m=+294.736962350" Jan 22 05:50:47 crc kubenswrapper[4933]: I0122 05:50:47.094663 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-8kjv5" Jan 22 05:50:47 crc kubenswrapper[4933]: I0122 05:50:47.874821 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-7f5f69b8b5-ldwt8" Jan 22 05:50:49 crc kubenswrapper[4933]: I0122 05:50:49.366528 4933 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 22 05:50:49 crc kubenswrapper[4933]: I0122 05:50:49.367070 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://316e39fa653004d010cfb005d9eb456c2d844660f5a292f6bba64a8d5e75b4b6" gracePeriod=5 Jan 22 05:50:50 crc kubenswrapper[4933]: I0122 05:50:50.491541 4933 scope.go:117] "RemoveContainer" containerID="687a1118f6ad54eba25b2e3a38454c6e89bbdedd3af69fa624361085be5e8c8a" Jan 22 05:50:50 crc kubenswrapper[4933]: I0122 05:50:50.888983 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-network-diagnostics_network-check-source-55646444c4-trplf_9d751cbb-f2e2-430d-9754-c882a5e924a5/check-endpoints/1.log" Jan 22 05:50:50 crc kubenswrapper[4933]: I0122 05:50:50.889350 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"299f34f819c082c5155d8bcae16e1a3bd9f840d52439f23143c93271f7420546"} Jan 22 05:50:52 crc kubenswrapper[4933]: I0122 05:50:52.335470 4933 cert_rotation.go:91] certificate rotation detected, shutting down client connections to start using new credentials Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.024198 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-jcpdb"] Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.025847 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-jcpdb" podUID="4c343d48-14c7-4862-ab0a-7851d4e0e72a" containerName="registry-server" containerID="cri-o://21079466bf7b8a1df4fe564f4f8e3cb054f0591b1723e899a0eddcee7e88c730" gracePeriod=30 Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.033269 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qxtfm"] Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.033788 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-qxtfm" podUID="ec6f4762-c94a-4c73-a84f-469729ae7bae" containerName="registry-server" containerID="cri-o://af68a8ac02eef998264dc15be2dc378a128bbf6fb1cbb9ebe692f30ccb8f7bb1" gracePeriod=30 Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.059524 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-vb2np"] Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.059834 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-vb2np" podUID="245c05e3-0c9d-4b20-8bef-b16bb0b492c1" containerName="marketplace-operator" containerID="cri-o://1967073f5c3232ad17533da893effca10e97bdb0639fd872b7ba9e0bc687b32b" gracePeriod=30 Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.070406 4933 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6cws8"] Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.070726 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-6cws8" podUID="0992ece5-d7dd-40c1-adc4-12711a7b3b69" containerName="registry-server" containerID="cri-o://4851553e5c57ce9097722f4c14d6ea50a54af0d57ca603b5f5b44985a51fa584" gracePeriod=30 Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.074175 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xmhw6"] Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.074473 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-xmhw6" podUID="480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d" containerName="registry-server" containerID="cri-o://7e176891d2476c96df95c5ff737fa32f2b5e3eb921918aad3e10b42fedaf5029" gracePeriod=30 Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.082851 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-9pxct"] Jan 22 05:50:54 crc kubenswrapper[4933]: E0122 05:50:54.083163 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.083183 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.083292 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.083735 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-9pxct" Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.093092 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-9pxct"] Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.187598 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/48949ed4-e60d-474a-9b57-1cf96d9428d6-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-9pxct\" (UID: \"48949ed4-e60d-474a-9b57-1cf96d9428d6\") " pod="openshift-marketplace/marketplace-operator-79b997595-9pxct" Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.187950 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pxqlm\" (UniqueName: \"kubernetes.io/projected/48949ed4-e60d-474a-9b57-1cf96d9428d6-kube-api-access-pxqlm\") pod \"marketplace-operator-79b997595-9pxct\" (UID: \"48949ed4-e60d-474a-9b57-1cf96d9428d6\") " pod="openshift-marketplace/marketplace-operator-79b997595-9pxct" Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.187979 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/48949ed4-e60d-474a-9b57-1cf96d9428d6-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-9pxct\" (UID: \"48949ed4-e60d-474a-9b57-1cf96d9428d6\") " pod="openshift-marketplace/marketplace-operator-79b997595-9pxct" Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.289157 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/48949ed4-e60d-474a-9b57-1cf96d9428d6-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-9pxct\" (UID: \"48949ed4-e60d-474a-9b57-1cf96d9428d6\") " pod="openshift-marketplace/marketplace-operator-79b997595-9pxct" Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.289204 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pxqlm\" (UniqueName: \"kubernetes.io/projected/48949ed4-e60d-474a-9b57-1cf96d9428d6-kube-api-access-pxqlm\") pod \"marketplace-operator-79b997595-9pxct\" (UID: \"48949ed4-e60d-474a-9b57-1cf96d9428d6\") " pod="openshift-marketplace/marketplace-operator-79b997595-9pxct" Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.289231 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/48949ed4-e60d-474a-9b57-1cf96d9428d6-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-9pxct\" (UID: \"48949ed4-e60d-474a-9b57-1cf96d9428d6\") " pod="openshift-marketplace/marketplace-operator-79b997595-9pxct" Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.290287 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/48949ed4-e60d-474a-9b57-1cf96d9428d6-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-9pxct\" (UID: \"48949ed4-e60d-474a-9b57-1cf96d9428d6\") " pod="openshift-marketplace/marketplace-operator-79b997595-9pxct" Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.295061 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: 
\"kubernetes.io/secret/48949ed4-e60d-474a-9b57-1cf96d9428d6-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-9pxct\" (UID: \"48949ed4-e60d-474a-9b57-1cf96d9428d6\") " pod="openshift-marketplace/marketplace-operator-79b997595-9pxct" Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.304715 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pxqlm\" (UniqueName: \"kubernetes.io/projected/48949ed4-e60d-474a-9b57-1cf96d9428d6-kube-api-access-pxqlm\") pod \"marketplace-operator-79b997595-9pxct\" (UID: \"48949ed4-e60d-474a-9b57-1cf96d9428d6\") " pod="openshift-marketplace/marketplace-operator-79b997595-9pxct" Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.420063 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-9pxct" Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.875931 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-9pxct"] Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.907297 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.907353 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.916215 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.916286 4933 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="316e39fa653004d010cfb005d9eb456c2d844660f5a292f6bba64a8d5e75b4b6" exitCode=137 Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.916387 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.916745 4933 scope.go:117] "RemoveContainer" containerID="316e39fa653004d010cfb005d9eb456c2d844660f5a292f6bba64a8d5e75b4b6" Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.919316 4933 generic.go:334] "Generic (PLEG): container finished" podID="0992ece5-d7dd-40c1-adc4-12711a7b3b69" containerID="4851553e5c57ce9097722f4c14d6ea50a54af0d57ca603b5f5b44985a51fa584" exitCode=0 Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.919384 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6cws8" event={"ID":"0992ece5-d7dd-40c1-adc4-12711a7b3b69","Type":"ContainerDied","Data":"4851553e5c57ce9097722f4c14d6ea50a54af0d57ca603b5f5b44985a51fa584"} Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.920305 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-9pxct" event={"ID":"48949ed4-e60d-474a-9b57-1cf96d9428d6","Type":"ContainerStarted","Data":"0019bce54917f53583369c509d7a83cc637dece0be7ae035b32d2a3bdfbb115e"} Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.921893 4933 generic.go:334] "Generic (PLEG): container finished" podID="ec6f4762-c94a-4c73-a84f-469729ae7bae" containerID="af68a8ac02eef998264dc15be2dc378a128bbf6fb1cbb9ebe692f30ccb8f7bb1" exitCode=0 Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.921932 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qxtfm" event={"ID":"ec6f4762-c94a-4c73-a84f-469729ae7bae","Type":"ContainerDied","Data":"af68a8ac02eef998264dc15be2dc378a128bbf6fb1cbb9ebe692f30ccb8f7bb1"} Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.923296 4933 generic.go:334] "Generic (PLEG): container finished" podID="4c343d48-14c7-4862-ab0a-7851d4e0e72a" containerID="21079466bf7b8a1df4fe564f4f8e3cb054f0591b1723e899a0eddcee7e88c730" exitCode=0 Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.923335 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jcpdb" event={"ID":"4c343d48-14c7-4862-ab0a-7851d4e0e72a","Type":"ContainerDied","Data":"21079466bf7b8a1df4fe564f4f8e3cb054f0591b1723e899a0eddcee7e88c730"} Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.926246 4933 generic.go:334] "Generic (PLEG): container finished" podID="480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d" containerID="7e176891d2476c96df95c5ff737fa32f2b5e3eb921918aad3e10b42fedaf5029" exitCode=0 Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.926335 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xmhw6" event={"ID":"480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d","Type":"ContainerDied","Data":"7e176891d2476c96df95c5ff737fa32f2b5e3eb921918aad3e10b42fedaf5029"} Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.932290 4933 generic.go:334] "Generic (PLEG): container finished" podID="245c05e3-0c9d-4b20-8bef-b16bb0b492c1" containerID="1967073f5c3232ad17533da893effca10e97bdb0639fd872b7ba9e0bc687b32b" exitCode=0 Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.932333 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-vb2np" event={"ID":"245c05e3-0c9d-4b20-8bef-b16bb0b492c1","Type":"ContainerDied","Data":"1967073f5c3232ad17533da893effca10e97bdb0639fd872b7ba9e0bc687b32b"} Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 
Jan 22 05:50:54 crc kubenswrapper[4933]: E0122 05:50:54.935963 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"316e39fa653004d010cfb005d9eb456c2d844660f5a292f6bba64a8d5e75b4b6\": container with ID starting with 316e39fa653004d010cfb005d9eb456c2d844660f5a292f6bba64a8d5e75b4b6 not found: ID does not exist" containerID="316e39fa653004d010cfb005d9eb456c2d844660f5a292f6bba64a8d5e75b4b6"
Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.936049 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"316e39fa653004d010cfb005d9eb456c2d844660f5a292f6bba64a8d5e75b4b6"} err="failed to get container status \"316e39fa653004d010cfb005d9eb456c2d844660f5a292f6bba64a8d5e75b4b6\": rpc error: code = NotFound desc = could not find container \"316e39fa653004d010cfb005d9eb456c2d844660f5a292f6bba64a8d5e75b4b6\": container with ID starting with 316e39fa653004d010cfb005d9eb456c2d844660f5a292f6bba64a8d5e75b4b6 not found: ID does not exist"
Jan 22 05:50:54 crc kubenswrapper[4933]: I0122 05:50:54.999828 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:54.999914 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:54.999970 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:54.999998 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.000053 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.000381 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue ""
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.000431 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.001854 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.002275 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.021985 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.036532 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-jcpdb" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.101470 4933 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.101522 4933 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.101538 4933 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.101548 4933 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.101558 4933 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.207535 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c343d48-14c7-4862-ab0a-7851d4e0e72a-utilities\") pod \"4c343d48-14c7-4862-ab0a-7851d4e0e72a\" (UID: \"4c343d48-14c7-4862-ab0a-7851d4e0e72a\") " Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.207619 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c343d48-14c7-4862-ab0a-7851d4e0e72a-catalog-content\") pod \"4c343d48-14c7-4862-ab0a-7851d4e0e72a\" (UID: \"4c343d48-14c7-4862-ab0a-7851d4e0e72a\") " Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.207662 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sgjnd\" (UniqueName: \"kubernetes.io/projected/4c343d48-14c7-4862-ab0a-7851d4e0e72a-kube-api-access-sgjnd\") pod \"4c343d48-14c7-4862-ab0a-7851d4e0e72a\" (UID: \"4c343d48-14c7-4862-ab0a-7851d4e0e72a\") " Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.208911 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c343d48-14c7-4862-ab0a-7851d4e0e72a-utilities" (OuterVolumeSpecName: "utilities") pod "4c343d48-14c7-4862-ab0a-7851d4e0e72a" (UID: "4c343d48-14c7-4862-ab0a-7851d4e0e72a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.211036 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c343d48-14c7-4862-ab0a-7851d4e0e72a-kube-api-access-sgjnd" (OuterVolumeSpecName: "kube-api-access-sgjnd") pod "4c343d48-14c7-4862-ab0a-7851d4e0e72a" (UID: "4c343d48-14c7-4862-ab0a-7851d4e0e72a"). InnerVolumeSpecName "kube-api-access-sgjnd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.265269 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c343d48-14c7-4862-ab0a-7851d4e0e72a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4c343d48-14c7-4862-ab0a-7851d4e0e72a" (UID: "4c343d48-14c7-4862-ab0a-7851d4e0e72a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.287282 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xmhw6" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.302614 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-vb2np" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.309703 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c343d48-14c7-4862-ab0a-7851d4e0e72a-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.309741 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c343d48-14c7-4862-ab0a-7851d4e0e72a-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.309755 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sgjnd\" (UniqueName: \"kubernetes.io/projected/4c343d48-14c7-4862-ab0a-7851d4e0e72a-kube-api-access-sgjnd\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.316506 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6cws8" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.321119 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-qxtfm" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.410206 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-chxqs\" (UniqueName: \"kubernetes.io/projected/245c05e3-0c9d-4b20-8bef-b16bb0b492c1-kube-api-access-chxqs\") pod \"245c05e3-0c9d-4b20-8bef-b16bb0b492c1\" (UID: \"245c05e3-0c9d-4b20-8bef-b16bb0b492c1\") " Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.410259 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/245c05e3-0c9d-4b20-8bef-b16bb0b492c1-marketplace-trusted-ca\") pod \"245c05e3-0c9d-4b20-8bef-b16bb0b492c1\" (UID: \"245c05e3-0c9d-4b20-8bef-b16bb0b492c1\") " Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.410301 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v6fb4\" (UniqueName: \"kubernetes.io/projected/0992ece5-d7dd-40c1-adc4-12711a7b3b69-kube-api-access-v6fb4\") pod \"0992ece5-d7dd-40c1-adc4-12711a7b3b69\" (UID: \"0992ece5-d7dd-40c1-adc4-12711a7b3b69\") " Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.410333 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d-utilities\") pod \"480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d\" (UID: \"480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d\") " Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.410358 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z7r9z\" (UniqueName: \"kubernetes.io/projected/480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d-kube-api-access-z7r9z\") pod \"480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d\" (UID: \"480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d\") " Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.410374 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0992ece5-d7dd-40c1-adc4-12711a7b3b69-catalog-content\") pod \"0992ece5-d7dd-40c1-adc4-12711a7b3b69\" (UID: \"0992ece5-d7dd-40c1-adc4-12711a7b3b69\") " Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.410402 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/245c05e3-0c9d-4b20-8bef-b16bb0b492c1-marketplace-operator-metrics\") pod \"245c05e3-0c9d-4b20-8bef-b16bb0b492c1\" (UID: \"245c05e3-0c9d-4b20-8bef-b16bb0b492c1\") " Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.410420 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0992ece5-d7dd-40c1-adc4-12711a7b3b69-utilities\") pod \"0992ece5-d7dd-40c1-adc4-12711a7b3b69\" (UID: \"0992ece5-d7dd-40c1-adc4-12711a7b3b69\") " Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.410451 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d-catalog-content\") pod \"480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d\" (UID: \"480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d\") " Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.411443 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/configmap/245c05e3-0c9d-4b20-8bef-b16bb0b492c1-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "245c05e3-0c9d-4b20-8bef-b16bb0b492c1" (UID: "245c05e3-0c9d-4b20-8bef-b16bb0b492c1"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.411654 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d-utilities" (OuterVolumeSpecName: "utilities") pod "480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d" (UID: "480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.412100 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0992ece5-d7dd-40c1-adc4-12711a7b3b69-utilities" (OuterVolumeSpecName: "utilities") pod "0992ece5-d7dd-40c1-adc4-12711a7b3b69" (UID: "0992ece5-d7dd-40c1-adc4-12711a7b3b69"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.415699 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d-kube-api-access-z7r9z" (OuterVolumeSpecName: "kube-api-access-z7r9z") pod "480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d" (UID: "480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d"). InnerVolumeSpecName "kube-api-access-z7r9z". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.415820 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0992ece5-d7dd-40c1-adc4-12711a7b3b69-kube-api-access-v6fb4" (OuterVolumeSpecName: "kube-api-access-v6fb4") pod "0992ece5-d7dd-40c1-adc4-12711a7b3b69" (UID: "0992ece5-d7dd-40c1-adc4-12711a7b3b69"). InnerVolumeSpecName "kube-api-access-v6fb4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.416455 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/245c05e3-0c9d-4b20-8bef-b16bb0b492c1-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "245c05e3-0c9d-4b20-8bef-b16bb0b492c1" (UID: "245c05e3-0c9d-4b20-8bef-b16bb0b492c1"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.417723 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/245c05e3-0c9d-4b20-8bef-b16bb0b492c1-kube-api-access-chxqs" (OuterVolumeSpecName: "kube-api-access-chxqs") pod "245c05e3-0c9d-4b20-8bef-b16bb0b492c1" (UID: "245c05e3-0c9d-4b20-8bef-b16bb0b492c1"). InnerVolumeSpecName "kube-api-access-chxqs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.462111 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0992ece5-d7dd-40c1-adc4-12711a7b3b69-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0992ece5-d7dd-40c1-adc4-12711a7b3b69" (UID: "0992ece5-d7dd-40c1-adc4-12711a7b3b69"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.510969 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fpsgr\" (UniqueName: \"kubernetes.io/projected/ec6f4762-c94a-4c73-a84f-469729ae7bae-kube-api-access-fpsgr\") pod \"ec6f4762-c94a-4c73-a84f-469729ae7bae\" (UID: \"ec6f4762-c94a-4c73-a84f-469729ae7bae\") " Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.511297 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ec6f4762-c94a-4c73-a84f-469729ae7bae-utilities\") pod \"ec6f4762-c94a-4c73-a84f-469729ae7bae\" (UID: \"ec6f4762-c94a-4c73-a84f-469729ae7bae\") " Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.511381 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ec6f4762-c94a-4c73-a84f-469729ae7bae-catalog-content\") pod \"ec6f4762-c94a-4c73-a84f-469729ae7bae\" (UID: \"ec6f4762-c94a-4c73-a84f-469729ae7bae\") " Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.511599 4933 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/245c05e3-0c9d-4b20-8bef-b16bb0b492c1-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.511622 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0992ece5-d7dd-40c1-adc4-12711a7b3b69-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.511631 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-chxqs\" (UniqueName: \"kubernetes.io/projected/245c05e3-0c9d-4b20-8bef-b16bb0b492c1-kube-api-access-chxqs\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.511640 4933 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/245c05e3-0c9d-4b20-8bef-b16bb0b492c1-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.511648 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v6fb4\" (UniqueName: \"kubernetes.io/projected/0992ece5-d7dd-40c1-adc4-12711a7b3b69-kube-api-access-v6fb4\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.511656 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.511664 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z7r9z\" (UniqueName: \"kubernetes.io/projected/480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d-kube-api-access-z7r9z\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.511673 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0992ece5-d7dd-40c1-adc4-12711a7b3b69-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.512301 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ec6f4762-c94a-4c73-a84f-469729ae7bae-utilities" 
(OuterVolumeSpecName: "utilities") pod "ec6f4762-c94a-4c73-a84f-469729ae7bae" (UID: "ec6f4762-c94a-4c73-a84f-469729ae7bae"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.514610 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ec6f4762-c94a-4c73-a84f-469729ae7bae-kube-api-access-fpsgr" (OuterVolumeSpecName: "kube-api-access-fpsgr") pod "ec6f4762-c94a-4c73-a84f-469729ae7bae" (UID: "ec6f4762-c94a-4c73-a84f-469729ae7bae"). InnerVolumeSpecName "kube-api-access-fpsgr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.552765 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d" (UID: "480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.568276 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ec6f4762-c94a-4c73-a84f-469729ae7bae-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ec6f4762-c94a-4c73-a84f-469729ae7bae" (UID: "ec6f4762-c94a-4c73-a84f-469729ae7bae"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.612301 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fpsgr\" (UniqueName: \"kubernetes.io/projected/ec6f4762-c94a-4c73-a84f-469729ae7bae-kube-api-access-fpsgr\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.612345 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ec6f4762-c94a-4c73-a84f-469729ae7bae-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.612361 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.612371 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ec6f4762-c94a-4c73-a84f-469729ae7bae-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.938560 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-vb2np" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.938508 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-vb2np" event={"ID":"245c05e3-0c9d-4b20-8bef-b16bb0b492c1","Type":"ContainerDied","Data":"67252655a871b4a981865fcc0234ec1317250be407ebcd390e47bb87cf977ee5"} Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.938734 4933 scope.go:117] "RemoveContainer" containerID="1967073f5c3232ad17533da893effca10e97bdb0639fd872b7ba9e0bc687b32b" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.943036 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6cws8" event={"ID":"0992ece5-d7dd-40c1-adc4-12711a7b3b69","Type":"ContainerDied","Data":"e8bc1bb5924f2fe5aaeaa451bfe67e1c84ecdd22205846ad32f15fb0926a8035"} Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.943211 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6cws8" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.944591 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-9pxct" event={"ID":"48949ed4-e60d-474a-9b57-1cf96d9428d6","Type":"ContainerStarted","Data":"393c95e4239fec3d3435b055c8cea86fd9231eb1fb00783525f7c9af27fc7a72"} Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.944800 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-9pxct" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.947498 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qxtfm" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.947495 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qxtfm" event={"ID":"ec6f4762-c94a-4c73-a84f-469729ae7bae","Type":"ContainerDied","Data":"9030a143aad277e7ed43f0a5bcaf04a3ba5f8b4c51ce39f7fc34face79482428"} Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.949213 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jcpdb" event={"ID":"4c343d48-14c7-4862-ab0a-7851d4e0e72a","Type":"ContainerDied","Data":"e325e8f31faf9929e9c3eec0c5fa251af8ea7aa3dfe6b0cfea13a9868daa89bb"} Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.949282 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jcpdb" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.950900 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-9pxct" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.956229 4933 scope.go:117] "RemoveContainer" containerID="4851553e5c57ce9097722f4c14d6ea50a54af0d57ca603b5f5b44985a51fa584" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.956761 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xmhw6" event={"ID":"480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d","Type":"ContainerDied","Data":"c28d8ebcd6eefb076f1eb828e3508c40964973525c8f928d92faebc43e21ef9b"} Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.956813 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-xmhw6" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.962730 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-9pxct" podStartSLOduration=1.962712397 podStartE2EDuration="1.962712397s" podCreationTimestamp="2026-01-22 05:50:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:50:55.959817074 +0000 UTC m=+303.796942427" watchObservedRunningTime="2026-01-22 05:50:55.962712397 +0000 UTC m=+303.799837750" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.977544 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-vb2np"] Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.977692 4933 scope.go:117] "RemoveContainer" containerID="567daa808d7de70d9ba86adefc69c207e7ad743f7887e8965ef3eda716462bf3" Jan 22 05:50:55 crc kubenswrapper[4933]: I0122 05:50:55.981408 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-vb2np"] Jan 22 05:50:56 crc kubenswrapper[4933]: I0122 05:50:56.021238 4933 scope.go:117] "RemoveContainer" containerID="947cfa8c81af705b5addf0892ac881baf528d00ac7c27a078325dea971669f52" Jan 22 05:50:56 crc kubenswrapper[4933]: I0122 05:50:56.033018 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6cws8"] Jan 22 05:50:56 crc kubenswrapper[4933]: I0122 05:50:56.044776 4933 scope.go:117] "RemoveContainer" containerID="af68a8ac02eef998264dc15be2dc378a128bbf6fb1cbb9ebe692f30ccb8f7bb1" Jan 22 05:50:56 crc kubenswrapper[4933]: I0122 05:50:56.051811 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-6cws8"] Jan 22 05:50:56 crc kubenswrapper[4933]: I0122 05:50:56.054795 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qxtfm"] Jan 22 05:50:56 crc kubenswrapper[4933]: I0122 05:50:56.057358 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-qxtfm"] Jan 22 05:50:56 crc kubenswrapper[4933]: I0122 05:50:56.060125 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-jcpdb"] Jan 22 05:50:56 crc kubenswrapper[4933]: I0122 05:50:56.060570 4933 scope.go:117] "RemoveContainer" containerID="b8c7dcef9a346edddb43b8a85eae7c2faadaa6d5aea7bdcdffb0e5c145835f3f" Jan 22 05:50:56 crc kubenswrapper[4933]: I0122 05:50:56.064321 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-jcpdb"] Jan 22 05:50:56 crc kubenswrapper[4933]: I0122 05:50:56.065543 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xmhw6"] Jan 22 05:50:56 crc kubenswrapper[4933]: I0122 05:50:56.067988 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-xmhw6"] Jan 22 05:50:56 crc kubenswrapper[4933]: I0122 05:50:56.076963 4933 scope.go:117] "RemoveContainer" containerID="7bde1b9339824c20e786404fb59e84045e564639a843fcd52557dd0f2b1e4528" Jan 22 05:50:56 crc kubenswrapper[4933]: I0122 05:50:56.089276 4933 scope.go:117] "RemoveContainer" containerID="21079466bf7b8a1df4fe564f4f8e3cb054f0591b1723e899a0eddcee7e88c730" Jan 22 05:50:56 crc kubenswrapper[4933]: I0122 05:50:56.099958 4933 
scope.go:117] "RemoveContainer" containerID="a4a6a7dbe6e4a3fd1f1a9286a2ffe87c9e796865e88e22307b69863ef9e258ef" Jan 22 05:50:56 crc kubenswrapper[4933]: I0122 05:50:56.117157 4933 scope.go:117] "RemoveContainer" containerID="cd289e74b2354873776c2812136e49480804104fbd74ed32096474a7c4834622" Jan 22 05:50:56 crc kubenswrapper[4933]: I0122 05:50:56.130501 4933 scope.go:117] "RemoveContainer" containerID="7e176891d2476c96df95c5ff737fa32f2b5e3eb921918aad3e10b42fedaf5029" Jan 22 05:50:56 crc kubenswrapper[4933]: I0122 05:50:56.170142 4933 scope.go:117] "RemoveContainer" containerID="eda1389c004388f74d26f6548a9539440547b1a26f39d0f4dc8f2c7356c2d42a" Jan 22 05:50:56 crc kubenswrapper[4933]: I0122 05:50:56.185221 4933 scope.go:117] "RemoveContainer" containerID="85e636ac8af1c85c8d8df702289b2ed04fbbaf9c26247491f57c57a32cfa66f8" Jan 22 05:50:56 crc kubenswrapper[4933]: I0122 05:50:56.506200 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0992ece5-d7dd-40c1-adc4-12711a7b3b69" path="/var/lib/kubelet/pods/0992ece5-d7dd-40c1-adc4-12711a7b3b69/volumes" Jan 22 05:50:56 crc kubenswrapper[4933]: I0122 05:50:56.507679 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="245c05e3-0c9d-4b20-8bef-b16bb0b492c1" path="/var/lib/kubelet/pods/245c05e3-0c9d-4b20-8bef-b16bb0b492c1/volumes" Jan 22 05:50:56 crc kubenswrapper[4933]: I0122 05:50:56.508736 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d" path="/var/lib/kubelet/pods/480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d/volumes" Jan 22 05:50:56 crc kubenswrapper[4933]: I0122 05:50:56.511220 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c343d48-14c7-4862-ab0a-7851d4e0e72a" path="/var/lib/kubelet/pods/4c343d48-14c7-4862-ab0a-7851d4e0e72a/volumes" Jan 22 05:50:56 crc kubenswrapper[4933]: I0122 05:50:56.512840 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ec6f4762-c94a-4c73-a84f-469729ae7bae" path="/var/lib/kubelet/pods/ec6f4762-c94a-4c73-a84f-469729ae7bae/volumes" Jan 22 05:50:56 crc kubenswrapper[4933]: I0122 05:50:56.514888 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Jan 22 05:51:04 crc kubenswrapper[4933]: I0122 05:51:04.538382 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-7f5f69b8b5-ldwt8"] Jan 22 05:51:04 crc kubenswrapper[4933]: I0122 05:51:04.539371 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-7f5f69b8b5-ldwt8" podUID="9532dcde-8f1b-44b7-8e10-e7e76d9548fc" containerName="controller-manager" containerID="cri-o://3d89c0c6fdb787cbd092aac13876e43dc86babfda2df94ada483418ad8ef32a1" gracePeriod=30 Jan 22 05:51:04 crc kubenswrapper[4933]: I0122 05:51:04.564711 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7695d8c949-8kjv5"] Jan 22 05:51:04 crc kubenswrapper[4933]: I0122 05:51:04.564952 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-8kjv5" podUID="6a21b4bc-ac49-4926-9fd8-82597f52b529" containerName="route-controller-manager" containerID="cri-o://7376e01f4d920fb2398c39cb3f4b003c733a2368ef4cfb146d74b822af081bc8" gracePeriod=30 Jan 22 05:51:05 crc kubenswrapper[4933]: 
Jan 22 05:51:05 crc kubenswrapper[4933]: I0122 05:51:05.003241 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-8kjv5" event={"ID":"6a21b4bc-ac49-4926-9fd8-82597f52b529","Type":"ContainerDied","Data":"7376e01f4d920fb2398c39cb3f4b003c733a2368ef4cfb146d74b822af081bc8"}
Jan 22 05:51:05 crc kubenswrapper[4933]: I0122 05:51:05.003516 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-8kjv5" event={"ID":"6a21b4bc-ac49-4926-9fd8-82597f52b529","Type":"ContainerDied","Data":"7f2c311b0f3e9649b4da53475c539979dc2fe34a2b69988ada2100d105f38091"}
Jan 22 05:51:05 crc kubenswrapper[4933]: I0122 05:51:05.003542 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7f2c311b0f3e9649b4da53475c539979dc2fe34a2b69988ada2100d105f38091"
Jan 22 05:51:05 crc kubenswrapper[4933]: I0122 05:51:05.004875 4933 generic.go:334] "Generic (PLEG): container finished" podID="9532dcde-8f1b-44b7-8e10-e7e76d9548fc" containerID="3d89c0c6fdb787cbd092aac13876e43dc86babfda2df94ada483418ad8ef32a1" exitCode=0
Jan 22 05:51:05 crc kubenswrapper[4933]: I0122 05:51:05.004910 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7f5f69b8b5-ldwt8" event={"ID":"9532dcde-8f1b-44b7-8e10-e7e76d9548fc","Type":"ContainerDied","Data":"3d89c0c6fdb787cbd092aac13876e43dc86babfda2df94ada483418ad8ef32a1"}
Jan 22 05:51:05 crc kubenswrapper[4933]: I0122 05:51:05.004943 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7f5f69b8b5-ldwt8" event={"ID":"9532dcde-8f1b-44b7-8e10-e7e76d9548fc","Type":"ContainerDied","Data":"345f0eb6aeb1a3daf36a10cd3b136724ea0c1709f66ec257a9fd4e3d5dcc7476"}
Jan 22 05:51:05 crc kubenswrapper[4933]: I0122 05:51:05.004957 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="345f0eb6aeb1a3daf36a10cd3b136724ea0c1709f66ec257a9fd4e3d5dcc7476"
Jan 22 05:51:05 crc kubenswrapper[4933]: I0122 05:51:05.017574 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7f5f69b8b5-ldwt8"
Jan 22 05:51:05 crc kubenswrapper[4933]: I0122 05:51:05.023177 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-8kjv5"
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-8kjv5" Jan 22 05:51:05 crc kubenswrapper[4933]: I0122 05:51:05.140417 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9532dcde-8f1b-44b7-8e10-e7e76d9548fc-client-ca\") pod \"9532dcde-8f1b-44b7-8e10-e7e76d9548fc\" (UID: \"9532dcde-8f1b-44b7-8e10-e7e76d9548fc\") " Jan 22 05:51:05 crc kubenswrapper[4933]: I0122 05:51:05.140489 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9532dcde-8f1b-44b7-8e10-e7e76d9548fc-config\") pod \"9532dcde-8f1b-44b7-8e10-e7e76d9548fc\" (UID: \"9532dcde-8f1b-44b7-8e10-e7e76d9548fc\") " Jan 22 05:51:05 crc kubenswrapper[4933]: I0122 05:51:05.140566 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a21b4bc-ac49-4926-9fd8-82597f52b529-config\") pod \"6a21b4bc-ac49-4926-9fd8-82597f52b529\" (UID: \"6a21b4bc-ac49-4926-9fd8-82597f52b529\") " Jan 22 05:51:05 crc kubenswrapper[4933]: I0122 05:51:05.140589 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6a21b4bc-ac49-4926-9fd8-82597f52b529-client-ca\") pod \"6a21b4bc-ac49-4926-9fd8-82597f52b529\" (UID: \"6a21b4bc-ac49-4926-9fd8-82597f52b529\") " Jan 22 05:51:05 crc kubenswrapper[4933]: I0122 05:51:05.140617 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7z97\" (UniqueName: \"kubernetes.io/projected/9532dcde-8f1b-44b7-8e10-e7e76d9548fc-kube-api-access-x7z97\") pod \"9532dcde-8f1b-44b7-8e10-e7e76d9548fc\" (UID: \"9532dcde-8f1b-44b7-8e10-e7e76d9548fc\") " Jan 22 05:51:05 crc kubenswrapper[4933]: I0122 05:51:05.140645 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9532dcde-8f1b-44b7-8e10-e7e76d9548fc-serving-cert\") pod \"9532dcde-8f1b-44b7-8e10-e7e76d9548fc\" (UID: \"9532dcde-8f1b-44b7-8e10-e7e76d9548fc\") " Jan 22 05:51:05 crc kubenswrapper[4933]: I0122 05:51:05.140713 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sndg2\" (UniqueName: \"kubernetes.io/projected/6a21b4bc-ac49-4926-9fd8-82597f52b529-kube-api-access-sndg2\") pod \"6a21b4bc-ac49-4926-9fd8-82597f52b529\" (UID: \"6a21b4bc-ac49-4926-9fd8-82597f52b529\") " Jan 22 05:51:05 crc kubenswrapper[4933]: I0122 05:51:05.140808 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6a21b4bc-ac49-4926-9fd8-82597f52b529-serving-cert\") pod \"6a21b4bc-ac49-4926-9fd8-82597f52b529\" (UID: \"6a21b4bc-ac49-4926-9fd8-82597f52b529\") " Jan 22 05:51:05 crc kubenswrapper[4933]: I0122 05:51:05.140838 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9532dcde-8f1b-44b7-8e10-e7e76d9548fc-proxy-ca-bundles\") pod \"9532dcde-8f1b-44b7-8e10-e7e76d9548fc\" (UID: \"9532dcde-8f1b-44b7-8e10-e7e76d9548fc\") " Jan 22 05:51:05 crc kubenswrapper[4933]: I0122 05:51:05.141253 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9532dcde-8f1b-44b7-8e10-e7e76d9548fc-client-ca" (OuterVolumeSpecName: "client-ca") pod "9532dcde-8f1b-44b7-8e10-e7e76d9548fc" 
(UID: "9532dcde-8f1b-44b7-8e10-e7e76d9548fc"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:51:05 crc kubenswrapper[4933]: I0122 05:51:05.141406 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9532dcde-8f1b-44b7-8e10-e7e76d9548fc-config" (OuterVolumeSpecName: "config") pod "9532dcde-8f1b-44b7-8e10-e7e76d9548fc" (UID: "9532dcde-8f1b-44b7-8e10-e7e76d9548fc"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:51:05 crc kubenswrapper[4933]: I0122 05:51:05.141455 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a21b4bc-ac49-4926-9fd8-82597f52b529-client-ca" (OuterVolumeSpecName: "client-ca") pod "6a21b4bc-ac49-4926-9fd8-82597f52b529" (UID: "6a21b4bc-ac49-4926-9fd8-82597f52b529"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:51:05 crc kubenswrapper[4933]: I0122 05:51:05.141511 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a21b4bc-ac49-4926-9fd8-82597f52b529-config" (OuterVolumeSpecName: "config") pod "6a21b4bc-ac49-4926-9fd8-82597f52b529" (UID: "6a21b4bc-ac49-4926-9fd8-82597f52b529"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:51:05 crc kubenswrapper[4933]: I0122 05:51:05.141607 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9532dcde-8f1b-44b7-8e10-e7e76d9548fc-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "9532dcde-8f1b-44b7-8e10-e7e76d9548fc" (UID: "9532dcde-8f1b-44b7-8e10-e7e76d9548fc"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:51:05 crc kubenswrapper[4933]: I0122 05:51:05.145698 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a21b4bc-ac49-4926-9fd8-82597f52b529-kube-api-access-sndg2" (OuterVolumeSpecName: "kube-api-access-sndg2") pod "6a21b4bc-ac49-4926-9fd8-82597f52b529" (UID: "6a21b4bc-ac49-4926-9fd8-82597f52b529"). InnerVolumeSpecName "kube-api-access-sndg2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:51:05 crc kubenswrapper[4933]: I0122 05:51:05.146814 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9532dcde-8f1b-44b7-8e10-e7e76d9548fc-kube-api-access-x7z97" (OuterVolumeSpecName: "kube-api-access-x7z97") pod "9532dcde-8f1b-44b7-8e10-e7e76d9548fc" (UID: "9532dcde-8f1b-44b7-8e10-e7e76d9548fc"). InnerVolumeSpecName "kube-api-access-x7z97". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:51:05 crc kubenswrapper[4933]: I0122 05:51:05.146878 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9532dcde-8f1b-44b7-8e10-e7e76d9548fc-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9532dcde-8f1b-44b7-8e10-e7e76d9548fc" (UID: "9532dcde-8f1b-44b7-8e10-e7e76d9548fc"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:51:05 crc kubenswrapper[4933]: I0122 05:51:05.150570 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a21b4bc-ac49-4926-9fd8-82597f52b529-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6a21b4bc-ac49-4926-9fd8-82597f52b529" (UID: "6a21b4bc-ac49-4926-9fd8-82597f52b529"). 
InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:51:05 crc kubenswrapper[4933]: I0122 05:51:05.241530 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sndg2\" (UniqueName: \"kubernetes.io/projected/6a21b4bc-ac49-4926-9fd8-82597f52b529-kube-api-access-sndg2\") on node \"crc\" DevicePath \"\"" Jan 22 05:51:05 crc kubenswrapper[4933]: I0122 05:51:05.241563 4933 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6a21b4bc-ac49-4926-9fd8-82597f52b529-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:51:05 crc kubenswrapper[4933]: I0122 05:51:05.241572 4933 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9532dcde-8f1b-44b7-8e10-e7e76d9548fc-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 22 05:51:05 crc kubenswrapper[4933]: I0122 05:51:05.241581 4933 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9532dcde-8f1b-44b7-8e10-e7e76d9548fc-client-ca\") on node \"crc\" DevicePath \"\"" Jan 22 05:51:05 crc kubenswrapper[4933]: I0122 05:51:05.241588 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9532dcde-8f1b-44b7-8e10-e7e76d9548fc-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:51:05 crc kubenswrapper[4933]: I0122 05:51:05.241596 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a21b4bc-ac49-4926-9fd8-82597f52b529-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:51:05 crc kubenswrapper[4933]: I0122 05:51:05.241604 4933 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6a21b4bc-ac49-4926-9fd8-82597f52b529-client-ca\") on node \"crc\" DevicePath \"\"" Jan 22 05:51:05 crc kubenswrapper[4933]: I0122 05:51:05.241612 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7z97\" (UniqueName: \"kubernetes.io/projected/9532dcde-8f1b-44b7-8e10-e7e76d9548fc-kube-api-access-x7z97\") on node \"crc\" DevicePath \"\"" Jan 22 05:51:05 crc kubenswrapper[4933]: I0122 05:51:05.241620 4933 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9532dcde-8f1b-44b7-8e10-e7e76d9548fc-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:51:05 crc kubenswrapper[4933]: I0122 05:51:05.495104 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.009612 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7f5f69b8b5-ldwt8" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.009636 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-8kjv5" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.039941 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7695d8c949-8kjv5"] Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.043102 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7695d8c949-8kjv5"] Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.051473 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-7f5f69b8b5-ldwt8"] Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.054068 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-7f5f69b8b5-ldwt8"] Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.073899 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-d49bbb4c4-qnqtn"] Jan 22 05:51:06 crc kubenswrapper[4933]: E0122 05:51:06.074188 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec6f4762-c94a-4c73-a84f-469729ae7bae" containerName="registry-server" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.074210 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec6f4762-c94a-4c73-a84f-469729ae7bae" containerName="registry-server" Jan 22 05:51:06 crc kubenswrapper[4933]: E0122 05:51:06.074225 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec6f4762-c94a-4c73-a84f-469729ae7bae" containerName="extract-utilities" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.074234 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec6f4762-c94a-4c73-a84f-469729ae7bae" containerName="extract-utilities" Jan 22 05:51:06 crc kubenswrapper[4933]: E0122 05:51:06.074248 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0992ece5-d7dd-40c1-adc4-12711a7b3b69" containerName="extract-content" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.074257 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="0992ece5-d7dd-40c1-adc4-12711a7b3b69" containerName="extract-content" Jan 22 05:51:06 crc kubenswrapper[4933]: E0122 05:51:06.074271 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d" containerName="extract-utilities" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.074278 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d" containerName="extract-utilities" Jan 22 05:51:06 crc kubenswrapper[4933]: E0122 05:51:06.074287 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a21b4bc-ac49-4926-9fd8-82597f52b529" containerName="route-controller-manager" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.074295 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a21b4bc-ac49-4926-9fd8-82597f52b529" containerName="route-controller-manager" Jan 22 05:51:06 crc kubenswrapper[4933]: E0122 05:51:06.074308 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec6f4762-c94a-4c73-a84f-469729ae7bae" containerName="extract-content" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.074315 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec6f4762-c94a-4c73-a84f-469729ae7bae" containerName="extract-content" Jan 22 05:51:06 crc kubenswrapper[4933]: E0122 
Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.074332 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="0992ece5-d7dd-40c1-adc4-12711a7b3b69" containerName="registry-server"
Jan 22 05:51:06 crc kubenswrapper[4933]: E0122 05:51:06.074343 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d" containerName="registry-server"
Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.074351 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d" containerName="registry-server"
Jan 22 05:51:06 crc kubenswrapper[4933]: E0122 05:51:06.074359 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c343d48-14c7-4862-ab0a-7851d4e0e72a" containerName="extract-utilities"
Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.074367 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c343d48-14c7-4862-ab0a-7851d4e0e72a" containerName="extract-utilities"
Jan 22 05:51:06 crc kubenswrapper[4933]: E0122 05:51:06.074379 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c343d48-14c7-4862-ab0a-7851d4e0e72a" containerName="extract-content"
Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.074389 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c343d48-14c7-4862-ab0a-7851d4e0e72a" containerName="extract-content"
Jan 22 05:51:06 crc kubenswrapper[4933]: E0122 05:51:06.074399 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="245c05e3-0c9d-4b20-8bef-b16bb0b492c1" containerName="marketplace-operator"
Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.074408 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="245c05e3-0c9d-4b20-8bef-b16bb0b492c1" containerName="marketplace-operator"
Jan 22 05:51:06 crc kubenswrapper[4933]: E0122 05:51:06.074420 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0992ece5-d7dd-40c1-adc4-12711a7b3b69" containerName="extract-utilities"
Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.074427 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="0992ece5-d7dd-40c1-adc4-12711a7b3b69" containerName="extract-utilities"
Jan 22 05:51:06 crc kubenswrapper[4933]: E0122 05:51:06.074436 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9532dcde-8f1b-44b7-8e10-e7e76d9548fc" containerName="controller-manager"
Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.074443 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="9532dcde-8f1b-44b7-8e10-e7e76d9548fc" containerName="controller-manager"
Jan 22 05:51:06 crc kubenswrapper[4933]: E0122 05:51:06.074455 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d" containerName="extract-content"
Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.074463 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d" containerName="extract-content"
Jan 22 05:51:06 crc kubenswrapper[4933]: E0122 05:51:06.074474 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c343d48-14c7-4862-ab0a-7851d4e0e72a" containerName="registry-server"
Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.074481 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c343d48-14c7-4862-ab0a-7851d4e0e72a" containerName="registry-server"
Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.074591 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d" containerName="registry-server"
22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.074591 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="480c0b7c-fa2f-4a02-b29f-0fb1c05c2e2d" containerName="registry-server" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.074606 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="245c05e3-0c9d-4b20-8bef-b16bb0b492c1" containerName="marketplace-operator" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.074615 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="9532dcde-8f1b-44b7-8e10-e7e76d9548fc" containerName="controller-manager" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.074626 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a21b4bc-ac49-4926-9fd8-82597f52b529" containerName="route-controller-manager" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.074639 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec6f4762-c94a-4c73-a84f-469729ae7bae" containerName="registry-server" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.074649 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="0992ece5-d7dd-40c1-adc4-12711a7b3b69" containerName="registry-server" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.074663 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c343d48-14c7-4862-ab0a-7851d4e0e72a" containerName="registry-server" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.075128 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-d49bbb4c4-qnqtn" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.076543 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-58c6dfb578-vnnkh"] Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.077125 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-58c6dfb578-vnnkh" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.078232 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.078634 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.078955 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.079051 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.079237 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.079464 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.079519 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.079712 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.079911 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.079945 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.081706 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.089766 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.105193 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.110155 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-58c6dfb578-vnnkh"] Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.115440 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-d49bbb4c4-qnqtn"] Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.253036 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ed77d791-9cd1-478c-a0bc-fad66f13c466-client-ca\") pod \"controller-manager-58c6dfb578-vnnkh\" (UID: \"ed77d791-9cd1-478c-a0bc-fad66f13c466\") " pod="openshift-controller-manager/controller-manager-58c6dfb578-vnnkh" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.253116 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/bd5aa933-89b8-4db3-9ff0-f6065d023e6f-client-ca\") pod \"route-controller-manager-d49bbb4c4-qnqtn\" (UID: \"bd5aa933-89b8-4db3-9ff0-f6065d023e6f\") " pod="openshift-route-controller-manager/route-controller-manager-d49bbb4c4-qnqtn" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.253154 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed77d791-9cd1-478c-a0bc-fad66f13c466-config\") pod \"controller-manager-58c6dfb578-vnnkh\" (UID: \"ed77d791-9cd1-478c-a0bc-fad66f13c466\") " pod="openshift-controller-manager/controller-manager-58c6dfb578-vnnkh" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.253219 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ed77d791-9cd1-478c-a0bc-fad66f13c466-proxy-ca-bundles\") pod \"controller-manager-58c6dfb578-vnnkh\" (UID: \"ed77d791-9cd1-478c-a0bc-fad66f13c466\") " pod="openshift-controller-manager/controller-manager-58c6dfb578-vnnkh" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.253244 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xgngv\" (UniqueName: \"kubernetes.io/projected/ed77d791-9cd1-478c-a0bc-fad66f13c466-kube-api-access-xgngv\") pod \"controller-manager-58c6dfb578-vnnkh\" (UID: \"ed77d791-9cd1-478c-a0bc-fad66f13c466\") " pod="openshift-controller-manager/controller-manager-58c6dfb578-vnnkh" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.253328 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bd5aa933-89b8-4db3-9ff0-f6065d023e6f-serving-cert\") pod \"route-controller-manager-d49bbb4c4-qnqtn\" (UID: \"bd5aa933-89b8-4db3-9ff0-f6065d023e6f\") " pod="openshift-route-controller-manager/route-controller-manager-d49bbb4c4-qnqtn" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.253404 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v578b\" (UniqueName: \"kubernetes.io/projected/bd5aa933-89b8-4db3-9ff0-f6065d023e6f-kube-api-access-v578b\") pod \"route-controller-manager-d49bbb4c4-qnqtn\" (UID: \"bd5aa933-89b8-4db3-9ff0-f6065d023e6f\") " pod="openshift-route-controller-manager/route-controller-manager-d49bbb4c4-qnqtn" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.253451 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd5aa933-89b8-4db3-9ff0-f6065d023e6f-config\") pod \"route-controller-manager-d49bbb4c4-qnqtn\" (UID: \"bd5aa933-89b8-4db3-9ff0-f6065d023e6f\") " pod="openshift-route-controller-manager/route-controller-manager-d49bbb4c4-qnqtn" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.253555 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed77d791-9cd1-478c-a0bc-fad66f13c466-serving-cert\") pod \"controller-manager-58c6dfb578-vnnkh\" (UID: \"ed77d791-9cd1-478c-a0bc-fad66f13c466\") " pod="openshift-controller-manager/controller-manager-58c6dfb578-vnnkh" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.354794 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/ed77d791-9cd1-478c-a0bc-fad66f13c466-serving-cert\") pod \"controller-manager-58c6dfb578-vnnkh\" (UID: \"ed77d791-9cd1-478c-a0bc-fad66f13c466\") " pod="openshift-controller-manager/controller-manager-58c6dfb578-vnnkh" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.354859 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ed77d791-9cd1-478c-a0bc-fad66f13c466-client-ca\") pod \"controller-manager-58c6dfb578-vnnkh\" (UID: \"ed77d791-9cd1-478c-a0bc-fad66f13c466\") " pod="openshift-controller-manager/controller-manager-58c6dfb578-vnnkh" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.354881 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed77d791-9cd1-478c-a0bc-fad66f13c466-config\") pod \"controller-manager-58c6dfb578-vnnkh\" (UID: \"ed77d791-9cd1-478c-a0bc-fad66f13c466\") " pod="openshift-controller-manager/controller-manager-58c6dfb578-vnnkh" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.354898 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bd5aa933-89b8-4db3-9ff0-f6065d023e6f-client-ca\") pod \"route-controller-manager-d49bbb4c4-qnqtn\" (UID: \"bd5aa933-89b8-4db3-9ff0-f6065d023e6f\") " pod="openshift-route-controller-manager/route-controller-manager-d49bbb4c4-qnqtn" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.354928 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ed77d791-9cd1-478c-a0bc-fad66f13c466-proxy-ca-bundles\") pod \"controller-manager-58c6dfb578-vnnkh\" (UID: \"ed77d791-9cd1-478c-a0bc-fad66f13c466\") " pod="openshift-controller-manager/controller-manager-58c6dfb578-vnnkh" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.354948 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xgngv\" (UniqueName: \"kubernetes.io/projected/ed77d791-9cd1-478c-a0bc-fad66f13c466-kube-api-access-xgngv\") pod \"controller-manager-58c6dfb578-vnnkh\" (UID: \"ed77d791-9cd1-478c-a0bc-fad66f13c466\") " pod="openshift-controller-manager/controller-manager-58c6dfb578-vnnkh" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.354974 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bd5aa933-89b8-4db3-9ff0-f6065d023e6f-serving-cert\") pod \"route-controller-manager-d49bbb4c4-qnqtn\" (UID: \"bd5aa933-89b8-4db3-9ff0-f6065d023e6f\") " pod="openshift-route-controller-manager/route-controller-manager-d49bbb4c4-qnqtn" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.354996 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v578b\" (UniqueName: \"kubernetes.io/projected/bd5aa933-89b8-4db3-9ff0-f6065d023e6f-kube-api-access-v578b\") pod \"route-controller-manager-d49bbb4c4-qnqtn\" (UID: \"bd5aa933-89b8-4db3-9ff0-f6065d023e6f\") " pod="openshift-route-controller-manager/route-controller-manager-d49bbb4c4-qnqtn" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.355015 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd5aa933-89b8-4db3-9ff0-f6065d023e6f-config\") pod \"route-controller-manager-d49bbb4c4-qnqtn\" (UID: 
\"bd5aa933-89b8-4db3-9ff0-f6065d023e6f\") " pod="openshift-route-controller-manager/route-controller-manager-d49bbb4c4-qnqtn" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.356149 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd5aa933-89b8-4db3-9ff0-f6065d023e6f-config\") pod \"route-controller-manager-d49bbb4c4-qnqtn\" (UID: \"bd5aa933-89b8-4db3-9ff0-f6065d023e6f\") " pod="openshift-route-controller-manager/route-controller-manager-d49bbb4c4-qnqtn" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.356496 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ed77d791-9cd1-478c-a0bc-fad66f13c466-proxy-ca-bundles\") pod \"controller-manager-58c6dfb578-vnnkh\" (UID: \"ed77d791-9cd1-478c-a0bc-fad66f13c466\") " pod="openshift-controller-manager/controller-manager-58c6dfb578-vnnkh" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.357451 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ed77d791-9cd1-478c-a0bc-fad66f13c466-client-ca\") pod \"controller-manager-58c6dfb578-vnnkh\" (UID: \"ed77d791-9cd1-478c-a0bc-fad66f13c466\") " pod="openshift-controller-manager/controller-manager-58c6dfb578-vnnkh" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.357895 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed77d791-9cd1-478c-a0bc-fad66f13c466-config\") pod \"controller-manager-58c6dfb578-vnnkh\" (UID: \"ed77d791-9cd1-478c-a0bc-fad66f13c466\") " pod="openshift-controller-manager/controller-manager-58c6dfb578-vnnkh" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.359497 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bd5aa933-89b8-4db3-9ff0-f6065d023e6f-client-ca\") pod \"route-controller-manager-d49bbb4c4-qnqtn\" (UID: \"bd5aa933-89b8-4db3-9ff0-f6065d023e6f\") " pod="openshift-route-controller-manager/route-controller-manager-d49bbb4c4-qnqtn" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.360416 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed77d791-9cd1-478c-a0bc-fad66f13c466-serving-cert\") pod \"controller-manager-58c6dfb578-vnnkh\" (UID: \"ed77d791-9cd1-478c-a0bc-fad66f13c466\") " pod="openshift-controller-manager/controller-manager-58c6dfb578-vnnkh" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.371422 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bd5aa933-89b8-4db3-9ff0-f6065d023e6f-serving-cert\") pod \"route-controller-manager-d49bbb4c4-qnqtn\" (UID: \"bd5aa933-89b8-4db3-9ff0-f6065d023e6f\") " pod="openshift-route-controller-manager/route-controller-manager-d49bbb4c4-qnqtn" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.372856 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xgngv\" (UniqueName: \"kubernetes.io/projected/ed77d791-9cd1-478c-a0bc-fad66f13c466-kube-api-access-xgngv\") pod \"controller-manager-58c6dfb578-vnnkh\" (UID: \"ed77d791-9cd1-478c-a0bc-fad66f13c466\") " pod="openshift-controller-manager/controller-manager-58c6dfb578-vnnkh" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.385341 4933 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-v578b\" (UniqueName: \"kubernetes.io/projected/bd5aa933-89b8-4db3-9ff0-f6065d023e6f-kube-api-access-v578b\") pod \"route-controller-manager-d49bbb4c4-qnqtn\" (UID: \"bd5aa933-89b8-4db3-9ff0-f6065d023e6f\") " pod="openshift-route-controller-manager/route-controller-manager-d49bbb4c4-qnqtn" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.449838 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-d49bbb4c4-qnqtn" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.457989 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-58c6dfb578-vnnkh" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.502527 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6a21b4bc-ac49-4926-9fd8-82597f52b529" path="/var/lib/kubelet/pods/6a21b4bc-ac49-4926-9fd8-82597f52b529/volumes" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.503663 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9532dcde-8f1b-44b7-8e10-e7e76d9548fc" path="/var/lib/kubelet/pods/9532dcde-8f1b-44b7-8e10-e7e76d9548fc/volumes" Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.656216 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-d49bbb4c4-qnqtn"] Jan 22 05:51:06 crc kubenswrapper[4933]: I0122 05:51:06.926933 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-58c6dfb578-vnnkh"] Jan 22 05:51:06 crc kubenswrapper[4933]: W0122 05:51:06.930980 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poded77d791_9cd1_478c_a0bc_fad66f13c466.slice/crio-c798cfc7c34862ab75ac1b2afa537289f7c14d14f69daa5caa69c6a136c1024c WatchSource:0}: Error finding container c798cfc7c34862ab75ac1b2afa537289f7c14d14f69daa5caa69c6a136c1024c: Status 404 returned error can't find the container with id c798cfc7c34862ab75ac1b2afa537289f7c14d14f69daa5caa69c6a136c1024c Jan 22 05:51:07 crc kubenswrapper[4933]: I0122 05:51:07.017953 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-d49bbb4c4-qnqtn" event={"ID":"bd5aa933-89b8-4db3-9ff0-f6065d023e6f","Type":"ContainerStarted","Data":"41cef4319a6c53a5207b5a41e854b2d75c1424e4ec07ba9a5ead7b1680f80255"} Jan 22 05:51:07 crc kubenswrapper[4933]: I0122 05:51:07.018649 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-d49bbb4c4-qnqtn" Jan 22 05:51:07 crc kubenswrapper[4933]: I0122 05:51:07.018664 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-d49bbb4c4-qnqtn" event={"ID":"bd5aa933-89b8-4db3-9ff0-f6065d023e6f","Type":"ContainerStarted","Data":"d742ceda2d303b9d8bee262d462e3b25620ac05dde23094653db32630b31d96a"} Jan 22 05:51:07 crc kubenswrapper[4933]: I0122 05:51:07.019555 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-58c6dfb578-vnnkh" event={"ID":"ed77d791-9cd1-478c-a0bc-fad66f13c466","Type":"ContainerStarted","Data":"c798cfc7c34862ab75ac1b2afa537289f7c14d14f69daa5caa69c6a136c1024c"} Jan 22 05:51:07 crc kubenswrapper[4933]: I0122 05:51:07.206761 4933 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-d49bbb4c4-qnqtn" Jan 22 05:51:07 crc kubenswrapper[4933]: I0122 05:51:07.224546 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-d49bbb4c4-qnqtn" podStartSLOduration=3.224527674 podStartE2EDuration="3.224527674s" podCreationTimestamp="2026-01-22 05:51:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:51:07.035212798 +0000 UTC m=+314.872338191" watchObservedRunningTime="2026-01-22 05:51:07.224527674 +0000 UTC m=+315.061653037" Jan 22 05:51:08 crc kubenswrapper[4933]: I0122 05:51:08.027062 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-58c6dfb578-vnnkh" event={"ID":"ed77d791-9cd1-478c-a0bc-fad66f13c466","Type":"ContainerStarted","Data":"7bea6d68bde9105dbc2e8966005f1239b1e0a7c2e9892887dd43b109273b5fb8"} Jan 22 05:51:08 crc kubenswrapper[4933]: I0122 05:51:08.045028 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-58c6dfb578-vnnkh" podStartSLOduration=4.045009876 podStartE2EDuration="4.045009876s" podCreationTimestamp="2026-01-22 05:51:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:51:08.044526774 +0000 UTC m=+315.881652127" watchObservedRunningTime="2026-01-22 05:51:08.045009876 +0000 UTC m=+315.882135229" Jan 22 05:51:09 crc kubenswrapper[4933]: I0122 05:51:09.030518 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-58c6dfb578-vnnkh" Jan 22 05:51:09 crc kubenswrapper[4933]: I0122 05:51:09.035015 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-58c6dfb578-vnnkh" Jan 22 05:51:44 crc kubenswrapper[4933]: I0122 05:51:44.538171 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-58c6dfb578-vnnkh"] Jan 22 05:51:44 crc kubenswrapper[4933]: I0122 05:51:44.539042 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-58c6dfb578-vnnkh" podUID="ed77d791-9cd1-478c-a0bc-fad66f13c466" containerName="controller-manager" containerID="cri-o://7bea6d68bde9105dbc2e8966005f1239b1e0a7c2e9892887dd43b109273b5fb8" gracePeriod=30 Jan 22 05:51:44 crc kubenswrapper[4933]: I0122 05:51:44.551107 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-d49bbb4c4-qnqtn"] Jan 22 05:51:44 crc kubenswrapper[4933]: I0122 05:51:44.551478 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-d49bbb4c4-qnqtn" podUID="bd5aa933-89b8-4db3-9ff0-f6065d023e6f" containerName="route-controller-manager" containerID="cri-o://41cef4319a6c53a5207b5a41e854b2d75c1424e4ec07ba9a5ead7b1680f80255" gracePeriod=30 Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.006577 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-58c6dfb578-vnnkh" Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.011299 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-d49bbb4c4-qnqtn" Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.062340 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed77d791-9cd1-478c-a0bc-fad66f13c466-serving-cert\") pod \"ed77d791-9cd1-478c-a0bc-fad66f13c466\" (UID: \"ed77d791-9cd1-478c-a0bc-fad66f13c466\") " Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.062406 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bd5aa933-89b8-4db3-9ff0-f6065d023e6f-client-ca\") pod \"bd5aa933-89b8-4db3-9ff0-f6065d023e6f\" (UID: \"bd5aa933-89b8-4db3-9ff0-f6065d023e6f\") " Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.062443 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v578b\" (UniqueName: \"kubernetes.io/projected/bd5aa933-89b8-4db3-9ff0-f6065d023e6f-kube-api-access-v578b\") pod \"bd5aa933-89b8-4db3-9ff0-f6065d023e6f\" (UID: \"bd5aa933-89b8-4db3-9ff0-f6065d023e6f\") " Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.062489 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bd5aa933-89b8-4db3-9ff0-f6065d023e6f-serving-cert\") pod \"bd5aa933-89b8-4db3-9ff0-f6065d023e6f\" (UID: \"bd5aa933-89b8-4db3-9ff0-f6065d023e6f\") " Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.062533 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ed77d791-9cd1-478c-a0bc-fad66f13c466-proxy-ca-bundles\") pod \"ed77d791-9cd1-478c-a0bc-fad66f13c466\" (UID: \"ed77d791-9cd1-478c-a0bc-fad66f13c466\") " Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.062570 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xgngv\" (UniqueName: \"kubernetes.io/projected/ed77d791-9cd1-478c-a0bc-fad66f13c466-kube-api-access-xgngv\") pod \"ed77d791-9cd1-478c-a0bc-fad66f13c466\" (UID: \"ed77d791-9cd1-478c-a0bc-fad66f13c466\") " Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.062634 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd5aa933-89b8-4db3-9ff0-f6065d023e6f-config\") pod \"bd5aa933-89b8-4db3-9ff0-f6065d023e6f\" (UID: \"bd5aa933-89b8-4db3-9ff0-f6065d023e6f\") " Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.062669 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ed77d791-9cd1-478c-a0bc-fad66f13c466-client-ca\") pod \"ed77d791-9cd1-478c-a0bc-fad66f13c466\" (UID: \"ed77d791-9cd1-478c-a0bc-fad66f13c466\") " Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.062709 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed77d791-9cd1-478c-a0bc-fad66f13c466-config\") pod \"ed77d791-9cd1-478c-a0bc-fad66f13c466\" (UID: \"ed77d791-9cd1-478c-a0bc-fad66f13c466\") " Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.063269 4933 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd5aa933-89b8-4db3-9ff0-f6065d023e6f-client-ca" (OuterVolumeSpecName: "client-ca") pod "bd5aa933-89b8-4db3-9ff0-f6065d023e6f" (UID: "bd5aa933-89b8-4db3-9ff0-f6065d023e6f"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.063298 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed77d791-9cd1-478c-a0bc-fad66f13c466-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "ed77d791-9cd1-478c-a0bc-fad66f13c466" (UID: "ed77d791-9cd1-478c-a0bc-fad66f13c466"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.063337 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd5aa933-89b8-4db3-9ff0-f6065d023e6f-config" (OuterVolumeSpecName: "config") pod "bd5aa933-89b8-4db3-9ff0-f6065d023e6f" (UID: "bd5aa933-89b8-4db3-9ff0-f6065d023e6f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.063593 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed77d791-9cd1-478c-a0bc-fad66f13c466-client-ca" (OuterVolumeSpecName: "client-ca") pod "ed77d791-9cd1-478c-a0bc-fad66f13c466" (UID: "ed77d791-9cd1-478c-a0bc-fad66f13c466"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.063859 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed77d791-9cd1-478c-a0bc-fad66f13c466-config" (OuterVolumeSpecName: "config") pod "ed77d791-9cd1-478c-a0bc-fad66f13c466" (UID: "ed77d791-9cd1-478c-a0bc-fad66f13c466"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.064195 4933 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ed77d791-9cd1-478c-a0bc-fad66f13c466-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.064231 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd5aa933-89b8-4db3-9ff0-f6065d023e6f-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.064252 4933 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bd5aa933-89b8-4db3-9ff0-f6065d023e6f-client-ca\") on node \"crc\" DevicePath \"\"" Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.067542 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd5aa933-89b8-4db3-9ff0-f6065d023e6f-kube-api-access-v578b" (OuterVolumeSpecName: "kube-api-access-v578b") pod "bd5aa933-89b8-4db3-9ff0-f6065d023e6f" (UID: "bd5aa933-89b8-4db3-9ff0-f6065d023e6f"). InnerVolumeSpecName "kube-api-access-v578b". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.067631 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed77d791-9cd1-478c-a0bc-fad66f13c466-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "ed77d791-9cd1-478c-a0bc-fad66f13c466" (UID: "ed77d791-9cd1-478c-a0bc-fad66f13c466"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.067877 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed77d791-9cd1-478c-a0bc-fad66f13c466-kube-api-access-xgngv" (OuterVolumeSpecName: "kube-api-access-xgngv") pod "ed77d791-9cd1-478c-a0bc-fad66f13c466" (UID: "ed77d791-9cd1-478c-a0bc-fad66f13c466"). InnerVolumeSpecName "kube-api-access-xgngv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.070511 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd5aa933-89b8-4db3-9ff0-f6065d023e6f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bd5aa933-89b8-4db3-9ff0-f6065d023e6f" (UID: "bd5aa933-89b8-4db3-9ff0-f6065d023e6f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.165607 4933 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed77d791-9cd1-478c-a0bc-fad66f13c466-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.165662 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v578b\" (UniqueName: \"kubernetes.io/projected/bd5aa933-89b8-4db3-9ff0-f6065d023e6f-kube-api-access-v578b\") on node \"crc\" DevicePath \"\"" Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.165678 4933 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bd5aa933-89b8-4db3-9ff0-f6065d023e6f-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.165690 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xgngv\" (UniqueName: \"kubernetes.io/projected/ed77d791-9cd1-478c-a0bc-fad66f13c466-kube-api-access-xgngv\") on node \"crc\" DevicePath \"\"" Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.165704 4933 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ed77d791-9cd1-478c-a0bc-fad66f13c466-client-ca\") on node \"crc\" DevicePath \"\"" Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.165715 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed77d791-9cd1-478c-a0bc-fad66f13c466-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.236285 4933 generic.go:334] "Generic (PLEG): container finished" podID="ed77d791-9cd1-478c-a0bc-fad66f13c466" containerID="7bea6d68bde9105dbc2e8966005f1239b1e0a7c2e9892887dd43b109273b5fb8" exitCode=0 Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.236352 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-58c6dfb578-vnnkh" Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.236375 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-58c6dfb578-vnnkh" event={"ID":"ed77d791-9cd1-478c-a0bc-fad66f13c466","Type":"ContainerDied","Data":"7bea6d68bde9105dbc2e8966005f1239b1e0a7c2e9892887dd43b109273b5fb8"} Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.236406 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-58c6dfb578-vnnkh" event={"ID":"ed77d791-9cd1-478c-a0bc-fad66f13c466","Type":"ContainerDied","Data":"c798cfc7c34862ab75ac1b2afa537289f7c14d14f69daa5caa69c6a136c1024c"} Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.236426 4933 scope.go:117] "RemoveContainer" containerID="7bea6d68bde9105dbc2e8966005f1239b1e0a7c2e9892887dd43b109273b5fb8" Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.241375 4933 generic.go:334] "Generic (PLEG): container finished" podID="bd5aa933-89b8-4db3-9ff0-f6065d023e6f" containerID="41cef4319a6c53a5207b5a41e854b2d75c1424e4ec07ba9a5ead7b1680f80255" exitCode=0 Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.241421 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-d49bbb4c4-qnqtn" event={"ID":"bd5aa933-89b8-4db3-9ff0-f6065d023e6f","Type":"ContainerDied","Data":"41cef4319a6c53a5207b5a41e854b2d75c1424e4ec07ba9a5ead7b1680f80255"} Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.241450 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-d49bbb4c4-qnqtn" event={"ID":"bd5aa933-89b8-4db3-9ff0-f6065d023e6f","Type":"ContainerDied","Data":"d742ceda2d303b9d8bee262d462e3b25620ac05dde23094653db32630b31d96a"} Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.241502 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-d49bbb4c4-qnqtn" Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.258898 4933 scope.go:117] "RemoveContainer" containerID="7bea6d68bde9105dbc2e8966005f1239b1e0a7c2e9892887dd43b109273b5fb8" Jan 22 05:51:45 crc kubenswrapper[4933]: E0122 05:51:45.259332 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7bea6d68bde9105dbc2e8966005f1239b1e0a7c2e9892887dd43b109273b5fb8\": container with ID starting with 7bea6d68bde9105dbc2e8966005f1239b1e0a7c2e9892887dd43b109273b5fb8 not found: ID does not exist" containerID="7bea6d68bde9105dbc2e8966005f1239b1e0a7c2e9892887dd43b109273b5fb8" Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.259361 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7bea6d68bde9105dbc2e8966005f1239b1e0a7c2e9892887dd43b109273b5fb8"} err="failed to get container status \"7bea6d68bde9105dbc2e8966005f1239b1e0a7c2e9892887dd43b109273b5fb8\": rpc error: code = NotFound desc = could not find container \"7bea6d68bde9105dbc2e8966005f1239b1e0a7c2e9892887dd43b109273b5fb8\": container with ID starting with 7bea6d68bde9105dbc2e8966005f1239b1e0a7c2e9892887dd43b109273b5fb8 not found: ID does not exist" Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.259383 4933 scope.go:117] "RemoveContainer" containerID="41cef4319a6c53a5207b5a41e854b2d75c1424e4ec07ba9a5ead7b1680f80255" Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.274511 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-58c6dfb578-vnnkh"] Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.280811 4933 scope.go:117] "RemoveContainer" containerID="41cef4319a6c53a5207b5a41e854b2d75c1424e4ec07ba9a5ead7b1680f80255" Jan 22 05:51:45 crc kubenswrapper[4933]: E0122 05:51:45.281662 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"41cef4319a6c53a5207b5a41e854b2d75c1424e4ec07ba9a5ead7b1680f80255\": container with ID starting with 41cef4319a6c53a5207b5a41e854b2d75c1424e4ec07ba9a5ead7b1680f80255 not found: ID does not exist" containerID="41cef4319a6c53a5207b5a41e854b2d75c1424e4ec07ba9a5ead7b1680f80255" Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.281708 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"41cef4319a6c53a5207b5a41e854b2d75c1424e4ec07ba9a5ead7b1680f80255"} err="failed to get container status \"41cef4319a6c53a5207b5a41e854b2d75c1424e4ec07ba9a5ead7b1680f80255\": rpc error: code = NotFound desc = could not find container \"41cef4319a6c53a5207b5a41e854b2d75c1424e4ec07ba9a5ead7b1680f80255\": container with ID starting with 41cef4319a6c53a5207b5a41e854b2d75c1424e4ec07ba9a5ead7b1680f80255 not found: ID does not exist" Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.285297 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-58c6dfb578-vnnkh"] Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.289210 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-d49bbb4c4-qnqtn"] Jan 22 05:51:45 crc kubenswrapper[4933]: I0122 05:51:45.292218 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-d49bbb4c4-qnqtn"] Jan 22 
05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.098657 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7695d8c949-zvkh6"] Jan 22 05:51:46 crc kubenswrapper[4933]: E0122 05:51:46.099317 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd5aa933-89b8-4db3-9ff0-f6065d023e6f" containerName="route-controller-manager" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.099334 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd5aa933-89b8-4db3-9ff0-f6065d023e6f" containerName="route-controller-manager" Jan 22 05:51:46 crc kubenswrapper[4933]: E0122 05:51:46.099346 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed77d791-9cd1-478c-a0bc-fad66f13c466" containerName="controller-manager" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.099354 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed77d791-9cd1-478c-a0bc-fad66f13c466" containerName="controller-manager" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.099499 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed77d791-9cd1-478c-a0bc-fad66f13c466" containerName="controller-manager" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.099538 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd5aa933-89b8-4db3-9ff0-f6065d023e6f" containerName="route-controller-manager" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.100550 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-zvkh6" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.102484 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-7f5f69b8b5-p6v8m"] Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.103044 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-7f5f69b8b5-p6v8m" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.106512 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.106734 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.107109 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.107324 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.107370 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.107370 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.107679 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.109178 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.109512 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.109620 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.109695 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.112330 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7695d8c949-zvkh6"] Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.116478 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.119172 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.169280 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7f5f69b8b5-p6v8m"] Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.175773 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a1a0ae99-89c9-4a23-841d-4620f3175ecd-serving-cert\") pod \"route-controller-manager-7695d8c949-zvkh6\" (UID: \"a1a0ae99-89c9-4a23-841d-4620f3175ecd\") " pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-zvkh6" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.175843 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-mlk6l\" (UniqueName: \"kubernetes.io/projected/5e38e0f2-cd05-4f41-8aff-80e9cbcb9085-kube-api-access-mlk6l\") pod \"controller-manager-7f5f69b8b5-p6v8m\" (UID: \"5e38e0f2-cd05-4f41-8aff-80e9cbcb9085\") " pod="openshift-controller-manager/controller-manager-7f5f69b8b5-p6v8m" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.175878 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a1a0ae99-89c9-4a23-841d-4620f3175ecd-client-ca\") pod \"route-controller-manager-7695d8c949-zvkh6\" (UID: \"a1a0ae99-89c9-4a23-841d-4620f3175ecd\") " pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-zvkh6" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.175909 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1a0ae99-89c9-4a23-841d-4620f3175ecd-config\") pod \"route-controller-manager-7695d8c949-zvkh6\" (UID: \"a1a0ae99-89c9-4a23-841d-4620f3175ecd\") " pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-zvkh6" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.175938 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5e38e0f2-cd05-4f41-8aff-80e9cbcb9085-client-ca\") pod \"controller-manager-7f5f69b8b5-p6v8m\" (UID: \"5e38e0f2-cd05-4f41-8aff-80e9cbcb9085\") " pod="openshift-controller-manager/controller-manager-7f5f69b8b5-p6v8m" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.175966 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-md8kw\" (UniqueName: \"kubernetes.io/projected/a1a0ae99-89c9-4a23-841d-4620f3175ecd-kube-api-access-md8kw\") pod \"route-controller-manager-7695d8c949-zvkh6\" (UID: \"a1a0ae99-89c9-4a23-841d-4620f3175ecd\") " pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-zvkh6" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.175991 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5e38e0f2-cd05-4f41-8aff-80e9cbcb9085-serving-cert\") pod \"controller-manager-7f5f69b8b5-p6v8m\" (UID: \"5e38e0f2-cd05-4f41-8aff-80e9cbcb9085\") " pod="openshift-controller-manager/controller-manager-7f5f69b8b5-p6v8m" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.176104 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e38e0f2-cd05-4f41-8aff-80e9cbcb9085-config\") pod \"controller-manager-7f5f69b8b5-p6v8m\" (UID: \"5e38e0f2-cd05-4f41-8aff-80e9cbcb9085\") " pod="openshift-controller-manager/controller-manager-7f5f69b8b5-p6v8m" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.176138 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5e38e0f2-cd05-4f41-8aff-80e9cbcb9085-proxy-ca-bundles\") pod \"controller-manager-7f5f69b8b5-p6v8m\" (UID: \"5e38e0f2-cd05-4f41-8aff-80e9cbcb9085\") " pod="openshift-controller-manager/controller-manager-7f5f69b8b5-p6v8m" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.277205 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" 
(UniqueName: \"kubernetes.io/secret/a1a0ae99-89c9-4a23-841d-4620f3175ecd-serving-cert\") pod \"route-controller-manager-7695d8c949-zvkh6\" (UID: \"a1a0ae99-89c9-4a23-841d-4620f3175ecd\") " pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-zvkh6" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.277288 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mlk6l\" (UniqueName: \"kubernetes.io/projected/5e38e0f2-cd05-4f41-8aff-80e9cbcb9085-kube-api-access-mlk6l\") pod \"controller-manager-7f5f69b8b5-p6v8m\" (UID: \"5e38e0f2-cd05-4f41-8aff-80e9cbcb9085\") " pod="openshift-controller-manager/controller-manager-7f5f69b8b5-p6v8m" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.277323 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a1a0ae99-89c9-4a23-841d-4620f3175ecd-client-ca\") pod \"route-controller-manager-7695d8c949-zvkh6\" (UID: \"a1a0ae99-89c9-4a23-841d-4620f3175ecd\") " pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-zvkh6" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.277351 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1a0ae99-89c9-4a23-841d-4620f3175ecd-config\") pod \"route-controller-manager-7695d8c949-zvkh6\" (UID: \"a1a0ae99-89c9-4a23-841d-4620f3175ecd\") " pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-zvkh6" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.277380 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5e38e0f2-cd05-4f41-8aff-80e9cbcb9085-client-ca\") pod \"controller-manager-7f5f69b8b5-p6v8m\" (UID: \"5e38e0f2-cd05-4f41-8aff-80e9cbcb9085\") " pod="openshift-controller-manager/controller-manager-7f5f69b8b5-p6v8m" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.277410 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-md8kw\" (UniqueName: \"kubernetes.io/projected/a1a0ae99-89c9-4a23-841d-4620f3175ecd-kube-api-access-md8kw\") pod \"route-controller-manager-7695d8c949-zvkh6\" (UID: \"a1a0ae99-89c9-4a23-841d-4620f3175ecd\") " pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-zvkh6" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.277440 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5e38e0f2-cd05-4f41-8aff-80e9cbcb9085-serving-cert\") pod \"controller-manager-7f5f69b8b5-p6v8m\" (UID: \"5e38e0f2-cd05-4f41-8aff-80e9cbcb9085\") " pod="openshift-controller-manager/controller-manager-7f5f69b8b5-p6v8m" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.277469 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e38e0f2-cd05-4f41-8aff-80e9cbcb9085-config\") pod \"controller-manager-7f5f69b8b5-p6v8m\" (UID: \"5e38e0f2-cd05-4f41-8aff-80e9cbcb9085\") " pod="openshift-controller-manager/controller-manager-7f5f69b8b5-p6v8m" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.277893 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5e38e0f2-cd05-4f41-8aff-80e9cbcb9085-proxy-ca-bundles\") pod \"controller-manager-7f5f69b8b5-p6v8m\" 
(UID: \"5e38e0f2-cd05-4f41-8aff-80e9cbcb9085\") " pod="openshift-controller-manager/controller-manager-7f5f69b8b5-p6v8m" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.278698 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5e38e0f2-cd05-4f41-8aff-80e9cbcb9085-client-ca\") pod \"controller-manager-7f5f69b8b5-p6v8m\" (UID: \"5e38e0f2-cd05-4f41-8aff-80e9cbcb9085\") " pod="openshift-controller-manager/controller-manager-7f5f69b8b5-p6v8m" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.278937 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e38e0f2-cd05-4f41-8aff-80e9cbcb9085-config\") pod \"controller-manager-7f5f69b8b5-p6v8m\" (UID: \"5e38e0f2-cd05-4f41-8aff-80e9cbcb9085\") " pod="openshift-controller-manager/controller-manager-7f5f69b8b5-p6v8m" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.279045 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5e38e0f2-cd05-4f41-8aff-80e9cbcb9085-proxy-ca-bundles\") pod \"controller-manager-7f5f69b8b5-p6v8m\" (UID: \"5e38e0f2-cd05-4f41-8aff-80e9cbcb9085\") " pod="openshift-controller-manager/controller-manager-7f5f69b8b5-p6v8m" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.279222 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1a0ae99-89c9-4a23-841d-4620f3175ecd-config\") pod \"route-controller-manager-7695d8c949-zvkh6\" (UID: \"a1a0ae99-89c9-4a23-841d-4620f3175ecd\") " pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-zvkh6" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.279584 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a1a0ae99-89c9-4a23-841d-4620f3175ecd-client-ca\") pod \"route-controller-manager-7695d8c949-zvkh6\" (UID: \"a1a0ae99-89c9-4a23-841d-4620f3175ecd\") " pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-zvkh6" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.282580 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5e38e0f2-cd05-4f41-8aff-80e9cbcb9085-serving-cert\") pod \"controller-manager-7f5f69b8b5-p6v8m\" (UID: \"5e38e0f2-cd05-4f41-8aff-80e9cbcb9085\") " pod="openshift-controller-manager/controller-manager-7f5f69b8b5-p6v8m" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.286376 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a1a0ae99-89c9-4a23-841d-4620f3175ecd-serving-cert\") pod \"route-controller-manager-7695d8c949-zvkh6\" (UID: \"a1a0ae99-89c9-4a23-841d-4620f3175ecd\") " pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-zvkh6" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.293588 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mlk6l\" (UniqueName: \"kubernetes.io/projected/5e38e0f2-cd05-4f41-8aff-80e9cbcb9085-kube-api-access-mlk6l\") pod \"controller-manager-7f5f69b8b5-p6v8m\" (UID: \"5e38e0f2-cd05-4f41-8aff-80e9cbcb9085\") " pod="openshift-controller-manager/controller-manager-7f5f69b8b5-p6v8m" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.295691 4933 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-md8kw\" (UniqueName: \"kubernetes.io/projected/a1a0ae99-89c9-4a23-841d-4620f3175ecd-kube-api-access-md8kw\") pod \"route-controller-manager-7695d8c949-zvkh6\" (UID: \"a1a0ae99-89c9-4a23-841d-4620f3175ecd\") " pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-zvkh6" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.461967 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-zvkh6" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.480437 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7f5f69b8b5-p6v8m" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.504410 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd5aa933-89b8-4db3-9ff0-f6065d023e6f" path="/var/lib/kubelet/pods/bd5aa933-89b8-4db3-9ff0-f6065d023e6f/volumes" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.505227 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed77d791-9cd1-478c-a0bc-fad66f13c466" path="/var/lib/kubelet/pods/ed77d791-9cd1-478c-a0bc-fad66f13c466/volumes" Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.735964 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7f5f69b8b5-p6v8m"] Jan 22 05:51:46 crc kubenswrapper[4933]: W0122 05:51:46.743673 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5e38e0f2_cd05_4f41_8aff_80e9cbcb9085.slice/crio-956d018a478c4565b1f9b4f3806f1509646546b20d3bfc8daca42a43b4abfb5b WatchSource:0}: Error finding container 956d018a478c4565b1f9b4f3806f1509646546b20d3bfc8daca42a43b4abfb5b: Status 404 returned error can't find the container with id 956d018a478c4565b1f9b4f3806f1509646546b20d3bfc8daca42a43b4abfb5b Jan 22 05:51:46 crc kubenswrapper[4933]: I0122 05:51:46.893694 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7695d8c949-zvkh6"] Jan 22 05:51:46 crc kubenswrapper[4933]: W0122 05:51:46.899713 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda1a0ae99_89c9_4a23_841d_4620f3175ecd.slice/crio-867dd2fa9c3c1b71b63640fe5bf4a468e2f54e01bd745bf8da4985dedc11f568 WatchSource:0}: Error finding container 867dd2fa9c3c1b71b63640fe5bf4a468e2f54e01bd745bf8da4985dedc11f568: Status 404 returned error can't find the container with id 867dd2fa9c3c1b71b63640fe5bf4a468e2f54e01bd745bf8da4985dedc11f568 Jan 22 05:51:47 crc kubenswrapper[4933]: I0122 05:51:47.254228 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-zvkh6" event={"ID":"a1a0ae99-89c9-4a23-841d-4620f3175ecd","Type":"ContainerStarted","Data":"0360973f51032ccabbf1f1949e1021acd5368be87569ecf2f539c01d5ce25120"} Jan 22 05:51:47 crc kubenswrapper[4933]: I0122 05:51:47.254539 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-zvkh6" event={"ID":"a1a0ae99-89c9-4a23-841d-4620f3175ecd","Type":"ContainerStarted","Data":"867dd2fa9c3c1b71b63640fe5bf4a468e2f54e01bd745bf8da4985dedc11f568"} Jan 22 05:51:47 crc kubenswrapper[4933]: I0122 05:51:47.255586 4933 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-zvkh6" Jan 22 05:51:47 crc kubenswrapper[4933]: I0122 05:51:47.257421 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7f5f69b8b5-p6v8m" event={"ID":"5e38e0f2-cd05-4f41-8aff-80e9cbcb9085","Type":"ContainerStarted","Data":"383336f8e3a5d598c8b3ec3397bd3252515119421b9d3690990378a858236530"} Jan 22 05:51:47 crc kubenswrapper[4933]: I0122 05:51:47.257462 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7f5f69b8b5-p6v8m" event={"ID":"5e38e0f2-cd05-4f41-8aff-80e9cbcb9085","Type":"ContainerStarted","Data":"956d018a478c4565b1f9b4f3806f1509646546b20d3bfc8daca42a43b4abfb5b"} Jan 22 05:51:47 crc kubenswrapper[4933]: I0122 05:51:47.257625 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-7f5f69b8b5-p6v8m" Jan 22 05:51:47 crc kubenswrapper[4933]: I0122 05:51:47.262778 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-7f5f69b8b5-p6v8m" Jan 22 05:51:47 crc kubenswrapper[4933]: I0122 05:51:47.276258 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-zvkh6" podStartSLOduration=3.2762356710000002 podStartE2EDuration="3.276235671s" podCreationTimestamp="2026-01-22 05:51:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:51:47.276041917 +0000 UTC m=+355.113167270" watchObservedRunningTime="2026-01-22 05:51:47.276235671 +0000 UTC m=+355.113361024" Jan 22 05:51:47 crc kubenswrapper[4933]: I0122 05:51:47.295269 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-7f5f69b8b5-p6v8m" podStartSLOduration=3.295249703 podStartE2EDuration="3.295249703s" podCreationTimestamp="2026-01-22 05:51:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:51:47.294788373 +0000 UTC m=+355.131913726" watchObservedRunningTime="2026-01-22 05:51:47.295249703 +0000 UTC m=+355.132375056" Jan 22 05:51:47 crc kubenswrapper[4933]: I0122 05:51:47.555771 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-7695d8c949-zvkh6" Jan 22 05:52:00 crc kubenswrapper[4933]: I0122 05:52:00.247046 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-m4tv2"] Jan 22 05:52:00 crc kubenswrapper[4933]: I0122 05:52:00.249670 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-m4tv2" Jan 22 05:52:00 crc kubenswrapper[4933]: I0122 05:52:00.252217 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 22 05:52:00 crc kubenswrapper[4933]: I0122 05:52:00.261836 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-m4tv2"] Jan 22 05:52:00 crc kubenswrapper[4933]: I0122 05:52:00.358927 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/59c8d87a-51df-446d-8e17-197464398b18-utilities\") pod \"community-operators-m4tv2\" (UID: \"59c8d87a-51df-446d-8e17-197464398b18\") " pod="openshift-marketplace/community-operators-m4tv2" Jan 22 05:52:00 crc kubenswrapper[4933]: I0122 05:52:00.358993 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/59c8d87a-51df-446d-8e17-197464398b18-catalog-content\") pod \"community-operators-m4tv2\" (UID: \"59c8d87a-51df-446d-8e17-197464398b18\") " pod="openshift-marketplace/community-operators-m4tv2" Jan 22 05:52:00 crc kubenswrapper[4933]: I0122 05:52:00.359022 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r85g8\" (UniqueName: \"kubernetes.io/projected/59c8d87a-51df-446d-8e17-197464398b18-kube-api-access-r85g8\") pod \"community-operators-m4tv2\" (UID: \"59c8d87a-51df-446d-8e17-197464398b18\") " pod="openshift-marketplace/community-operators-m4tv2" Jan 22 05:52:00 crc kubenswrapper[4933]: I0122 05:52:00.445297 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-5jcpg"] Jan 22 05:52:00 crc kubenswrapper[4933]: I0122 05:52:00.447151 4933 util.go:30] "No sandbox for pod can be found. 
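The pod_startup_latency_tracker entries above report two numbers: podStartE2EDuration is observedRunningTime minus podCreationTimestamp, and podStartSLOduration additionally excludes time spent pulling images. For route-controller-manager both are 3.276s because the pull timestamps are the zero time (no pull was needed). A small sketch of that arithmetic, under the assumption that this is how the two values relate (field names come from the log; the function is hypothetical):

package main

import (
	"fmt"
	"time"
)

// sloDuration mirrors the relationship visible in the log:
// E2E = observedRunning - created; SLO = E2E - (lastFinishedPulling - firstStartedPulling).
func sloDuration(created, observedRunning, firstPull, lastPull time.Time) (e2e, slo time.Duration) {
	e2e = observedRunning.Sub(created)
	slo = e2e
	if !firstPull.IsZero() && !lastPull.IsZero() {
		slo -= lastPull.Sub(firstPull)
	}
	return e2e, slo
}

func main() {
	layout := "2006-01-02 15:04:05.000000000 -0700 MST"
	created, _ := time.Parse(layout, "2026-01-22 05:51:44.000000000 +0000 UTC")
	running, _ := time.Parse(layout, "2026-01-22 05:51:47.276235671 +0000 UTC")
	e2e, slo := sloDuration(created, running, time.Time{}, time.Time{})
	fmt.Println(e2e, slo) // 3.276235671s 3.276235671s, matching the entry above
}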
Jan 22 05:52:00 crc kubenswrapper[4933]: I0122 05:52:00.451604 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Jan 22 05:52:00 crc kubenswrapper[4933]: I0122 05:52:00.454917 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5jcpg"]
Jan 22 05:52:00 crc kubenswrapper[4933]: I0122 05:52:00.460251 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/59c8d87a-51df-446d-8e17-197464398b18-utilities\") pod \"community-operators-m4tv2\" (UID: \"59c8d87a-51df-446d-8e17-197464398b18\") " pod="openshift-marketplace/community-operators-m4tv2"
Jan 22 05:52:00 crc kubenswrapper[4933]: I0122 05:52:00.460618 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/59c8d87a-51df-446d-8e17-197464398b18-catalog-content\") pod \"community-operators-m4tv2\" (UID: \"59c8d87a-51df-446d-8e17-197464398b18\") " pod="openshift-marketplace/community-operators-m4tv2"
Jan 22 05:52:00 crc kubenswrapper[4933]: I0122 05:52:00.460697 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r85g8\" (UniqueName: \"kubernetes.io/projected/59c8d87a-51df-446d-8e17-197464398b18-kube-api-access-r85g8\") pod \"community-operators-m4tv2\" (UID: \"59c8d87a-51df-446d-8e17-197464398b18\") " pod="openshift-marketplace/community-operators-m4tv2"
Jan 22 05:52:00 crc kubenswrapper[4933]: I0122 05:52:00.461285 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/59c8d87a-51df-446d-8e17-197464398b18-utilities\") pod \"community-operators-m4tv2\" (UID: \"59c8d87a-51df-446d-8e17-197464398b18\") " pod="openshift-marketplace/community-operators-m4tv2"
Jan 22 05:52:00 crc kubenswrapper[4933]: I0122 05:52:00.461763 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/59c8d87a-51df-446d-8e17-197464398b18-catalog-content\") pod \"community-operators-m4tv2\" (UID: \"59c8d87a-51df-446d-8e17-197464398b18\") " pod="openshift-marketplace/community-operators-m4tv2"
Jan 22 05:52:00 crc kubenswrapper[4933]: I0122 05:52:00.503215 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r85g8\" (UniqueName: \"kubernetes.io/projected/59c8d87a-51df-446d-8e17-197464398b18-kube-api-access-r85g8\") pod \"community-operators-m4tv2\" (UID: \"59c8d87a-51df-446d-8e17-197464398b18\") " pod="openshift-marketplace/community-operators-m4tv2"
Jan 22 05:52:00 crc kubenswrapper[4933]: I0122 05:52:00.561933 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/65b68271-f234-4a74-aa04-b5113c8c1d89-catalog-content\") pod \"certified-operators-5jcpg\" (UID: \"65b68271-f234-4a74-aa04-b5113c8c1d89\") " pod="openshift-marketplace/certified-operators-5jcpg"
Jan 22 05:52:00 crc kubenswrapper[4933]: I0122 05:52:00.562507 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/65b68271-f234-4a74-aa04-b5113c8c1d89-utilities\") pod \"certified-operators-5jcpg\" (UID: \"65b68271-f234-4a74-aa04-b5113c8c1d89\") " pod="openshift-marketplace/certified-operators-5jcpg"
Jan 22 05:52:00 crc kubenswrapper[4933]: I0122 05:52:00.562643 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qlt9k\" (UniqueName: \"kubernetes.io/projected/65b68271-f234-4a74-aa04-b5113c8c1d89-kube-api-access-qlt9k\") pod \"certified-operators-5jcpg\" (UID: \"65b68271-f234-4a74-aa04-b5113c8c1d89\") " pod="openshift-marketplace/certified-operators-5jcpg"
Jan 22 05:52:00 crc kubenswrapper[4933]: I0122 05:52:00.574248 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-m4tv2"
Jan 22 05:52:00 crc kubenswrapper[4933]: I0122 05:52:00.663995 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/65b68271-f234-4a74-aa04-b5113c8c1d89-utilities\") pod \"certified-operators-5jcpg\" (UID: \"65b68271-f234-4a74-aa04-b5113c8c1d89\") " pod="openshift-marketplace/certified-operators-5jcpg"
Jan 22 05:52:00 crc kubenswrapper[4933]: I0122 05:52:00.664065 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qlt9k\" (UniqueName: \"kubernetes.io/projected/65b68271-f234-4a74-aa04-b5113c8c1d89-kube-api-access-qlt9k\") pod \"certified-operators-5jcpg\" (UID: \"65b68271-f234-4a74-aa04-b5113c8c1d89\") " pod="openshift-marketplace/certified-operators-5jcpg"
Jan 22 05:52:00 crc kubenswrapper[4933]: I0122 05:52:00.664244 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/65b68271-f234-4a74-aa04-b5113c8c1d89-catalog-content\") pod \"certified-operators-5jcpg\" (UID: \"65b68271-f234-4a74-aa04-b5113c8c1d89\") " pod="openshift-marketplace/certified-operators-5jcpg"
Jan 22 05:52:00 crc kubenswrapper[4933]: I0122 05:52:00.665349 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/65b68271-f234-4a74-aa04-b5113c8c1d89-utilities\") pod \"certified-operators-5jcpg\" (UID: \"65b68271-f234-4a74-aa04-b5113c8c1d89\") " pod="openshift-marketplace/certified-operators-5jcpg"
Jan 22 05:52:00 crc kubenswrapper[4933]: I0122 05:52:00.666189 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/65b68271-f234-4a74-aa04-b5113c8c1d89-catalog-content\") pod \"certified-operators-5jcpg\" (UID: \"65b68271-f234-4a74-aa04-b5113c8c1d89\") " pod="openshift-marketplace/certified-operators-5jcpg"
Jan 22 05:52:00 crc kubenswrapper[4933]: I0122 05:52:00.692412 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qlt9k\" (UniqueName: \"kubernetes.io/projected/65b68271-f234-4a74-aa04-b5113c8c1d89-kube-api-access-qlt9k\") pod \"certified-operators-5jcpg\" (UID: \"65b68271-f234-4a74-aa04-b5113c8c1d89\") " pod="openshift-marketplace/certified-operators-5jcpg"
Jan 22 05:52:00 crc kubenswrapper[4933]: I0122 05:52:00.774562 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5jcpg"
Jan 22 05:52:01 crc kubenswrapper[4933]: I0122 05:52:01.070250 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-m4tv2"]
Jan 22 05:52:01 crc kubenswrapper[4933]: I0122 05:52:01.196147 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5jcpg"]
Jan 22 05:52:01 crc kubenswrapper[4933]: W0122 05:52:01.208296 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod65b68271_f234_4a74_aa04_b5113c8c1d89.slice/crio-2ad9dcfd4473fe7c8b945aa457c4313856d7d14645ec44f9b8b424899fef506a WatchSource:0}: Error finding container 2ad9dcfd4473fe7c8b945aa457c4313856d7d14645ec44f9b8b424899fef506a: Status 404 returned error can't find the container with id 2ad9dcfd4473fe7c8b945aa457c4313856d7d14645ec44f9b8b424899fef506a
Jan 22 05:52:01 crc kubenswrapper[4933]: I0122 05:52:01.339352 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5jcpg" event={"ID":"65b68271-f234-4a74-aa04-b5113c8c1d89","Type":"ContainerStarted","Data":"2ad9dcfd4473fe7c8b945aa457c4313856d7d14645ec44f9b8b424899fef506a"}
Jan 22 05:52:01 crc kubenswrapper[4933]: I0122 05:52:01.340395 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m4tv2" event={"ID":"59c8d87a-51df-446d-8e17-197464398b18","Type":"ContainerStarted","Data":"44df16837340ce55fb81ce76c9d7c55bd0adbcb6204926ebf655bfdbb972046d"}
Jan 22 05:52:02 crc kubenswrapper[4933]: I0122 05:52:02.347768 4933 generic.go:334] "Generic (PLEG): container finished" podID="65b68271-f234-4a74-aa04-b5113c8c1d89" containerID="fad64083bf404d99e0d409c94f335e9fea2e4c5f59356946d6fecd1e00adac17" exitCode=0
Jan 22 05:52:02 crc kubenswrapper[4933]: I0122 05:52:02.347833 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5jcpg" event={"ID":"65b68271-f234-4a74-aa04-b5113c8c1d89","Type":"ContainerDied","Data":"fad64083bf404d99e0d409c94f335e9fea2e4c5f59356946d6fecd1e00adac17"}
Jan 22 05:52:02 crc kubenswrapper[4933]: I0122 05:52:02.351272 4933 generic.go:334] "Generic (PLEG): container finished" podID="59c8d87a-51df-446d-8e17-197464398b18" containerID="8823da99e4c204aa572d288ff99812e47162df808e4f479016887f72dccd5155" exitCode=0
Jan 22 05:52:02 crc kubenswrapper[4933]: I0122 05:52:02.351823 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m4tv2" event={"ID":"59c8d87a-51df-446d-8e17-197464398b18","Type":"ContainerDied","Data":"8823da99e4c204aa572d288ff99812e47162df808e4f479016887f72dccd5155"}
Jan 22 05:52:02 crc kubenswrapper[4933]: I0122 05:52:02.832809 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-kvffj"]
Jan 22 05:52:02 crc kubenswrapper[4933]: I0122 05:52:02.833965 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kvffj"
Jan 22 05:52:02 crc kubenswrapper[4933]: I0122 05:52:02.835942 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Jan 22 05:52:02 crc kubenswrapper[4933]: I0122 05:52:02.846353 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kvffj"]
Jan 22 05:52:02 crc kubenswrapper[4933]: I0122 05:52:02.902048 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cr8t9\" (UniqueName: \"kubernetes.io/projected/ed5c6166-9794-440e-9f36-6ae46897815b-kube-api-access-cr8t9\") pod \"redhat-marketplace-kvffj\" (UID: \"ed5c6166-9794-440e-9f36-6ae46897815b\") " pod="openshift-marketplace/redhat-marketplace-kvffj"
Jan 22 05:52:02 crc kubenswrapper[4933]: I0122 05:52:02.902137 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed5c6166-9794-440e-9f36-6ae46897815b-catalog-content\") pod \"redhat-marketplace-kvffj\" (UID: \"ed5c6166-9794-440e-9f36-6ae46897815b\") " pod="openshift-marketplace/redhat-marketplace-kvffj"
Jan 22 05:52:02 crc kubenswrapper[4933]: I0122 05:52:02.902223 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed5c6166-9794-440e-9f36-6ae46897815b-utilities\") pod \"redhat-marketplace-kvffj\" (UID: \"ed5c6166-9794-440e-9f36-6ae46897815b\") " pod="openshift-marketplace/redhat-marketplace-kvffj"
Jan 22 05:52:03 crc kubenswrapper[4933]: I0122 05:52:03.003194 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed5c6166-9794-440e-9f36-6ae46897815b-catalog-content\") pod \"redhat-marketplace-kvffj\" (UID: \"ed5c6166-9794-440e-9f36-6ae46897815b\") " pod="openshift-marketplace/redhat-marketplace-kvffj"
Jan 22 05:52:03 crc kubenswrapper[4933]: I0122 05:52:03.003509 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed5c6166-9794-440e-9f36-6ae46897815b-utilities\") pod \"redhat-marketplace-kvffj\" (UID: \"ed5c6166-9794-440e-9f36-6ae46897815b\") " pod="openshift-marketplace/redhat-marketplace-kvffj"
Jan 22 05:52:03 crc kubenswrapper[4933]: I0122 05:52:03.003596 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cr8t9\" (UniqueName: \"kubernetes.io/projected/ed5c6166-9794-440e-9f36-6ae46897815b-kube-api-access-cr8t9\") pod \"redhat-marketplace-kvffj\" (UID: \"ed5c6166-9794-440e-9f36-6ae46897815b\") " pod="openshift-marketplace/redhat-marketplace-kvffj"
Jan 22 05:52:03 crc kubenswrapper[4933]: I0122 05:52:03.004417 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed5c6166-9794-440e-9f36-6ae46897815b-catalog-content\") pod \"redhat-marketplace-kvffj\" (UID: \"ed5c6166-9794-440e-9f36-6ae46897815b\") " pod="openshift-marketplace/redhat-marketplace-kvffj"
Jan 22 05:52:03 crc kubenswrapper[4933]: I0122 05:52:03.007533 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed5c6166-9794-440e-9f36-6ae46897815b-utilities\") pod \"redhat-marketplace-kvffj\" (UID: \"ed5c6166-9794-440e-9f36-6ae46897815b\") " pod="openshift-marketplace/redhat-marketplace-kvffj"
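The ContainerDied events with exitCode=0 shortly after each catalog pod starts look like the pods' extract/init steps completing normally (these pods stage tooling and catalog content into the emptyDirs before the long-running registry-server starts); only a nonzero exit code would indicate a crash. A trivial sketch that makes that distinction when consuming such events (hypothetical helper; field names follow the generic.go:334 entries):

package main

import "fmt"

// containerExit mirrors the fields logged by "Generic (PLEG): container finished".
type containerExit struct {
	PodID       string
	ContainerID string
	ExitCode    int
}

func classify(e containerExit) string {
	if e.ExitCode == 0 {
		return "completed (expected for extract/init containers)"
	}
	return fmt.Sprintf("failed with exit code %d", e.ExitCode)
}

func main() {
	e := containerExit{PodID: "65b68271-f234-4a74-aa04-b5113c8c1d89", ContainerID: "fad64083...", ExitCode: 0}
	fmt.Println(classify(e))
}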
\"ed5c6166-9794-440e-9f36-6ae46897815b\") " pod="openshift-marketplace/redhat-marketplace-kvffj" Jan 22 05:52:03 crc kubenswrapper[4933]: I0122 05:52:03.021595 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cr8t9\" (UniqueName: \"kubernetes.io/projected/ed5c6166-9794-440e-9f36-6ae46897815b-kube-api-access-cr8t9\") pod \"redhat-marketplace-kvffj\" (UID: \"ed5c6166-9794-440e-9f36-6ae46897815b\") " pod="openshift-marketplace/redhat-marketplace-kvffj" Jan 22 05:52:03 crc kubenswrapper[4933]: I0122 05:52:03.034062 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-fcs2s"] Jan 22 05:52:03 crc kubenswrapper[4933]: I0122 05:52:03.035279 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fcs2s" Jan 22 05:52:03 crc kubenswrapper[4933]: I0122 05:52:03.041545 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 22 05:52:03 crc kubenswrapper[4933]: I0122 05:52:03.045785 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fcs2s"] Jan 22 05:52:03 crc kubenswrapper[4933]: I0122 05:52:03.104563 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4902f901-8853-4071-bc3a-0a5b32065bf3-catalog-content\") pod \"redhat-operators-fcs2s\" (UID: \"4902f901-8853-4071-bc3a-0a5b32065bf3\") " pod="openshift-marketplace/redhat-operators-fcs2s" Jan 22 05:52:03 crc kubenswrapper[4933]: I0122 05:52:03.104618 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4n4hs\" (UniqueName: \"kubernetes.io/projected/4902f901-8853-4071-bc3a-0a5b32065bf3-kube-api-access-4n4hs\") pod \"redhat-operators-fcs2s\" (UID: \"4902f901-8853-4071-bc3a-0a5b32065bf3\") " pod="openshift-marketplace/redhat-operators-fcs2s" Jan 22 05:52:03 crc kubenswrapper[4933]: I0122 05:52:03.104639 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4902f901-8853-4071-bc3a-0a5b32065bf3-utilities\") pod \"redhat-operators-fcs2s\" (UID: \"4902f901-8853-4071-bc3a-0a5b32065bf3\") " pod="openshift-marketplace/redhat-operators-fcs2s" Jan 22 05:52:03 crc kubenswrapper[4933]: I0122 05:52:03.158462 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kvffj" Jan 22 05:52:03 crc kubenswrapper[4933]: I0122 05:52:03.205829 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4902f901-8853-4071-bc3a-0a5b32065bf3-catalog-content\") pod \"redhat-operators-fcs2s\" (UID: \"4902f901-8853-4071-bc3a-0a5b32065bf3\") " pod="openshift-marketplace/redhat-operators-fcs2s" Jan 22 05:52:03 crc kubenswrapper[4933]: I0122 05:52:03.205889 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4n4hs\" (UniqueName: \"kubernetes.io/projected/4902f901-8853-4071-bc3a-0a5b32065bf3-kube-api-access-4n4hs\") pod \"redhat-operators-fcs2s\" (UID: \"4902f901-8853-4071-bc3a-0a5b32065bf3\") " pod="openshift-marketplace/redhat-operators-fcs2s" Jan 22 05:52:03 crc kubenswrapper[4933]: I0122 05:52:03.205916 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4902f901-8853-4071-bc3a-0a5b32065bf3-utilities\") pod \"redhat-operators-fcs2s\" (UID: \"4902f901-8853-4071-bc3a-0a5b32065bf3\") " pod="openshift-marketplace/redhat-operators-fcs2s" Jan 22 05:52:03 crc kubenswrapper[4933]: I0122 05:52:03.206494 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4902f901-8853-4071-bc3a-0a5b32065bf3-catalog-content\") pod \"redhat-operators-fcs2s\" (UID: \"4902f901-8853-4071-bc3a-0a5b32065bf3\") " pod="openshift-marketplace/redhat-operators-fcs2s" Jan 22 05:52:03 crc kubenswrapper[4933]: I0122 05:52:03.206518 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4902f901-8853-4071-bc3a-0a5b32065bf3-utilities\") pod \"redhat-operators-fcs2s\" (UID: \"4902f901-8853-4071-bc3a-0a5b32065bf3\") " pod="openshift-marketplace/redhat-operators-fcs2s" Jan 22 05:52:03 crc kubenswrapper[4933]: I0122 05:52:03.230340 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4n4hs\" (UniqueName: \"kubernetes.io/projected/4902f901-8853-4071-bc3a-0a5b32065bf3-kube-api-access-4n4hs\") pod \"redhat-operators-fcs2s\" (UID: \"4902f901-8853-4071-bc3a-0a5b32065bf3\") " pod="openshift-marketplace/redhat-operators-fcs2s" Jan 22 05:52:03 crc kubenswrapper[4933]: I0122 05:52:03.359303 4933 generic.go:334] "Generic (PLEG): container finished" podID="65b68271-f234-4a74-aa04-b5113c8c1d89" containerID="6a390c4df7f54df8052347e2f34e92e03a1f04a95a60f1c955066d9b8b7f27de" exitCode=0 Jan 22 05:52:03 crc kubenswrapper[4933]: I0122 05:52:03.359892 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5jcpg" event={"ID":"65b68271-f234-4a74-aa04-b5113c8c1d89","Type":"ContainerDied","Data":"6a390c4df7f54df8052347e2f34e92e03a1f04a95a60f1c955066d9b8b7f27de"} Jan 22 05:52:03 crc kubenswrapper[4933]: I0122 05:52:03.363614 4933 generic.go:334] "Generic (PLEG): container finished" podID="59c8d87a-51df-446d-8e17-197464398b18" containerID="c4b2603445e41ae580ddd7f20c1c7e65104977f0a65c94839e7e805bade17074" exitCode=0 Jan 22 05:52:03 crc kubenswrapper[4933]: I0122 05:52:03.363639 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m4tv2" event={"ID":"59c8d87a-51df-446d-8e17-197464398b18","Type":"ContainerDied","Data":"c4b2603445e41ae580ddd7f20c1c7e65104977f0a65c94839e7e805bade17074"} 
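The "SyncLoop (PLEG)" entries come from the Pod Lifecycle Event Generator: kubelet periodically relists containers from the CRI runtime, diffs the result against its cache, and feeds ContainerStarted/ContainerDied events into the sync loop ("Generic (PLEG): container finished" is the generic PLEG noticing an exited container during that diff). A sketch of the event shape and the diffing idea, under a deliberately simplified model (the real types live under pkg/kubelet/pleg in kubernetes):

package main

import "fmt"

type PodLifecycleEventType string

const (
	ContainerStarted PodLifecycleEventType = "ContainerStarted"
	ContainerDied    PodLifecycleEventType = "ContainerDied"
)

type PodLifecycleEvent struct {
	PodID string
	Type  PodLifecycleEventType
	Data  string // container or sandbox ID, as in the log's "Data" field
}

// diff compares two relists (containerID -> running?) and emits events of the
// same shape as the entries above; containers that vanished entirely are
// elided for brevity.
func diff(podID string, prev, cur map[string]bool) []PodLifecycleEvent {
	var evs []PodLifecycleEvent
	for id, running := range cur {
		was, seen := prev[id]
		switch {
		case running && (!seen || !was):
			evs = append(evs, PodLifecycleEvent{podID, ContainerStarted, id})
		case !running && seen && was:
			evs = append(evs, PodLifecycleEvent{podID, ContainerDied, id})
		}
	}
	return evs
}

func main() {
	prev := map[string]bool{"6a390c4d": true}
	cur := map[string]bool{"6a390c4d": false, "0ecd03ff": true}
	for _, e := range diff("65b68271-...", prev, cur) {
		fmt.Printf("%s %s %s\n", e.Type, e.PodID, e.Data)
	}
}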
Jan 22 05:52:03 crc kubenswrapper[4933]: I0122 05:52:03.375536 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fcs2s"
Jan 22 05:52:03 crc kubenswrapper[4933]: I0122 05:52:03.560309 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kvffj"]
Jan 22 05:52:03 crc kubenswrapper[4933]: I0122 05:52:03.828641 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fcs2s"]
Jan 22 05:52:04 crc kubenswrapper[4933]: I0122 05:52:04.372030 4933 generic.go:334] "Generic (PLEG): container finished" podID="4902f901-8853-4071-bc3a-0a5b32065bf3" containerID="a051a0163a76b2be6ffd02863846447b57048b9cb6bd2ec1013f4cd65ebcbb7f" exitCode=0
Jan 22 05:52:04 crc kubenswrapper[4933]: I0122 05:52:04.372186 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fcs2s" event={"ID":"4902f901-8853-4071-bc3a-0a5b32065bf3","Type":"ContainerDied","Data":"a051a0163a76b2be6ffd02863846447b57048b9cb6bd2ec1013f4cd65ebcbb7f"}
Jan 22 05:52:04 crc kubenswrapper[4933]: I0122 05:52:04.372600 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fcs2s" event={"ID":"4902f901-8853-4071-bc3a-0a5b32065bf3","Type":"ContainerStarted","Data":"db9b01b61379d215a66bc6172e20c9b080a488000beb3383431214e15b48e715"}
Jan 22 05:52:04 crc kubenswrapper[4933]: I0122 05:52:04.374560 4933 generic.go:334] "Generic (PLEG): container finished" podID="ed5c6166-9794-440e-9f36-6ae46897815b" containerID="7f36e2644c9ea5c51ab3e9123e0e7cda48787dc3657bb765d7694c2a2381bf11" exitCode=0
Jan 22 05:52:04 crc kubenswrapper[4933]: I0122 05:52:04.374615 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kvffj" event={"ID":"ed5c6166-9794-440e-9f36-6ae46897815b","Type":"ContainerDied","Data":"7f36e2644c9ea5c51ab3e9123e0e7cda48787dc3657bb765d7694c2a2381bf11"}
Jan 22 05:52:04 crc kubenswrapper[4933]: I0122 05:52:04.374690 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kvffj" event={"ID":"ed5c6166-9794-440e-9f36-6ae46897815b","Type":"ContainerStarted","Data":"470c607017c16a8ea6be011226a9a3f4acab3d295cbb82b196b0b04860ce158f"}
Jan 22 05:52:05 crc kubenswrapper[4933]: I0122 05:52:05.381580 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5jcpg" event={"ID":"65b68271-f234-4a74-aa04-b5113c8c1d89","Type":"ContainerStarted","Data":"0ecd03ff35664d2738550090ec4f0c54ca8e5e200228b42780fa290074a2aa51"}
Jan 22 05:52:05 crc kubenswrapper[4933]: I0122 05:52:05.384914 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m4tv2" event={"ID":"59c8d87a-51df-446d-8e17-197464398b18","Type":"ContainerStarted","Data":"561ed4d1b26241a759040b1b289e03620b01a40d5d6b3ea4b150e5263a0cb246"}
Jan 22 05:52:05 crc kubenswrapper[4933]: I0122 05:52:05.405260 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-5jcpg" podStartSLOduration=3.611352057 podStartE2EDuration="5.405240327s" podCreationTimestamp="2026-01-22 05:52:00 +0000 UTC" firstStartedPulling="2026-01-22 05:52:02.352875951 +0000 UTC m=+370.190001304" lastFinishedPulling="2026-01-22 05:52:04.146764221 +0000 UTC m=+371.983889574" observedRunningTime="2026-01-22 05:52:05.404566472 +0000 UTC m=+373.241691835" watchObservedRunningTime="2026-01-22 05:52:05.405240327 +0000 UTC m=+373.242365690"
Jan 22 05:52:05 crc kubenswrapper[4933]: I0122 05:52:05.426271 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-m4tv2" podStartSLOduration=3.693027673 podStartE2EDuration="5.426258444s" podCreationTimestamp="2026-01-22 05:52:00 +0000 UTC" firstStartedPulling="2026-01-22 05:52:02.352906572 +0000 UTC m=+370.190031925" lastFinishedPulling="2026-01-22 05:52:04.086137343 +0000 UTC m=+371.923262696" observedRunningTime="2026-01-22 05:52:05.422835837 +0000 UTC m=+373.259961190" watchObservedRunningTime="2026-01-22 05:52:05.426258444 +0000 UTC m=+373.263383797"
Jan 22 05:52:06 crc kubenswrapper[4933]: I0122 05:52:06.396021 4933 generic.go:334] "Generic (PLEG): container finished" podID="4902f901-8853-4071-bc3a-0a5b32065bf3" containerID="f4d2f4e06e9cf9f866301d688cd000eecc52bb7254a824ad0eff452307f140cf" exitCode=0
Jan 22 05:52:06 crc kubenswrapper[4933]: I0122 05:52:06.396098 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fcs2s" event={"ID":"4902f901-8853-4071-bc3a-0a5b32065bf3","Type":"ContainerDied","Data":"f4d2f4e06e9cf9f866301d688cd000eecc52bb7254a824ad0eff452307f140cf"}
Jan 22 05:52:06 crc kubenswrapper[4933]: I0122 05:52:06.400101 4933 generic.go:334] "Generic (PLEG): container finished" podID="ed5c6166-9794-440e-9f36-6ae46897815b" containerID="1607c1a673616a4045f0c7dc4a0cea67766e071a76574e4f5a62756e36f58c7e" exitCode=0
Jan 22 05:52:06 crc kubenswrapper[4933]: I0122 05:52:06.400264 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kvffj" event={"ID":"ed5c6166-9794-440e-9f36-6ae46897815b","Type":"ContainerDied","Data":"1607c1a673616a4045f0c7dc4a0cea67766e071a76574e4f5a62756e36f58c7e"}
Jan 22 05:52:07 crc kubenswrapper[4933]: I0122 05:52:07.407676 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fcs2s" event={"ID":"4902f901-8853-4071-bc3a-0a5b32065bf3","Type":"ContainerStarted","Data":"774e95e4ff66b9e047fa3064cd9412c21cbe755ae22045fc5d063b681fcd2068"}
Jan 22 05:52:07 crc kubenswrapper[4933]: I0122 05:52:07.409902 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kvffj" event={"ID":"ed5c6166-9794-440e-9f36-6ae46897815b","Type":"ContainerStarted","Data":"e2bf24a673e8af746e3a7fbf732b5ff3eff921de1a200a465b2882c779978f7a"}
Jan 22 05:52:07 crc kubenswrapper[4933]: I0122 05:52:07.435034 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-fcs2s" podStartSLOduration=1.9451312440000001 podStartE2EDuration="4.435007953s" podCreationTimestamp="2026-01-22 05:52:03 +0000 UTC" firstStartedPulling="2026-01-22 05:52:04.373140832 +0000 UTC m=+372.210266185" lastFinishedPulling="2026-01-22 05:52:06.863017541 +0000 UTC m=+374.700142894" observedRunningTime="2026-01-22 05:52:07.431634817 +0000 UTC m=+375.268760210" watchObservedRunningTime="2026-01-22 05:52:07.435007953 +0000 UTC m=+375.272133346"
Jan 22 05:52:07 crc kubenswrapper[4933]: I0122 05:52:07.453354 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-kvffj" podStartSLOduration=2.929121952 podStartE2EDuration="5.453304909s" podCreationTimestamp="2026-01-22 05:52:02 +0000 UTC" firstStartedPulling="2026-01-22 05:52:04.376151371 +0000 UTC m=+372.213276724" lastFinishedPulling="2026-01-22 05:52:06.900334328 +0000 UTC m=+374.737459681" observedRunningTime="2026-01-22 05:52:07.450841633 +0000 UTC m=+375.287967026" watchObservedRunningTime="2026-01-22 05:52:07.453304909 +0000 UTC m=+375.290430302"
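When images do get pulled, the two durations diverge. For certified-operators-5jcpg above: pull time = lastFinishedPulling - firstStartedPulling = 05:52:04.146764221 - 05:52:02.352875951 ≈ 1.793888s, and podStartSLOduration = 5.405240327s - 1.793888270s = 3.611352057s, exactly the logged value. Worked out using the monotonic m=+ offsets, which make the subtraction trivial:

package main

import "fmt"

func main() {
	// Monotonic m=+ offsets from the certified-operators-5jcpg entry above.
	firstPull := 370.190001304 // firstStartedPulling
	lastPull := 371.983889574  // lastFinishedPulling
	e2e := 5.405240327         // podStartE2EDuration in seconds

	pull := lastPull - firstPull
	slo := e2e - pull
	fmt.Printf("pull=%.9fs slo=%.9fs\n", pull, slo) // slo=3.611352057s, matching the log
}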
Jan 22 05:52:09 crc kubenswrapper[4933]: I0122 05:52:09.703837 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-tlm2t"]
Jan 22 05:52:09 crc kubenswrapper[4933]: I0122 05:52:09.705325 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-tlm2t"
Jan 22 05:52:09 crc kubenswrapper[4933]: I0122 05:52:09.725705 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-tlm2t"]
Jan 22 05:52:09 crc kubenswrapper[4933]: I0122 05:52:09.898475 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf-ca-trust-extracted\") pod \"image-registry-66df7c8f76-tlm2t\" (UID: \"a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tlm2t"
Jan 22 05:52:09 crc kubenswrapper[4933]: I0122 05:52:09.898549 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf-trusted-ca\") pod \"image-registry-66df7c8f76-tlm2t\" (UID: \"a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tlm2t"
Jan 22 05:52:09 crc kubenswrapper[4933]: I0122 05:52:09.898581 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf-registry-certificates\") pod \"image-registry-66df7c8f76-tlm2t\" (UID: \"a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tlm2t"
Jan 22 05:52:09 crc kubenswrapper[4933]: I0122 05:52:09.898626 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mh8mk\" (UniqueName: \"kubernetes.io/projected/a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf-kube-api-access-mh8mk\") pod \"image-registry-66df7c8f76-tlm2t\" (UID: \"a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tlm2t"
Jan 22 05:52:09 crc kubenswrapper[4933]: I0122 05:52:09.898806 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf-registry-tls\") pod \"image-registry-66df7c8f76-tlm2t\" (UID: \"a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tlm2t"
Jan 22 05:52:09 crc kubenswrapper[4933]: I0122 05:52:09.898857 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf-bound-sa-token\") pod \"image-registry-66df7c8f76-tlm2t\" (UID: \"a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tlm2t"
Jan 22 05:52:09 crc kubenswrapper[4933]: I0122 05:52:09.898926 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-tlm2t\" (UID: \"a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tlm2t"
Jan 22 05:52:09 crc kubenswrapper[4933]: I0122 05:52:09.898968 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf-installation-pull-secrets\") pod \"image-registry-66df7c8f76-tlm2t\" (UID: \"a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tlm2t"
Jan 22 05:52:09 crc kubenswrapper[4933]: I0122 05:52:09.920889 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-tlm2t\" (UID: \"a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tlm2t"
Jan 22 05:52:10 crc kubenswrapper[4933]: I0122 05:52:10.000462 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf-installation-pull-secrets\") pod \"image-registry-66df7c8f76-tlm2t\" (UID: \"a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tlm2t"
Jan 22 05:52:10 crc kubenswrapper[4933]: I0122 05:52:10.000558 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf-ca-trust-extracted\") pod \"image-registry-66df7c8f76-tlm2t\" (UID: \"a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tlm2t"
Jan 22 05:52:10 crc kubenswrapper[4933]: I0122 05:52:10.000582 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf-trusted-ca\") pod \"image-registry-66df7c8f76-tlm2t\" (UID: \"a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tlm2t"
Jan 22 05:52:10 crc kubenswrapper[4933]: I0122 05:52:10.000599 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf-registry-certificates\") pod \"image-registry-66df7c8f76-tlm2t\" (UID: \"a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tlm2t"
Jan 22 05:52:10 crc kubenswrapper[4933]: I0122 05:52:10.000626 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mh8mk\" (UniqueName: \"kubernetes.io/projected/a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf-kube-api-access-mh8mk\") pod \"image-registry-66df7c8f76-tlm2t\" (UID: \"a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tlm2t"
Jan 22 05:52:10 crc kubenswrapper[4933]: I0122 05:52:10.000653 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf-registry-tls\") pod \"image-registry-66df7c8f76-tlm2t\" (UID: \"a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tlm2t"
Jan 22 05:52:10 crc kubenswrapper[4933]: I0122 05:52:10.000679 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf-bound-sa-token\") pod \"image-registry-66df7c8f76-tlm2t\" (UID: \"a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tlm2t"
Jan 22 05:52:10 crc kubenswrapper[4933]: I0122 05:52:10.001111 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf-ca-trust-extracted\") pod \"image-registry-66df7c8f76-tlm2t\" (UID: \"a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tlm2t"
Jan 22 05:52:10 crc kubenswrapper[4933]: I0122 05:52:10.001944 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf-registry-certificates\") pod \"image-registry-66df7c8f76-tlm2t\" (UID: \"a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tlm2t"
Jan 22 05:52:10 crc kubenswrapper[4933]: I0122 05:52:10.002754 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf-trusted-ca\") pod \"image-registry-66df7c8f76-tlm2t\" (UID: \"a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tlm2t"
Jan 22 05:52:10 crc kubenswrapper[4933]: I0122 05:52:10.006506 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf-installation-pull-secrets\") pod \"image-registry-66df7c8f76-tlm2t\" (UID: \"a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tlm2t"
Jan 22 05:52:10 crc kubenswrapper[4933]: I0122 05:52:10.006756 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf-registry-tls\") pod \"image-registry-66df7c8f76-tlm2t\" (UID: \"a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tlm2t"
Jan 22 05:52:10 crc kubenswrapper[4933]: I0122 05:52:10.017294 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf-bound-sa-token\") pod \"image-registry-66df7c8f76-tlm2t\" (UID: \"a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tlm2t"
Jan 22 05:52:10 crc kubenswrapper[4933]: I0122 05:52:10.025928 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mh8mk\" (UniqueName: \"kubernetes.io/projected/a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf-kube-api-access-mh8mk\") pod \"image-registry-66df7c8f76-tlm2t\" (UID: \"a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf\") " pod="openshift-image-registry/image-registry-66df7c8f76-tlm2t"
Jan 22 05:52:10 crc kubenswrapper[4933]: I0122 05:52:10.031607 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-tlm2t"
Jan 22 05:52:10 crc kubenswrapper[4933]: I0122 05:52:10.443722 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-tlm2t"]
Jan 22 05:52:10 crc kubenswrapper[4933]: W0122 05:52:10.452082 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda3cb3a87_6d94_471b_b5aa_c1f3121eeeaf.slice/crio-4b2a571d9a33a962ede89ac76cd00aa643caae06467babde940552f64a51adf7 WatchSource:0}: Error finding container 4b2a571d9a33a962ede89ac76cd00aa643caae06467babde940552f64a51adf7: Status 404 returned error can't find the container with id 4b2a571d9a33a962ede89ac76cd00aa643caae06467babde940552f64a51adf7
Jan 22 05:52:10 crc kubenswrapper[4933]: I0122 05:52:10.574397 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-m4tv2"
Jan 22 05:52:10 crc kubenswrapper[4933]: I0122 05:52:10.574451 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-m4tv2"
Jan 22 05:52:10 crc kubenswrapper[4933]: I0122 05:52:10.637756 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-m4tv2"
Jan 22 05:52:10 crc kubenswrapper[4933]: I0122 05:52:10.775302 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-5jcpg"
Jan 22 05:52:10 crc kubenswrapper[4933]: I0122 05:52:10.775360 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-5jcpg"
Jan 22 05:52:10 crc kubenswrapper[4933]: I0122 05:52:10.836260 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-5jcpg"
Jan 22 05:52:10 crc kubenswrapper[4933]: I0122 05:52:10.942960 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 05:52:10 crc kubenswrapper[4933]: I0122 05:52:10.943038 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 05:52:11 crc kubenswrapper[4933]: I0122 05:52:11.441721 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-tlm2t" event={"ID":"a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf","Type":"ContainerStarted","Data":"4b2a571d9a33a962ede89ac76cd00aa643caae06467babde940552f64a51adf7"}
Jan 22 05:52:11 crc kubenswrapper[4933]: I0122 05:52:11.495555 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-5jcpg"
Jan 22 05:52:11 crc kubenswrapper[4933]: I0122 05:52:11.496919 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-m4tv2"
Jan 22 05:52:12 crc kubenswrapper[4933]: I0122 05:52:12.451893 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-tlm2t" event={"ID":"a3cb3a87-6d94-471b-b5aa-c1f3121eeeaf","Type":"ContainerStarted","Data":"d501d2ba7c983bca615ac19417660dbfaee95dffeffea8e4ba5bd8466369dffd"}
Jan 22 05:52:12 crc kubenswrapper[4933]: I0122 05:52:12.484376 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-tlm2t" podStartSLOduration=3.48434175 podStartE2EDuration="3.48434175s" podCreationTimestamp="2026-01-22 05:52:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:52:12.479753406 +0000 UTC m=+380.316878789" watchObservedRunningTime="2026-01-22 05:52:12.48434175 +0000 UTC m=+380.321467143"
Jan 22 05:52:13 crc kubenswrapper[4933]: I0122 05:52:13.158934 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-kvffj"
Jan 22 05:52:13 crc kubenswrapper[4933]: I0122 05:52:13.158999 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-kvffj"
Jan 22 05:52:13 crc kubenswrapper[4933]: I0122 05:52:13.201330 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-kvffj"
Jan 22 05:52:13 crc kubenswrapper[4933]: I0122 05:52:13.376739 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-fcs2s"
Jan 22 05:52:13 crc kubenswrapper[4933]: I0122 05:52:13.376801 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-fcs2s"
Jan 22 05:52:13 crc kubenswrapper[4933]: I0122 05:52:13.456254 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-tlm2t"
Jan 22 05:52:13 crc kubenswrapper[4933]: I0122 05:52:13.499336 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-kvffj"
Jan 22 05:52:14 crc kubenswrapper[4933]: I0122 05:52:14.413178 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-fcs2s" podUID="4902f901-8853-4071-bc3a-0a5b32065bf3" containerName="registry-server" probeResult="failure" output=<
Jan 22 05:52:14 crc kubenswrapper[4933]: 	timeout: failed to connect service ":50051" within 1s
Jan 22 05:52:14 crc kubenswrapper[4933]:  >
Jan 22 05:52:23 crc kubenswrapper[4933]: I0122 05:52:23.412918 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-fcs2s"
Jan 22 05:52:23 crc kubenswrapper[4933]: I0122 05:52:23.466145 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-fcs2s"
Jan 22 05:52:30 crc kubenswrapper[4933]: I0122 05:52:30.037588 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-tlm2t"
Jan 22 05:52:30 crc kubenswrapper[4933]: I0122 05:52:30.100833 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-4ptvd"]
Jan 22 05:52:40 crc kubenswrapper[4933]: I0122 05:52:40.943783 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 05:52:40 crc kubenswrapper[4933]: I0122 05:52:40.944257 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 05:52:55 crc kubenswrapper[4933]: I0122 05:52:55.145118 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd" podUID="26b5f8af-bb33-40cc-8ef7-03b0c931896c" containerName="registry" containerID="cri-o://e401e638793baaebb51ebc3a0056b236cf22b5ad4d2202f61b825f30983bdce5" gracePeriod=30 Jan 22 05:52:55 crc kubenswrapper[4933]: I0122 05:52:55.520270 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd" Jan 22 05:52:55 crc kubenswrapper[4933]: I0122 05:52:55.584989 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/26b5f8af-bb33-40cc-8ef7-03b0c931896c-trusted-ca\") pod \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " Jan 22 05:52:55 crc kubenswrapper[4933]: I0122 05:52:55.585299 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " Jan 22 05:52:55 crc kubenswrapper[4933]: I0122 05:52:55.585339 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4gsr5\" (UniqueName: \"kubernetes.io/projected/26b5f8af-bb33-40cc-8ef7-03b0c931896c-kube-api-access-4gsr5\") pod \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " Jan 22 05:52:55 crc kubenswrapper[4933]: I0122 05:52:55.585369 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/26b5f8af-bb33-40cc-8ef7-03b0c931896c-installation-pull-secrets\") pod \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " Jan 22 05:52:55 crc kubenswrapper[4933]: I0122 05:52:55.585388 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/26b5f8af-bb33-40cc-8ef7-03b0c931896c-registry-tls\") pod \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " Jan 22 05:52:55 crc kubenswrapper[4933]: I0122 05:52:55.585437 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/26b5f8af-bb33-40cc-8ef7-03b0c931896c-bound-sa-token\") pod \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " Jan 22 05:52:55 crc kubenswrapper[4933]: I0122 05:52:55.585467 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/26b5f8af-bb33-40cc-8ef7-03b0c931896c-ca-trust-extracted\") pod \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\" 
(UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " Jan 22 05:52:55 crc kubenswrapper[4933]: I0122 05:52:55.585504 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/26b5f8af-bb33-40cc-8ef7-03b0c931896c-registry-certificates\") pod \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\" (UID: \"26b5f8af-bb33-40cc-8ef7-03b0c931896c\") " Jan 22 05:52:55 crc kubenswrapper[4933]: I0122 05:52:55.585859 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/26b5f8af-bb33-40cc-8ef7-03b0c931896c-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "26b5f8af-bb33-40cc-8ef7-03b0c931896c" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:52:55 crc kubenswrapper[4933]: I0122 05:52:55.587041 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/26b5f8af-bb33-40cc-8ef7-03b0c931896c-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "26b5f8af-bb33-40cc-8ef7-03b0c931896c" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:52:55 crc kubenswrapper[4933]: I0122 05:52:55.590571 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/26b5f8af-bb33-40cc-8ef7-03b0c931896c-kube-api-access-4gsr5" (OuterVolumeSpecName: "kube-api-access-4gsr5") pod "26b5f8af-bb33-40cc-8ef7-03b0c931896c" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c"). InnerVolumeSpecName "kube-api-access-4gsr5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:52:55 crc kubenswrapper[4933]: I0122 05:52:55.591594 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/26b5f8af-bb33-40cc-8ef7-03b0c931896c-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "26b5f8af-bb33-40cc-8ef7-03b0c931896c" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:52:55 crc kubenswrapper[4933]: I0122 05:52:55.591901 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/26b5f8af-bb33-40cc-8ef7-03b0c931896c-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "26b5f8af-bb33-40cc-8ef7-03b0c931896c" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:52:55 crc kubenswrapper[4933]: I0122 05:52:55.593148 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/26b5f8af-bb33-40cc-8ef7-03b0c931896c-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "26b5f8af-bb33-40cc-8ef7-03b0c931896c" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:52:55 crc kubenswrapper[4933]: I0122 05:52:55.606628 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "26b5f8af-bb33-40cc-8ef7-03b0c931896c" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 22 05:52:55 crc kubenswrapper[4933]: I0122 05:52:55.608130 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/26b5f8af-bb33-40cc-8ef7-03b0c931896c-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "26b5f8af-bb33-40cc-8ef7-03b0c931896c" (UID: "26b5f8af-bb33-40cc-8ef7-03b0c931896c"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:52:55 crc kubenswrapper[4933]: I0122 05:52:55.686959 4933 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/26b5f8af-bb33-40cc-8ef7-03b0c931896c-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 22 05:52:55 crc kubenswrapper[4933]: I0122 05:52:55.686998 4933 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/26b5f8af-bb33-40cc-8ef7-03b0c931896c-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 22 05:52:55 crc kubenswrapper[4933]: I0122 05:52:55.687009 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4gsr5\" (UniqueName: \"kubernetes.io/projected/26b5f8af-bb33-40cc-8ef7-03b0c931896c-kube-api-access-4gsr5\") on node \"crc\" DevicePath \"\"" Jan 22 05:52:55 crc kubenswrapper[4933]: I0122 05:52:55.687017 4933 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/26b5f8af-bb33-40cc-8ef7-03b0c931896c-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 22 05:52:55 crc kubenswrapper[4933]: I0122 05:52:55.687026 4933 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/26b5f8af-bb33-40cc-8ef7-03b0c931896c-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 22 05:52:55 crc kubenswrapper[4933]: I0122 05:52:55.687034 4933 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/26b5f8af-bb33-40cc-8ef7-03b0c931896c-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 22 05:52:55 crc kubenswrapper[4933]: I0122 05:52:55.687041 4933 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/26b5f8af-bb33-40cc-8ef7-03b0c931896c-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 22 05:52:55 crc kubenswrapper[4933]: I0122 05:52:55.713518 4933 generic.go:334] "Generic (PLEG): container finished" podID="26b5f8af-bb33-40cc-8ef7-03b0c931896c" containerID="e401e638793baaebb51ebc3a0056b236cf22b5ad4d2202f61b825f30983bdce5" exitCode=0 Jan 22 05:52:55 crc kubenswrapper[4933]: I0122 05:52:55.713585 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd" Jan 22 05:52:55 crc kubenswrapper[4933]: I0122 05:52:55.713588 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd" event={"ID":"26b5f8af-bb33-40cc-8ef7-03b0c931896c","Type":"ContainerDied","Data":"e401e638793baaebb51ebc3a0056b236cf22b5ad4d2202f61b825f30983bdce5"} Jan 22 05:52:55 crc kubenswrapper[4933]: I0122 05:52:55.713642 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-4ptvd" event={"ID":"26b5f8af-bb33-40cc-8ef7-03b0c931896c","Type":"ContainerDied","Data":"a6579b8b789ed946ec8b920e6700606151f70f6e0acea9805509b5caf9587243"} Jan 22 05:52:55 crc kubenswrapper[4933]: I0122 05:52:55.713681 4933 scope.go:117] "RemoveContainer" containerID="e401e638793baaebb51ebc3a0056b236cf22b5ad4d2202f61b825f30983bdce5" Jan 22 05:52:55 crc kubenswrapper[4933]: I0122 05:52:55.733688 4933 scope.go:117] "RemoveContainer" containerID="e401e638793baaebb51ebc3a0056b236cf22b5ad4d2202f61b825f30983bdce5" Jan 22 05:52:55 crc kubenswrapper[4933]: E0122 05:52:55.734207 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e401e638793baaebb51ebc3a0056b236cf22b5ad4d2202f61b825f30983bdce5\": container with ID starting with e401e638793baaebb51ebc3a0056b236cf22b5ad4d2202f61b825f30983bdce5 not found: ID does not exist" containerID="e401e638793baaebb51ebc3a0056b236cf22b5ad4d2202f61b825f30983bdce5" Jan 22 05:52:55 crc kubenswrapper[4933]: I0122 05:52:55.734264 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e401e638793baaebb51ebc3a0056b236cf22b5ad4d2202f61b825f30983bdce5"} err="failed to get container status \"e401e638793baaebb51ebc3a0056b236cf22b5ad4d2202f61b825f30983bdce5\": rpc error: code = NotFound desc = could not find container \"e401e638793baaebb51ebc3a0056b236cf22b5ad4d2202f61b825f30983bdce5\": container with ID starting with e401e638793baaebb51ebc3a0056b236cf22b5ad4d2202f61b825f30983bdce5 not found: ID does not exist" Jan 22 05:52:55 crc kubenswrapper[4933]: I0122 05:52:55.748614 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-4ptvd"] Jan 22 05:52:55 crc kubenswrapper[4933]: I0122 05:52:55.754146 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-4ptvd"] Jan 22 05:52:56 crc kubenswrapper[4933]: I0122 05:52:56.501633 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="26b5f8af-bb33-40cc-8ef7-03b0c931896c" path="/var/lib/kubelet/pods/26b5f8af-bb33-40cc-8ef7-03b0c931896c/volumes" Jan 22 05:53:10 crc kubenswrapper[4933]: I0122 05:53:10.942700 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 05:53:10 crc kubenswrapper[4933]: I0122 05:53:10.943259 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 05:53:10 crc 
kubenswrapper[4933]: I0122 05:53:10.943303 4933 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" Jan 22 05:53:10 crc kubenswrapper[4933]: I0122 05:53:10.943874 4933 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f2ae10bf6f7d37b27e86b0e464e488c6851c87577378f41c92c7954310583b11"} pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 05:53:10 crc kubenswrapper[4933]: I0122 05:53:10.943929 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" containerID="cri-o://f2ae10bf6f7d37b27e86b0e464e488c6851c87577378f41c92c7954310583b11" gracePeriod=600 Jan 22 05:53:11 crc kubenswrapper[4933]: I0122 05:53:11.822113 4933 generic.go:334] "Generic (PLEG): container finished" podID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerID="f2ae10bf6f7d37b27e86b0e464e488c6851c87577378f41c92c7954310583b11" exitCode=0 Jan 22 05:53:11 crc kubenswrapper[4933]: I0122 05:53:11.822245 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerDied","Data":"f2ae10bf6f7d37b27e86b0e464e488c6851c87577378f41c92c7954310583b11"} Jan 22 05:53:11 crc kubenswrapper[4933]: I0122 05:53:11.822818 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerStarted","Data":"2580ee867ae51438006c1c5f9c6959c2f36e5ea5bae190d4909ac0a463e77f1e"} Jan 22 05:53:11 crc kubenswrapper[4933]: I0122 05:53:11.822854 4933 scope.go:117] "RemoveContainer" containerID="99fe3d10aa7ee2f1de1f5f23ed8c4c7ac9efe69ce9e366f02e2e8efca2c017e0" Jan 22 05:55:40 crc kubenswrapper[4933]: I0122 05:55:40.943297 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 05:55:40 crc kubenswrapper[4933]: I0122 05:55:40.943947 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 05:56:10 crc kubenswrapper[4933]: I0122 05:56:10.942818 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 05:56:10 crc kubenswrapper[4933]: I0122 05:56:10.943452 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 05:56:40 crc kubenswrapper[4933]: I0122 05:56:40.943447 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 05:56:40 crc kubenswrapper[4933]: I0122 05:56:40.944059 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 05:56:40 crc kubenswrapper[4933]: I0122 05:56:40.944178 4933 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" Jan 22 05:56:40 crc kubenswrapper[4933]: I0122 05:56:40.945029 4933 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2580ee867ae51438006c1c5f9c6959c2f36e5ea5bae190d4909ac0a463e77f1e"} pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 05:56:40 crc kubenswrapper[4933]: I0122 05:56:40.945145 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" containerID="cri-o://2580ee867ae51438006c1c5f9c6959c2f36e5ea5bae190d4909ac0a463e77f1e" gracePeriod=600 Jan 22 05:56:41 crc kubenswrapper[4933]: I0122 05:56:41.750686 4933 generic.go:334] "Generic (PLEG): container finished" podID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerID="2580ee867ae51438006c1c5f9c6959c2f36e5ea5bae190d4909ac0a463e77f1e" exitCode=0 Jan 22 05:56:41 crc kubenswrapper[4933]: I0122 05:56:41.750777 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerDied","Data":"2580ee867ae51438006c1c5f9c6959c2f36e5ea5bae190d4909ac0a463e77f1e"} Jan 22 05:56:41 crc kubenswrapper[4933]: I0122 05:56:41.751423 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerStarted","Data":"828e74f34c881560f38ae267428103e3c4e3e91319d786584ec0777e00c67304"} Jan 22 05:56:41 crc kubenswrapper[4933]: I0122 05:56:41.751458 4933 scope.go:117] "RemoveContainer" containerID="f2ae10bf6f7d37b27e86b0e464e488c6851c87577378f41c92c7954310583b11" Jan 22 05:56:52 crc kubenswrapper[4933]: I0122 05:56:52.753052 4933 scope.go:117] "RemoveContainer" containerID="7376e01f4d920fb2398c39cb3f4b003c733a2368ef4cfb146d74b822af081bc8" Jan 22 05:56:52 crc kubenswrapper[4933]: I0122 05:56:52.778717 4933 scope.go:117] "RemoveContainer" containerID="3d89c0c6fdb787cbd092aac13876e43dc86babfda2df94ada483418ad8ef32a1" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.207034 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-z88sj"] Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 
05:57:13.208060 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="ovn-controller" containerID="cri-o://a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1" gracePeriod=30 Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.208132 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="nbdb" containerID="cri-o://8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0" gracePeriod=30 Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.208192 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="northd" containerID="cri-o://c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15" gracePeriod=30 Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.208195 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac" gracePeriod=30 Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.208222 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="sbdb" containerID="cri-o://e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed" gracePeriod=30 Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.208261 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="kube-rbac-proxy-node" containerID="cri-o://a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68" gracePeriod=30 Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.208274 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="ovn-acl-logging" containerID="cri-o://c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741" gracePeriod=30 Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.289096 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="ovnkube-controller" containerID="cri-o://5757e900b218b813cdd63de12317a7cff0730380dd4c56139a108acd13a145d6" gracePeriod=30 Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.568247 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-z88sj_6a721333-1932-4bb0-b384-c034492e59c4/ovnkube-controller/3.log" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.570998 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-z88sj_6a721333-1932-4bb0-b384-c034492e59c4/ovn-acl-logging/0.log" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.571513 4933 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-z88sj_6a721333-1932-4bb0-b384-c034492e59c4/ovn-controller/0.log" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.572051 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.578325 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-var-lib-openvswitch\") pod \"6a721333-1932-4bb0-b384-c034492e59c4\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.578369 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-run-netns\") pod \"6a721333-1932-4bb0-b384-c034492e59c4\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.578398 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s7lwp\" (UniqueName: \"kubernetes.io/projected/6a721333-1932-4bb0-b384-c034492e59c4-kube-api-access-s7lwp\") pod \"6a721333-1932-4bb0-b384-c034492e59c4\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.578458 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "6a721333-1932-4bb0-b384-c034492e59c4" (UID: "6a721333-1932-4bb0-b384-c034492e59c4"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.578478 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6a721333-1932-4bb0-b384-c034492e59c4-env-overrides\") pod \"6a721333-1932-4bb0-b384-c034492e59c4\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.578555 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-log-socket\") pod \"6a721333-1932-4bb0-b384-c034492e59c4\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.578582 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6a721333-1932-4bb0-b384-c034492e59c4-ovn-node-metrics-cert\") pod \"6a721333-1932-4bb0-b384-c034492e59c4\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.578471 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "6a721333-1932-4bb0-b384-c034492e59c4" (UID: "6a721333-1932-4bb0-b384-c034492e59c4"). InnerVolumeSpecName "host-run-netns". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.578609 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6a721333-1932-4bb0-b384-c034492e59c4-ovnkube-script-lib\") pod \"6a721333-1932-4bb0-b384-c034492e59c4\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.578625 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-etc-openvswitch\") pod \"6a721333-1932-4bb0-b384-c034492e59c4\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.578641 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-log-socket" (OuterVolumeSpecName: "log-socket") pod "6a721333-1932-4bb0-b384-c034492e59c4" (UID: "6a721333-1932-4bb0-b384-c034492e59c4"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.578655 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-systemd-units\") pod \"6a721333-1932-4bb0-b384-c034492e59c4\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.578671 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-kubelet\") pod \"6a721333-1932-4bb0-b384-c034492e59c4\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.578686 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-node-log\") pod \"6a721333-1932-4bb0-b384-c034492e59c4\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.578703 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-cni-netd\") pod \"6a721333-1932-4bb0-b384-c034492e59c4\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.578720 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-cni-bin\") pod \"6a721333-1932-4bb0-b384-c034492e59c4\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.578732 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-run-openvswitch\") pod \"6a721333-1932-4bb0-b384-c034492e59c4\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.578746 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: 
\"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-var-lib-cni-networks-ovn-kubernetes\") pod \"6a721333-1932-4bb0-b384-c034492e59c4\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.578771 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-run-ovn-kubernetes\") pod \"6a721333-1932-4bb0-b384-c034492e59c4\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.578790 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-run-systemd\") pod \"6a721333-1932-4bb0-b384-c034492e59c4\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.578803 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6a721333-1932-4bb0-b384-c034492e59c4-ovnkube-config\") pod \"6a721333-1932-4bb0-b384-c034492e59c4\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.578818 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-slash\") pod \"6a721333-1932-4bb0-b384-c034492e59c4\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.578833 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-run-ovn\") pod \"6a721333-1932-4bb0-b384-c034492e59c4\" (UID: \"6a721333-1932-4bb0-b384-c034492e59c4\") " Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.578969 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a721333-1932-4bb0-b384-c034492e59c4-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6a721333-1932-4bb0-b384-c034492e59c4" (UID: "6a721333-1932-4bb0-b384-c034492e59c4"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.579001 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "6a721333-1932-4bb0-b384-c034492e59c4" (UID: "6a721333-1932-4bb0-b384-c034492e59c4"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.579024 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "6a721333-1932-4bb0-b384-c034492e59c4" (UID: "6a721333-1932-4bb0-b384-c034492e59c4"). InnerVolumeSpecName "etc-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.579043 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "6a721333-1932-4bb0-b384-c034492e59c4" (UID: "6a721333-1932-4bb0-b384-c034492e59c4"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.579061 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-node-log" (OuterVolumeSpecName: "node-log") pod "6a721333-1932-4bb0-b384-c034492e59c4" (UID: "6a721333-1932-4bb0-b384-c034492e59c4"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.579067 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "6a721333-1932-4bb0-b384-c034492e59c4" (UID: "6a721333-1932-4bb0-b384-c034492e59c4"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.579099 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "6a721333-1932-4bb0-b384-c034492e59c4" (UID: "6a721333-1932-4bb0-b384-c034492e59c4"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.579119 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "6a721333-1932-4bb0-b384-c034492e59c4" (UID: "6a721333-1932-4bb0-b384-c034492e59c4"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.579144 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "6a721333-1932-4bb0-b384-c034492e59c4" (UID: "6a721333-1932-4bb0-b384-c034492e59c4"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.579158 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-slash" (OuterVolumeSpecName: "host-slash") pod "6a721333-1932-4bb0-b384-c034492e59c4" (UID: "6a721333-1932-4bb0-b384-c034492e59c4"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.579276 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "6a721333-1932-4bb0-b384-c034492e59c4" (UID: "6a721333-1932-4bb0-b384-c034492e59c4"). InnerVolumeSpecName "host-run-ovn-kubernetes". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.579325 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "6a721333-1932-4bb0-b384-c034492e59c4" (UID: "6a721333-1932-4bb0-b384-c034492e59c4"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.579594 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a721333-1932-4bb0-b384-c034492e59c4-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6a721333-1932-4bb0-b384-c034492e59c4" (UID: "6a721333-1932-4bb0-b384-c034492e59c4"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.580198 4933 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.580219 4933 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.580231 4933 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-run-netns\") on node \"crc\" DevicePath \"\"" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.580241 4933 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-log-socket\") on node \"crc\" DevicePath \"\"" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.580254 4933 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6a721333-1932-4bb0-b384-c034492e59c4-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.580266 4933 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.580277 4933 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-systemd-units\") on node \"crc\" DevicePath \"\"" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.580288 4933 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-kubelet\") on node \"crc\" DevicePath \"\"" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.580298 4933 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-node-log\") on node \"crc\" DevicePath \"\"" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.580309 4933 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-cni-netd\") on node 
\"crc\" DevicePath \"\"" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.580319 4933 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-run-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.580330 4933 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-cni-bin\") on node \"crc\" DevicePath \"\"" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.580341 4933 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.580353 4933 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.580364 4933 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6a721333-1932-4bb0-b384-c034492e59c4-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.580376 4933 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-host-slash\") on node \"crc\" DevicePath \"\"" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.580929 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a721333-1932-4bb0-b384-c034492e59c4-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6a721333-1932-4bb0-b384-c034492e59c4" (UID: "6a721333-1932-4bb0-b384-c034492e59c4"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.583585 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a721333-1932-4bb0-b384-c034492e59c4-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6a721333-1932-4bb0-b384-c034492e59c4" (UID: "6a721333-1932-4bb0-b384-c034492e59c4"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.584446 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a721333-1932-4bb0-b384-c034492e59c4-kube-api-access-s7lwp" (OuterVolumeSpecName: "kube-api-access-s7lwp") pod "6a721333-1932-4bb0-b384-c034492e59c4" (UID: "6a721333-1932-4bb0-b384-c034492e59c4"). InnerVolumeSpecName "kube-api-access-s7lwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.591328 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "6a721333-1932-4bb0-b384-c034492e59c4" (UID: "6a721333-1932-4bb0-b384-c034492e59c4"). InnerVolumeSpecName "run-systemd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.626504 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-sptqh"] Jan 22 05:57:13 crc kubenswrapper[4933]: E0122 05:57:13.626765 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="ovnkube-controller" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.626783 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="ovnkube-controller" Jan 22 05:57:13 crc kubenswrapper[4933]: E0122 05:57:13.626793 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="sbdb" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.626802 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="sbdb" Jan 22 05:57:13 crc kubenswrapper[4933]: E0122 05:57:13.626814 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="ovnkube-controller" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.626821 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="ovnkube-controller" Jan 22 05:57:13 crc kubenswrapper[4933]: E0122 05:57:13.626832 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="kube-rbac-proxy-node" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.626839 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="kube-rbac-proxy-node" Jan 22 05:57:13 crc kubenswrapper[4933]: E0122 05:57:13.626850 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="ovnkube-controller" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.626856 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="ovnkube-controller" Jan 22 05:57:13 crc kubenswrapper[4933]: E0122 05:57:13.626866 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="ovn-controller" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.626873 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="ovn-controller" Jan 22 05:57:13 crc kubenswrapper[4933]: E0122 05:57:13.626884 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="kubecfg-setup" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.626892 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="kubecfg-setup" Jan 22 05:57:13 crc kubenswrapper[4933]: E0122 05:57:13.626901 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="ovnkube-controller" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.626910 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="ovnkube-controller" Jan 22 05:57:13 crc kubenswrapper[4933]: E0122 05:57:13.626920 4933 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="ovn-acl-logging" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.626929 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="ovn-acl-logging" Jan 22 05:57:13 crc kubenswrapper[4933]: E0122 05:57:13.626939 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26b5f8af-bb33-40cc-8ef7-03b0c931896c" containerName="registry" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.626946 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="26b5f8af-bb33-40cc-8ef7-03b0c931896c" containerName="registry" Jan 22 05:57:13 crc kubenswrapper[4933]: E0122 05:57:13.626957 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="nbdb" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.626963 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="nbdb" Jan 22 05:57:13 crc kubenswrapper[4933]: E0122 05:57:13.626978 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="northd" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.626986 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="northd" Jan 22 05:57:13 crc kubenswrapper[4933]: E0122 05:57:13.626996 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="kube-rbac-proxy-ovn-metrics" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.627002 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="kube-rbac-proxy-ovn-metrics" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.627114 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="ovnkube-controller" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.627126 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="ovnkube-controller" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.627134 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="ovnkube-controller" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.627143 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="sbdb" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.627153 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="northd" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.627161 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="ovn-acl-logging" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.627172 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="26b5f8af-bb33-40cc-8ef7-03b0c931896c" containerName="registry" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.627182 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="kube-rbac-proxy-ovn-metrics" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.627191 4933 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="nbdb" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.627199 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="ovn-controller" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.627208 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="kube-rbac-proxy-node" Jan 22 05:57:13 crc kubenswrapper[4933]: E0122 05:57:13.627312 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="ovnkube-controller" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.627322 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="ovnkube-controller" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.627426 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="ovnkube-controller" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.627437 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a721333-1932-4bb0-b384-c034492e59c4" containerName="ovnkube-controller" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.628982 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.681726 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-env-overrides\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.681788 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.681852 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-run-openvswitch\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.681877 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-host-cni-netd\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.681893 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-log-socket\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" 
Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.681912 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-ovnkube-script-lib\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.681927 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-host-run-netns\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.681941 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-ovn-node-metrics-cert\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.681958 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-var-lib-openvswitch\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.681974 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dkm6m\" (UniqueName: \"kubernetes.io/projected/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-kube-api-access-dkm6m\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.681995 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-etc-openvswitch\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.682017 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-host-cni-bin\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.682039 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-systemd-units\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.682056 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-node-log\") pod \"ovnkube-node-sptqh\" 
(UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.682094 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-host-kubelet\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.682131 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-ovnkube-config\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.682150 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-run-ovn\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.682165 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-host-run-ovn-kubernetes\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.682187 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-host-slash\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.682202 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-run-systemd\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.682235 4933 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6a721333-1932-4bb0-b384-c034492e59c4-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.682246 4933 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/6a721333-1932-4bb0-b384-c034492e59c4-run-systemd\") on node \"crc\" DevicePath \"\"" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.682255 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s7lwp\" (UniqueName: \"kubernetes.io/projected/6a721333-1932-4bb0-b384-c034492e59c4-kube-api-access-s7lwp\") on node \"crc\" DevicePath \"\"" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.682264 4933 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: 
\"kubernetes.io/configmap/6a721333-1932-4bb0-b384-c034492e59c4-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.783325 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-ovnkube-config\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.783404 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-run-ovn\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.783458 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-host-run-ovn-kubernetes\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.783526 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-host-slash\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.783560 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-run-systemd\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.783595 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-host-slash\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.783640 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-env-overrides\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.783661 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-run-systemd\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.783558 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-host-run-ovn-kubernetes\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 
crc kubenswrapper[4933]: I0122 05:57:13.783711 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.783825 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-run-openvswitch\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.783871 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-log-socket\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.783939 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-host-cni-netd\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.784011 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-ovnkube-script-lib\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.784045 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-host-run-netns\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.784127 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-ovn-node-metrics-cert\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.784162 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-var-lib-openvswitch\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.784234 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dkm6m\" (UniqueName: \"kubernetes.io/projected/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-kube-api-access-dkm6m\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc 
Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.784341 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-host-cni-netd\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh"
Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.784386 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-host-cni-bin\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh"
Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.784428 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh"
Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.784435 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-systemd-units\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh"
Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.784487 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-node-log\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh"
Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.784506 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-systemd-units\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh"
Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.784520 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-host-kubelet\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh"
Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.784509 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-run-openvswitch\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh"
Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.784541 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-node-log\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh"
Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.784562 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-log-socket\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh"
Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.784586 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-host-run-netns\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh"
Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.784582 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-env-overrides\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh"
Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.783739 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-run-ovn\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh"
Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.784645 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-var-lib-openvswitch\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh"
Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.784690 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-host-kubelet\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh"
Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.784719 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-etc-openvswitch\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh"
Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.784810 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-host-cni-bin\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh"
Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.784966 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-ovnkube-config\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh"
\"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.785724 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-ovnkube-script-lib\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.790856 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-ovn-node-metrics-cert\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.814281 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dkm6m\" (UniqueName: \"kubernetes.io/projected/bffcdf36-90a4-4daf-a9d4-17bbb65626f7-kube-api-access-dkm6m\") pod \"ovnkube-node-sptqh\" (UID: \"bffcdf36-90a4-4daf-a9d4-17bbb65626f7\") " pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.952322 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.972265 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-jr6rw_f066dd84-0cd5-4e8c-8411-cf12cc83ea7d/kube-multus/2.log" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.972816 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-jr6rw_f066dd84-0cd5-4e8c-8411-cf12cc83ea7d/kube-multus/1.log" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.972952 4933 generic.go:334] "Generic (PLEG): container finished" podID="f066dd84-0cd5-4e8c-8411-cf12cc83ea7d" containerID="554f6ede0a925394463d392b48700a7c5dcd211ebffe4e3dc51046123872907f" exitCode=2 Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.973186 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-jr6rw" event={"ID":"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d","Type":"ContainerDied","Data":"554f6ede0a925394463d392b48700a7c5dcd211ebffe4e3dc51046123872907f"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.973388 4933 scope.go:117] "RemoveContainer" containerID="68112ca379a4d93e242173f33fb845379f58d92e02019847ede853e7a61df83f" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.973654 4933 scope.go:117] "RemoveContainer" containerID="554f6ede0a925394463d392b48700a7c5dcd211ebffe4e3dc51046123872907f" Jan 22 05:57:13 crc kubenswrapper[4933]: E0122 05:57:13.973830 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-jr6rw_openshift-multus(f066dd84-0cd5-4e8c-8411-cf12cc83ea7d)\"" pod="openshift-multus/multus-jr6rw" podUID="f066dd84-0cd5-4e8c-8411-cf12cc83ea7d" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.976970 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-z88sj_6a721333-1932-4bb0-b384-c034492e59c4/ovnkube-controller/3.log" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.984205 4933 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-z88sj_6a721333-1932-4bb0-b384-c034492e59c4/ovn-acl-logging/0.log" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.987903 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-z88sj_6a721333-1932-4bb0-b384-c034492e59c4/ovn-controller/0.log" Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.991913 4933 generic.go:334] "Generic (PLEG): container finished" podID="6a721333-1932-4bb0-b384-c034492e59c4" containerID="5757e900b218b813cdd63de12317a7cff0730380dd4c56139a108acd13a145d6" exitCode=0 Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.991984 4933 generic.go:334] "Generic (PLEG): container finished" podID="6a721333-1932-4bb0-b384-c034492e59c4" containerID="e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed" exitCode=0 Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.991996 4933 generic.go:334] "Generic (PLEG): container finished" podID="6a721333-1932-4bb0-b384-c034492e59c4" containerID="8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0" exitCode=0 Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992005 4933 generic.go:334] "Generic (PLEG): container finished" podID="6a721333-1932-4bb0-b384-c034492e59c4" containerID="c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15" exitCode=0 Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992012 4933 generic.go:334] "Generic (PLEG): container finished" podID="6a721333-1932-4bb0-b384-c034492e59c4" containerID="39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac" exitCode=0 Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992019 4933 generic.go:334] "Generic (PLEG): container finished" podID="6a721333-1932-4bb0-b384-c034492e59c4" containerID="a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68" exitCode=0 Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992060 4933 generic.go:334] "Generic (PLEG): container finished" podID="6a721333-1932-4bb0-b384-c034492e59c4" containerID="c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741" exitCode=143 Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992139 4933 generic.go:334] "Generic (PLEG): container finished" podID="6a721333-1932-4bb0-b384-c034492e59c4" containerID="a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1" exitCode=143 Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992167 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" event={"ID":"6a721333-1932-4bb0-b384-c034492e59c4","Type":"ContainerDied","Data":"5757e900b218b813cdd63de12317a7cff0730380dd4c56139a108acd13a145d6"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992236 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" event={"ID":"6a721333-1932-4bb0-b384-c034492e59c4","Type":"ContainerDied","Data":"e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992256 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" event={"ID":"6a721333-1932-4bb0-b384-c034492e59c4","Type":"ContainerDied","Data":"8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992269 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" 
event={"ID":"6a721333-1932-4bb0-b384-c034492e59c4","Type":"ContainerDied","Data":"c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992318 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" event={"ID":"6a721333-1932-4bb0-b384-c034492e59c4","Type":"ContainerDied","Data":"39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992333 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" event={"ID":"6a721333-1932-4bb0-b384-c034492e59c4","Type":"ContainerDied","Data":"a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992346 4933 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5757e900b218b813cdd63de12317a7cff0730380dd4c56139a108acd13a145d6"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992381 4933 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2344ed0bcca60932e6bdea03fbc86ae7bd867dd841e5bb1bd3544dac4543329c"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992390 4933 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992396 4933 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992403 4933 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992409 4933 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992415 4933 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992454 4933 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992465 4933 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992472 4933 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992507 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" 
event={"ID":"6a721333-1932-4bb0-b384-c034492e59c4","Type":"ContainerDied","Data":"c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992523 4933 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5757e900b218b813cdd63de12317a7cff0730380dd4c56139a108acd13a145d6"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992532 4933 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2344ed0bcca60932e6bdea03fbc86ae7bd867dd841e5bb1bd3544dac4543329c"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992539 4933 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992546 4933 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992553 4933 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992562 4933 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992569 4933 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992575 4933 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992582 4933 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992588 4933 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992598 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" event={"ID":"6a721333-1932-4bb0-b384-c034492e59c4","Type":"ContainerDied","Data":"a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992609 4933 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5757e900b218b813cdd63de12317a7cff0730380dd4c56139a108acd13a145d6"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992619 4933 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2344ed0bcca60932e6bdea03fbc86ae7bd867dd841e5bb1bd3544dac4543329c"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992626 4933 pod_container_deletor.go:114] 
"Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992633 4933 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992640 4933 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992647 4933 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992681 4933 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992689 4933 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992696 4933 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992702 4933 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992713 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" event={"ID":"6a721333-1932-4bb0-b384-c034492e59c4","Type":"ContainerDied","Data":"36713c23b47eada3b139ae8f72ead2f7d6fb6f847054c31344d8b18e868400f9"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992725 4933 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5757e900b218b813cdd63de12317a7cff0730380dd4c56139a108acd13a145d6"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992733 4933 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2344ed0bcca60932e6bdea03fbc86ae7bd867dd841e5bb1bd3544dac4543329c"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992740 4933 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992746 4933 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992753 4933 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992760 4933 pod_container_deletor.go:114] 
"Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992766 4933 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992773 4933 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992780 4933 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992787 4933 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d"} Jan 22 05:57:13 crc kubenswrapper[4933]: I0122 05:57:13.992907 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-z88sj" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.012891 4933 scope.go:117] "RemoveContainer" containerID="5757e900b218b813cdd63de12317a7cff0730380dd4c56139a108acd13a145d6" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.030800 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-z88sj"] Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.032353 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-z88sj"] Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.041265 4933 scope.go:117] "RemoveContainer" containerID="2344ed0bcca60932e6bdea03fbc86ae7bd867dd841e5bb1bd3544dac4543329c" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.065718 4933 scope.go:117] "RemoveContainer" containerID="e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.095378 4933 scope.go:117] "RemoveContainer" containerID="8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.115319 4933 scope.go:117] "RemoveContainer" containerID="c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.129461 4933 scope.go:117] "RemoveContainer" containerID="39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.203473 4933 scope.go:117] "RemoveContainer" containerID="a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.225501 4933 scope.go:117] "RemoveContainer" containerID="c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.238214 4933 scope.go:117] "RemoveContainer" containerID="a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.262561 4933 scope.go:117] "RemoveContainer" containerID="e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.277015 4933 scope.go:117] 
"RemoveContainer" containerID="5757e900b218b813cdd63de12317a7cff0730380dd4c56139a108acd13a145d6" Jan 22 05:57:14 crc kubenswrapper[4933]: E0122 05:57:14.277512 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5757e900b218b813cdd63de12317a7cff0730380dd4c56139a108acd13a145d6\": container with ID starting with 5757e900b218b813cdd63de12317a7cff0730380dd4c56139a108acd13a145d6 not found: ID does not exist" containerID="5757e900b218b813cdd63de12317a7cff0730380dd4c56139a108acd13a145d6" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.277558 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5757e900b218b813cdd63de12317a7cff0730380dd4c56139a108acd13a145d6"} err="failed to get container status \"5757e900b218b813cdd63de12317a7cff0730380dd4c56139a108acd13a145d6\": rpc error: code = NotFound desc = could not find container \"5757e900b218b813cdd63de12317a7cff0730380dd4c56139a108acd13a145d6\": container with ID starting with 5757e900b218b813cdd63de12317a7cff0730380dd4c56139a108acd13a145d6 not found: ID does not exist" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.277600 4933 scope.go:117] "RemoveContainer" containerID="2344ed0bcca60932e6bdea03fbc86ae7bd867dd841e5bb1bd3544dac4543329c" Jan 22 05:57:14 crc kubenswrapper[4933]: E0122 05:57:14.278250 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2344ed0bcca60932e6bdea03fbc86ae7bd867dd841e5bb1bd3544dac4543329c\": container with ID starting with 2344ed0bcca60932e6bdea03fbc86ae7bd867dd841e5bb1bd3544dac4543329c not found: ID does not exist" containerID="2344ed0bcca60932e6bdea03fbc86ae7bd867dd841e5bb1bd3544dac4543329c" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.278345 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2344ed0bcca60932e6bdea03fbc86ae7bd867dd841e5bb1bd3544dac4543329c"} err="failed to get container status \"2344ed0bcca60932e6bdea03fbc86ae7bd867dd841e5bb1bd3544dac4543329c\": rpc error: code = NotFound desc = could not find container \"2344ed0bcca60932e6bdea03fbc86ae7bd867dd841e5bb1bd3544dac4543329c\": container with ID starting with 2344ed0bcca60932e6bdea03fbc86ae7bd867dd841e5bb1bd3544dac4543329c not found: ID does not exist" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.278398 4933 scope.go:117] "RemoveContainer" containerID="e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed" Jan 22 05:57:14 crc kubenswrapper[4933]: E0122 05:57:14.278782 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed\": container with ID starting with e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed not found: ID does not exist" containerID="e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.278966 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed"} err="failed to get container status \"e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed\": rpc error: code = NotFound desc = could not find container \"e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed\": container with ID starting with 
e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed not found: ID does not exist" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.279138 4933 scope.go:117] "RemoveContainer" containerID="8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0" Jan 22 05:57:14 crc kubenswrapper[4933]: E0122 05:57:14.279548 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0\": container with ID starting with 8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0 not found: ID does not exist" containerID="8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.279574 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0"} err="failed to get container status \"8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0\": rpc error: code = NotFound desc = could not find container \"8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0\": container with ID starting with 8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0 not found: ID does not exist" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.279590 4933 scope.go:117] "RemoveContainer" containerID="c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15" Jan 22 05:57:14 crc kubenswrapper[4933]: E0122 05:57:14.280115 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15\": container with ID starting with c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15 not found: ID does not exist" containerID="c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.280169 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15"} err="failed to get container status \"c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15\": rpc error: code = NotFound desc = could not find container \"c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15\": container with ID starting with c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15 not found: ID does not exist" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.280190 4933 scope.go:117] "RemoveContainer" containerID="39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac" Jan 22 05:57:14 crc kubenswrapper[4933]: E0122 05:57:14.280539 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac\": container with ID starting with 39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac not found: ID does not exist" containerID="39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.280566 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac"} err="failed to get container status \"39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac\": rpc 
Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.280581 4933 scope.go:117] "RemoveContainer" containerID="a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68"
Jan 22 05:57:14 crc kubenswrapper[4933]: E0122 05:57:14.280937 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68\": container with ID starting with a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68 not found: ID does not exist" containerID="a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68"
Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.280957 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68"} err="failed to get container status \"a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68\": rpc error: code = NotFound desc = could not find container \"a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68\": container with ID starting with a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68 not found: ID does not exist"
Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.280969 4933 scope.go:117] "RemoveContainer" containerID="c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741"
Jan 22 05:57:14 crc kubenswrapper[4933]: E0122 05:57:14.281309 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741\": container with ID starting with c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741 not found: ID does not exist" containerID="c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741"
Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.281441 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741"} err="failed to get container status \"c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741\": rpc error: code = NotFound desc = could not find container \"c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741\": container with ID starting with c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741 not found: ID does not exist"
Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.281563 4933 scope.go:117] "RemoveContainer" containerID="a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1"
Jan 22 05:57:14 crc kubenswrapper[4933]: E0122 05:57:14.282006 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1\": container with ID starting with a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1 not found: ID does not exist" containerID="a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1"
Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.282027 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1"} err="failed to get container status \"a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1\": rpc error: code = NotFound desc = could not find container \"a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1\": container with ID starting with a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1 not found: ID does not exist"
Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.282040 4933 scope.go:117] "RemoveContainer" containerID="e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d"
Jan 22 05:57:14 crc kubenswrapper[4933]: E0122 05:57:14.282311 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\": container with ID starting with e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d not found: ID does not exist" containerID="e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d"
Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.282347 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d"} err="failed to get container status \"e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\": rpc error: code = NotFound desc = could not find container \"e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\": container with ID starting with e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d not found: ID does not exist"
Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.282391 4933 scope.go:117] "RemoveContainer" containerID="5757e900b218b813cdd63de12317a7cff0730380dd4c56139a108acd13a145d6"
Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.282776 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5757e900b218b813cdd63de12317a7cff0730380dd4c56139a108acd13a145d6"} err="failed to get container status \"5757e900b218b813cdd63de12317a7cff0730380dd4c56139a108acd13a145d6\": rpc error: code = NotFound desc = could not find container \"5757e900b218b813cdd63de12317a7cff0730380dd4c56139a108acd13a145d6\": container with ID starting with 5757e900b218b813cdd63de12317a7cff0730380dd4c56139a108acd13a145d6 not found: ID does not exist"
Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.282798 4933 scope.go:117] "RemoveContainer" containerID="2344ed0bcca60932e6bdea03fbc86ae7bd867dd841e5bb1bd3544dac4543329c"
Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.283087 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2344ed0bcca60932e6bdea03fbc86ae7bd867dd841e5bb1bd3544dac4543329c"} err="failed to get container status \"2344ed0bcca60932e6bdea03fbc86ae7bd867dd841e5bb1bd3544dac4543329c\": rpc error: code = NotFound desc = could not find container \"2344ed0bcca60932e6bdea03fbc86ae7bd867dd841e5bb1bd3544dac4543329c\": container with ID starting with 2344ed0bcca60932e6bdea03fbc86ae7bd867dd841e5bb1bd3544dac4543329c not found: ID does not exist"
Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.283116 4933 scope.go:117] "RemoveContainer" containerID="e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed"
Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.283501 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed"} err="failed to get container status \"e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed\": rpc error: code = NotFound desc = could not find container \"e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed\": container with ID starting with e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed not found: ID does not exist"
containerID={"Type":"cri-o","ID":"e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed"} err="failed to get container status \"e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed\": rpc error: code = NotFound desc = could not find container \"e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed\": container with ID starting with e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed not found: ID does not exist" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.283568 4933 scope.go:117] "RemoveContainer" containerID="8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.283841 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0"} err="failed to get container status \"8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0\": rpc error: code = NotFound desc = could not find container \"8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0\": container with ID starting with 8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0 not found: ID does not exist" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.283860 4933 scope.go:117] "RemoveContainer" containerID="c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.284769 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15"} err="failed to get container status \"c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15\": rpc error: code = NotFound desc = could not find container \"c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15\": container with ID starting with c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15 not found: ID does not exist" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.284822 4933 scope.go:117] "RemoveContainer" containerID="39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.285226 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac"} err="failed to get container status \"39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac\": rpc error: code = NotFound desc = could not find container \"39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac\": container with ID starting with 39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac not found: ID does not exist" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.285245 4933 scope.go:117] "RemoveContainer" containerID="a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.285524 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68"} err="failed to get container status \"a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68\": rpc error: code = NotFound desc = could not find container \"a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68\": container with ID starting with a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68 not found: ID does not exist" Jan 
22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.285544 4933 scope.go:117] "RemoveContainer" containerID="c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.285818 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741"} err="failed to get container status \"c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741\": rpc error: code = NotFound desc = could not find container \"c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741\": container with ID starting with c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741 not found: ID does not exist" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.285867 4933 scope.go:117] "RemoveContainer" containerID="a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.286289 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1"} err="failed to get container status \"a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1\": rpc error: code = NotFound desc = could not find container \"a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1\": container with ID starting with a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1 not found: ID does not exist" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.286457 4933 scope.go:117] "RemoveContainer" containerID="e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.286843 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d"} err="failed to get container status \"e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\": rpc error: code = NotFound desc = could not find container \"e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\": container with ID starting with e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d not found: ID does not exist" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.286886 4933 scope.go:117] "RemoveContainer" containerID="5757e900b218b813cdd63de12317a7cff0730380dd4c56139a108acd13a145d6" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.287194 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5757e900b218b813cdd63de12317a7cff0730380dd4c56139a108acd13a145d6"} err="failed to get container status \"5757e900b218b813cdd63de12317a7cff0730380dd4c56139a108acd13a145d6\": rpc error: code = NotFound desc = could not find container \"5757e900b218b813cdd63de12317a7cff0730380dd4c56139a108acd13a145d6\": container with ID starting with 5757e900b218b813cdd63de12317a7cff0730380dd4c56139a108acd13a145d6 not found: ID does not exist" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.287213 4933 scope.go:117] "RemoveContainer" containerID="2344ed0bcca60932e6bdea03fbc86ae7bd867dd841e5bb1bd3544dac4543329c" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.287597 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2344ed0bcca60932e6bdea03fbc86ae7bd867dd841e5bb1bd3544dac4543329c"} err="failed to get container status 
\"2344ed0bcca60932e6bdea03fbc86ae7bd867dd841e5bb1bd3544dac4543329c\": rpc error: code = NotFound desc = could not find container \"2344ed0bcca60932e6bdea03fbc86ae7bd867dd841e5bb1bd3544dac4543329c\": container with ID starting with 2344ed0bcca60932e6bdea03fbc86ae7bd867dd841e5bb1bd3544dac4543329c not found: ID does not exist" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.287614 4933 scope.go:117] "RemoveContainer" containerID="e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.287895 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed"} err="failed to get container status \"e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed\": rpc error: code = NotFound desc = could not find container \"e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed\": container with ID starting with e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed not found: ID does not exist" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.288046 4933 scope.go:117] "RemoveContainer" containerID="8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.288420 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0"} err="failed to get container status \"8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0\": rpc error: code = NotFound desc = could not find container \"8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0\": container with ID starting with 8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0 not found: ID does not exist" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.288439 4933 scope.go:117] "RemoveContainer" containerID="c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.288640 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15"} err="failed to get container status \"c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15\": rpc error: code = NotFound desc = could not find container \"c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15\": container with ID starting with c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15 not found: ID does not exist" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.288665 4933 scope.go:117] "RemoveContainer" containerID="39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.289059 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac"} err="failed to get container status \"39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac\": rpc error: code = NotFound desc = could not find container \"39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac\": container with ID starting with 39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac not found: ID does not exist" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.289088 4933 scope.go:117] "RemoveContainer" 
containerID="a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.289443 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68"} err="failed to get container status \"a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68\": rpc error: code = NotFound desc = could not find container \"a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68\": container with ID starting with a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68 not found: ID does not exist" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.289598 4933 scope.go:117] "RemoveContainer" containerID="c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.289982 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741"} err="failed to get container status \"c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741\": rpc error: code = NotFound desc = could not find container \"c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741\": container with ID starting with c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741 not found: ID does not exist" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.290010 4933 scope.go:117] "RemoveContainer" containerID="a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.290283 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1"} err="failed to get container status \"a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1\": rpc error: code = NotFound desc = could not find container \"a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1\": container with ID starting with a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1 not found: ID does not exist" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.290303 4933 scope.go:117] "RemoveContainer" containerID="e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.290722 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d"} err="failed to get container status \"e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\": rpc error: code = NotFound desc = could not find container \"e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\": container with ID starting with e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d not found: ID does not exist" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.290737 4933 scope.go:117] "RemoveContainer" containerID="5757e900b218b813cdd63de12317a7cff0730380dd4c56139a108acd13a145d6" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.290997 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5757e900b218b813cdd63de12317a7cff0730380dd4c56139a108acd13a145d6"} err="failed to get container status \"5757e900b218b813cdd63de12317a7cff0730380dd4c56139a108acd13a145d6\": rpc error: code = NotFound desc = could not find 
container \"5757e900b218b813cdd63de12317a7cff0730380dd4c56139a108acd13a145d6\": container with ID starting with 5757e900b218b813cdd63de12317a7cff0730380dd4c56139a108acd13a145d6 not found: ID does not exist" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.291009 4933 scope.go:117] "RemoveContainer" containerID="2344ed0bcca60932e6bdea03fbc86ae7bd867dd841e5bb1bd3544dac4543329c" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.291262 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2344ed0bcca60932e6bdea03fbc86ae7bd867dd841e5bb1bd3544dac4543329c"} err="failed to get container status \"2344ed0bcca60932e6bdea03fbc86ae7bd867dd841e5bb1bd3544dac4543329c\": rpc error: code = NotFound desc = could not find container \"2344ed0bcca60932e6bdea03fbc86ae7bd867dd841e5bb1bd3544dac4543329c\": container with ID starting with 2344ed0bcca60932e6bdea03fbc86ae7bd867dd841e5bb1bd3544dac4543329c not found: ID does not exist" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.291284 4933 scope.go:117] "RemoveContainer" containerID="e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.291689 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed"} err="failed to get container status \"e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed\": rpc error: code = NotFound desc = could not find container \"e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed\": container with ID starting with e77ae579b0652dd213d6a4366212d3e7495e31c5a63bff17ed1148addede53ed not found: ID does not exist" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.291710 4933 scope.go:117] "RemoveContainer" containerID="8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.292040 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0"} err="failed to get container status \"8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0\": rpc error: code = NotFound desc = could not find container \"8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0\": container with ID starting with 8644001192ec4cafae7762666ac5971e5b76fe8f1f85c3dcb2c991295a2ffbe0 not found: ID does not exist" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.292059 4933 scope.go:117] "RemoveContainer" containerID="c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.292406 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15"} err="failed to get container status \"c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15\": rpc error: code = NotFound desc = could not find container \"c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15\": container with ID starting with c2882813c5b8715381132914a729304769c0ac504291c114e89e320ed94ded15 not found: ID does not exist" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.292549 4933 scope.go:117] "RemoveContainer" containerID="39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.292960 4933 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac"} err="failed to get container status \"39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac\": rpc error: code = NotFound desc = could not find container \"39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac\": container with ID starting with 39e1e11efdbb18fe7cff9c551e7278499a99e625516ccde81eef3bb5d6f0f4ac not found: ID does not exist" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.292992 4933 scope.go:117] "RemoveContainer" containerID="a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.293289 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68"} err="failed to get container status \"a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68\": rpc error: code = NotFound desc = could not find container \"a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68\": container with ID starting with a3b19f0953b716765252103c2846e4765b0f8b6b69831a6b371e630da2912e68 not found: ID does not exist" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.293311 4933 scope.go:117] "RemoveContainer" containerID="c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.293541 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741"} err="failed to get container status \"c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741\": rpc error: code = NotFound desc = could not find container \"c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741\": container with ID starting with c8c23fb7641ed32f3e0913f9dc434ac34ae2222c3a039917e9c59281d3cd6741 not found: ID does not exist" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.293568 4933 scope.go:117] "RemoveContainer" containerID="a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.293819 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1"} err="failed to get container status \"a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1\": rpc error: code = NotFound desc = could not find container \"a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1\": container with ID starting with a32109b88e1d090c214c08c4905b545ede36cc0b986a3ce0568c3612fe5cf4e1 not found: ID does not exist" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.293840 4933 scope.go:117] "RemoveContainer" containerID="e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.294116 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d"} err="failed to get container status \"e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\": rpc error: code = NotFound desc = could not find container \"e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d\": container with ID starting with 
e08e1c397bdf8c98f04c006a838cb79ce19ff2ce1b4f5942b9b88b678b35aa5d not found: ID does not exist" Jan 22 05:57:14 crc kubenswrapper[4933]: I0122 05:57:14.515920 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6a721333-1932-4bb0-b384-c034492e59c4" path="/var/lib/kubelet/pods/6a721333-1932-4bb0-b384-c034492e59c4/volumes" Jan 22 05:57:15 crc kubenswrapper[4933]: I0122 05:57:15.002284 4933 generic.go:334] "Generic (PLEG): container finished" podID="bffcdf36-90a4-4daf-a9d4-17bbb65626f7" containerID="f2d965fab41ea14b2119a457e9a2ce8c39f252929cb4b4ee7525382fedf10a9f" exitCode=0 Jan 22 05:57:15 crc kubenswrapper[4933]: I0122 05:57:15.002421 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" event={"ID":"bffcdf36-90a4-4daf-a9d4-17bbb65626f7","Type":"ContainerDied","Data":"f2d965fab41ea14b2119a457e9a2ce8c39f252929cb4b4ee7525382fedf10a9f"} Jan 22 05:57:15 crc kubenswrapper[4933]: I0122 05:57:15.002459 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" event={"ID":"bffcdf36-90a4-4daf-a9d4-17bbb65626f7","Type":"ContainerStarted","Data":"0b908191b7088b70b7b28462ab8271d076d7ce1cb15e068c201f4276d9ba1614"} Jan 22 05:57:15 crc kubenswrapper[4933]: I0122 05:57:15.005711 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-jr6rw_f066dd84-0cd5-4e8c-8411-cf12cc83ea7d/kube-multus/2.log" Jan 22 05:57:16 crc kubenswrapper[4933]: I0122 05:57:16.015877 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" event={"ID":"bffcdf36-90a4-4daf-a9d4-17bbb65626f7","Type":"ContainerStarted","Data":"88457d156f7b4a95f7c66647437809e94113aa5e68dfe8d413801a15aec57535"} Jan 22 05:57:16 crc kubenswrapper[4933]: I0122 05:57:16.016385 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" event={"ID":"bffcdf36-90a4-4daf-a9d4-17bbb65626f7","Type":"ContainerStarted","Data":"add2372fd7c8fe9dce3bc917645f9e343fa6f6ca81a2b112ab6a504c0f1a43f1"} Jan 22 05:57:16 crc kubenswrapper[4933]: I0122 05:57:16.016400 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" event={"ID":"bffcdf36-90a4-4daf-a9d4-17bbb65626f7","Type":"ContainerStarted","Data":"93741ecbd2d925d70a81bd8edeb0b325b109e9a36c36ec36c9efbc6688a87872"} Jan 22 05:57:16 crc kubenswrapper[4933]: I0122 05:57:16.016412 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" event={"ID":"bffcdf36-90a4-4daf-a9d4-17bbb65626f7","Type":"ContainerStarted","Data":"6688346e7cf447b89568ee6960ffc8c1bd27182b6d0b32b1b8b9fd304efb9cb2"} Jan 22 05:57:16 crc kubenswrapper[4933]: I0122 05:57:16.016424 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" event={"ID":"bffcdf36-90a4-4daf-a9d4-17bbb65626f7","Type":"ContainerStarted","Data":"d7380c040ae97b5af12212a7ce9f0e1cd1c47f733b206eb3b49d42ca5560c596"} Jan 22 05:57:16 crc kubenswrapper[4933]: I0122 05:57:16.016435 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" event={"ID":"bffcdf36-90a4-4daf-a9d4-17bbb65626f7","Type":"ContainerStarted","Data":"f3d3bb27b68553f9ad74c86e73ff2be50de04ebd45ba01aea912265e816dbe33"} Jan 22 05:57:18 crc kubenswrapper[4933]: I0122 05:57:18.035978 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" 
event={"ID":"bffcdf36-90a4-4daf-a9d4-17bbb65626f7","Type":"ContainerStarted","Data":"e0b00718ef709996d5fd1faa9653ab68221e6b6594a8d296302317d257bf8876"} Jan 22 05:57:21 crc kubenswrapper[4933]: I0122 05:57:21.057421 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" event={"ID":"bffcdf36-90a4-4daf-a9d4-17bbb65626f7","Type":"ContainerStarted","Data":"93ccdbf7e82d25c95c382a318bad4b03ae7cc79ecd3baa721a093604157f61f9"} Jan 22 05:57:21 crc kubenswrapper[4933]: I0122 05:57:21.057728 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:21 crc kubenswrapper[4933]: I0122 05:57:21.057744 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:21 crc kubenswrapper[4933]: I0122 05:57:21.057756 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:21 crc kubenswrapper[4933]: I0122 05:57:21.084044 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:21 crc kubenswrapper[4933]: I0122 05:57:21.085298 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:21 crc kubenswrapper[4933]: I0122 05:57:21.092408 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" podStartSLOduration=8.092393606 podStartE2EDuration="8.092393606s" podCreationTimestamp="2026-01-22 05:57:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:57:21.090919678 +0000 UTC m=+688.928045051" watchObservedRunningTime="2026-01-22 05:57:21.092393606 +0000 UTC m=+688.929518969" Jan 22 05:57:21 crc kubenswrapper[4933]: I0122 05:57:21.247926 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-qktzs"] Jan 22 05:57:21 crc kubenswrapper[4933]: I0122 05:57:21.248554 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-qktzs" Jan 22 05:57:21 crc kubenswrapper[4933]: I0122 05:57:21.250064 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Jan 22 05:57:21 crc kubenswrapper[4933]: I0122 05:57:21.250200 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Jan 22 05:57:21 crc kubenswrapper[4933]: I0122 05:57:21.250433 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Jan 22 05:57:21 crc kubenswrapper[4933]: I0122 05:57:21.250515 4933 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-ctsm4" Jan 22 05:57:21 crc kubenswrapper[4933]: I0122 05:57:21.262442 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-qktzs"] Jan 22 05:57:21 crc kubenswrapper[4933]: I0122 05:57:21.429685 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/283136c3-ccc7-42bd-82a7-d079877057ba-node-mnt\") pod \"crc-storage-crc-qktzs\" (UID: \"283136c3-ccc7-42bd-82a7-d079877057ba\") " pod="crc-storage/crc-storage-crc-qktzs" Jan 22 05:57:21 crc kubenswrapper[4933]: I0122 05:57:21.429764 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/283136c3-ccc7-42bd-82a7-d079877057ba-crc-storage\") pod \"crc-storage-crc-qktzs\" (UID: \"283136c3-ccc7-42bd-82a7-d079877057ba\") " pod="crc-storage/crc-storage-crc-qktzs" Jan 22 05:57:21 crc kubenswrapper[4933]: I0122 05:57:21.429796 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v6vs2\" (UniqueName: \"kubernetes.io/projected/283136c3-ccc7-42bd-82a7-d079877057ba-kube-api-access-v6vs2\") pod \"crc-storage-crc-qktzs\" (UID: \"283136c3-ccc7-42bd-82a7-d079877057ba\") " pod="crc-storage/crc-storage-crc-qktzs" Jan 22 05:57:21 crc kubenswrapper[4933]: I0122 05:57:21.531841 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/283136c3-ccc7-42bd-82a7-d079877057ba-node-mnt\") pod \"crc-storage-crc-qktzs\" (UID: \"283136c3-ccc7-42bd-82a7-d079877057ba\") " pod="crc-storage/crc-storage-crc-qktzs" Jan 22 05:57:21 crc kubenswrapper[4933]: I0122 05:57:21.532008 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/283136c3-ccc7-42bd-82a7-d079877057ba-crc-storage\") pod \"crc-storage-crc-qktzs\" (UID: \"283136c3-ccc7-42bd-82a7-d079877057ba\") " pod="crc-storage/crc-storage-crc-qktzs" Jan 22 05:57:21 crc kubenswrapper[4933]: I0122 05:57:21.532139 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v6vs2\" (UniqueName: \"kubernetes.io/projected/283136c3-ccc7-42bd-82a7-d079877057ba-kube-api-access-v6vs2\") pod \"crc-storage-crc-qktzs\" (UID: \"283136c3-ccc7-42bd-82a7-d079877057ba\") " pod="crc-storage/crc-storage-crc-qktzs" Jan 22 05:57:21 crc kubenswrapper[4933]: I0122 05:57:21.532443 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/283136c3-ccc7-42bd-82a7-d079877057ba-node-mnt\") pod \"crc-storage-crc-qktzs\" (UID: \"283136c3-ccc7-42bd-82a7-d079877057ba\") " 
pod="crc-storage/crc-storage-crc-qktzs" Jan 22 05:57:21 crc kubenswrapper[4933]: I0122 05:57:21.533216 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/283136c3-ccc7-42bd-82a7-d079877057ba-crc-storage\") pod \"crc-storage-crc-qktzs\" (UID: \"283136c3-ccc7-42bd-82a7-d079877057ba\") " pod="crc-storage/crc-storage-crc-qktzs" Jan 22 05:57:21 crc kubenswrapper[4933]: I0122 05:57:21.556376 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v6vs2\" (UniqueName: \"kubernetes.io/projected/283136c3-ccc7-42bd-82a7-d079877057ba-kube-api-access-v6vs2\") pod \"crc-storage-crc-qktzs\" (UID: \"283136c3-ccc7-42bd-82a7-d079877057ba\") " pod="crc-storage/crc-storage-crc-qktzs" Jan 22 05:57:21 crc kubenswrapper[4933]: I0122 05:57:21.561357 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-qktzs" Jan 22 05:57:21 crc kubenswrapper[4933]: E0122 05:57:21.600096 4933 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-qktzs_crc-storage_283136c3-ccc7-42bd-82a7-d079877057ba_0(06ff9db9246378a0bdfc979027d3bfe3ab0a93573bbf38f9156c3c07236d98cc): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 22 05:57:21 crc kubenswrapper[4933]: E0122 05:57:21.600173 4933 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-qktzs_crc-storage_283136c3-ccc7-42bd-82a7-d079877057ba_0(06ff9db9246378a0bdfc979027d3bfe3ab0a93573bbf38f9156c3c07236d98cc): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-qktzs" Jan 22 05:57:21 crc kubenswrapper[4933]: E0122 05:57:21.600195 4933 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-qktzs_crc-storage_283136c3-ccc7-42bd-82a7-d079877057ba_0(06ff9db9246378a0bdfc979027d3bfe3ab0a93573bbf38f9156c3c07236d98cc): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-qktzs" Jan 22 05:57:21 crc kubenswrapper[4933]: E0122 05:57:21.600252 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"crc-storage-crc-qktzs_crc-storage(283136c3-ccc7-42bd-82a7-d079877057ba)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"crc-storage-crc-qktzs_crc-storage(283136c3-ccc7-42bd-82a7-d079877057ba)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-qktzs_crc-storage_283136c3-ccc7-42bd-82a7-d079877057ba_0(06ff9db9246378a0bdfc979027d3bfe3ab0a93573bbf38f9156c3c07236d98cc): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="crc-storage/crc-storage-crc-qktzs" podUID="283136c3-ccc7-42bd-82a7-d079877057ba" Jan 22 05:57:22 crc kubenswrapper[4933]: I0122 05:57:22.063138 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-qktzs" Jan 22 05:57:22 crc kubenswrapper[4933]: I0122 05:57:22.064220 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-qktzs" Jan 22 05:57:22 crc kubenswrapper[4933]: E0122 05:57:22.092342 4933 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-qktzs_crc-storage_283136c3-ccc7-42bd-82a7-d079877057ba_0(b866a752cec954814de0a978a3740976f5eadc05ee0071588d68beeb693c28a9): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 22 05:57:22 crc kubenswrapper[4933]: E0122 05:57:22.092451 4933 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-qktzs_crc-storage_283136c3-ccc7-42bd-82a7-d079877057ba_0(b866a752cec954814de0a978a3740976f5eadc05ee0071588d68beeb693c28a9): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-qktzs" Jan 22 05:57:22 crc kubenswrapper[4933]: E0122 05:57:22.092499 4933 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-qktzs_crc-storage_283136c3-ccc7-42bd-82a7-d079877057ba_0(b866a752cec954814de0a978a3740976f5eadc05ee0071588d68beeb693c28a9): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-qktzs" Jan 22 05:57:22 crc kubenswrapper[4933]: E0122 05:57:22.092592 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"crc-storage-crc-qktzs_crc-storage(283136c3-ccc7-42bd-82a7-d079877057ba)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"crc-storage-crc-qktzs_crc-storage(283136c3-ccc7-42bd-82a7-d079877057ba)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-qktzs_crc-storage_283136c3-ccc7-42bd-82a7-d079877057ba_0(b866a752cec954814de0a978a3740976f5eadc05ee0071588d68beeb693c28a9): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="crc-storage/crc-storage-crc-qktzs" podUID="283136c3-ccc7-42bd-82a7-d079877057ba" Jan 22 05:57:27 crc kubenswrapper[4933]: I0122 05:57:27.491415 4933 scope.go:117] "RemoveContainer" containerID="554f6ede0a925394463d392b48700a7c5dcd211ebffe4e3dc51046123872907f" Jan 22 05:57:27 crc kubenswrapper[4933]: E0122 05:57:27.492507 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-jr6rw_openshift-multus(f066dd84-0cd5-4e8c-8411-cf12cc83ea7d)\"" pod="openshift-multus/multus-jr6rw" podUID="f066dd84-0cd5-4e8c-8411-cf12cc83ea7d" Jan 22 05:57:33 crc kubenswrapper[4933]: I0122 05:57:33.489788 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-qktzs" Jan 22 05:57:33 crc kubenswrapper[4933]: I0122 05:57:33.491317 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-qktzs" Jan 22 05:57:33 crc kubenswrapper[4933]: E0122 05:57:33.532729 4933 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-qktzs_crc-storage_283136c3-ccc7-42bd-82a7-d079877057ba_0(30f4c5703fe0830c0fd10f9cc0fdaea735de9fb4731ac0b79800733bf9c2854b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 22 05:57:33 crc kubenswrapper[4933]: E0122 05:57:33.532828 4933 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-qktzs_crc-storage_283136c3-ccc7-42bd-82a7-d079877057ba_0(30f4c5703fe0830c0fd10f9cc0fdaea735de9fb4731ac0b79800733bf9c2854b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-qktzs" Jan 22 05:57:33 crc kubenswrapper[4933]: E0122 05:57:33.532878 4933 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-qktzs_crc-storage_283136c3-ccc7-42bd-82a7-d079877057ba_0(30f4c5703fe0830c0fd10f9cc0fdaea735de9fb4731ac0b79800733bf9c2854b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-qktzs" Jan 22 05:57:33 crc kubenswrapper[4933]: E0122 05:57:33.532967 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"crc-storage-crc-qktzs_crc-storage(283136c3-ccc7-42bd-82a7-d079877057ba)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"crc-storage-crc-qktzs_crc-storage(283136c3-ccc7-42bd-82a7-d079877057ba)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-qktzs_crc-storage_283136c3-ccc7-42bd-82a7-d079877057ba_0(30f4c5703fe0830c0fd10f9cc0fdaea735de9fb4731ac0b79800733bf9c2854b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="crc-storage/crc-storage-crc-qktzs" podUID="283136c3-ccc7-42bd-82a7-d079877057ba" Jan 22 05:57:38 crc kubenswrapper[4933]: I0122 05:57:38.490927 4933 scope.go:117] "RemoveContainer" containerID="554f6ede0a925394463d392b48700a7c5dcd211ebffe4e3dc51046123872907f" Jan 22 05:57:39 crc kubenswrapper[4933]: I0122 05:57:39.174284 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-jr6rw_f066dd84-0cd5-4e8c-8411-cf12cc83ea7d/kube-multus/2.log" Jan 22 05:57:39 crc kubenswrapper[4933]: I0122 05:57:39.174812 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-jr6rw" event={"ID":"f066dd84-0cd5-4e8c-8411-cf12cc83ea7d","Type":"ContainerStarted","Data":"0b8810c10813dd042c105f2882d39351242a38d7f5d9437b769e77988f1c7498"} Jan 22 05:57:43 crc kubenswrapper[4933]: I0122 05:57:43.990611 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-sptqh" Jan 22 05:57:47 crc kubenswrapper[4933]: I0122 05:57:47.489877 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-qktzs" Jan 22 05:57:47 crc kubenswrapper[4933]: I0122 05:57:47.490714 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-qktzs" Jan 22 05:57:47 crc kubenswrapper[4933]: I0122 05:57:47.728164 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-qktzs"] Jan 22 05:57:47 crc kubenswrapper[4933]: I0122 05:57:47.739000 4933 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 05:57:48 crc kubenswrapper[4933]: I0122 05:57:48.237658 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-qktzs" event={"ID":"283136c3-ccc7-42bd-82a7-d079877057ba","Type":"ContainerStarted","Data":"2393521ba7c524e04a6bcca356e34e1ccd0aeb718be79a3d17c89aa1ace7ec43"} Jan 22 05:57:51 crc kubenswrapper[4933]: I0122 05:57:51.260983 4933 generic.go:334] "Generic (PLEG): container finished" podID="283136c3-ccc7-42bd-82a7-d079877057ba" containerID="63c807d0dd0c3714a46805051cc6edfeb6def19be4f76aedf63d38371adbb050" exitCode=0 Jan 22 05:57:51 crc kubenswrapper[4933]: I0122 05:57:51.261070 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-qktzs" event={"ID":"283136c3-ccc7-42bd-82a7-d079877057ba","Type":"ContainerDied","Data":"63c807d0dd0c3714a46805051cc6edfeb6def19be4f76aedf63d38371adbb050"} Jan 22 05:57:52 crc kubenswrapper[4933]: I0122 05:57:52.529161 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-qktzs" Jan 22 05:57:52 crc kubenswrapper[4933]: I0122 05:57:52.602818 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/283136c3-ccc7-42bd-82a7-d079877057ba-node-mnt\") pod \"283136c3-ccc7-42bd-82a7-d079877057ba\" (UID: \"283136c3-ccc7-42bd-82a7-d079877057ba\") " Jan 22 05:57:52 crc kubenswrapper[4933]: I0122 05:57:52.602963 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/283136c3-ccc7-42bd-82a7-d079877057ba-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "283136c3-ccc7-42bd-82a7-d079877057ba" (UID: "283136c3-ccc7-42bd-82a7-d079877057ba"). InnerVolumeSpecName "node-mnt". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:57:52 crc kubenswrapper[4933]: I0122 05:57:52.603026 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/283136c3-ccc7-42bd-82a7-d079877057ba-crc-storage\") pod \"283136c3-ccc7-42bd-82a7-d079877057ba\" (UID: \"283136c3-ccc7-42bd-82a7-d079877057ba\") " Jan 22 05:57:52 crc kubenswrapper[4933]: I0122 05:57:52.603173 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v6vs2\" (UniqueName: \"kubernetes.io/projected/283136c3-ccc7-42bd-82a7-d079877057ba-kube-api-access-v6vs2\") pod \"283136c3-ccc7-42bd-82a7-d079877057ba\" (UID: \"283136c3-ccc7-42bd-82a7-d079877057ba\") " Jan 22 05:57:52 crc kubenswrapper[4933]: I0122 05:57:52.604302 4933 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/283136c3-ccc7-42bd-82a7-d079877057ba-node-mnt\") on node \"crc\" DevicePath \"\"" Jan 22 05:57:52 crc kubenswrapper[4933]: I0122 05:57:52.611393 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/283136c3-ccc7-42bd-82a7-d079877057ba-kube-api-access-v6vs2" (OuterVolumeSpecName: "kube-api-access-v6vs2") pod "283136c3-ccc7-42bd-82a7-d079877057ba" (UID: "283136c3-ccc7-42bd-82a7-d079877057ba"). 
InnerVolumeSpecName "kube-api-access-v6vs2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:57:52 crc kubenswrapper[4933]: I0122 05:57:52.617644 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/283136c3-ccc7-42bd-82a7-d079877057ba-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "283136c3-ccc7-42bd-82a7-d079877057ba" (UID: "283136c3-ccc7-42bd-82a7-d079877057ba"). InnerVolumeSpecName "crc-storage". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:57:52 crc kubenswrapper[4933]: I0122 05:57:52.705216 4933 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/283136c3-ccc7-42bd-82a7-d079877057ba-crc-storage\") on node \"crc\" DevicePath \"\"" Jan 22 05:57:52 crc kubenswrapper[4933]: I0122 05:57:52.705506 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v6vs2\" (UniqueName: \"kubernetes.io/projected/283136c3-ccc7-42bd-82a7-d079877057ba-kube-api-access-v6vs2\") on node \"crc\" DevicePath \"\"" Jan 22 05:57:53 crc kubenswrapper[4933]: I0122 05:57:53.272987 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-qktzs" event={"ID":"283136c3-ccc7-42bd-82a7-d079877057ba","Type":"ContainerDied","Data":"2393521ba7c524e04a6bcca356e34e1ccd0aeb718be79a3d17c89aa1ace7ec43"} Jan 22 05:57:53 crc kubenswrapper[4933]: I0122 05:57:53.273028 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2393521ba7c524e04a6bcca356e34e1ccd0aeb718be79a3d17c89aa1ace7ec43" Jan 22 05:57:53 crc kubenswrapper[4933]: I0122 05:57:53.273031 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-qktzs" Jan 22 05:58:00 crc kubenswrapper[4933]: I0122 05:58:00.824838 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138bnpj"] Jan 22 05:58:00 crc kubenswrapper[4933]: E0122 05:58:00.825600 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="283136c3-ccc7-42bd-82a7-d079877057ba" containerName="storage" Jan 22 05:58:00 crc kubenswrapper[4933]: I0122 05:58:00.825615 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="283136c3-ccc7-42bd-82a7-d079877057ba" containerName="storage" Jan 22 05:58:00 crc kubenswrapper[4933]: I0122 05:58:00.825733 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="283136c3-ccc7-42bd-82a7-d079877057ba" containerName="storage" Jan 22 05:58:00 crc kubenswrapper[4933]: I0122 05:58:00.826520 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138bnpj" Jan 22 05:58:00 crc kubenswrapper[4933]: I0122 05:58:00.828301 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 22 05:58:00 crc kubenswrapper[4933]: I0122 05:58:00.839367 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138bnpj"] Jan 22 05:58:00 crc kubenswrapper[4933]: I0122 05:58:00.908545 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w2xvz\" (UniqueName: \"kubernetes.io/projected/5c82fbe7-be8e-4ffd-8c03-f0270785d0fa-kube-api-access-w2xvz\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138bnpj\" (UID: \"5c82fbe7-be8e-4ffd-8c03-f0270785d0fa\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138bnpj" Jan 22 05:58:00 crc kubenswrapper[4933]: I0122 05:58:00.908586 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5c82fbe7-be8e-4ffd-8c03-f0270785d0fa-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138bnpj\" (UID: \"5c82fbe7-be8e-4ffd-8c03-f0270785d0fa\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138bnpj" Jan 22 05:58:00 crc kubenswrapper[4933]: I0122 05:58:00.908605 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5c82fbe7-be8e-4ffd-8c03-f0270785d0fa-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138bnpj\" (UID: \"5c82fbe7-be8e-4ffd-8c03-f0270785d0fa\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138bnpj" Jan 22 05:58:01 crc kubenswrapper[4933]: I0122 05:58:01.009493 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w2xvz\" (UniqueName: \"kubernetes.io/projected/5c82fbe7-be8e-4ffd-8c03-f0270785d0fa-kube-api-access-w2xvz\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138bnpj\" (UID: \"5c82fbe7-be8e-4ffd-8c03-f0270785d0fa\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138bnpj" Jan 22 05:58:01 crc kubenswrapper[4933]: I0122 05:58:01.009538 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5c82fbe7-be8e-4ffd-8c03-f0270785d0fa-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138bnpj\" (UID: \"5c82fbe7-be8e-4ffd-8c03-f0270785d0fa\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138bnpj" Jan 22 05:58:01 crc kubenswrapper[4933]: I0122 05:58:01.009561 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5c82fbe7-be8e-4ffd-8c03-f0270785d0fa-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138bnpj\" (UID: \"5c82fbe7-be8e-4ffd-8c03-f0270785d0fa\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138bnpj" Jan 22 05:58:01 crc kubenswrapper[4933]: I0122 05:58:01.010027 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/5c82fbe7-be8e-4ffd-8c03-f0270785d0fa-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138bnpj\" (UID: \"5c82fbe7-be8e-4ffd-8c03-f0270785d0fa\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138bnpj" Jan 22 05:58:01 crc kubenswrapper[4933]: I0122 05:58:01.010333 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5c82fbe7-be8e-4ffd-8c03-f0270785d0fa-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138bnpj\" (UID: \"5c82fbe7-be8e-4ffd-8c03-f0270785d0fa\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138bnpj" Jan 22 05:58:01 crc kubenswrapper[4933]: I0122 05:58:01.033014 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w2xvz\" (UniqueName: \"kubernetes.io/projected/5c82fbe7-be8e-4ffd-8c03-f0270785d0fa-kube-api-access-w2xvz\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138bnpj\" (UID: \"5c82fbe7-be8e-4ffd-8c03-f0270785d0fa\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138bnpj" Jan 22 05:58:01 crc kubenswrapper[4933]: I0122 05:58:01.144056 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138bnpj" Jan 22 05:58:01 crc kubenswrapper[4933]: I0122 05:58:01.421774 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138bnpj"] Jan 22 05:58:02 crc kubenswrapper[4933]: I0122 05:58:02.333243 4933 generic.go:334] "Generic (PLEG): container finished" podID="5c82fbe7-be8e-4ffd-8c03-f0270785d0fa" containerID="03ad7ea7748822364efa9ae0721b9cafb21328eb6cf6d1517046de3ed70edaec" exitCode=0 Jan 22 05:58:02 crc kubenswrapper[4933]: I0122 05:58:02.333453 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138bnpj" event={"ID":"5c82fbe7-be8e-4ffd-8c03-f0270785d0fa","Type":"ContainerDied","Data":"03ad7ea7748822364efa9ae0721b9cafb21328eb6cf6d1517046de3ed70edaec"} Jan 22 05:58:02 crc kubenswrapper[4933]: I0122 05:58:02.333619 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138bnpj" event={"ID":"5c82fbe7-be8e-4ffd-8c03-f0270785d0fa","Type":"ContainerStarted","Data":"08b0c2ff8853321d0c48e67afa209c102c2fe7966c270b2f418e65ea08dbcccb"} Jan 22 05:58:04 crc kubenswrapper[4933]: I0122 05:58:04.349826 4933 generic.go:334] "Generic (PLEG): container finished" podID="5c82fbe7-be8e-4ffd-8c03-f0270785d0fa" containerID="e59c31a3adeae2a19d67f6574ce447faa297bef563ba13053b401ed313dad80f" exitCode=0 Jan 22 05:58:04 crc kubenswrapper[4933]: I0122 05:58:04.349903 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138bnpj" event={"ID":"5c82fbe7-be8e-4ffd-8c03-f0270785d0fa","Type":"ContainerDied","Data":"e59c31a3adeae2a19d67f6574ce447faa297bef563ba13053b401ed313dad80f"} Jan 22 05:58:05 crc kubenswrapper[4933]: I0122 05:58:05.361619 4933 generic.go:334] "Generic (PLEG): container finished" podID="5c82fbe7-be8e-4ffd-8c03-f0270785d0fa" containerID="a90f52d620b6bf88176b4aeffb3974ba82c7145a5dccae16a899eb2950fc926b" exitCode=0 Jan 22 05:58:05 crc kubenswrapper[4933]: I0122 
05:58:05.361685 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138bnpj" event={"ID":"5c82fbe7-be8e-4ffd-8c03-f0270785d0fa","Type":"ContainerDied","Data":"a90f52d620b6bf88176b4aeffb3974ba82c7145a5dccae16a899eb2950fc926b"} Jan 22 05:58:06 crc kubenswrapper[4933]: I0122 05:58:06.653482 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138bnpj" Jan 22 05:58:06 crc kubenswrapper[4933]: I0122 05:58:06.790175 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w2xvz\" (UniqueName: \"kubernetes.io/projected/5c82fbe7-be8e-4ffd-8c03-f0270785d0fa-kube-api-access-w2xvz\") pod \"5c82fbe7-be8e-4ffd-8c03-f0270785d0fa\" (UID: \"5c82fbe7-be8e-4ffd-8c03-f0270785d0fa\") " Jan 22 05:58:06 crc kubenswrapper[4933]: I0122 05:58:06.790322 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5c82fbe7-be8e-4ffd-8c03-f0270785d0fa-bundle\") pod \"5c82fbe7-be8e-4ffd-8c03-f0270785d0fa\" (UID: \"5c82fbe7-be8e-4ffd-8c03-f0270785d0fa\") " Jan 22 05:58:06 crc kubenswrapper[4933]: I0122 05:58:06.790362 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5c82fbe7-be8e-4ffd-8c03-f0270785d0fa-util\") pod \"5c82fbe7-be8e-4ffd-8c03-f0270785d0fa\" (UID: \"5c82fbe7-be8e-4ffd-8c03-f0270785d0fa\") " Jan 22 05:58:06 crc kubenswrapper[4933]: I0122 05:58:06.791668 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5c82fbe7-be8e-4ffd-8c03-f0270785d0fa-bundle" (OuterVolumeSpecName: "bundle") pod "5c82fbe7-be8e-4ffd-8c03-f0270785d0fa" (UID: "5c82fbe7-be8e-4ffd-8c03-f0270785d0fa"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:58:06 crc kubenswrapper[4933]: I0122 05:58:06.799480 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5c82fbe7-be8e-4ffd-8c03-f0270785d0fa-kube-api-access-w2xvz" (OuterVolumeSpecName: "kube-api-access-w2xvz") pod "5c82fbe7-be8e-4ffd-8c03-f0270785d0fa" (UID: "5c82fbe7-be8e-4ffd-8c03-f0270785d0fa"). InnerVolumeSpecName "kube-api-access-w2xvz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:58:06 crc kubenswrapper[4933]: I0122 05:58:06.819420 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5c82fbe7-be8e-4ffd-8c03-f0270785d0fa-util" (OuterVolumeSpecName: "util") pod "5c82fbe7-be8e-4ffd-8c03-f0270785d0fa" (UID: "5c82fbe7-be8e-4ffd-8c03-f0270785d0fa"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:58:06 crc kubenswrapper[4933]: I0122 05:58:06.891635 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w2xvz\" (UniqueName: \"kubernetes.io/projected/5c82fbe7-be8e-4ffd-8c03-f0270785d0fa-kube-api-access-w2xvz\") on node \"crc\" DevicePath \"\"" Jan 22 05:58:06 crc kubenswrapper[4933]: I0122 05:58:06.891697 4933 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5c82fbe7-be8e-4ffd-8c03-f0270785d0fa-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:58:06 crc kubenswrapper[4933]: I0122 05:58:06.891715 4933 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5c82fbe7-be8e-4ffd-8c03-f0270785d0fa-util\") on node \"crc\" DevicePath \"\"" Jan 22 05:58:07 crc kubenswrapper[4933]: I0122 05:58:07.379221 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138bnpj" event={"ID":"5c82fbe7-be8e-4ffd-8c03-f0270785d0fa","Type":"ContainerDied","Data":"08b0c2ff8853321d0c48e67afa209c102c2fe7966c270b2f418e65ea08dbcccb"} Jan 22 05:58:07 crc kubenswrapper[4933]: I0122 05:58:07.379282 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="08b0c2ff8853321d0c48e67afa209c102c2fe7966c270b2f418e65ea08dbcccb" Jan 22 05:58:07 crc kubenswrapper[4933]: I0122 05:58:07.379395 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138bnpj" Jan 22 05:58:08 crc kubenswrapper[4933]: I0122 05:58:08.542592 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-49j2k"] Jan 22 05:58:08 crc kubenswrapper[4933]: E0122 05:58:08.543149 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c82fbe7-be8e-4ffd-8c03-f0270785d0fa" containerName="util" Jan 22 05:58:08 crc kubenswrapper[4933]: I0122 05:58:08.543164 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c82fbe7-be8e-4ffd-8c03-f0270785d0fa" containerName="util" Jan 22 05:58:08 crc kubenswrapper[4933]: E0122 05:58:08.543175 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c82fbe7-be8e-4ffd-8c03-f0270785d0fa" containerName="extract" Jan 22 05:58:08 crc kubenswrapper[4933]: I0122 05:58:08.543183 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c82fbe7-be8e-4ffd-8c03-f0270785d0fa" containerName="extract" Jan 22 05:58:08 crc kubenswrapper[4933]: E0122 05:58:08.543199 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c82fbe7-be8e-4ffd-8c03-f0270785d0fa" containerName="pull" Jan 22 05:58:08 crc kubenswrapper[4933]: I0122 05:58:08.543208 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c82fbe7-be8e-4ffd-8c03-f0270785d0fa" containerName="pull" Jan 22 05:58:08 crc kubenswrapper[4933]: I0122 05:58:08.543339 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c82fbe7-be8e-4ffd-8c03-f0270785d0fa" containerName="extract" Jan 22 05:58:08 crc kubenswrapper[4933]: I0122 05:58:08.543786 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-646758c888-49j2k" Jan 22 05:58:08 crc kubenswrapper[4933]: I0122 05:58:08.545656 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-tshmp" Jan 22 05:58:08 crc kubenswrapper[4933]: I0122 05:58:08.546068 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Jan 22 05:58:08 crc kubenswrapper[4933]: I0122 05:58:08.555601 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Jan 22 05:58:08 crc kubenswrapper[4933]: I0122 05:58:08.556188 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-49j2k"] Jan 22 05:58:08 crc kubenswrapper[4933]: I0122 05:58:08.615379 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gx48x\" (UniqueName: \"kubernetes.io/projected/85167be7-eaf1-4931-ac95-f80007d35c42-kube-api-access-gx48x\") pod \"nmstate-operator-646758c888-49j2k\" (UID: \"85167be7-eaf1-4931-ac95-f80007d35c42\") " pod="openshift-nmstate/nmstate-operator-646758c888-49j2k" Jan 22 05:58:08 crc kubenswrapper[4933]: I0122 05:58:08.717029 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gx48x\" (UniqueName: \"kubernetes.io/projected/85167be7-eaf1-4931-ac95-f80007d35c42-kube-api-access-gx48x\") pod \"nmstate-operator-646758c888-49j2k\" (UID: \"85167be7-eaf1-4931-ac95-f80007d35c42\") " pod="openshift-nmstate/nmstate-operator-646758c888-49j2k" Jan 22 05:58:08 crc kubenswrapper[4933]: I0122 05:58:08.732276 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gx48x\" (UniqueName: \"kubernetes.io/projected/85167be7-eaf1-4931-ac95-f80007d35c42-kube-api-access-gx48x\") pod \"nmstate-operator-646758c888-49j2k\" (UID: \"85167be7-eaf1-4931-ac95-f80007d35c42\") " pod="openshift-nmstate/nmstate-operator-646758c888-49j2k" Jan 22 05:58:08 crc kubenswrapper[4933]: I0122 05:58:08.878940 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-646758c888-49j2k" Jan 22 05:58:09 crc kubenswrapper[4933]: I0122 05:58:09.119950 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-49j2k"] Jan 22 05:58:09 crc kubenswrapper[4933]: I0122 05:58:09.390912 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-646758c888-49j2k" event={"ID":"85167be7-eaf1-4931-ac95-f80007d35c42","Type":"ContainerStarted","Data":"f9beebdca62d024b35a40084a91816e9fdf51605bc882f628ce2d3d20854e8a8"} Jan 22 05:58:12 crc kubenswrapper[4933]: I0122 05:58:12.408285 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-646758c888-49j2k" event={"ID":"85167be7-eaf1-4931-ac95-f80007d35c42","Type":"ContainerStarted","Data":"ad213ffbe2a62d2a7f9aa4fcd66601c90c5fab6fa2de3efd7afa6bbe69f565c4"} Jan 22 05:58:12 crc kubenswrapper[4933]: I0122 05:58:12.426688 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-646758c888-49j2k" podStartSLOduration=2.287872601 podStartE2EDuration="4.426672371s" podCreationTimestamp="2026-01-22 05:58:08 +0000 UTC" firstStartedPulling="2026-01-22 05:58:09.132612415 +0000 UTC m=+736.969737758" lastFinishedPulling="2026-01-22 05:58:11.271412175 +0000 UTC m=+739.108537528" observedRunningTime="2026-01-22 05:58:12.423512861 +0000 UTC m=+740.260638244" watchObservedRunningTime="2026-01-22 05:58:12.426672371 +0000 UTC m=+740.263797724" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.426110 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-88t8r"] Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.434567 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-54757c584b-88t8r" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.434657 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-dtghh"] Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.436137 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-dtghh" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.437607 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-q6frx" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.448932 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-z4cfk"] Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.453634 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-dtghh"] Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.453811 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-z4cfk" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.454361 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.481903 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/40102dae-8b84-4138-bd5d-4a3b7c1b3492-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-dtghh\" (UID: \"40102dae-8b84-4138-bd5d-4a3b7c1b3492\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-dtghh" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.481976 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fbr5z\" (UniqueName: \"kubernetes.io/projected/7f8f46df-3251-4fc5-87fb-10b877fe5878-kube-api-access-fbr5z\") pod \"nmstate-metrics-54757c584b-88t8r\" (UID: \"7f8f46df-3251-4fc5-87fb-10b877fe5878\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-88t8r" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.482013 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zctjf\" (UniqueName: \"kubernetes.io/projected/40102dae-8b84-4138-bd5d-4a3b7c1b3492-kube-api-access-zctjf\") pod \"nmstate-webhook-8474b5b9d8-dtghh\" (UID: \"40102dae-8b84-4138-bd5d-4a3b7c1b3492\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-dtghh" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.482606 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-88t8r"] Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.547790 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-9b5pb"] Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.548414 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-9b5pb" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.550800 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-kqv8j" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.551037 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.551221 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.562213 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-9b5pb"] Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.583592 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/95abb851-2f05-43e0-8c35-92027baf4a2c-ovs-socket\") pod \"nmstate-handler-z4cfk\" (UID: \"95abb851-2f05-43e0-8c35-92027baf4a2c\") " pod="openshift-nmstate/nmstate-handler-z4cfk" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.583638 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fbr5z\" (UniqueName: \"kubernetes.io/projected/7f8f46df-3251-4fc5-87fb-10b877fe5878-kube-api-access-fbr5z\") pod \"nmstate-metrics-54757c584b-88t8r\" (UID: \"7f8f46df-3251-4fc5-87fb-10b877fe5878\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-88t8r" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.583663 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zctjf\" (UniqueName: \"kubernetes.io/projected/40102dae-8b84-4138-bd5d-4a3b7c1b3492-kube-api-access-zctjf\") pod \"nmstate-webhook-8474b5b9d8-dtghh\" (UID: \"40102dae-8b84-4138-bd5d-4a3b7c1b3492\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-dtghh" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.583695 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dxd2n\" (UniqueName: \"kubernetes.io/projected/95abb851-2f05-43e0-8c35-92027baf4a2c-kube-api-access-dxd2n\") pod \"nmstate-handler-z4cfk\" (UID: \"95abb851-2f05-43e0-8c35-92027baf4a2c\") " pod="openshift-nmstate/nmstate-handler-z4cfk" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.583715 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/95abb851-2f05-43e0-8c35-92027baf4a2c-dbus-socket\") pod \"nmstate-handler-z4cfk\" (UID: \"95abb851-2f05-43e0-8c35-92027baf4a2c\") " pod="openshift-nmstate/nmstate-handler-z4cfk" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.583744 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/95abb851-2f05-43e0-8c35-92027baf4a2c-nmstate-lock\") pod \"nmstate-handler-z4cfk\" (UID: \"95abb851-2f05-43e0-8c35-92027baf4a2c\") " pod="openshift-nmstate/nmstate-handler-z4cfk" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.583783 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/40102dae-8b84-4138-bd5d-4a3b7c1b3492-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-dtghh\" (UID: 
\"40102dae-8b84-4138-bd5d-4a3b7c1b3492\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-dtghh" Jan 22 05:58:13 crc kubenswrapper[4933]: E0122 05:58:13.583880 4933 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Jan 22 05:58:13 crc kubenswrapper[4933]: E0122 05:58:13.583926 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/40102dae-8b84-4138-bd5d-4a3b7c1b3492-tls-key-pair podName:40102dae-8b84-4138-bd5d-4a3b7c1b3492 nodeName:}" failed. No retries permitted until 2026-01-22 05:58:14.083910756 +0000 UTC m=+741.921036109 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/40102dae-8b84-4138-bd5d-4a3b7c1b3492-tls-key-pair") pod "nmstate-webhook-8474b5b9d8-dtghh" (UID: "40102dae-8b84-4138-bd5d-4a3b7c1b3492") : secret "openshift-nmstate-webhook" not found Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.609106 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fbr5z\" (UniqueName: \"kubernetes.io/projected/7f8f46df-3251-4fc5-87fb-10b877fe5878-kube-api-access-fbr5z\") pod \"nmstate-metrics-54757c584b-88t8r\" (UID: \"7f8f46df-3251-4fc5-87fb-10b877fe5878\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-88t8r" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.609120 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zctjf\" (UniqueName: \"kubernetes.io/projected/40102dae-8b84-4138-bd5d-4a3b7c1b3492-kube-api-access-zctjf\") pod \"nmstate-webhook-8474b5b9d8-dtghh\" (UID: \"40102dae-8b84-4138-bd5d-4a3b7c1b3492\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-dtghh" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.685234 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q9zkn\" (UniqueName: \"kubernetes.io/projected/bad89ee9-f9e4-4a74-9f79-5a9ef46b9ec0-kube-api-access-q9zkn\") pod \"nmstate-console-plugin-7754f76f8b-9b5pb\" (UID: \"bad89ee9-f9e4-4a74-9f79-5a9ef46b9ec0\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-9b5pb" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.685302 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/95abb851-2f05-43e0-8c35-92027baf4a2c-ovs-socket\") pod \"nmstate-handler-z4cfk\" (UID: \"95abb851-2f05-43e0-8c35-92027baf4a2c\") " pod="openshift-nmstate/nmstate-handler-z4cfk" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.685333 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/bad89ee9-f9e4-4a74-9f79-5a9ef46b9ec0-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-9b5pb\" (UID: \"bad89ee9-f9e4-4a74-9f79-5a9ef46b9ec0\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-9b5pb" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.685370 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dxd2n\" (UniqueName: \"kubernetes.io/projected/95abb851-2f05-43e0-8c35-92027baf4a2c-kube-api-access-dxd2n\") pod \"nmstate-handler-z4cfk\" (UID: \"95abb851-2f05-43e0-8c35-92027baf4a2c\") " pod="openshift-nmstate/nmstate-handler-z4cfk" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.685398 4933 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/95abb851-2f05-43e0-8c35-92027baf4a2c-dbus-socket\") pod \"nmstate-handler-z4cfk\" (UID: \"95abb851-2f05-43e0-8c35-92027baf4a2c\") " pod="openshift-nmstate/nmstate-handler-z4cfk" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.685424 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/95abb851-2f05-43e0-8c35-92027baf4a2c-nmstate-lock\") pod \"nmstate-handler-z4cfk\" (UID: \"95abb851-2f05-43e0-8c35-92027baf4a2c\") " pod="openshift-nmstate/nmstate-handler-z4cfk" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.685464 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/bad89ee9-f9e4-4a74-9f79-5a9ef46b9ec0-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-9b5pb\" (UID: \"bad89ee9-f9e4-4a74-9f79-5a9ef46b9ec0\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-9b5pb" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.685600 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/95abb851-2f05-43e0-8c35-92027baf4a2c-ovs-socket\") pod \"nmstate-handler-z4cfk\" (UID: \"95abb851-2f05-43e0-8c35-92027baf4a2c\") " pod="openshift-nmstate/nmstate-handler-z4cfk" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.686100 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/95abb851-2f05-43e0-8c35-92027baf4a2c-dbus-socket\") pod \"nmstate-handler-z4cfk\" (UID: \"95abb851-2f05-43e0-8c35-92027baf4a2c\") " pod="openshift-nmstate/nmstate-handler-z4cfk" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.686142 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/95abb851-2f05-43e0-8c35-92027baf4a2c-nmstate-lock\") pod \"nmstate-handler-z4cfk\" (UID: \"95abb851-2f05-43e0-8c35-92027baf4a2c\") " pod="openshift-nmstate/nmstate-handler-z4cfk" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.704961 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dxd2n\" (UniqueName: \"kubernetes.io/projected/95abb851-2f05-43e0-8c35-92027baf4a2c-kube-api-access-dxd2n\") pod \"nmstate-handler-z4cfk\" (UID: \"95abb851-2f05-43e0-8c35-92027baf4a2c\") " pod="openshift-nmstate/nmstate-handler-z4cfk" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.737162 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-597779d89c-gf9ls"] Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.737922 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-597779d89c-gf9ls" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.748811 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-597779d89c-gf9ls"] Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.757200 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-metrics-54757c584b-88t8r" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.789611 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/fc5147e7-834d-4c00-9840-965bbd2e1875-console-oauth-config\") pod \"console-597779d89c-gf9ls\" (UID: \"fc5147e7-834d-4c00-9840-965bbd2e1875\") " pod="openshift-console/console-597779d89c-gf9ls" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.789878 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/bad89ee9-f9e4-4a74-9f79-5a9ef46b9ec0-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-9b5pb\" (UID: \"bad89ee9-f9e4-4a74-9f79-5a9ef46b9ec0\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-9b5pb" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.789927 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/fc5147e7-834d-4c00-9840-965bbd2e1875-console-config\") pod \"console-597779d89c-gf9ls\" (UID: \"fc5147e7-834d-4c00-9840-965bbd2e1875\") " pod="openshift-console/console-597779d89c-gf9ls" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.789947 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fc5147e7-834d-4c00-9840-965bbd2e1875-trusted-ca-bundle\") pod \"console-597779d89c-gf9ls\" (UID: \"fc5147e7-834d-4c00-9840-965bbd2e1875\") " pod="openshift-console/console-597779d89c-gf9ls" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.789962 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q9zkn\" (UniqueName: \"kubernetes.io/projected/bad89ee9-f9e4-4a74-9f79-5a9ef46b9ec0-kube-api-access-q9zkn\") pod \"nmstate-console-plugin-7754f76f8b-9b5pb\" (UID: \"bad89ee9-f9e4-4a74-9f79-5a9ef46b9ec0\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-9b5pb" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.789980 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/fc5147e7-834d-4c00-9840-965bbd2e1875-console-serving-cert\") pod \"console-597779d89c-gf9ls\" (UID: \"fc5147e7-834d-4c00-9840-965bbd2e1875\") " pod="openshift-console/console-597779d89c-gf9ls" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.790003 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/bad89ee9-f9e4-4a74-9f79-5a9ef46b9ec0-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-9b5pb\" (UID: \"bad89ee9-f9e4-4a74-9f79-5a9ef46b9ec0\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-9b5pb" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.790018 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fc5147e7-834d-4c00-9840-965bbd2e1875-service-ca\") pod \"console-597779d89c-gf9ls\" (UID: \"fc5147e7-834d-4c00-9840-965bbd2e1875\") " pod="openshift-console/console-597779d89c-gf9ls" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.790034 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"kube-api-access-pgrwt\" (UniqueName: \"kubernetes.io/projected/fc5147e7-834d-4c00-9840-965bbd2e1875-kube-api-access-pgrwt\") pod \"console-597779d89c-gf9ls\" (UID: \"fc5147e7-834d-4c00-9840-965bbd2e1875\") " pod="openshift-console/console-597779d89c-gf9ls" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.790055 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/fc5147e7-834d-4c00-9840-965bbd2e1875-oauth-serving-cert\") pod \"console-597779d89c-gf9ls\" (UID: \"fc5147e7-834d-4c00-9840-965bbd2e1875\") " pod="openshift-console/console-597779d89c-gf9ls" Jan 22 05:58:13 crc kubenswrapper[4933]: E0122 05:58:13.790205 4933 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found Jan 22 05:58:13 crc kubenswrapper[4933]: E0122 05:58:13.790246 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bad89ee9-f9e4-4a74-9f79-5a9ef46b9ec0-plugin-serving-cert podName:bad89ee9-f9e4-4a74-9f79-5a9ef46b9ec0 nodeName:}" failed. No retries permitted until 2026-01-22 05:58:14.290231663 +0000 UTC m=+742.127357016 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/bad89ee9-f9e4-4a74-9f79-5a9ef46b9ec0-plugin-serving-cert") pod "nmstate-console-plugin-7754f76f8b-9b5pb" (UID: "bad89ee9-f9e4-4a74-9f79-5a9ef46b9ec0") : secret "plugin-serving-cert" not found Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.791285 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-z4cfk" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.791442 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/bad89ee9-f9e4-4a74-9f79-5a9ef46b9ec0-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-9b5pb\" (UID: \"bad89ee9-f9e4-4a74-9f79-5a9ef46b9ec0\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-9b5pb" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.805426 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q9zkn\" (UniqueName: \"kubernetes.io/projected/bad89ee9-f9e4-4a74-9f79-5a9ef46b9ec0-kube-api-access-q9zkn\") pod \"nmstate-console-plugin-7754f76f8b-9b5pb\" (UID: \"bad89ee9-f9e4-4a74-9f79-5a9ef46b9ec0\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-9b5pb" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.890926 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fc5147e7-834d-4c00-9840-965bbd2e1875-service-ca\") pod \"console-597779d89c-gf9ls\" (UID: \"fc5147e7-834d-4c00-9840-965bbd2e1875\") " pod="openshift-console/console-597779d89c-gf9ls" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.890978 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pgrwt\" (UniqueName: \"kubernetes.io/projected/fc5147e7-834d-4c00-9840-965bbd2e1875-kube-api-access-pgrwt\") pod \"console-597779d89c-gf9ls\" (UID: \"fc5147e7-834d-4c00-9840-965bbd2e1875\") " pod="openshift-console/console-597779d89c-gf9ls" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.891010 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: 
\"kubernetes.io/configmap/fc5147e7-834d-4c00-9840-965bbd2e1875-oauth-serving-cert\") pod \"console-597779d89c-gf9ls\" (UID: \"fc5147e7-834d-4c00-9840-965bbd2e1875\") " pod="openshift-console/console-597779d89c-gf9ls" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.891051 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/fc5147e7-834d-4c00-9840-965bbd2e1875-console-oauth-config\") pod \"console-597779d89c-gf9ls\" (UID: \"fc5147e7-834d-4c00-9840-965bbd2e1875\") " pod="openshift-console/console-597779d89c-gf9ls" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.891164 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/fc5147e7-834d-4c00-9840-965bbd2e1875-console-config\") pod \"console-597779d89c-gf9ls\" (UID: \"fc5147e7-834d-4c00-9840-965bbd2e1875\") " pod="openshift-console/console-597779d89c-gf9ls" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.891193 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fc5147e7-834d-4c00-9840-965bbd2e1875-trusted-ca-bundle\") pod \"console-597779d89c-gf9ls\" (UID: \"fc5147e7-834d-4c00-9840-965bbd2e1875\") " pod="openshift-console/console-597779d89c-gf9ls" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.891215 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/fc5147e7-834d-4c00-9840-965bbd2e1875-console-serving-cert\") pod \"console-597779d89c-gf9ls\" (UID: \"fc5147e7-834d-4c00-9840-965bbd2e1875\") " pod="openshift-console/console-597779d89c-gf9ls" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.894391 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fc5147e7-834d-4c00-9840-965bbd2e1875-service-ca\") pod \"console-597779d89c-gf9ls\" (UID: \"fc5147e7-834d-4c00-9840-965bbd2e1875\") " pod="openshift-console/console-597779d89c-gf9ls" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.894425 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/fc5147e7-834d-4c00-9840-965bbd2e1875-oauth-serving-cert\") pod \"console-597779d89c-gf9ls\" (UID: \"fc5147e7-834d-4c00-9840-965bbd2e1875\") " pod="openshift-console/console-597779d89c-gf9ls" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.894531 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fc5147e7-834d-4c00-9840-965bbd2e1875-trusted-ca-bundle\") pod \"console-597779d89c-gf9ls\" (UID: \"fc5147e7-834d-4c00-9840-965bbd2e1875\") " pod="openshift-console/console-597779d89c-gf9ls" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.894729 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/fc5147e7-834d-4c00-9840-965bbd2e1875-console-config\") pod \"console-597779d89c-gf9ls\" (UID: \"fc5147e7-834d-4c00-9840-965bbd2e1875\") " pod="openshift-console/console-597779d89c-gf9ls" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.899277 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: 
\"kubernetes.io/secret/fc5147e7-834d-4c00-9840-965bbd2e1875-console-oauth-config\") pod \"console-597779d89c-gf9ls\" (UID: \"fc5147e7-834d-4c00-9840-965bbd2e1875\") " pod="openshift-console/console-597779d89c-gf9ls" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.907776 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/fc5147e7-834d-4c00-9840-965bbd2e1875-console-serving-cert\") pod \"console-597779d89c-gf9ls\" (UID: \"fc5147e7-834d-4c00-9840-965bbd2e1875\") " pod="openshift-console/console-597779d89c-gf9ls" Jan 22 05:58:13 crc kubenswrapper[4933]: I0122 05:58:13.909987 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pgrwt\" (UniqueName: \"kubernetes.io/projected/fc5147e7-834d-4c00-9840-965bbd2e1875-kube-api-access-pgrwt\") pod \"console-597779d89c-gf9ls\" (UID: \"fc5147e7-834d-4c00-9840-965bbd2e1875\") " pod="openshift-console/console-597779d89c-gf9ls" Jan 22 05:58:14 crc kubenswrapper[4933]: I0122 05:58:14.050834 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-597779d89c-gf9ls" Jan 22 05:58:14 crc kubenswrapper[4933]: I0122 05:58:14.093901 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/40102dae-8b84-4138-bd5d-4a3b7c1b3492-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-dtghh\" (UID: \"40102dae-8b84-4138-bd5d-4a3b7c1b3492\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-dtghh" Jan 22 05:58:14 crc kubenswrapper[4933]: I0122 05:58:14.099032 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/40102dae-8b84-4138-bd5d-4a3b7c1b3492-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-dtghh\" (UID: \"40102dae-8b84-4138-bd5d-4a3b7c1b3492\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-dtghh" Jan 22 05:58:14 crc kubenswrapper[4933]: I0122 05:58:14.180280 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-88t8r"] Jan 22 05:58:14 crc kubenswrapper[4933]: W0122 05:58:14.189313 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7f8f46df_3251_4fc5_87fb_10b877fe5878.slice/crio-0f5d4eeb09469ebcdf5686fae7f49677e2119910b72d364803e2616cc96a8262 WatchSource:0}: Error finding container 0f5d4eeb09469ebcdf5686fae7f49677e2119910b72d364803e2616cc96a8262: Status 404 returned error can't find the container with id 0f5d4eeb09469ebcdf5686fae7f49677e2119910b72d364803e2616cc96a8262 Jan 22 05:58:14 crc kubenswrapper[4933]: I0122 05:58:14.262981 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-597779d89c-gf9ls"] Jan 22 05:58:14 crc kubenswrapper[4933]: W0122 05:58:14.266664 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfc5147e7_834d_4c00_9840_965bbd2e1875.slice/crio-49ccc95cbda6d1f48044a05956582124b1b6b80c5352e05389bc22989a4ae834 WatchSource:0}: Error finding container 49ccc95cbda6d1f48044a05956582124b1b6b80c5352e05389bc22989a4ae834: Status 404 returned error can't find the container with id 49ccc95cbda6d1f48044a05956582124b1b6b80c5352e05389bc22989a4ae834 Jan 22 05:58:14 crc kubenswrapper[4933]: I0122 05:58:14.296933 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/bad89ee9-f9e4-4a74-9f79-5a9ef46b9ec0-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-9b5pb\" (UID: \"bad89ee9-f9e4-4a74-9f79-5a9ef46b9ec0\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-9b5pb" Jan 22 05:58:14 crc kubenswrapper[4933]: I0122 05:58:14.306318 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/bad89ee9-f9e4-4a74-9f79-5a9ef46b9ec0-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-9b5pb\" (UID: \"bad89ee9-f9e4-4a74-9f79-5a9ef46b9ec0\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-9b5pb" Jan 22 05:58:14 crc kubenswrapper[4933]: I0122 05:58:14.364586 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-dtghh" Jan 22 05:58:14 crc kubenswrapper[4933]: I0122 05:58:14.426625 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-88t8r" event={"ID":"7f8f46df-3251-4fc5-87fb-10b877fe5878","Type":"ContainerStarted","Data":"0f5d4eeb09469ebcdf5686fae7f49677e2119910b72d364803e2616cc96a8262"} Jan 22 05:58:14 crc kubenswrapper[4933]: I0122 05:58:14.428242 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-z4cfk" event={"ID":"95abb851-2f05-43e0-8c35-92027baf4a2c","Type":"ContainerStarted","Data":"9e7c5b0b04039d311db92f0faa371aa5ce7d928a1d6e38569faaee7360af47bb"} Jan 22 05:58:14 crc kubenswrapper[4933]: I0122 05:58:14.429691 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-597779d89c-gf9ls" event={"ID":"fc5147e7-834d-4c00-9840-965bbd2e1875","Type":"ContainerStarted","Data":"49ccc95cbda6d1f48044a05956582124b1b6b80c5352e05389bc22989a4ae834"} Jan 22 05:58:14 crc kubenswrapper[4933]: I0122 05:58:14.480686 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-9b5pb" Jan 22 05:58:14 crc kubenswrapper[4933]: I0122 05:58:14.626977 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-dtghh"] Jan 22 05:58:14 crc kubenswrapper[4933]: W0122 05:58:14.632142 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40102dae_8b84_4138_bd5d_4a3b7c1b3492.slice/crio-7712ee1114ce0db304848f9c9ca3714921abe03c06a5a448129129b06a764240 WatchSource:0}: Error finding container 7712ee1114ce0db304848f9c9ca3714921abe03c06a5a448129129b06a764240: Status 404 returned error can't find the container with id 7712ee1114ce0db304848f9c9ca3714921abe03c06a5a448129129b06a764240 Jan 22 05:58:14 crc kubenswrapper[4933]: I0122 05:58:14.706368 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-9b5pb"] Jan 22 05:58:15 crc kubenswrapper[4933]: I0122 05:58:15.436695 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-dtghh" event={"ID":"40102dae-8b84-4138-bd5d-4a3b7c1b3492","Type":"ContainerStarted","Data":"7712ee1114ce0db304848f9c9ca3714921abe03c06a5a448129129b06a764240"} Jan 22 05:58:15 crc kubenswrapper[4933]: I0122 05:58:15.443127 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-597779d89c-gf9ls" event={"ID":"fc5147e7-834d-4c00-9840-965bbd2e1875","Type":"ContainerStarted","Data":"f48c97dda485d6cf0ef002e8e66773e0fdcb66dbad34d0a43645cf8db1720a82"} Jan 22 05:58:15 crc kubenswrapper[4933]: I0122 05:58:15.447920 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-9b5pb" event={"ID":"bad89ee9-f9e4-4a74-9f79-5a9ef46b9ec0","Type":"ContainerStarted","Data":"89dae99ff8bd928009a226df4fabef6cd8b9f12be67ec4b848fdf4e0624774af"} Jan 22 05:58:17 crc kubenswrapper[4933]: I0122 05:58:17.469335 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-z4cfk" event={"ID":"95abb851-2f05-43e0-8c35-92027baf4a2c","Type":"ContainerStarted","Data":"002326abf7f08eb74eae830136a07113d46c4c71416d6124f12938625da96725"} Jan 22 05:58:17 crc kubenswrapper[4933]: I0122 05:58:17.469705 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-z4cfk" Jan 22 05:58:17 crc kubenswrapper[4933]: I0122 05:58:17.471279 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-dtghh" event={"ID":"40102dae-8b84-4138-bd5d-4a3b7c1b3492","Type":"ContainerStarted","Data":"4a0822a8ead93a70da0efcfbbf1d3834a8b6fb7accb3b18711c514d4e4e82c74"} Jan 22 05:58:17 crc kubenswrapper[4933]: I0122 05:58:17.471455 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-dtghh" Jan 22 05:58:17 crc kubenswrapper[4933]: I0122 05:58:17.473239 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-88t8r" event={"ID":"7f8f46df-3251-4fc5-87fb-10b877fe5878","Type":"ContainerStarted","Data":"cc9cba9087a05e6fb324d7d0fafa387f460f3c4e762cd5c2978b1a824e016a32"} Jan 22 05:58:17 crc kubenswrapper[4933]: I0122 05:58:17.475356 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-9b5pb" 
event={"ID":"bad89ee9-f9e4-4a74-9f79-5a9ef46b9ec0","Type":"ContainerStarted","Data":"d80d8c098da9a89c770210a49ad1102c31240b922b3e9534ceb6edc141f80b8e"} Jan 22 05:58:17 crc kubenswrapper[4933]: I0122 05:58:17.493450 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-z4cfk" podStartSLOduration=1.379256375 podStartE2EDuration="4.493407503s" podCreationTimestamp="2026-01-22 05:58:13 +0000 UTC" firstStartedPulling="2026-01-22 05:58:13.818879097 +0000 UTC m=+741.656004450" lastFinishedPulling="2026-01-22 05:58:16.933030215 +0000 UTC m=+744.770155578" observedRunningTime="2026-01-22 05:58:17.486186383 +0000 UTC m=+745.323311766" watchObservedRunningTime="2026-01-22 05:58:17.493407503 +0000 UTC m=+745.330532926" Jan 22 05:58:17 crc kubenswrapper[4933]: I0122 05:58:17.493733 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-597779d89c-gf9ls" podStartSLOduration=4.493712551 podStartE2EDuration="4.493712551s" podCreationTimestamp="2026-01-22 05:58:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:58:15.459835259 +0000 UTC m=+743.296960642" watchObservedRunningTime="2026-01-22 05:58:17.493712551 +0000 UTC m=+745.330837974" Jan 22 05:58:17 crc kubenswrapper[4933]: I0122 05:58:17.519376 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-9b5pb" podStartSLOduration=2.300596197 podStartE2EDuration="4.519350811s" podCreationTimestamp="2026-01-22 05:58:13 +0000 UTC" firstStartedPulling="2026-01-22 05:58:14.713492352 +0000 UTC m=+742.550617705" lastFinishedPulling="2026-01-22 05:58:16.932246966 +0000 UTC m=+744.769372319" observedRunningTime="2026-01-22 05:58:17.515662388 +0000 UTC m=+745.352787771" watchObservedRunningTime="2026-01-22 05:58:17.519350811 +0000 UTC m=+745.356476184" Jan 22 05:58:19 crc kubenswrapper[4933]: I0122 05:58:19.491264 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-88t8r" event={"ID":"7f8f46df-3251-4fc5-87fb-10b877fe5878","Type":"ContainerStarted","Data":"31543fca2cfde7c8013a2b8d7b404c5d56e45493fd83a7348bb4aa8e5c58f772"} Jan 22 05:58:19 crc kubenswrapper[4933]: I0122 05:58:19.518208 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-dtghh" podStartSLOduration=4.218723662 podStartE2EDuration="6.518190619s" podCreationTimestamp="2026-01-22 05:58:13 +0000 UTC" firstStartedPulling="2026-01-22 05:58:14.633660561 +0000 UTC m=+742.470785914" lastFinishedPulling="2026-01-22 05:58:16.933127518 +0000 UTC m=+744.770252871" observedRunningTime="2026-01-22 05:58:17.558297012 +0000 UTC m=+745.395422375" watchObservedRunningTime="2026-01-22 05:58:19.518190619 +0000 UTC m=+747.355315972" Jan 22 05:58:19 crc kubenswrapper[4933]: I0122 05:58:19.518625 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-54757c584b-88t8r" podStartSLOduration=1.458899913 podStartE2EDuration="6.51861918s" podCreationTimestamp="2026-01-22 05:58:13 +0000 UTC" firstStartedPulling="2026-01-22 05:58:14.192024615 +0000 UTC m=+742.029149968" lastFinishedPulling="2026-01-22 05:58:19.251743882 +0000 UTC m=+747.088869235" observedRunningTime="2026-01-22 05:58:19.514316982 +0000 UTC m=+747.351442365" watchObservedRunningTime="2026-01-22 05:58:19.51861918 
+0000 UTC m=+747.355744533" Jan 22 05:58:23 crc kubenswrapper[4933]: I0122 05:58:23.828845 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-z4cfk" Jan 22 05:58:24 crc kubenswrapper[4933]: I0122 05:58:24.052505 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-597779d89c-gf9ls" Jan 22 05:58:24 crc kubenswrapper[4933]: I0122 05:58:24.052846 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-597779d89c-gf9ls" Jan 22 05:58:24 crc kubenswrapper[4933]: I0122 05:58:24.060540 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-597779d89c-gf9ls" Jan 22 05:58:24 crc kubenswrapper[4933]: I0122 05:58:24.528978 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-597779d89c-gf9ls" Jan 22 05:58:24 crc kubenswrapper[4933]: I0122 05:58:24.585043 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-gqpfp"] Jan 22 05:58:34 crc kubenswrapper[4933]: I0122 05:58:34.373812 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-dtghh" Jan 22 05:58:36 crc kubenswrapper[4933]: I0122 05:58:36.105296 4933 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Jan 22 05:58:49 crc kubenswrapper[4933]: I0122 05:58:49.015958 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrnt57"] Jan 22 05:58:49 crc kubenswrapper[4933]: I0122 05:58:49.029433 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrnt57" Jan 22 05:58:49 crc kubenswrapper[4933]: I0122 05:58:49.034762 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 22 05:58:49 crc kubenswrapper[4933]: I0122 05:58:49.051421 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrnt57"] Jan 22 05:58:49 crc kubenswrapper[4933]: I0122 05:58:49.096253 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d078e767-9507-4d8c-a559-3c9df7a99923-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrnt57\" (UID: \"d078e767-9507-4d8c-a559-3c9df7a99923\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrnt57" Jan 22 05:58:49 crc kubenswrapper[4933]: I0122 05:58:49.096549 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pcls2\" (UniqueName: \"kubernetes.io/projected/d078e767-9507-4d8c-a559-3c9df7a99923-kube-api-access-pcls2\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrnt57\" (UID: \"d078e767-9507-4d8c-a559-3c9df7a99923\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrnt57" Jan 22 05:58:49 crc kubenswrapper[4933]: I0122 05:58:49.096706 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d078e767-9507-4d8c-a559-3c9df7a99923-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrnt57\" (UID: \"d078e767-9507-4d8c-a559-3c9df7a99923\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrnt57" Jan 22 05:58:49 crc kubenswrapper[4933]: I0122 05:58:49.198449 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pcls2\" (UniqueName: \"kubernetes.io/projected/d078e767-9507-4d8c-a559-3c9df7a99923-kube-api-access-pcls2\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrnt57\" (UID: \"d078e767-9507-4d8c-a559-3c9df7a99923\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrnt57" Jan 22 05:58:49 crc kubenswrapper[4933]: I0122 05:58:49.198559 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d078e767-9507-4d8c-a559-3c9df7a99923-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrnt57\" (UID: \"d078e767-9507-4d8c-a559-3c9df7a99923\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrnt57" Jan 22 05:58:49 crc kubenswrapper[4933]: I0122 05:58:49.198652 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d078e767-9507-4d8c-a559-3c9df7a99923-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrnt57\" (UID: \"d078e767-9507-4d8c-a559-3c9df7a99923\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrnt57" Jan 22 05:58:49 crc kubenswrapper[4933]: I0122 05:58:49.199434 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/d078e767-9507-4d8c-a559-3c9df7a99923-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrnt57\" (UID: \"d078e767-9507-4d8c-a559-3c9df7a99923\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrnt57" Jan 22 05:58:49 crc kubenswrapper[4933]: I0122 05:58:49.199453 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d078e767-9507-4d8c-a559-3c9df7a99923-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrnt57\" (UID: \"d078e767-9507-4d8c-a559-3c9df7a99923\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrnt57" Jan 22 05:58:49 crc kubenswrapper[4933]: I0122 05:58:49.234831 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pcls2\" (UniqueName: \"kubernetes.io/projected/d078e767-9507-4d8c-a559-3c9df7a99923-kube-api-access-pcls2\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrnt57\" (UID: \"d078e767-9507-4d8c-a559-3c9df7a99923\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrnt57" Jan 22 05:58:49 crc kubenswrapper[4933]: I0122 05:58:49.365029 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrnt57" Jan 22 05:58:49 crc kubenswrapper[4933]: I0122 05:58:49.630555 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-gqpfp" podUID="63e15a9d-3476-43c1-93e0-6453f0fc9adb" containerName="console" containerID="cri-o://703beb457ce4279f36298587f57df0a2043207bb7d5012efd72b2fdcc10f0ed0" gracePeriod=15 Jan 22 05:58:49 crc kubenswrapper[4933]: I0122 05:58:49.817267 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrnt57"] Jan 22 05:58:49 crc kubenswrapper[4933]: I0122 05:58:49.958937 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-gqpfp_63e15a9d-3476-43c1-93e0-6453f0fc9adb/console/0.log" Jan 22 05:58:49 crc kubenswrapper[4933]: I0122 05:58:49.959019 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-gqpfp" Jan 22 05:58:50 crc kubenswrapper[4933]: I0122 05:58:50.007932 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/63e15a9d-3476-43c1-93e0-6453f0fc9adb-trusted-ca-bundle\") pod \"63e15a9d-3476-43c1-93e0-6453f0fc9adb\" (UID: \"63e15a9d-3476-43c1-93e0-6453f0fc9adb\") " Jan 22 05:58:50 crc kubenswrapper[4933]: I0122 05:58:50.007980 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6x9zh\" (UniqueName: \"kubernetes.io/projected/63e15a9d-3476-43c1-93e0-6453f0fc9adb-kube-api-access-6x9zh\") pod \"63e15a9d-3476-43c1-93e0-6453f0fc9adb\" (UID: \"63e15a9d-3476-43c1-93e0-6453f0fc9adb\") " Jan 22 05:58:50 crc kubenswrapper[4933]: I0122 05:58:50.008034 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/63e15a9d-3476-43c1-93e0-6453f0fc9adb-console-serving-cert\") pod \"63e15a9d-3476-43c1-93e0-6453f0fc9adb\" (UID: \"63e15a9d-3476-43c1-93e0-6453f0fc9adb\") " Jan 22 05:58:50 crc kubenswrapper[4933]: I0122 05:58:50.008095 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/63e15a9d-3476-43c1-93e0-6453f0fc9adb-console-config\") pod \"63e15a9d-3476-43c1-93e0-6453f0fc9adb\" (UID: \"63e15a9d-3476-43c1-93e0-6453f0fc9adb\") " Jan 22 05:58:50 crc kubenswrapper[4933]: I0122 05:58:50.008150 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/63e15a9d-3476-43c1-93e0-6453f0fc9adb-service-ca\") pod \"63e15a9d-3476-43c1-93e0-6453f0fc9adb\" (UID: \"63e15a9d-3476-43c1-93e0-6453f0fc9adb\") " Jan 22 05:58:50 crc kubenswrapper[4933]: I0122 05:58:50.008166 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/63e15a9d-3476-43c1-93e0-6453f0fc9adb-console-oauth-config\") pod \"63e15a9d-3476-43c1-93e0-6453f0fc9adb\" (UID: \"63e15a9d-3476-43c1-93e0-6453f0fc9adb\") " Jan 22 05:58:50 crc kubenswrapper[4933]: I0122 05:58:50.008189 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/63e15a9d-3476-43c1-93e0-6453f0fc9adb-oauth-serving-cert\") pod \"63e15a9d-3476-43c1-93e0-6453f0fc9adb\" (UID: \"63e15a9d-3476-43c1-93e0-6453f0fc9adb\") " Jan 22 05:58:50 crc kubenswrapper[4933]: I0122 05:58:50.009026 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/63e15a9d-3476-43c1-93e0-6453f0fc9adb-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "63e15a9d-3476-43c1-93e0-6453f0fc9adb" (UID: "63e15a9d-3476-43c1-93e0-6453f0fc9adb"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:58:50 crc kubenswrapper[4933]: I0122 05:58:50.009448 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/63e15a9d-3476-43c1-93e0-6453f0fc9adb-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "63e15a9d-3476-43c1-93e0-6453f0fc9adb" (UID: "63e15a9d-3476-43c1-93e0-6453f0fc9adb"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:58:50 crc kubenswrapper[4933]: I0122 05:58:50.010358 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/63e15a9d-3476-43c1-93e0-6453f0fc9adb-console-config" (OuterVolumeSpecName: "console-config") pod "63e15a9d-3476-43c1-93e0-6453f0fc9adb" (UID: "63e15a9d-3476-43c1-93e0-6453f0fc9adb"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:58:50 crc kubenswrapper[4933]: I0122 05:58:50.010424 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/63e15a9d-3476-43c1-93e0-6453f0fc9adb-service-ca" (OuterVolumeSpecName: "service-ca") pod "63e15a9d-3476-43c1-93e0-6453f0fc9adb" (UID: "63e15a9d-3476-43c1-93e0-6453f0fc9adb"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:58:50 crc kubenswrapper[4933]: I0122 05:58:50.014921 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/63e15a9d-3476-43c1-93e0-6453f0fc9adb-kube-api-access-6x9zh" (OuterVolumeSpecName: "kube-api-access-6x9zh") pod "63e15a9d-3476-43c1-93e0-6453f0fc9adb" (UID: "63e15a9d-3476-43c1-93e0-6453f0fc9adb"). InnerVolumeSpecName "kube-api-access-6x9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:58:50 crc kubenswrapper[4933]: I0122 05:58:50.015049 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63e15a9d-3476-43c1-93e0-6453f0fc9adb-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "63e15a9d-3476-43c1-93e0-6453f0fc9adb" (UID: "63e15a9d-3476-43c1-93e0-6453f0fc9adb"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:58:50 crc kubenswrapper[4933]: I0122 05:58:50.015366 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63e15a9d-3476-43c1-93e0-6453f0fc9adb-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "63e15a9d-3476-43c1-93e0-6453f0fc9adb" (UID: "63e15a9d-3476-43c1-93e0-6453f0fc9adb"). InnerVolumeSpecName "console-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:58:50 crc kubenswrapper[4933]: I0122 05:58:50.109159 4933 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/63e15a9d-3476-43c1-93e0-6453f0fc9adb-console-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:58:50 crc kubenswrapper[4933]: I0122 05:58:50.109191 4933 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/63e15a9d-3476-43c1-93e0-6453f0fc9adb-service-ca\") on node \"crc\" DevicePath \"\"" Jan 22 05:58:50 crc kubenswrapper[4933]: I0122 05:58:50.109200 4933 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/63e15a9d-3476-43c1-93e0-6453f0fc9adb-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:58:50 crc kubenswrapper[4933]: I0122 05:58:50.109210 4933 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/63e15a9d-3476-43c1-93e0-6453f0fc9adb-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:58:50 crc kubenswrapper[4933]: I0122 05:58:50.109218 4933 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/63e15a9d-3476-43c1-93e0-6453f0fc9adb-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:58:50 crc kubenswrapper[4933]: I0122 05:58:50.109227 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6x9zh\" (UniqueName: \"kubernetes.io/projected/63e15a9d-3476-43c1-93e0-6453f0fc9adb-kube-api-access-6x9zh\") on node \"crc\" DevicePath \"\"" Jan 22 05:58:50 crc kubenswrapper[4933]: I0122 05:58:50.109238 4933 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/63e15a9d-3476-43c1-93e0-6453f0fc9adb-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:58:50 crc kubenswrapper[4933]: I0122 05:58:50.702199 4933 generic.go:334] "Generic (PLEG): container finished" podID="d078e767-9507-4d8c-a559-3c9df7a99923" containerID="741868d0088bb84348cd6e58bac081aa4905cbf604c0a6894ec21dcedc713afa" exitCode=0 Jan 22 05:58:50 crc kubenswrapper[4933]: I0122 05:58:50.702247 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrnt57" event={"ID":"d078e767-9507-4d8c-a559-3c9df7a99923","Type":"ContainerDied","Data":"741868d0088bb84348cd6e58bac081aa4905cbf604c0a6894ec21dcedc713afa"} Jan 22 05:58:50 crc kubenswrapper[4933]: I0122 05:58:50.702508 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrnt57" event={"ID":"d078e767-9507-4d8c-a559-3c9df7a99923","Type":"ContainerStarted","Data":"b6b20fd1cbb195cb12b0e44d9ec1c7541ccc3539691776b5f5d4c9e555cf23ab"} Jan 22 05:58:50 crc kubenswrapper[4933]: I0122 05:58:50.703956 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-gqpfp_63e15a9d-3476-43c1-93e0-6453f0fc9adb/console/0.log" Jan 22 05:58:50 crc kubenswrapper[4933]: I0122 05:58:50.704001 4933 generic.go:334] "Generic (PLEG): container finished" podID="63e15a9d-3476-43c1-93e0-6453f0fc9adb" containerID="703beb457ce4279f36298587f57df0a2043207bb7d5012efd72b2fdcc10f0ed0" exitCode=2 Jan 22 05:58:50 crc kubenswrapper[4933]: I0122 05:58:50.704020 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-console/console-f9d7485db-gqpfp" event={"ID":"63e15a9d-3476-43c1-93e0-6453f0fc9adb","Type":"ContainerDied","Data":"703beb457ce4279f36298587f57df0a2043207bb7d5012efd72b2fdcc10f0ed0"} Jan 22 05:58:50 crc kubenswrapper[4933]: I0122 05:58:50.704033 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-gqpfp" event={"ID":"63e15a9d-3476-43c1-93e0-6453f0fc9adb","Type":"ContainerDied","Data":"ccfc5285ff70506f1eaffb8a8c7cf6110ceff3040a35c9d5e1943feb1092ff66"} Jan 22 05:58:50 crc kubenswrapper[4933]: I0122 05:58:50.704047 4933 scope.go:117] "RemoveContainer" containerID="703beb457ce4279f36298587f57df0a2043207bb7d5012efd72b2fdcc10f0ed0" Jan 22 05:58:50 crc kubenswrapper[4933]: I0122 05:58:50.704131 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-gqpfp" Jan 22 05:58:50 crc kubenswrapper[4933]: I0122 05:58:50.722252 4933 scope.go:117] "RemoveContainer" containerID="703beb457ce4279f36298587f57df0a2043207bb7d5012efd72b2fdcc10f0ed0" Jan 22 05:58:50 crc kubenswrapper[4933]: E0122 05:58:50.722646 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"703beb457ce4279f36298587f57df0a2043207bb7d5012efd72b2fdcc10f0ed0\": container with ID starting with 703beb457ce4279f36298587f57df0a2043207bb7d5012efd72b2fdcc10f0ed0 not found: ID does not exist" containerID="703beb457ce4279f36298587f57df0a2043207bb7d5012efd72b2fdcc10f0ed0" Jan 22 05:58:50 crc kubenswrapper[4933]: I0122 05:58:50.722707 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"703beb457ce4279f36298587f57df0a2043207bb7d5012efd72b2fdcc10f0ed0"} err="failed to get container status \"703beb457ce4279f36298587f57df0a2043207bb7d5012efd72b2fdcc10f0ed0\": rpc error: code = NotFound desc = could not find container \"703beb457ce4279f36298587f57df0a2043207bb7d5012efd72b2fdcc10f0ed0\": container with ID starting with 703beb457ce4279f36298587f57df0a2043207bb7d5012efd72b2fdcc10f0ed0 not found: ID does not exist" Jan 22 05:58:50 crc kubenswrapper[4933]: I0122 05:58:50.743340 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-gqpfp"] Jan 22 05:58:50 crc kubenswrapper[4933]: I0122 05:58:50.746823 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-gqpfp"] Jan 22 05:58:51 crc kubenswrapper[4933]: I0122 05:58:51.358445 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wpnpq"] Jan 22 05:58:51 crc kubenswrapper[4933]: E0122 05:58:51.358807 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63e15a9d-3476-43c1-93e0-6453f0fc9adb" containerName="console" Jan 22 05:58:51 crc kubenswrapper[4933]: I0122 05:58:51.358829 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="63e15a9d-3476-43c1-93e0-6453f0fc9adb" containerName="console" Jan 22 05:58:51 crc kubenswrapper[4933]: I0122 05:58:51.359455 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="63e15a9d-3476-43c1-93e0-6453f0fc9adb" containerName="console" Jan 22 05:58:51 crc kubenswrapper[4933]: I0122 05:58:51.360823 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wpnpq" Jan 22 05:58:51 crc kubenswrapper[4933]: I0122 05:58:51.373874 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wpnpq"] Jan 22 05:58:51 crc kubenswrapper[4933]: I0122 05:58:51.425959 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zpvzr\" (UniqueName: \"kubernetes.io/projected/f7afe775-7207-45aa-9570-ca9be0110d0f-kube-api-access-zpvzr\") pod \"redhat-operators-wpnpq\" (UID: \"f7afe775-7207-45aa-9570-ca9be0110d0f\") " pod="openshift-marketplace/redhat-operators-wpnpq" Jan 22 05:58:51 crc kubenswrapper[4933]: I0122 05:58:51.426046 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7afe775-7207-45aa-9570-ca9be0110d0f-catalog-content\") pod \"redhat-operators-wpnpq\" (UID: \"f7afe775-7207-45aa-9570-ca9be0110d0f\") " pod="openshift-marketplace/redhat-operators-wpnpq" Jan 22 05:58:51 crc kubenswrapper[4933]: I0122 05:58:51.426095 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7afe775-7207-45aa-9570-ca9be0110d0f-utilities\") pod \"redhat-operators-wpnpq\" (UID: \"f7afe775-7207-45aa-9570-ca9be0110d0f\") " pod="openshift-marketplace/redhat-operators-wpnpq" Jan 22 05:58:51 crc kubenswrapper[4933]: I0122 05:58:51.527064 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7afe775-7207-45aa-9570-ca9be0110d0f-catalog-content\") pod \"redhat-operators-wpnpq\" (UID: \"f7afe775-7207-45aa-9570-ca9be0110d0f\") " pod="openshift-marketplace/redhat-operators-wpnpq" Jan 22 05:58:51 crc kubenswrapper[4933]: I0122 05:58:51.527142 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7afe775-7207-45aa-9570-ca9be0110d0f-utilities\") pod \"redhat-operators-wpnpq\" (UID: \"f7afe775-7207-45aa-9570-ca9be0110d0f\") " pod="openshift-marketplace/redhat-operators-wpnpq" Jan 22 05:58:51 crc kubenswrapper[4933]: I0122 05:58:51.527228 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zpvzr\" (UniqueName: \"kubernetes.io/projected/f7afe775-7207-45aa-9570-ca9be0110d0f-kube-api-access-zpvzr\") pod \"redhat-operators-wpnpq\" (UID: \"f7afe775-7207-45aa-9570-ca9be0110d0f\") " pod="openshift-marketplace/redhat-operators-wpnpq" Jan 22 05:58:51 crc kubenswrapper[4933]: I0122 05:58:51.527956 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7afe775-7207-45aa-9570-ca9be0110d0f-utilities\") pod \"redhat-operators-wpnpq\" (UID: \"f7afe775-7207-45aa-9570-ca9be0110d0f\") " pod="openshift-marketplace/redhat-operators-wpnpq" Jan 22 05:58:51 crc kubenswrapper[4933]: I0122 05:58:51.528332 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7afe775-7207-45aa-9570-ca9be0110d0f-catalog-content\") pod \"redhat-operators-wpnpq\" (UID: \"f7afe775-7207-45aa-9570-ca9be0110d0f\") " pod="openshift-marketplace/redhat-operators-wpnpq" Jan 22 05:58:51 crc kubenswrapper[4933]: I0122 05:58:51.550536 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-zpvzr\" (UniqueName: \"kubernetes.io/projected/f7afe775-7207-45aa-9570-ca9be0110d0f-kube-api-access-zpvzr\") pod \"redhat-operators-wpnpq\" (UID: \"f7afe775-7207-45aa-9570-ca9be0110d0f\") " pod="openshift-marketplace/redhat-operators-wpnpq" Jan 22 05:58:51 crc kubenswrapper[4933]: I0122 05:58:51.716815 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wpnpq" Jan 22 05:58:51 crc kubenswrapper[4933]: I0122 05:58:51.971221 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wpnpq"] Jan 22 05:58:52 crc kubenswrapper[4933]: I0122 05:58:52.509390 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="63e15a9d-3476-43c1-93e0-6453f0fc9adb" path="/var/lib/kubelet/pods/63e15a9d-3476-43c1-93e0-6453f0fc9adb/volumes" Jan 22 05:58:52 crc kubenswrapper[4933]: I0122 05:58:52.722755 4933 generic.go:334] "Generic (PLEG): container finished" podID="f7afe775-7207-45aa-9570-ca9be0110d0f" containerID="e697021c9c202ab5df036bf2fd30ce83af1fb5bd275071bb8be97d0ad008b4d2" exitCode=0 Jan 22 05:58:52 crc kubenswrapper[4933]: I0122 05:58:52.722811 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wpnpq" event={"ID":"f7afe775-7207-45aa-9570-ca9be0110d0f","Type":"ContainerDied","Data":"e697021c9c202ab5df036bf2fd30ce83af1fb5bd275071bb8be97d0ad008b4d2"} Jan 22 05:58:52 crc kubenswrapper[4933]: I0122 05:58:52.722862 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wpnpq" event={"ID":"f7afe775-7207-45aa-9570-ca9be0110d0f","Type":"ContainerStarted","Data":"1af7848e95930994b4df8c10d945e4e9d8ecc33bab3a081e4e833a5b0f449552"} Jan 22 05:58:52 crc kubenswrapper[4933]: I0122 05:58:52.725300 4933 generic.go:334] "Generic (PLEG): container finished" podID="d078e767-9507-4d8c-a559-3c9df7a99923" containerID="34990b872a0c3abdbf054f3d304373d70472b1f425f2a209b932ac6d212c1895" exitCode=0 Jan 22 05:58:52 crc kubenswrapper[4933]: I0122 05:58:52.725346 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrnt57" event={"ID":"d078e767-9507-4d8c-a559-3c9df7a99923","Type":"ContainerDied","Data":"34990b872a0c3abdbf054f3d304373d70472b1f425f2a209b932ac6d212c1895"} Jan 22 05:58:53 crc kubenswrapper[4933]: I0122 05:58:53.731613 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wpnpq" event={"ID":"f7afe775-7207-45aa-9570-ca9be0110d0f","Type":"ContainerStarted","Data":"9cf148680a9725e5046120b7c2937effa6b784044b8d30dfeb165584a71ff2a5"} Jan 22 05:58:53 crc kubenswrapper[4933]: I0122 05:58:53.733654 4933 generic.go:334] "Generic (PLEG): container finished" podID="d078e767-9507-4d8c-a559-3c9df7a99923" containerID="868e05f2446f2f653b5e276fb56c782c3aa5e49ab5deb30c8abd9b3570b39f59" exitCode=0 Jan 22 05:58:53 crc kubenswrapper[4933]: I0122 05:58:53.733692 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrnt57" event={"ID":"d078e767-9507-4d8c-a559-3c9df7a99923","Type":"ContainerDied","Data":"868e05f2446f2f653b5e276fb56c782c3aa5e49ab5deb30c8abd9b3570b39f59"} Jan 22 05:58:54 crc kubenswrapper[4933]: I0122 05:58:54.744233 4933 generic.go:334] "Generic (PLEG): container finished" podID="f7afe775-7207-45aa-9570-ca9be0110d0f" 
containerID="9cf148680a9725e5046120b7c2937effa6b784044b8d30dfeb165584a71ff2a5" exitCode=0 Jan 22 05:58:54 crc kubenswrapper[4933]: I0122 05:58:54.744294 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wpnpq" event={"ID":"f7afe775-7207-45aa-9570-ca9be0110d0f","Type":"ContainerDied","Data":"9cf148680a9725e5046120b7c2937effa6b784044b8d30dfeb165584a71ff2a5"} Jan 22 05:58:55 crc kubenswrapper[4933]: I0122 05:58:55.033594 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrnt57" Jan 22 05:58:55 crc kubenswrapper[4933]: I0122 05:58:55.074810 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcls2\" (UniqueName: \"kubernetes.io/projected/d078e767-9507-4d8c-a559-3c9df7a99923-kube-api-access-pcls2\") pod \"d078e767-9507-4d8c-a559-3c9df7a99923\" (UID: \"d078e767-9507-4d8c-a559-3c9df7a99923\") " Jan 22 05:58:55 crc kubenswrapper[4933]: I0122 05:58:55.074893 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d078e767-9507-4d8c-a559-3c9df7a99923-bundle\") pod \"d078e767-9507-4d8c-a559-3c9df7a99923\" (UID: \"d078e767-9507-4d8c-a559-3c9df7a99923\") " Jan 22 05:58:55 crc kubenswrapper[4933]: I0122 05:58:55.074941 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d078e767-9507-4d8c-a559-3c9df7a99923-util\") pod \"d078e767-9507-4d8c-a559-3c9df7a99923\" (UID: \"d078e767-9507-4d8c-a559-3c9df7a99923\") " Jan 22 05:58:55 crc kubenswrapper[4933]: I0122 05:58:55.077840 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d078e767-9507-4d8c-a559-3c9df7a99923-bundle" (OuterVolumeSpecName: "bundle") pod "d078e767-9507-4d8c-a559-3c9df7a99923" (UID: "d078e767-9507-4d8c-a559-3c9df7a99923"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:58:55 crc kubenswrapper[4933]: I0122 05:58:55.081827 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d078e767-9507-4d8c-a559-3c9df7a99923-kube-api-access-pcls2" (OuterVolumeSpecName: "kube-api-access-pcls2") pod "d078e767-9507-4d8c-a559-3c9df7a99923" (UID: "d078e767-9507-4d8c-a559-3c9df7a99923"). InnerVolumeSpecName "kube-api-access-pcls2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:58:55 crc kubenswrapper[4933]: I0122 05:58:55.089300 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d078e767-9507-4d8c-a559-3c9df7a99923-util" (OuterVolumeSpecName: "util") pod "d078e767-9507-4d8c-a559-3c9df7a99923" (UID: "d078e767-9507-4d8c-a559-3c9df7a99923"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 05:58:55 crc kubenswrapper[4933]: I0122 05:58:55.175723 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcls2\" (UniqueName: \"kubernetes.io/projected/d078e767-9507-4d8c-a559-3c9df7a99923-kube-api-access-pcls2\") on node \"crc\" DevicePath \"\""
Jan 22 05:58:55 crc kubenswrapper[4933]: I0122 05:58:55.175752 4933 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d078e767-9507-4d8c-a559-3c9df7a99923-bundle\") on node \"crc\" DevicePath \"\""
Jan 22 05:58:55 crc kubenswrapper[4933]: I0122 05:58:55.175764 4933 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d078e767-9507-4d8c-a559-3c9df7a99923-util\") on node \"crc\" DevicePath \"\""
Jan 22 05:58:55 crc kubenswrapper[4933]: I0122 05:58:55.758744 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrnt57" event={"ID":"d078e767-9507-4d8c-a559-3c9df7a99923","Type":"ContainerDied","Data":"b6b20fd1cbb195cb12b0e44d9ec1c7541ccc3539691776b5f5d4c9e555cf23ab"}
Jan 22 05:58:55 crc kubenswrapper[4933]: I0122 05:58:55.759183 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b6b20fd1cbb195cb12b0e44d9ec1c7541ccc3539691776b5f5d4c9e555cf23ab"
Jan 22 05:58:55 crc kubenswrapper[4933]: I0122 05:58:55.758888 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrnt57"
Jan 22 05:58:56 crc kubenswrapper[4933]: I0122 05:58:56.768697 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wpnpq" event={"ID":"f7afe775-7207-45aa-9570-ca9be0110d0f","Type":"ContainerStarted","Data":"ecfd452410c4d777c0a7faa1777dbe4f45148f0fdb9fd2863bb3a6a0126a82c3"}
Jan 22 05:58:56 crc kubenswrapper[4933]: I0122 05:58:56.798422 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wpnpq" podStartSLOduration=3.142514454 podStartE2EDuration="5.798406871s" podCreationTimestamp="2026-01-22 05:58:51 +0000 UTC" firstStartedPulling="2026-01-22 05:58:52.725285763 +0000 UTC m=+780.562411136" lastFinishedPulling="2026-01-22 05:58:55.38117816 +0000 UTC m=+783.218303553" observedRunningTime="2026-01-22 05:58:56.797307384 +0000 UTC m=+784.634432767" watchObservedRunningTime="2026-01-22 05:58:56.798406871 +0000 UTC m=+784.635532234"
Jan 22 05:59:01 crc kubenswrapper[4933]: I0122 05:59:01.717550 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-wpnpq"
Jan 22 05:59:01 crc kubenswrapper[4933]: I0122 05:59:01.717908 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wpnpq"
Jan 22 05:59:02 crc kubenswrapper[4933]: I0122 05:59:02.778832 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wpnpq" podUID="f7afe775-7207-45aa-9570-ca9be0110d0f" containerName="registry-server" probeResult="failure" output=<
Jan 22 05:59:02 crc kubenswrapper[4933]: timeout: failed to connect service ":50051" within 1s
Jan 22 05:59:02 crc kubenswrapper[4933]: >
Jan 22 05:59:04 crc kubenswrapper[4933]: I0122 05:59:04.379550 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-6c8b57987c-qr4jc"]
Jan 22 05:59:04 crc kubenswrapper[4933]: E0122 05:59:04.380387 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d078e767-9507-4d8c-a559-3c9df7a99923" containerName="util"
Jan 22 05:59:04 crc kubenswrapper[4933]: I0122 05:59:04.380408 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="d078e767-9507-4d8c-a559-3c9df7a99923" containerName="util"
Jan 22 05:59:04 crc kubenswrapper[4933]: E0122 05:59:04.380428 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d078e767-9507-4d8c-a559-3c9df7a99923" containerName="extract"
Jan 22 05:59:04 crc kubenswrapper[4933]: I0122 05:59:04.380439 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="d078e767-9507-4d8c-a559-3c9df7a99923" containerName="extract"
Jan 22 05:59:04 crc kubenswrapper[4933]: E0122 05:59:04.380459 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d078e767-9507-4d8c-a559-3c9df7a99923" containerName="pull"
Jan 22 05:59:04 crc kubenswrapper[4933]: I0122 05:59:04.380470 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="d078e767-9507-4d8c-a559-3c9df7a99923" containerName="pull"
Jan 22 05:59:04 crc kubenswrapper[4933]: I0122 05:59:04.380639 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="d078e767-9507-4d8c-a559-3c9df7a99923" containerName="extract"
Jan 22 05:59:04 crc kubenswrapper[4933]: I0122 05:59:04.381421 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-6c8b57987c-qr4jc"
Jan 22 05:59:04 crc kubenswrapper[4933]: I0122 05:59:04.383906 4933 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert"
Jan 22 05:59:04 crc kubenswrapper[4933]: I0122 05:59:04.385013 4933 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert"
Jan 22 05:59:04 crc kubenswrapper[4933]: I0122 05:59:04.385363 4933 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-scmtx"
Jan 22 05:59:04 crc kubenswrapper[4933]: I0122 05:59:04.385720 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt"
Jan 22 05:59:04 crc kubenswrapper[4933]: I0122 05:59:04.385947 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt"
Jan 22 05:59:04 crc kubenswrapper[4933]: I0122 05:59:04.399114 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-6c8b57987c-qr4jc"]
Jan 22 05:59:04 crc kubenswrapper[4933]: I0122 05:59:04.496825 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2rbjx\" (UniqueName: \"kubernetes.io/projected/9f8987dd-79ca-4569-8000-088df75be06e-kube-api-access-2rbjx\") pod \"metallb-operator-controller-manager-6c8b57987c-qr4jc\" (UID: \"9f8987dd-79ca-4569-8000-088df75be06e\") " pod="metallb-system/metallb-operator-controller-manager-6c8b57987c-qr4jc"
Jan 22 05:59:04 crc kubenswrapper[4933]: I0122 05:59:04.496921 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/9f8987dd-79ca-4569-8000-088df75be06e-webhook-cert\") pod \"metallb-operator-controller-manager-6c8b57987c-qr4jc\" (UID: \"9f8987dd-79ca-4569-8000-088df75be06e\") " pod="metallb-system/metallb-operator-controller-manager-6c8b57987c-qr4jc"
Jan 22 05:59:04 crc kubenswrapper[4933]: I0122 05:59:04.496942 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/9f8987dd-79ca-4569-8000-088df75be06e-apiservice-cert\") pod \"metallb-operator-controller-manager-6c8b57987c-qr4jc\" (UID: \"9f8987dd-79ca-4569-8000-088df75be06e\") " pod="metallb-system/metallb-operator-controller-manager-6c8b57987c-qr4jc"
Jan 22 05:59:04 crc kubenswrapper[4933]: I0122 05:59:04.598376 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/9f8987dd-79ca-4569-8000-088df75be06e-webhook-cert\") pod \"metallb-operator-controller-manager-6c8b57987c-qr4jc\" (UID: \"9f8987dd-79ca-4569-8000-088df75be06e\") " pod="metallb-system/metallb-operator-controller-manager-6c8b57987c-qr4jc"
Jan 22 05:59:04 crc kubenswrapper[4933]: I0122 05:59:04.598446 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/9f8987dd-79ca-4569-8000-088df75be06e-apiservice-cert\") pod \"metallb-operator-controller-manager-6c8b57987c-qr4jc\" (UID: \"9f8987dd-79ca-4569-8000-088df75be06e\") " pod="metallb-system/metallb-operator-controller-manager-6c8b57987c-qr4jc"
Jan 22 05:59:04 crc kubenswrapper[4933]: I0122 05:59:04.598515 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2rbjx\" (UniqueName: \"kubernetes.io/projected/9f8987dd-79ca-4569-8000-088df75be06e-kube-api-access-2rbjx\") pod \"metallb-operator-controller-manager-6c8b57987c-qr4jc\" (UID: \"9f8987dd-79ca-4569-8000-088df75be06e\") " pod="metallb-system/metallb-operator-controller-manager-6c8b57987c-qr4jc"
Jan 22 05:59:04 crc kubenswrapper[4933]: I0122 05:59:04.609978 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/9f8987dd-79ca-4569-8000-088df75be06e-webhook-cert\") pod \"metallb-operator-controller-manager-6c8b57987c-qr4jc\" (UID: \"9f8987dd-79ca-4569-8000-088df75be06e\") " pod="metallb-system/metallb-operator-controller-manager-6c8b57987c-qr4jc"
Jan 22 05:59:04 crc kubenswrapper[4933]: I0122 05:59:04.618945 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2rbjx\" (UniqueName: \"kubernetes.io/projected/9f8987dd-79ca-4569-8000-088df75be06e-kube-api-access-2rbjx\") pod \"metallb-operator-controller-manager-6c8b57987c-qr4jc\" (UID: \"9f8987dd-79ca-4569-8000-088df75be06e\") " pod="metallb-system/metallb-operator-controller-manager-6c8b57987c-qr4jc"
Jan 22 05:59:04 crc kubenswrapper[4933]: I0122 05:59:04.619564 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/9f8987dd-79ca-4569-8000-088df75be06e-apiservice-cert\") pod \"metallb-operator-controller-manager-6c8b57987c-qr4jc\" (UID: \"9f8987dd-79ca-4569-8000-088df75be06e\") " pod="metallb-system/metallb-operator-controller-manager-6c8b57987c-qr4jc"
Jan 22 05:59:04 crc kubenswrapper[4933]: I0122 05:59:04.699501 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-6c8b57987c-qr4jc"
Jan 22 05:59:04 crc kubenswrapper[4933]: I0122 05:59:04.708722 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-688dc4b4d8-9z74g"]
Jan 22 05:59:04 crc kubenswrapper[4933]: I0122 05:59:04.709527 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-688dc4b4d8-9z74g"
Jan 22 05:59:04 crc kubenswrapper[4933]: I0122 05:59:04.711768 4933 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert"
Jan 22 05:59:04 crc kubenswrapper[4933]: I0122 05:59:04.712755 4933 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-z64tv"
Jan 22 05:59:04 crc kubenswrapper[4933]: I0122 05:59:04.714204 4933 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert"
Jan 22 05:59:04 crc kubenswrapper[4933]: I0122 05:59:04.724654 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-688dc4b4d8-9z74g"]
Jan 22 05:59:04 crc kubenswrapper[4933]: I0122 05:59:04.805810 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/fe7410b0-e355-498e-841e-89ae7f5e56de-apiservice-cert\") pod \"metallb-operator-webhook-server-688dc4b4d8-9z74g\" (UID: \"fe7410b0-e355-498e-841e-89ae7f5e56de\") " pod="metallb-system/metallb-operator-webhook-server-688dc4b4d8-9z74g"
Jan 22 05:59:04 crc kubenswrapper[4933]: I0122 05:59:04.805860 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l8hhx\" (UniqueName: \"kubernetes.io/projected/fe7410b0-e355-498e-841e-89ae7f5e56de-kube-api-access-l8hhx\") pod \"metallb-operator-webhook-server-688dc4b4d8-9z74g\" (UID: \"fe7410b0-e355-498e-841e-89ae7f5e56de\") " pod="metallb-system/metallb-operator-webhook-server-688dc4b4d8-9z74g"
Jan 22 05:59:04 crc kubenswrapper[4933]: I0122 05:59:04.805939 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/fe7410b0-e355-498e-841e-89ae7f5e56de-webhook-cert\") pod \"metallb-operator-webhook-server-688dc4b4d8-9z74g\" (UID: \"fe7410b0-e355-498e-841e-89ae7f5e56de\") " pod="metallb-system/metallb-operator-webhook-server-688dc4b4d8-9z74g"
Jan 22 05:59:04 crc kubenswrapper[4933]: I0122 05:59:04.909262 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/fe7410b0-e355-498e-841e-89ae7f5e56de-apiservice-cert\") pod \"metallb-operator-webhook-server-688dc4b4d8-9z74g\" (UID: \"fe7410b0-e355-498e-841e-89ae7f5e56de\") " pod="metallb-system/metallb-operator-webhook-server-688dc4b4d8-9z74g"
Jan 22 05:59:04 crc kubenswrapper[4933]: I0122 05:59:04.909580 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l8hhx\" (UniqueName: \"kubernetes.io/projected/fe7410b0-e355-498e-841e-89ae7f5e56de-kube-api-access-l8hhx\") pod \"metallb-operator-webhook-server-688dc4b4d8-9z74g\" (UID: \"fe7410b0-e355-498e-841e-89ae7f5e56de\") " pod="metallb-system/metallb-operator-webhook-server-688dc4b4d8-9z74g"
Jan 22 05:59:04 crc kubenswrapper[4933]: I0122 05:59:04.909661 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/fe7410b0-e355-498e-841e-89ae7f5e56de-webhook-cert\") pod \"metallb-operator-webhook-server-688dc4b4d8-9z74g\" (UID: \"fe7410b0-e355-498e-841e-89ae7f5e56de\") " pod="metallb-system/metallb-operator-webhook-server-688dc4b4d8-9z74g"
Jan 22 05:59:04 crc kubenswrapper[4933]: I0122 05:59:04.917919 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/fe7410b0-e355-498e-841e-89ae7f5e56de-apiservice-cert\") pod \"metallb-operator-webhook-server-688dc4b4d8-9z74g\" (UID: \"fe7410b0-e355-498e-841e-89ae7f5e56de\") " pod="metallb-system/metallb-operator-webhook-server-688dc4b4d8-9z74g"
Jan 22 05:59:04 crc kubenswrapper[4933]: I0122 05:59:04.919001 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/fe7410b0-e355-498e-841e-89ae7f5e56de-webhook-cert\") pod \"metallb-operator-webhook-server-688dc4b4d8-9z74g\" (UID: \"fe7410b0-e355-498e-841e-89ae7f5e56de\") " pod="metallb-system/metallb-operator-webhook-server-688dc4b4d8-9z74g"
Jan 22 05:59:04 crc kubenswrapper[4933]: I0122 05:59:04.927526 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l8hhx\" (UniqueName: \"kubernetes.io/projected/fe7410b0-e355-498e-841e-89ae7f5e56de-kube-api-access-l8hhx\") pod \"metallb-operator-webhook-server-688dc4b4d8-9z74g\" (UID: \"fe7410b0-e355-498e-841e-89ae7f5e56de\") " pod="metallb-system/metallb-operator-webhook-server-688dc4b4d8-9z74g"
Jan 22 05:59:05 crc kubenswrapper[4933]: I0122 05:59:05.021178 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-6c8b57987c-qr4jc"]
Jan 22 05:59:05 crc kubenswrapper[4933]: W0122 05:59:05.023702 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9f8987dd_79ca_4569_8000_088df75be06e.slice/crio-aa5f05046eaaaad7eb6feaecbebf882e7646ca0b8d755f7177dc300b1a91f869 WatchSource:0}: Error finding container aa5f05046eaaaad7eb6feaecbebf882e7646ca0b8d755f7177dc300b1a91f869: Status 404 returned error can't find the container with id aa5f05046eaaaad7eb6feaecbebf882e7646ca0b8d755f7177dc300b1a91f869
Jan 22 05:59:05 crc kubenswrapper[4933]: I0122 05:59:05.054935 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-688dc4b4d8-9z74g"
Jan 22 05:59:05 crc kubenswrapper[4933]: I0122 05:59:05.330222 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-688dc4b4d8-9z74g"]
Jan 22 05:59:05 crc kubenswrapper[4933]: W0122 05:59:05.342582 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfe7410b0_e355_498e_841e_89ae7f5e56de.slice/crio-0779ce1419b9608e0c59af4e55c47e97768b08f247932f52a6c13f101e5f0215 WatchSource:0}: Error finding container 0779ce1419b9608e0c59af4e55c47e97768b08f247932f52a6c13f101e5f0215: Status 404 returned error can't find the container with id 0779ce1419b9608e0c59af4e55c47e97768b08f247932f52a6c13f101e5f0215
Jan 22 05:59:05 crc kubenswrapper[4933]: I0122 05:59:05.826802 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6c8b57987c-qr4jc" event={"ID":"9f8987dd-79ca-4569-8000-088df75be06e","Type":"ContainerStarted","Data":"aa5f05046eaaaad7eb6feaecbebf882e7646ca0b8d755f7177dc300b1a91f869"}
Jan 22 05:59:05 crc kubenswrapper[4933]: I0122 05:59:05.828519 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-688dc4b4d8-9z74g" event={"ID":"fe7410b0-e355-498e-841e-89ae7f5e56de","Type":"ContainerStarted","Data":"0779ce1419b9608e0c59af4e55c47e97768b08f247932f52a6c13f101e5f0215"}
Jan 22 05:59:10 crc kubenswrapper[4933]: I0122 05:59:10.943256 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 05:59:10 crc kubenswrapper[4933]: I0122 05:59:10.944144 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 05:59:11 crc kubenswrapper[4933]: I0122 05:59:11.778042 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wpnpq"
Jan 22 05:59:11 crc kubenswrapper[4933]: I0122 05:59:11.833842 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-wpnpq"
Jan 22 05:59:11 crc kubenswrapper[4933]: I0122 05:59:11.865366 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-688dc4b4d8-9z74g" event={"ID":"fe7410b0-e355-498e-841e-89ae7f5e56de","Type":"ContainerStarted","Data":"a33eb8f8a1e75b4f826e1d6c5d9e264fb46bef662431f3294cf124ee96c5bd52"}
Jan 22 05:59:11 crc kubenswrapper[4933]: I0122 05:59:11.865454 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-688dc4b4d8-9z74g"
Jan 22 05:59:11 crc kubenswrapper[4933]: I0122 05:59:11.867541 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6c8b57987c-qr4jc" event={"ID":"9f8987dd-79ca-4569-8000-088df75be06e","Type":"ContainerStarted","Data":"3d96c3c79648343fbf2a9a7c4f61a73369b01c3a273a43f9f17e47cd7c1abdf0"}
Jan 22 05:59:11 crc kubenswrapper[4933]: I0122 05:59:11.896600 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-688dc4b4d8-9z74g" podStartSLOduration=2.328290082 podStartE2EDuration="7.896575036s" podCreationTimestamp="2026-01-22 05:59:04 +0000 UTC" firstStartedPulling="2026-01-22 05:59:05.344487982 +0000 UTC m=+793.181613335" lastFinishedPulling="2026-01-22 05:59:10.912772936 +0000 UTC m=+798.749898289" observedRunningTime="2026-01-22 05:59:11.887753906 +0000 UTC m=+799.724879289" watchObservedRunningTime="2026-01-22 05:59:11.896575036 +0000 UTC m=+799.733700409"
Jan 22 05:59:11 crc kubenswrapper[4933]: I0122 05:59:11.941423 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-6c8b57987c-qr4jc" podStartSLOduration=2.058735478 podStartE2EDuration="7.941406244s" podCreationTimestamp="2026-01-22 05:59:04 +0000 UTC" firstStartedPulling="2026-01-22 05:59:05.025531356 +0000 UTC m=+792.862656719" lastFinishedPulling="2026-01-22 05:59:10.908202132 +0000 UTC m=+798.745327485" observedRunningTime="2026-01-22 05:59:11.938811019 +0000 UTC m=+799.775936402" watchObservedRunningTime="2026-01-22 05:59:11.941406244 +0000 UTC m=+799.778531607"
Jan 22 05:59:12 crc kubenswrapper[4933]: I0122 05:59:12.025196 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wpnpq"]
Jan 22 05:59:12 crc kubenswrapper[4933]: I0122 05:59:12.872768 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-wpnpq" podUID="f7afe775-7207-45aa-9570-ca9be0110d0f" containerName="registry-server" containerID="cri-o://ecfd452410c4d777c0a7faa1777dbe4f45148f0fdb9fd2863bb3a6a0126a82c3" gracePeriod=2
Jan 22 05:59:12 crc kubenswrapper[4933]: I0122 05:59:12.873173 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-6c8b57987c-qr4jc"
Jan 22 05:59:13 crc kubenswrapper[4933]: I0122 05:59:13.280247 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wpnpq"
Jan 22 05:59:13 crc kubenswrapper[4933]: I0122 05:59:13.479039 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7afe775-7207-45aa-9570-ca9be0110d0f-utilities\") pod \"f7afe775-7207-45aa-9570-ca9be0110d0f\" (UID: \"f7afe775-7207-45aa-9570-ca9be0110d0f\") "
Jan 22 05:59:13 crc kubenswrapper[4933]: I0122 05:59:13.479097 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7afe775-7207-45aa-9570-ca9be0110d0f-catalog-content\") pod \"f7afe775-7207-45aa-9570-ca9be0110d0f\" (UID: \"f7afe775-7207-45aa-9570-ca9be0110d0f\") "
Jan 22 05:59:13 crc kubenswrapper[4933]: I0122 05:59:13.479152 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zpvzr\" (UniqueName: \"kubernetes.io/projected/f7afe775-7207-45aa-9570-ca9be0110d0f-kube-api-access-zpvzr\") pod \"f7afe775-7207-45aa-9570-ca9be0110d0f\" (UID: \"f7afe775-7207-45aa-9570-ca9be0110d0f\") "
Jan 22 05:59:13 crc kubenswrapper[4933]: I0122 05:59:13.480649 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f7afe775-7207-45aa-9570-ca9be0110d0f-utilities" (OuterVolumeSpecName: "utilities") pod "f7afe775-7207-45aa-9570-ca9be0110d0f" (UID: "f7afe775-7207-45aa-9570-ca9be0110d0f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 05:59:13 crc kubenswrapper[4933]: I0122 05:59:13.494309 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7afe775-7207-45aa-9570-ca9be0110d0f-kube-api-access-zpvzr" (OuterVolumeSpecName: "kube-api-access-zpvzr") pod "f7afe775-7207-45aa-9570-ca9be0110d0f" (UID: "f7afe775-7207-45aa-9570-ca9be0110d0f"). InnerVolumeSpecName "kube-api-access-zpvzr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 05:59:13 crc kubenswrapper[4933]: I0122 05:59:13.580151 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7afe775-7207-45aa-9570-ca9be0110d0f-utilities\") on node \"crc\" DevicePath \"\""
Jan 22 05:59:13 crc kubenswrapper[4933]: I0122 05:59:13.580192 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zpvzr\" (UniqueName: \"kubernetes.io/projected/f7afe775-7207-45aa-9570-ca9be0110d0f-kube-api-access-zpvzr\") on node \"crc\" DevicePath \"\""
Jan 22 05:59:13 crc kubenswrapper[4933]: I0122 05:59:13.601501 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f7afe775-7207-45aa-9570-ca9be0110d0f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f7afe775-7207-45aa-9570-ca9be0110d0f" (UID: "f7afe775-7207-45aa-9570-ca9be0110d0f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 05:59:13 crc kubenswrapper[4933]: I0122 05:59:13.681048 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7afe775-7207-45aa-9570-ca9be0110d0f-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 22 05:59:13 crc kubenswrapper[4933]: I0122 05:59:13.881594 4933 generic.go:334] "Generic (PLEG): container finished" podID="f7afe775-7207-45aa-9570-ca9be0110d0f" containerID="ecfd452410c4d777c0a7faa1777dbe4f45148f0fdb9fd2863bb3a6a0126a82c3" exitCode=0
Jan 22 05:59:13 crc kubenswrapper[4933]: I0122 05:59:13.881641 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wpnpq" event={"ID":"f7afe775-7207-45aa-9570-ca9be0110d0f","Type":"ContainerDied","Data":"ecfd452410c4d777c0a7faa1777dbe4f45148f0fdb9fd2863bb3a6a0126a82c3"}
Jan 22 05:59:13 crc kubenswrapper[4933]: I0122 05:59:13.881703 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wpnpq" event={"ID":"f7afe775-7207-45aa-9570-ca9be0110d0f","Type":"ContainerDied","Data":"1af7848e95930994b4df8c10d945e4e9d8ecc33bab3a081e4e833a5b0f449552"}
Jan 22 05:59:13 crc kubenswrapper[4933]: I0122 05:59:13.881700 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wpnpq"
Jan 22 05:59:13 crc kubenswrapper[4933]: I0122 05:59:13.881784 4933 scope.go:117] "RemoveContainer" containerID="ecfd452410c4d777c0a7faa1777dbe4f45148f0fdb9fd2863bb3a6a0126a82c3"
Jan 22 05:59:13 crc kubenswrapper[4933]: I0122 05:59:13.912297 4933 scope.go:117] "RemoveContainer" containerID="9cf148680a9725e5046120b7c2937effa6b784044b8d30dfeb165584a71ff2a5"
Jan 22 05:59:13 crc kubenswrapper[4933]: I0122 05:59:13.926294 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wpnpq"]
Jan 22 05:59:13 crc kubenswrapper[4933]: I0122 05:59:13.931502 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-wpnpq"]
Jan 22 05:59:13 crc kubenswrapper[4933]: I0122 05:59:13.937940 4933 scope.go:117] "RemoveContainer" containerID="e697021c9c202ab5df036bf2fd30ce83af1fb5bd275071bb8be97d0ad008b4d2"
Jan 22 05:59:13 crc kubenswrapper[4933]: I0122 05:59:13.963136 4933 scope.go:117] "RemoveContainer" containerID="ecfd452410c4d777c0a7faa1777dbe4f45148f0fdb9fd2863bb3a6a0126a82c3"
Jan 22 05:59:13 crc kubenswrapper[4933]: E0122 05:59:13.963550 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ecfd452410c4d777c0a7faa1777dbe4f45148f0fdb9fd2863bb3a6a0126a82c3\": container with ID starting with ecfd452410c4d777c0a7faa1777dbe4f45148f0fdb9fd2863bb3a6a0126a82c3 not found: ID does not exist" containerID="ecfd452410c4d777c0a7faa1777dbe4f45148f0fdb9fd2863bb3a6a0126a82c3"
Jan 22 05:59:13 crc kubenswrapper[4933]: I0122 05:59:13.963590 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ecfd452410c4d777c0a7faa1777dbe4f45148f0fdb9fd2863bb3a6a0126a82c3"} err="failed to get container status \"ecfd452410c4d777c0a7faa1777dbe4f45148f0fdb9fd2863bb3a6a0126a82c3\": rpc error: code = NotFound desc = could not find container \"ecfd452410c4d777c0a7faa1777dbe4f45148f0fdb9fd2863bb3a6a0126a82c3\": container with ID starting with ecfd452410c4d777c0a7faa1777dbe4f45148f0fdb9fd2863bb3a6a0126a82c3 not found: ID does not exist"
Jan 22 05:59:13 crc kubenswrapper[4933]: I0122 05:59:13.963614 4933 scope.go:117] "RemoveContainer" containerID="9cf148680a9725e5046120b7c2937effa6b784044b8d30dfeb165584a71ff2a5"
Jan 22 05:59:13 crc kubenswrapper[4933]: E0122 05:59:13.963825 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9cf148680a9725e5046120b7c2937effa6b784044b8d30dfeb165584a71ff2a5\": container with ID starting with 9cf148680a9725e5046120b7c2937effa6b784044b8d30dfeb165584a71ff2a5 not found: ID does not exist" containerID="9cf148680a9725e5046120b7c2937effa6b784044b8d30dfeb165584a71ff2a5"
Jan 22 05:59:13 crc kubenswrapper[4933]: I0122 05:59:13.963853 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9cf148680a9725e5046120b7c2937effa6b784044b8d30dfeb165584a71ff2a5"} err="failed to get container status \"9cf148680a9725e5046120b7c2937effa6b784044b8d30dfeb165584a71ff2a5\": rpc error: code = NotFound desc = could not find container \"9cf148680a9725e5046120b7c2937effa6b784044b8d30dfeb165584a71ff2a5\": container with ID starting with 9cf148680a9725e5046120b7c2937effa6b784044b8d30dfeb165584a71ff2a5 not found: ID does not exist"
Jan 22 05:59:13 crc kubenswrapper[4933]: I0122 05:59:13.963868 4933 scope.go:117] "RemoveContainer" containerID="e697021c9c202ab5df036bf2fd30ce83af1fb5bd275071bb8be97d0ad008b4d2"
Jan 22 05:59:13 crc kubenswrapper[4933]: E0122 05:59:13.964059 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e697021c9c202ab5df036bf2fd30ce83af1fb5bd275071bb8be97d0ad008b4d2\": container with ID starting with e697021c9c202ab5df036bf2fd30ce83af1fb5bd275071bb8be97d0ad008b4d2 not found: ID does not exist" containerID="e697021c9c202ab5df036bf2fd30ce83af1fb5bd275071bb8be97d0ad008b4d2"
Jan 22 05:59:13 crc kubenswrapper[4933]: I0122 05:59:13.964101 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e697021c9c202ab5df036bf2fd30ce83af1fb5bd275071bb8be97d0ad008b4d2"} err="failed to get container status \"e697021c9c202ab5df036bf2fd30ce83af1fb5bd275071bb8be97d0ad008b4d2\": rpc error: code = NotFound desc = could not find container \"e697021c9c202ab5df036bf2fd30ce83af1fb5bd275071bb8be97d0ad008b4d2\": container with ID starting with e697021c9c202ab5df036bf2fd30ce83af1fb5bd275071bb8be97d0ad008b4d2 not found: ID does not exist"
Jan 22 05:59:14 crc kubenswrapper[4933]: I0122 05:59:14.496901 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7afe775-7207-45aa-9570-ca9be0110d0f" path="/var/lib/kubelet/pods/f7afe775-7207-45aa-9570-ca9be0110d0f/volumes"
Jan 22 05:59:25 crc kubenswrapper[4933]: I0122 05:59:25.061273 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-688dc4b4d8-9z74g"
Jan 22 05:59:40 crc kubenswrapper[4933]: I0122 05:59:40.942928 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 05:59:40 crc kubenswrapper[4933]: I0122 05:59:40.943568 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 05:59:44 crc kubenswrapper[4933]: I0122 05:59:44.703506 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-6c8b57987c-qr4jc"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.519105 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-tgvxb"]
Jan 22 05:59:45 crc kubenswrapper[4933]: E0122 05:59:45.519466 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7afe775-7207-45aa-9570-ca9be0110d0f" containerName="extract-content"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.519493 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7afe775-7207-45aa-9570-ca9be0110d0f" containerName="extract-content"
Jan 22 05:59:45 crc kubenswrapper[4933]: E0122 05:59:45.519525 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7afe775-7207-45aa-9570-ca9be0110d0f" containerName="registry-server"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.519537 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7afe775-7207-45aa-9570-ca9be0110d0f" containerName="registry-server"
Jan 22 05:59:45 crc kubenswrapper[4933]: E0122 05:59:45.519555 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7afe775-7207-45aa-9570-ca9be0110d0f" containerName="extract-utilities"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.519568 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7afe775-7207-45aa-9570-ca9be0110d0f" containerName="extract-utilities"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.519745 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="f7afe775-7207-45aa-9570-ca9be0110d0f" containerName="registry-server"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.522691 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-tgvxb"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.530454 4933 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-5lsxw"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.530487 4933 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.530484 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.536245 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-rjx6j"]
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.537177 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-rjx6j"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.539504 4933 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.546363 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-rjx6j"]
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.620525 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-xlk7v"]
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.621587 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-xlk7v"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.624296 4933 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.624312 4933 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-6l42n"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.624341 4933 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.627374 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.632763 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6968d8fdc4-2stg8"]
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.633545 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6968d8fdc4-2stg8"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.635355 4933 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.639680 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-2stg8"]
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.642790 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6pftp\" (UniqueName: \"kubernetes.io/projected/f619cfe1-61b2-4726-a08f-b41cc24ae488-kube-api-access-6pftp\") pod \"frr-k8s-tgvxb\" (UID: \"f619cfe1-61b2-4726-a08f-b41cc24ae488\") " pod="metallb-system/frr-k8s-tgvxb"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.642844 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/f619cfe1-61b2-4726-a08f-b41cc24ae488-metrics\") pod \"frr-k8s-tgvxb\" (UID: \"f619cfe1-61b2-4726-a08f-b41cc24ae488\") " pod="metallb-system/frr-k8s-tgvxb"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.642895 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d5krx\" (UniqueName: \"kubernetes.io/projected/42d0dcd8-97a6-489b-9b19-43fd22936816-kube-api-access-d5krx\") pod \"frr-k8s-webhook-server-7df86c4f6c-rjx6j\" (UID: \"42d0dcd8-97a6-489b-9b19-43fd22936816\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-rjx6j"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.642934 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/42d0dcd8-97a6-489b-9b19-43fd22936816-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-rjx6j\" (UID: \"42d0dcd8-97a6-489b-9b19-43fd22936816\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-rjx6j"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.642952 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/f619cfe1-61b2-4726-a08f-b41cc24ae488-frr-sockets\") pod \"frr-k8s-tgvxb\" (UID: \"f619cfe1-61b2-4726-a08f-b41cc24ae488\") " pod="metallb-system/frr-k8s-tgvxb"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.642967 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/f619cfe1-61b2-4726-a08f-b41cc24ae488-frr-startup\") pod \"frr-k8s-tgvxb\" (UID: \"f619cfe1-61b2-4726-a08f-b41cc24ae488\") " pod="metallb-system/frr-k8s-tgvxb"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.643003 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/f619cfe1-61b2-4726-a08f-b41cc24ae488-frr-conf\") pod \"frr-k8s-tgvxb\" (UID: \"f619cfe1-61b2-4726-a08f-b41cc24ae488\") " pod="metallb-system/frr-k8s-tgvxb"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.643048 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f619cfe1-61b2-4726-a08f-b41cc24ae488-metrics-certs\") pod \"frr-k8s-tgvxb\" (UID: \"f619cfe1-61b2-4726-a08f-b41cc24ae488\") " pod="metallb-system/frr-k8s-tgvxb"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.643124 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/f619cfe1-61b2-4726-a08f-b41cc24ae488-reloader\") pod \"frr-k8s-tgvxb\" (UID: \"f619cfe1-61b2-4726-a08f-b41cc24ae488\") " pod="metallb-system/frr-k8s-tgvxb"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.744188 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/f619cfe1-61b2-4726-a08f-b41cc24ae488-frr-conf\") pod \"frr-k8s-tgvxb\" (UID: \"f619cfe1-61b2-4726-a08f-b41cc24ae488\") " pod="metallb-system/frr-k8s-tgvxb"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.744234 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/29f4dd7e-c2cf-4f64-87e8-2201fe99e751-metrics-certs\") pod \"controller-6968d8fdc4-2stg8\" (UID: \"29f4dd7e-c2cf-4f64-87e8-2201fe99e751\") " pod="metallb-system/controller-6968d8fdc4-2stg8"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.744265 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f619cfe1-61b2-4726-a08f-b41cc24ae488-metrics-certs\") pod \"frr-k8s-tgvxb\" (UID: \"f619cfe1-61b2-4726-a08f-b41cc24ae488\") " pod="metallb-system/frr-k8s-tgvxb"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.744305 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/f619cfe1-61b2-4726-a08f-b41cc24ae488-reloader\") pod \"frr-k8s-tgvxb\" (UID: \"f619cfe1-61b2-4726-a08f-b41cc24ae488\") " pod="metallb-system/frr-k8s-tgvxb"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.744321 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/29f4dd7e-c2cf-4f64-87e8-2201fe99e751-cert\") pod \"controller-6968d8fdc4-2stg8\" (UID: \"29f4dd7e-c2cf-4f64-87e8-2201fe99e751\") " pod="metallb-system/controller-6968d8fdc4-2stg8"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.744350 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/57ec97d6-d16e-4069-98cc-7dcf56910fad-metrics-certs\") pod \"speaker-xlk7v\" (UID: \"57ec97d6-d16e-4069-98cc-7dcf56910fad\") " pod="metallb-system/speaker-xlk7v"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.744366 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6pftp\" (UniqueName: \"kubernetes.io/projected/f619cfe1-61b2-4726-a08f-b41cc24ae488-kube-api-access-6pftp\") pod \"frr-k8s-tgvxb\" (UID: \"f619cfe1-61b2-4726-a08f-b41cc24ae488\") " pod="metallb-system/frr-k8s-tgvxb"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.744385 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/f619cfe1-61b2-4726-a08f-b41cc24ae488-metrics\") pod \"frr-k8s-tgvxb\" (UID: \"f619cfe1-61b2-4726-a08f-b41cc24ae488\") " pod="metallb-system/frr-k8s-tgvxb"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.744403 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/57ec97d6-d16e-4069-98cc-7dcf56910fad-metallb-excludel2\") pod \"speaker-xlk7v\" (UID: \"57ec97d6-d16e-4069-98cc-7dcf56910fad\") " pod="metallb-system/speaker-xlk7v"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.744428 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hss4f\" (UniqueName: \"kubernetes.io/projected/57ec97d6-d16e-4069-98cc-7dcf56910fad-kube-api-access-hss4f\") pod \"speaker-xlk7v\" (UID: \"57ec97d6-d16e-4069-98cc-7dcf56910fad\") " pod="metallb-system/speaker-xlk7v"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.744448 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/57ec97d6-d16e-4069-98cc-7dcf56910fad-memberlist\") pod \"speaker-xlk7v\" (UID: \"57ec97d6-d16e-4069-98cc-7dcf56910fad\") " pod="metallb-system/speaker-xlk7v"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.744469 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d5krx\" (UniqueName: \"kubernetes.io/projected/42d0dcd8-97a6-489b-9b19-43fd22936816-kube-api-access-d5krx\") pod \"frr-k8s-webhook-server-7df86c4f6c-rjx6j\" (UID: \"42d0dcd8-97a6-489b-9b19-43fd22936816\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-rjx6j"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.744489 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l6gks\" (UniqueName: \"kubernetes.io/projected/29f4dd7e-c2cf-4f64-87e8-2201fe99e751-kube-api-access-l6gks\") pod \"controller-6968d8fdc4-2stg8\" (UID: \"29f4dd7e-c2cf-4f64-87e8-2201fe99e751\") " pod="metallb-system/controller-6968d8fdc4-2stg8"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.744505 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/42d0dcd8-97a6-489b-9b19-43fd22936816-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-rjx6j\" (UID: \"42d0dcd8-97a6-489b-9b19-43fd22936816\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-rjx6j"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.744522 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/f619cfe1-61b2-4726-a08f-b41cc24ae488-frr-sockets\") pod \"frr-k8s-tgvxb\" (UID: \"f619cfe1-61b2-4726-a08f-b41cc24ae488\") " pod="metallb-system/frr-k8s-tgvxb"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.744539 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/f619cfe1-61b2-4726-a08f-b41cc24ae488-frr-startup\") pod \"frr-k8s-tgvxb\" (UID: \"f619cfe1-61b2-4726-a08f-b41cc24ae488\") " pod="metallb-system/frr-k8s-tgvxb"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.744615 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/f619cfe1-61b2-4726-a08f-b41cc24ae488-frr-conf\") pod \"frr-k8s-tgvxb\" (UID: \"f619cfe1-61b2-4726-a08f-b41cc24ae488\") " pod="metallb-system/frr-k8s-tgvxb"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.744812 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/f619cfe1-61b2-4726-a08f-b41cc24ae488-metrics\") pod \"frr-k8s-tgvxb\" (UID: \"f619cfe1-61b2-4726-a08f-b41cc24ae488\") " pod="metallb-system/frr-k8s-tgvxb"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.745323 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/f619cfe1-61b2-4726-a08f-b41cc24ae488-frr-startup\") pod \"frr-k8s-tgvxb\" (UID: \"f619cfe1-61b2-4726-a08f-b41cc24ae488\") " pod="metallb-system/frr-k8s-tgvxb"
Jan 22 05:59:45 crc kubenswrapper[4933]: E0122 05:59:45.745487 4933 secret.go:188] Couldn't get secret metallb-system/frr-k8s-webhook-server-cert: secret "frr-k8s-webhook-server-cert" not found
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.745543 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/f619cfe1-61b2-4726-a08f-b41cc24ae488-frr-sockets\") pod \"frr-k8s-tgvxb\" (UID: \"f619cfe1-61b2-4726-a08f-b41cc24ae488\") " pod="metallb-system/frr-k8s-tgvxb"
Jan 22 05:59:45 crc kubenswrapper[4933]: E0122 05:59:45.745558 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/42d0dcd8-97a6-489b-9b19-43fd22936816-cert podName:42d0dcd8-97a6-489b-9b19-43fd22936816 nodeName:}" failed. No retries permitted until 2026-01-22 05:59:46.245533411 +0000 UTC m=+834.082658804 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/42d0dcd8-97a6-489b-9b19-43fd22936816-cert") pod "frr-k8s-webhook-server-7df86c4f6c-rjx6j" (UID: "42d0dcd8-97a6-489b-9b19-43fd22936816") : secret "frr-k8s-webhook-server-cert" not found
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.745637 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/f619cfe1-61b2-4726-a08f-b41cc24ae488-reloader\") pod \"frr-k8s-tgvxb\" (UID: \"f619cfe1-61b2-4726-a08f-b41cc24ae488\") " pod="metallb-system/frr-k8s-tgvxb"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.757857 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f619cfe1-61b2-4726-a08f-b41cc24ae488-metrics-certs\") pod \"frr-k8s-tgvxb\" (UID: \"f619cfe1-61b2-4726-a08f-b41cc24ae488\") " pod="metallb-system/frr-k8s-tgvxb"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.766739 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d5krx\" (UniqueName: \"kubernetes.io/projected/42d0dcd8-97a6-489b-9b19-43fd22936816-kube-api-access-d5krx\") pod \"frr-k8s-webhook-server-7df86c4f6c-rjx6j\" (UID: \"42d0dcd8-97a6-489b-9b19-43fd22936816\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-rjx6j"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.776005 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6pftp\" (UniqueName: \"kubernetes.io/projected/f619cfe1-61b2-4726-a08f-b41cc24ae488-kube-api-access-6pftp\") pod \"frr-k8s-tgvxb\" (UID: \"f619cfe1-61b2-4726-a08f-b41cc24ae488\") " pod="metallb-system/frr-k8s-tgvxb"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.846207 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/57ec97d6-d16e-4069-98cc-7dcf56910fad-metrics-certs\") pod \"speaker-xlk7v\" (UID: \"57ec97d6-d16e-4069-98cc-7dcf56910fad\") " pod="metallb-system/speaker-xlk7v"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.846275 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/57ec97d6-d16e-4069-98cc-7dcf56910fad-metallb-excludel2\") pod \"speaker-xlk7v\" (UID: \"57ec97d6-d16e-4069-98cc-7dcf56910fad\") " pod="metallb-system/speaker-xlk7v"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.846312 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hss4f\" (UniqueName: \"kubernetes.io/projected/57ec97d6-d16e-4069-98cc-7dcf56910fad-kube-api-access-hss4f\") pod \"speaker-xlk7v\" (UID: \"57ec97d6-d16e-4069-98cc-7dcf56910fad\") " pod="metallb-system/speaker-xlk7v"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.846336 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/57ec97d6-d16e-4069-98cc-7dcf56910fad-memberlist\") pod \"speaker-xlk7v\" (UID: \"57ec97d6-d16e-4069-98cc-7dcf56910fad\") " pod="metallb-system/speaker-xlk7v"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.846366 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l6gks\" (UniqueName: \"kubernetes.io/projected/29f4dd7e-c2cf-4f64-87e8-2201fe99e751-kube-api-access-l6gks\") pod \"controller-6968d8fdc4-2stg8\" (UID: \"29f4dd7e-c2cf-4f64-87e8-2201fe99e751\") " pod="metallb-system/controller-6968d8fdc4-2stg8"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.846412 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/29f4dd7e-c2cf-4f64-87e8-2201fe99e751-metrics-certs\") pod \"controller-6968d8fdc4-2stg8\" (UID: \"29f4dd7e-c2cf-4f64-87e8-2201fe99e751\") " pod="metallb-system/controller-6968d8fdc4-2stg8"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.846456 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/29f4dd7e-c2cf-4f64-87e8-2201fe99e751-cert\") pod \"controller-6968d8fdc4-2stg8\" (UID: \"29f4dd7e-c2cf-4f64-87e8-2201fe99e751\") " pod="metallb-system/controller-6968d8fdc4-2stg8"
Jan 22 05:59:45 crc kubenswrapper[4933]: E0122 05:59:45.846645 4933 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found
Jan 22 05:59:45 crc kubenswrapper[4933]: E0122 05:59:45.846706 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/57ec97d6-d16e-4069-98cc-7dcf56910fad-memberlist podName:57ec97d6-d16e-4069-98cc-7dcf56910fad nodeName:}" failed. No retries permitted until 2026-01-22 05:59:46.346688945 +0000 UTC m=+834.183814298 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/57ec97d6-d16e-4069-98cc-7dcf56910fad-memberlist") pod "speaker-xlk7v" (UID: "57ec97d6-d16e-4069-98cc-7dcf56910fad") : secret "metallb-memberlist" not found
Jan 22 05:59:45 crc kubenswrapper[4933]: E0122 05:59:45.847097 4933 secret.go:188] Couldn't get secret metallb-system/controller-certs-secret: secret "controller-certs-secret" not found
Jan 22 05:59:45 crc kubenswrapper[4933]: E0122 05:59:45.847136 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/29f4dd7e-c2cf-4f64-87e8-2201fe99e751-metrics-certs podName:29f4dd7e-c2cf-4f64-87e8-2201fe99e751 nodeName:}" failed. No retries permitted until 2026-01-22 05:59:46.347126166 +0000 UTC m=+834.184251519 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/29f4dd7e-c2cf-4f64-87e8-2201fe99e751-metrics-certs") pod "controller-6968d8fdc4-2stg8" (UID: "29f4dd7e-c2cf-4f64-87e8-2201fe99e751") : secret "controller-certs-secret" not found
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.847501 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/57ec97d6-d16e-4069-98cc-7dcf56910fad-metallb-excludel2\") pod \"speaker-xlk7v\" (UID: \"57ec97d6-d16e-4069-98cc-7dcf56910fad\") " pod="metallb-system/speaker-xlk7v"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.849972 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/57ec97d6-d16e-4069-98cc-7dcf56910fad-metrics-certs\") pod \"speaker-xlk7v\" (UID: \"57ec97d6-d16e-4069-98cc-7dcf56910fad\") " pod="metallb-system/speaker-xlk7v"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.850236 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-tgvxb"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.851769 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/29f4dd7e-c2cf-4f64-87e8-2201fe99e751-cert\") pod \"controller-6968d8fdc4-2stg8\" (UID: \"29f4dd7e-c2cf-4f64-87e8-2201fe99e751\") " pod="metallb-system/controller-6968d8fdc4-2stg8"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.860449 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l6gks\" (UniqueName: \"kubernetes.io/projected/29f4dd7e-c2cf-4f64-87e8-2201fe99e751-kube-api-access-l6gks\") pod \"controller-6968d8fdc4-2stg8\" (UID: \"29f4dd7e-c2cf-4f64-87e8-2201fe99e751\") " pod="metallb-system/controller-6968d8fdc4-2stg8"
Jan 22 05:59:45 crc kubenswrapper[4933]: I0122 05:59:45.866963 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hss4f\" (UniqueName: \"kubernetes.io/projected/57ec97d6-d16e-4069-98cc-7dcf56910fad-kube-api-access-hss4f\") pod \"speaker-xlk7v\" (UID: \"57ec97d6-d16e-4069-98cc-7dcf56910fad\") " pod="metallb-system/speaker-xlk7v"
Jan 22 05:59:46 crc kubenswrapper[4933]: I0122 05:59:46.251632 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/42d0dcd8-97a6-489b-9b19-43fd22936816-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-rjx6j\" (UID: \"42d0dcd8-97a6-489b-9b19-43fd22936816\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-rjx6j"
Jan 22 05:59:46 crc kubenswrapper[4933]: I0122 05:59:46.259888 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/42d0dcd8-97a6-489b-9b19-43fd22936816-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-rjx6j\" (UID: \"42d0dcd8-97a6-489b-9b19-43fd22936816\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-rjx6j"
Jan 22 05:59:46 crc kubenswrapper[4933]: I0122 05:59:46.353018 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/57ec97d6-d16e-4069-98cc-7dcf56910fad-memberlist\") pod \"speaker-xlk7v\" (UID: \"57ec97d6-d16e-4069-98cc-7dcf56910fad\") " pod="metallb-system/speaker-xlk7v"
Jan 22 05:59:46 crc kubenswrapper[4933]: I0122 05:59:46.353160 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/29f4dd7e-c2cf-4f64-87e8-2201fe99e751-metrics-certs\") pod \"controller-6968d8fdc4-2stg8\" (UID: \"29f4dd7e-c2cf-4f64-87e8-2201fe99e751\") " pod="metallb-system/controller-6968d8fdc4-2stg8"
Jan 22 05:59:46 crc kubenswrapper[4933]: E0122 05:59:46.353239 4933 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found
Jan 22 05:59:46 crc kubenswrapper[4933]: E0122 05:59:46.353326 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/57ec97d6-d16e-4069-98cc-7dcf56910fad-memberlist podName:57ec97d6-d16e-4069-98cc-7dcf56910fad nodeName:}" failed. No retries permitted until 2026-01-22 05:59:47.353303651 +0000 UTC m=+835.190429084 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/57ec97d6-d16e-4069-98cc-7dcf56910fad-memberlist") pod "speaker-xlk7v" (UID: "57ec97d6-d16e-4069-98cc-7dcf56910fad") : secret "metallb-memberlist" not found
Jan 22 05:59:46 crc kubenswrapper[4933]: I0122 05:59:46.420284 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/29f4dd7e-c2cf-4f64-87e8-2201fe99e751-metrics-certs\") pod \"controller-6968d8fdc4-2stg8\" (UID: \"29f4dd7e-c2cf-4f64-87e8-2201fe99e751\") " pod="metallb-system/controller-6968d8fdc4-2stg8"
Jan 22 05:59:46 crc kubenswrapper[4933]: I0122 05:59:46.463353 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-rjx6j"
Jan 22 05:59:46 crc kubenswrapper[4933]: I0122 05:59:46.545147 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6968d8fdc4-2stg8"
Jan 22 05:59:46 crc kubenswrapper[4933]: I0122 05:59:46.777218 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-2stg8"]
Jan 22 05:59:46 crc kubenswrapper[4933]: I0122 05:59:46.903977 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-rjx6j"]
Jan 22 05:59:47 crc kubenswrapper[4933]: I0122 05:59:47.097609 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-2stg8" event={"ID":"29f4dd7e-c2cf-4f64-87e8-2201fe99e751","Type":"ContainerStarted","Data":"698717ff277b9bf6a11b4bc088940895209ad238640326573fb941423f6a8d0f"}
Jan 22 05:59:47 crc kubenswrapper[4933]: I0122 05:59:47.105301 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-rjx6j" event={"ID":"42d0dcd8-97a6-489b-9b19-43fd22936816","Type":"ContainerStarted","Data":"44d93d240505d0534c7c418c1af6503c09e204ccfa1ca98bef1b64565ab4e337"}
Jan 22 05:59:47 crc kubenswrapper[4933]: I0122 05:59:47.109975 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-tgvxb" event={"ID":"f619cfe1-61b2-4726-a08f-b41cc24ae488","Type":"ContainerStarted","Data":"9f0895705fb71ed1abdf21bd54c599ec6be1e0d1f17504834064f14d13dc1e65"}
Jan 22 05:59:47 crc kubenswrapper[4933]: I0122 05:59:47.369483 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/57ec97d6-d16e-4069-98cc-7dcf56910fad-memberlist\") pod \"speaker-xlk7v\" (UID: \"57ec97d6-d16e-4069-98cc-7dcf56910fad\") " pod="metallb-system/speaker-xlk7v"
Jan 22 05:59:47 crc kubenswrapper[4933]: I0122 05:59:47.376720 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/57ec97d6-d16e-4069-98cc-7dcf56910fad-memberlist\") pod \"speaker-xlk7v\" (UID: \"57ec97d6-d16e-4069-98cc-7dcf56910fad\") " pod="metallb-system/speaker-xlk7v"
Jan 22 05:59:47 crc kubenswrapper[4933]: I0122 05:59:47.432748 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-xlk7v"
Jan 22 05:59:47 crc kubenswrapper[4933]: W0122 05:59:47.469958 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod57ec97d6_d16e_4069_98cc_7dcf56910fad.slice/crio-75cf60f0e0a19de2eee05376a5994a6d52dcf7de9d0304c2d5304f5266b298eb WatchSource:0}: Error finding container 75cf60f0e0a19de2eee05376a5994a6d52dcf7de9d0304c2d5304f5266b298eb: Status 404 returned error can't find the container with id 75cf60f0e0a19de2eee05376a5994a6d52dcf7de9d0304c2d5304f5266b298eb
Jan 22 05:59:48 crc kubenswrapper[4933]: I0122 05:59:48.125917 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-2stg8" event={"ID":"29f4dd7e-c2cf-4f64-87e8-2201fe99e751","Type":"ContainerStarted","Data":"ba0b615c451d7bd28f467fef308ec7e6c712f0237975a2faf49d2b1c1ff67b55"}
Jan 22 05:59:48 crc kubenswrapper[4933]: I0122 05:59:48.126318 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-2stg8" event={"ID":"29f4dd7e-c2cf-4f64-87e8-2201fe99e751","Type":"ContainerStarted","Data":"d394728648e633bf1d8847f3236a073c47c482521d9e6b303ea184279af16c74"}
Jan 22 05:59:48 crc kubenswrapper[4933]: I0122 05:59:48.126342 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6968d8fdc4-2stg8"
Jan 22 05:59:48 crc kubenswrapper[4933]: I0122 05:59:48.128731 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-xlk7v" event={"ID":"57ec97d6-d16e-4069-98cc-7dcf56910fad","Type":"ContainerStarted","Data":"0964962ef77de256241852d56e5578f754e6911d766a0afc7ac54ca684b7cc80"}
Jan 22 05:59:48 crc kubenswrapper[4933]: I0122 05:59:48.128759 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-xlk7v" event={"ID":"57ec97d6-d16e-4069-98cc-7dcf56910fad","Type":"ContainerStarted","Data":"21b35d798fd09c3e0766ea530105a9afce606fc93a413853e2ba7b9067b52049"}
Jan 22 05:59:48 crc kubenswrapper[4933]: I0122 05:59:48.128772 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-xlk7v" event={"ID":"57ec97d6-d16e-4069-98cc-7dcf56910fad","Type":"ContainerStarted","Data":"75cf60f0e0a19de2eee05376a5994a6d52dcf7de9d0304c2d5304f5266b298eb"}
Jan 22 05:59:48 crc kubenswrapper[4933]: I0122 05:59:48.128995 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-xlk7v"
Jan 22 05:59:48 crc kubenswrapper[4933]: I0122 05:59:48.146296 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6968d8fdc4-2stg8" podStartSLOduration=3.146282235 podStartE2EDuration="3.146282235s" podCreationTimestamp="2026-01-22 05:59:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:59:48.142823459 +0000 UTC m=+835.979948832" watchObservedRunningTime="2026-01-22 05:59:48.146282235 +0000 UTC m=+835.983407588"
Jan 22 05:59:48 crc kubenswrapper[4933]: I0122 05:59:48.160105 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-xlk7v" podStartSLOduration=3.16008775 podStartE2EDuration="3.16008775s" podCreationTimestamp="2026-01-22 05:59:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:59:48.156125821 +0000 UTC m=+835.993251174" watchObservedRunningTime="2026-01-22 05:59:48.16008775 +0000 UTC m=+835.997213103"
Jan 22 05:59:54 crc kubenswrapper[4933]: I0122 05:59:54.175133 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-rjx6j" event={"ID":"42d0dcd8-97a6-489b-9b19-43fd22936816","Type":"ContainerStarted","Data":"a82cecc09bb80987794f717e70f74ff30aef2dab3502aff7fe5f091d4f3fa5ef"}
Jan 22 05:59:54 crc kubenswrapper[4933]: I0122 05:59:54.176032 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-rjx6j"
Jan 22 05:59:54 crc kubenswrapper[4933]: I0122 05:59:54.177804 4933 generic.go:334] "Generic (PLEG): container finished" podID="f619cfe1-61b2-4726-a08f-b41cc24ae488" containerID="047e9e880bf6db3e8fcea01c99f5e0ef8148b50e1ae5aa73ffd9c8b5625a935a" exitCode=0
Jan 22 05:59:54 crc kubenswrapper[4933]: I0122 05:59:54.177868 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-tgvxb" event={"ID":"f619cfe1-61b2-4726-a08f-b41cc24ae488","Type":"ContainerDied","Data":"047e9e880bf6db3e8fcea01c99f5e0ef8148b50e1ae5aa73ffd9c8b5625a935a"}
Jan 22 05:59:54 crc kubenswrapper[4933]: I0122 05:59:54.206630 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-rjx6j" podStartSLOduration=2.994802136 podStartE2EDuration="9.206600501s" podCreationTimestamp="2026-01-22 05:59:45 +0000 UTC" firstStartedPulling="2026-01-22 05:59:46.913465734 +0000 UTC m=+834.750591097" lastFinishedPulling="2026-01-22 05:59:53.125264109 +0000 UTC m=+840.962389462" observedRunningTime="2026-01-22 05:59:54.201476004 +0000 UTC m=+842.038601427" watchObservedRunningTime="2026-01-22 05:59:54.206600501 +0000 UTC m=+842.043725894"
Jan 22 05:59:55 crc kubenswrapper[4933]: I0122 05:59:55.188731 4933 generic.go:334] "Generic (PLEG): container finished" podID="f619cfe1-61b2-4726-a08f-b41cc24ae488" containerID="2d9b146f63c83e44f60ffdcb8c39ec67c4270a1d93eed8305f8c48f0f38072de" exitCode=0
Jan 22 05:59:55 crc kubenswrapper[4933]: I0122 05:59:55.188843 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-tgvxb" event={"ID":"f619cfe1-61b2-4726-a08f-b41cc24ae488","Type":"ContainerDied","Data":"2d9b146f63c83e44f60ffdcb8c39ec67c4270a1d93eed8305f8c48f0f38072de"}
Jan 22 05:59:56 crc kubenswrapper[4933]: I0122 05:59:56.198158 4933 generic.go:334] "Generic (PLEG): container finished" podID="f619cfe1-61b2-4726-a08f-b41cc24ae488" containerID="9703f456c5f15bef41beb76358402db721dc71106dd68b89e8a4b686bc40f918" exitCode=0
Jan 22 05:59:56 crc kubenswrapper[4933]: I0122 05:59:56.198242 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-tgvxb" event={"ID":"f619cfe1-61b2-4726-a08f-b41cc24ae488","Type":"ContainerDied","Data":"9703f456c5f15bef41beb76358402db721dc71106dd68b89e8a4b686bc40f918"}
Jan 22 05:59:57 crc kubenswrapper[4933]: I0122 05:59:57.207445 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-tgvxb" event={"ID":"f619cfe1-61b2-4726-a08f-b41cc24ae488","Type":"ContainerStarted","Data":"cd5e6004ee56a9dacb1cef044c9d3d833e30d22a703278ef52177545954e9d2f"}
Jan 22 05:59:57 crc kubenswrapper[4933]: I0122 05:59:57.207771 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-tgvxb" event={"ID":"f619cfe1-61b2-4726-a08f-b41cc24ae488","Type":"ContainerStarted","Data":"1dacc84e0d22111d279a1254dbcdf2e89dd3991e086466874f82aa4c31dff1e1"}
Jan 22 05:59:57 crc kubenswrapper[4933]: I0122 05:59:57.207785 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-tgvxb" event={"ID":"f619cfe1-61b2-4726-a08f-b41cc24ae488","Type":"ContainerStarted","Data":"1f22a46002a7d2e2dd100e23c31a3f4fea1862d176092ad29a667d2667eebcc4"}
Jan 22 05:59:57 crc kubenswrapper[4933]: I0122 05:59:57.437915 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-xlk7v"
Jan 22 05:59:58 crc kubenswrapper[4933]: I0122 05:59:58.218466 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-tgvxb" event={"ID":"f619cfe1-61b2-4726-a08f-b41cc24ae488","Type":"ContainerStarted","Data":"4395815c693c5e5345336ff00a241d88f3f084b6eb3846ac25581d34acb73c9e"}
Jan 22 05:59:58 crc kubenswrapper[4933]: I0122 05:59:58.218760 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-tgvxb" event={"ID":"f619cfe1-61b2-4726-a08f-b41cc24ae488","Type":"ContainerStarted","Data":"4f25d00cde60a4736ed43a4163a651b22267b975048e1874d7a7b6583dca787c"}
Jan 22 05:59:58 crc kubenswrapper[4933]: I0122 05:59:58.218778 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-tgvxb"
Jan 22 05:59:58 crc kubenswrapper[4933]: I0122 05:59:58.218790 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-tgvxb" event={"ID":"f619cfe1-61b2-4726-a08f-b41cc24ae488","Type":"ContainerStarted","Data":"c4e51da89aab4152d216b8ed2f5f20a262a56dd2ef1ccca17926ece89bda82e7"}
Jan 22 05:59:58 crc kubenswrapper[4933]: I0122 05:59:58.248227 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-tgvxb" podStartSLOduration=6.39706007 podStartE2EDuration="13.248208772s" podCreationTimestamp="2026-01-22 05:59:45 +0000 UTC" firstStartedPulling="2026-01-22 05:59:46.224780336 +0000 UTC m=+834.061905729" lastFinishedPulling="2026-01-22 05:59:53.075929068 +0000 UTC m=+840.913054431" observedRunningTime="2026-01-22 05:59:58.246794303 +0000 UTC m=+846.083919706" watchObservedRunningTime="2026-01-22 05:59:58.248208772 +0000 UTC m=+846.085334125"
Jan 22 05:59:59 crc kubenswrapper[4933]: I0122 05:59:59.322867 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asr2rj"]
Jan 22 05:59:59 crc kubenswrapper[4933]: I0122 05:59:59.324238 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asr2rj" Jan 22 05:59:59 crc kubenswrapper[4933]: I0122 05:59:59.326235 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 22 05:59:59 crc kubenswrapper[4933]: I0122 05:59:59.345592 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asr2rj"] Jan 22 05:59:59 crc kubenswrapper[4933]: I0122 05:59:59.431901 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/3b3b2cb0-2124-4792-b8fa-7c0a3438c186-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asr2rj\" (UID: \"3b3b2cb0-2124-4792-b8fa-7c0a3438c186\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asr2rj" Jan 22 05:59:59 crc kubenswrapper[4933]: I0122 05:59:59.432227 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c4mgh\" (UniqueName: \"kubernetes.io/projected/3b3b2cb0-2124-4792-b8fa-7c0a3438c186-kube-api-access-c4mgh\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asr2rj\" (UID: \"3b3b2cb0-2124-4792-b8fa-7c0a3438c186\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asr2rj" Jan 22 05:59:59 crc kubenswrapper[4933]: I0122 05:59:59.432361 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/3b3b2cb0-2124-4792-b8fa-7c0a3438c186-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asr2rj\" (UID: \"3b3b2cb0-2124-4792-b8fa-7c0a3438c186\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asr2rj" Jan 22 05:59:59 crc kubenswrapper[4933]: I0122 05:59:59.533005 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c4mgh\" (UniqueName: \"kubernetes.io/projected/3b3b2cb0-2124-4792-b8fa-7c0a3438c186-kube-api-access-c4mgh\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asr2rj\" (UID: \"3b3b2cb0-2124-4792-b8fa-7c0a3438c186\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asr2rj" Jan 22 05:59:59 crc kubenswrapper[4933]: I0122 05:59:59.533121 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/3b3b2cb0-2124-4792-b8fa-7c0a3438c186-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asr2rj\" (UID: \"3b3b2cb0-2124-4792-b8fa-7c0a3438c186\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asr2rj" Jan 22 05:59:59 crc kubenswrapper[4933]: I0122 05:59:59.533200 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/3b3b2cb0-2124-4792-b8fa-7c0a3438c186-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asr2rj\" (UID: \"3b3b2cb0-2124-4792-b8fa-7c0a3438c186\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asr2rj" Jan 22 05:59:59 crc kubenswrapper[4933]: I0122 05:59:59.533583 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/3b3b2cb0-2124-4792-b8fa-7c0a3438c186-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asr2rj\" (UID: \"3b3b2cb0-2124-4792-b8fa-7c0a3438c186\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asr2rj" Jan 22 05:59:59 crc kubenswrapper[4933]: I0122 05:59:59.533648 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/3b3b2cb0-2124-4792-b8fa-7c0a3438c186-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asr2rj\" (UID: \"3b3b2cb0-2124-4792-b8fa-7c0a3438c186\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asr2rj" Jan 22 05:59:59 crc kubenswrapper[4933]: I0122 05:59:59.555652 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c4mgh\" (UniqueName: \"kubernetes.io/projected/3b3b2cb0-2124-4792-b8fa-7c0a3438c186-kube-api-access-c4mgh\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asr2rj\" (UID: \"3b3b2cb0-2124-4792-b8fa-7c0a3438c186\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asr2rj" Jan 22 05:59:59 crc kubenswrapper[4933]: I0122 05:59:59.640856 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asr2rj" Jan 22 06:00:00 crc kubenswrapper[4933]: I0122 06:00:00.049872 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asr2rj"] Jan 22 06:00:00 crc kubenswrapper[4933]: W0122 06:00:00.057749 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b3b2cb0_2124_4792_b8fa_7c0a3438c186.slice/crio-3e8578487964bfecbe04972ad5a00e7e3dfe48b61c188c494bcf0df49258fa46 WatchSource:0}: Error finding container 3e8578487964bfecbe04972ad5a00e7e3dfe48b61c188c494bcf0df49258fa46: Status 404 returned error can't find the container with id 3e8578487964bfecbe04972ad5a00e7e3dfe48b61c188c494bcf0df49258fa46 Jan 22 06:00:00 crc kubenswrapper[4933]: I0122 06:00:00.139010 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484360-dkfb4"] Jan 22 06:00:00 crc kubenswrapper[4933]: I0122 06:00:00.139995 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484360-dkfb4" Jan 22 06:00:00 crc kubenswrapper[4933]: I0122 06:00:00.140606 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/267706ea-2ceb-4ba1-a923-9b82f27a8ddf-config-volume\") pod \"collect-profiles-29484360-dkfb4\" (UID: \"267706ea-2ceb-4ba1-a923-9b82f27a8ddf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484360-dkfb4" Jan 22 06:00:00 crc kubenswrapper[4933]: I0122 06:00:00.140777 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pcqsm\" (UniqueName: \"kubernetes.io/projected/267706ea-2ceb-4ba1-a923-9b82f27a8ddf-kube-api-access-pcqsm\") pod \"collect-profiles-29484360-dkfb4\" (UID: \"267706ea-2ceb-4ba1-a923-9b82f27a8ddf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484360-dkfb4" Jan 22 06:00:00 crc kubenswrapper[4933]: I0122 06:00:00.140857 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/267706ea-2ceb-4ba1-a923-9b82f27a8ddf-secret-volume\") pod \"collect-profiles-29484360-dkfb4\" (UID: \"267706ea-2ceb-4ba1-a923-9b82f27a8ddf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484360-dkfb4" Jan 22 06:00:00 crc kubenswrapper[4933]: I0122 06:00:00.142215 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 22 06:00:00 crc kubenswrapper[4933]: I0122 06:00:00.143179 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 22 06:00:00 crc kubenswrapper[4933]: I0122 06:00:00.147640 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484360-dkfb4"] Jan 22 06:00:00 crc kubenswrapper[4933]: I0122 06:00:00.231084 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asr2rj" event={"ID":"3b3b2cb0-2124-4792-b8fa-7c0a3438c186","Type":"ContainerStarted","Data":"3e8578487964bfecbe04972ad5a00e7e3dfe48b61c188c494bcf0df49258fa46"} Jan 22 06:00:00 crc kubenswrapper[4933]: I0122 06:00:00.241766 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pcqsm\" (UniqueName: \"kubernetes.io/projected/267706ea-2ceb-4ba1-a923-9b82f27a8ddf-kube-api-access-pcqsm\") pod \"collect-profiles-29484360-dkfb4\" (UID: \"267706ea-2ceb-4ba1-a923-9b82f27a8ddf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484360-dkfb4" Jan 22 06:00:00 crc kubenswrapper[4933]: I0122 06:00:00.241824 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/267706ea-2ceb-4ba1-a923-9b82f27a8ddf-secret-volume\") pod \"collect-profiles-29484360-dkfb4\" (UID: \"267706ea-2ceb-4ba1-a923-9b82f27a8ddf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484360-dkfb4" Jan 22 06:00:00 crc kubenswrapper[4933]: I0122 06:00:00.242126 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/267706ea-2ceb-4ba1-a923-9b82f27a8ddf-config-volume\") pod \"collect-profiles-29484360-dkfb4\" (UID: 
\"267706ea-2ceb-4ba1-a923-9b82f27a8ddf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484360-dkfb4" Jan 22 06:00:00 crc kubenswrapper[4933]: I0122 06:00:00.243193 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/267706ea-2ceb-4ba1-a923-9b82f27a8ddf-config-volume\") pod \"collect-profiles-29484360-dkfb4\" (UID: \"267706ea-2ceb-4ba1-a923-9b82f27a8ddf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484360-dkfb4" Jan 22 06:00:00 crc kubenswrapper[4933]: I0122 06:00:00.247613 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/267706ea-2ceb-4ba1-a923-9b82f27a8ddf-secret-volume\") pod \"collect-profiles-29484360-dkfb4\" (UID: \"267706ea-2ceb-4ba1-a923-9b82f27a8ddf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484360-dkfb4" Jan 22 06:00:00 crc kubenswrapper[4933]: I0122 06:00:00.257303 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pcqsm\" (UniqueName: \"kubernetes.io/projected/267706ea-2ceb-4ba1-a923-9b82f27a8ddf-kube-api-access-pcqsm\") pod \"collect-profiles-29484360-dkfb4\" (UID: \"267706ea-2ceb-4ba1-a923-9b82f27a8ddf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484360-dkfb4" Jan 22 06:00:00 crc kubenswrapper[4933]: I0122 06:00:00.465349 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484360-dkfb4" Jan 22 06:00:00 crc kubenswrapper[4933]: I0122 06:00:00.678167 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484360-dkfb4"] Jan 22 06:00:00 crc kubenswrapper[4933]: W0122 06:00:00.685450 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod267706ea_2ceb_4ba1_a923_9b82f27a8ddf.slice/crio-edb45efd5f723c976ff3349fd25acb246daef40825ee9bf339bff2b7bea71967 WatchSource:0}: Error finding container edb45efd5f723c976ff3349fd25acb246daef40825ee9bf339bff2b7bea71967: Status 404 returned error can't find the container with id edb45efd5f723c976ff3349fd25acb246daef40825ee9bf339bff2b7bea71967 Jan 22 06:00:00 crc kubenswrapper[4933]: I0122 06:00:00.851371 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-tgvxb" Jan 22 06:00:00 crc kubenswrapper[4933]: I0122 06:00:00.891557 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-tgvxb" Jan 22 06:00:01 crc kubenswrapper[4933]: I0122 06:00:01.236788 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484360-dkfb4" event={"ID":"267706ea-2ceb-4ba1-a923-9b82f27a8ddf","Type":"ContainerStarted","Data":"759867e7333ddc7383957cb9e7346c3d5215bbb832c39084d30e0885d2a20918"} Jan 22 06:00:01 crc kubenswrapper[4933]: I0122 06:00:01.237141 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484360-dkfb4" event={"ID":"267706ea-2ceb-4ba1-a923-9b82f27a8ddf","Type":"ContainerStarted","Data":"edb45efd5f723c976ff3349fd25acb246daef40825ee9bf339bff2b7bea71967"} Jan 22 06:00:01 crc kubenswrapper[4933]: I0122 06:00:01.238346 4933 generic.go:334] "Generic (PLEG): container finished" podID="3b3b2cb0-2124-4792-b8fa-7c0a3438c186" 
containerID="1cd34c340e8b0801bee7b63c757d2962b602f0fd4af7639a626b09d8481189cb" exitCode=0 Jan 22 06:00:01 crc kubenswrapper[4933]: I0122 06:00:01.238971 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asr2rj" event={"ID":"3b3b2cb0-2124-4792-b8fa-7c0a3438c186","Type":"ContainerDied","Data":"1cd34c340e8b0801bee7b63c757d2962b602f0fd4af7639a626b09d8481189cb"} Jan 22 06:00:01 crc kubenswrapper[4933]: I0122 06:00:01.259602 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29484360-dkfb4" podStartSLOduration=1.259583323 podStartE2EDuration="1.259583323s" podCreationTimestamp="2026-01-22 06:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:00:01.254168283 +0000 UTC m=+849.091293666" watchObservedRunningTime="2026-01-22 06:00:01.259583323 +0000 UTC m=+849.096708676" Jan 22 06:00:02 crc kubenswrapper[4933]: I0122 06:00:02.260569 4933 generic.go:334] "Generic (PLEG): container finished" podID="267706ea-2ceb-4ba1-a923-9b82f27a8ddf" containerID="759867e7333ddc7383957cb9e7346c3d5215bbb832c39084d30e0885d2a20918" exitCode=0 Jan 22 06:00:02 crc kubenswrapper[4933]: I0122 06:00:02.260649 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484360-dkfb4" event={"ID":"267706ea-2ceb-4ba1-a923-9b82f27a8ddf","Type":"ContainerDied","Data":"759867e7333ddc7383957cb9e7346c3d5215bbb832c39084d30e0885d2a20918"} Jan 22 06:00:03 crc kubenswrapper[4933]: I0122 06:00:03.548579 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484360-dkfb4" Jan 22 06:00:03 crc kubenswrapper[4933]: I0122 06:00:03.692748 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/267706ea-2ceb-4ba1-a923-9b82f27a8ddf-config-volume\") pod \"267706ea-2ceb-4ba1-a923-9b82f27a8ddf\" (UID: \"267706ea-2ceb-4ba1-a923-9b82f27a8ddf\") " Jan 22 06:00:03 crc kubenswrapper[4933]: I0122 06:00:03.692836 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/267706ea-2ceb-4ba1-a923-9b82f27a8ddf-secret-volume\") pod \"267706ea-2ceb-4ba1-a923-9b82f27a8ddf\" (UID: \"267706ea-2ceb-4ba1-a923-9b82f27a8ddf\") " Jan 22 06:00:03 crc kubenswrapper[4933]: I0122 06:00:03.693240 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/267706ea-2ceb-4ba1-a923-9b82f27a8ddf-config-volume" (OuterVolumeSpecName: "config-volume") pod "267706ea-2ceb-4ba1-a923-9b82f27a8ddf" (UID: "267706ea-2ceb-4ba1-a923-9b82f27a8ddf"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:00:03 crc kubenswrapper[4933]: I0122 06:00:03.693692 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcqsm\" (UniqueName: \"kubernetes.io/projected/267706ea-2ceb-4ba1-a923-9b82f27a8ddf-kube-api-access-pcqsm\") pod \"267706ea-2ceb-4ba1-a923-9b82f27a8ddf\" (UID: \"267706ea-2ceb-4ba1-a923-9b82f27a8ddf\") " Jan 22 06:00:03 crc kubenswrapper[4933]: I0122 06:00:03.693956 4933 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/267706ea-2ceb-4ba1-a923-9b82f27a8ddf-config-volume\") on node \"crc\" DevicePath \"\"" Jan 22 06:00:03 crc kubenswrapper[4933]: I0122 06:00:03.698264 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/267706ea-2ceb-4ba1-a923-9b82f27a8ddf-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "267706ea-2ceb-4ba1-a923-9b82f27a8ddf" (UID: "267706ea-2ceb-4ba1-a923-9b82f27a8ddf"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:00:03 crc kubenswrapper[4933]: I0122 06:00:03.700601 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/267706ea-2ceb-4ba1-a923-9b82f27a8ddf-kube-api-access-pcqsm" (OuterVolumeSpecName: "kube-api-access-pcqsm") pod "267706ea-2ceb-4ba1-a923-9b82f27a8ddf" (UID: "267706ea-2ceb-4ba1-a923-9b82f27a8ddf"). InnerVolumeSpecName "kube-api-access-pcqsm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:00:03 crc kubenswrapper[4933]: I0122 06:00:03.795161 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcqsm\" (UniqueName: \"kubernetes.io/projected/267706ea-2ceb-4ba1-a923-9b82f27a8ddf-kube-api-access-pcqsm\") on node \"crc\" DevicePath \"\"" Jan 22 06:00:03 crc kubenswrapper[4933]: I0122 06:00:03.795229 4933 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/267706ea-2ceb-4ba1-a923-9b82f27a8ddf-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 22 06:00:04 crc kubenswrapper[4933]: I0122 06:00:04.294213 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484360-dkfb4" event={"ID":"267706ea-2ceb-4ba1-a923-9b82f27a8ddf","Type":"ContainerDied","Data":"edb45efd5f723c976ff3349fd25acb246daef40825ee9bf339bff2b7bea71967"} Jan 22 06:00:04 crc kubenswrapper[4933]: I0122 06:00:04.294504 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="edb45efd5f723c976ff3349fd25acb246daef40825ee9bf339bff2b7bea71967" Jan 22 06:00:04 crc kubenswrapper[4933]: I0122 06:00:04.294459 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484360-dkfb4" Jan 22 06:00:06 crc kubenswrapper[4933]: I0122 06:00:06.310922 4933 generic.go:334] "Generic (PLEG): container finished" podID="3b3b2cb0-2124-4792-b8fa-7c0a3438c186" containerID="ea0803a19f78bab6004b344f9e8c33377a3b52151a52afb41312392b9bccbd3b" exitCode=0 Jan 22 06:00:06 crc kubenswrapper[4933]: I0122 06:00:06.311015 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asr2rj" event={"ID":"3b3b2cb0-2124-4792-b8fa-7c0a3438c186","Type":"ContainerDied","Data":"ea0803a19f78bab6004b344f9e8c33377a3b52151a52afb41312392b9bccbd3b"} Jan 22 06:00:06 crc kubenswrapper[4933]: I0122 06:00:06.477818 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-rjx6j" Jan 22 06:00:06 crc kubenswrapper[4933]: I0122 06:00:06.548582 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6968d8fdc4-2stg8" Jan 22 06:00:08 crc kubenswrapper[4933]: I0122 06:00:08.326840 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asr2rj" event={"ID":"3b3b2cb0-2124-4792-b8fa-7c0a3438c186","Type":"ContainerStarted","Data":"180712f2c6b8ccbd18e03b65aca86bc4dd10ed3b0a42dcb9449994c4c99ae9a4"} Jan 22 06:00:08 crc kubenswrapper[4933]: I0122 06:00:08.360314 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asr2rj" podStartSLOduration=5.245570141 podStartE2EDuration="9.360292089s" podCreationTimestamp="2026-01-22 05:59:59 +0000 UTC" firstStartedPulling="2026-01-22 06:00:01.240173231 +0000 UTC m=+849.077298594" lastFinishedPulling="2026-01-22 06:00:05.354849618 +0000 UTC m=+853.192020542" observedRunningTime="2026-01-22 06:00:08.354231737 +0000 UTC m=+856.191357100" watchObservedRunningTime="2026-01-22 06:00:08.360292089 +0000 UTC m=+856.197417452" Jan 22 06:00:09 crc kubenswrapper[4933]: I0122 06:00:09.338261 4933 generic.go:334] "Generic (PLEG): container finished" podID="3b3b2cb0-2124-4792-b8fa-7c0a3438c186" containerID="180712f2c6b8ccbd18e03b65aca86bc4dd10ed3b0a42dcb9449994c4c99ae9a4" exitCode=0 Jan 22 06:00:09 crc kubenswrapper[4933]: I0122 06:00:09.338330 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asr2rj" event={"ID":"3b3b2cb0-2124-4792-b8fa-7c0a3438c186","Type":"ContainerDied","Data":"180712f2c6b8ccbd18e03b65aca86bc4dd10ed3b0a42dcb9449994c4c99ae9a4"} Jan 22 06:00:10 crc kubenswrapper[4933]: I0122 06:00:10.647728 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asr2rj" Jan 22 06:00:10 crc kubenswrapper[4933]: I0122 06:00:10.822518 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/3b3b2cb0-2124-4792-b8fa-7c0a3438c186-util\") pod \"3b3b2cb0-2124-4792-b8fa-7c0a3438c186\" (UID: \"3b3b2cb0-2124-4792-b8fa-7c0a3438c186\") " Jan 22 06:00:10 crc kubenswrapper[4933]: I0122 06:00:10.822592 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/3b3b2cb0-2124-4792-b8fa-7c0a3438c186-bundle\") pod \"3b3b2cb0-2124-4792-b8fa-7c0a3438c186\" (UID: \"3b3b2cb0-2124-4792-b8fa-7c0a3438c186\") " Jan 22 06:00:10 crc kubenswrapper[4933]: I0122 06:00:10.822630 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c4mgh\" (UniqueName: \"kubernetes.io/projected/3b3b2cb0-2124-4792-b8fa-7c0a3438c186-kube-api-access-c4mgh\") pod \"3b3b2cb0-2124-4792-b8fa-7c0a3438c186\" (UID: \"3b3b2cb0-2124-4792-b8fa-7c0a3438c186\") " Jan 22 06:00:10 crc kubenswrapper[4933]: I0122 06:00:10.825023 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3b3b2cb0-2124-4792-b8fa-7c0a3438c186-bundle" (OuterVolumeSpecName: "bundle") pod "3b3b2cb0-2124-4792-b8fa-7c0a3438c186" (UID: "3b3b2cb0-2124-4792-b8fa-7c0a3438c186"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:00:10 crc kubenswrapper[4933]: I0122 06:00:10.833189 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3b3b2cb0-2124-4792-b8fa-7c0a3438c186-kube-api-access-c4mgh" (OuterVolumeSpecName: "kube-api-access-c4mgh") pod "3b3b2cb0-2124-4792-b8fa-7c0a3438c186" (UID: "3b3b2cb0-2124-4792-b8fa-7c0a3438c186"). InnerVolumeSpecName "kube-api-access-c4mgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:00:10 crc kubenswrapper[4933]: I0122 06:00:10.839139 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3b3b2cb0-2124-4792-b8fa-7c0a3438c186-util" (OuterVolumeSpecName: "util") pod "3b3b2cb0-2124-4792-b8fa-7c0a3438c186" (UID: "3b3b2cb0-2124-4792-b8fa-7c0a3438c186"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:00:10 crc kubenswrapper[4933]: I0122 06:00:10.925013 4933 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/3b3b2cb0-2124-4792-b8fa-7c0a3438c186-util\") on node \"crc\" DevicePath \"\"" Jan 22 06:00:10 crc kubenswrapper[4933]: I0122 06:00:10.925136 4933 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/3b3b2cb0-2124-4792-b8fa-7c0a3438c186-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:00:10 crc kubenswrapper[4933]: I0122 06:00:10.925156 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c4mgh\" (UniqueName: \"kubernetes.io/projected/3b3b2cb0-2124-4792-b8fa-7c0a3438c186-kube-api-access-c4mgh\") on node \"crc\" DevicePath \"\"" Jan 22 06:00:10 crc kubenswrapper[4933]: I0122 06:00:10.943739 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:00:10 crc kubenswrapper[4933]: I0122 06:00:10.943839 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:00:10 crc kubenswrapper[4933]: I0122 06:00:10.943909 4933 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" Jan 22 06:00:10 crc kubenswrapper[4933]: I0122 06:00:10.944794 4933 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"828e74f34c881560f38ae267428103e3c4e3e91319d786584ec0777e00c67304"} pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 06:00:10 crc kubenswrapper[4933]: I0122 06:00:10.944894 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" containerID="cri-o://828e74f34c881560f38ae267428103e3c4e3e91319d786584ec0777e00c67304" gracePeriod=600 Jan 22 06:00:11 crc kubenswrapper[4933]: I0122 06:00:11.351113 4933 generic.go:334] "Generic (PLEG): container finished" podID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerID="828e74f34c881560f38ae267428103e3c4e3e91319d786584ec0777e00c67304" exitCode=0 Jan 22 06:00:11 crc kubenswrapper[4933]: I0122 06:00:11.351168 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerDied","Data":"828e74f34c881560f38ae267428103e3c4e3e91319d786584ec0777e00c67304"} Jan 22 06:00:11 crc kubenswrapper[4933]: I0122 06:00:11.351529 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerStarted","Data":"8400a254f9521bb3bb5af6c86bda35345893f7a13920ab409abe36fdefec266d"} Jan 22 
06:00:11 crc kubenswrapper[4933]: I0122 06:00:11.351559 4933 scope.go:117] "RemoveContainer" containerID="2580ee867ae51438006c1c5f9c6959c2f36e5ea5bae190d4909ac0a463e77f1e" Jan 22 06:00:11 crc kubenswrapper[4933]: I0122 06:00:11.355238 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asr2rj" event={"ID":"3b3b2cb0-2124-4792-b8fa-7c0a3438c186","Type":"ContainerDied","Data":"3e8578487964bfecbe04972ad5a00e7e3dfe48b61c188c494bcf0df49258fa46"} Jan 22 06:00:11 crc kubenswrapper[4933]: I0122 06:00:11.355263 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3e8578487964bfecbe04972ad5a00e7e3dfe48b61c188c494bcf0df49258fa46" Jan 22 06:00:11 crc kubenswrapper[4933]: I0122 06:00:11.355317 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asr2rj" Jan 22 06:00:15 crc kubenswrapper[4933]: I0122 06:00:15.856026 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-tgvxb" Jan 22 06:00:17 crc kubenswrapper[4933]: I0122 06:00:17.436917 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-6dslh"] Jan 22 06:00:17 crc kubenswrapper[4933]: E0122 06:00:17.437434 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="267706ea-2ceb-4ba1-a923-9b82f27a8ddf" containerName="collect-profiles" Jan 22 06:00:17 crc kubenswrapper[4933]: I0122 06:00:17.437447 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="267706ea-2ceb-4ba1-a923-9b82f27a8ddf" containerName="collect-profiles" Jan 22 06:00:17 crc kubenswrapper[4933]: E0122 06:00:17.437457 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b3b2cb0-2124-4792-b8fa-7c0a3438c186" containerName="extract" Jan 22 06:00:17 crc kubenswrapper[4933]: I0122 06:00:17.437463 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b3b2cb0-2124-4792-b8fa-7c0a3438c186" containerName="extract" Jan 22 06:00:17 crc kubenswrapper[4933]: E0122 06:00:17.437475 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b3b2cb0-2124-4792-b8fa-7c0a3438c186" containerName="util" Jan 22 06:00:17 crc kubenswrapper[4933]: I0122 06:00:17.437482 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b3b2cb0-2124-4792-b8fa-7c0a3438c186" containerName="util" Jan 22 06:00:17 crc kubenswrapper[4933]: E0122 06:00:17.437491 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b3b2cb0-2124-4792-b8fa-7c0a3438c186" containerName="pull" Jan 22 06:00:17 crc kubenswrapper[4933]: I0122 06:00:17.437497 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b3b2cb0-2124-4792-b8fa-7c0a3438c186" containerName="pull" Jan 22 06:00:17 crc kubenswrapper[4933]: I0122 06:00:17.437631 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="267706ea-2ceb-4ba1-a923-9b82f27a8ddf" containerName="collect-profiles" Jan 22 06:00:17 crc kubenswrapper[4933]: I0122 06:00:17.437643 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="3b3b2cb0-2124-4792-b8fa-7c0a3438c186" containerName="extract" Jan 22 06:00:17 crc kubenswrapper[4933]: I0122 06:00:17.438117 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-6dslh" Jan 22 06:00:17 crc kubenswrapper[4933]: I0122 06:00:17.440844 4933 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager-operator"/"cert-manager-operator-controller-manager-dockercfg-d8m8q" Jan 22 06:00:17 crc kubenswrapper[4933]: I0122 06:00:17.441028 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"kube-root-ca.crt" Jan 22 06:00:17 crc kubenswrapper[4933]: I0122 06:00:17.441151 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"openshift-service-ca.crt" Jan 22 06:00:17 crc kubenswrapper[4933]: I0122 06:00:17.455007 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-6dslh"] Jan 22 06:00:17 crc kubenswrapper[4933]: I0122 06:00:17.622359 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/247e3a05-aad1-438c-b5f5-537cd4fe1a1e-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-6dslh\" (UID: \"247e3a05-aad1-438c-b5f5-537cd4fe1a1e\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-6dslh" Jan 22 06:00:17 crc kubenswrapper[4933]: I0122 06:00:17.622475 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xgxgz\" (UniqueName: \"kubernetes.io/projected/247e3a05-aad1-438c-b5f5-537cd4fe1a1e-kube-api-access-xgxgz\") pod \"cert-manager-operator-controller-manager-64cf6dff88-6dslh\" (UID: \"247e3a05-aad1-438c-b5f5-537cd4fe1a1e\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-6dslh" Jan 22 06:00:17 crc kubenswrapper[4933]: I0122 06:00:17.723201 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/247e3a05-aad1-438c-b5f5-537cd4fe1a1e-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-6dslh\" (UID: \"247e3a05-aad1-438c-b5f5-537cd4fe1a1e\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-6dslh" Jan 22 06:00:17 crc kubenswrapper[4933]: I0122 06:00:17.723279 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xgxgz\" (UniqueName: \"kubernetes.io/projected/247e3a05-aad1-438c-b5f5-537cd4fe1a1e-kube-api-access-xgxgz\") pod \"cert-manager-operator-controller-manager-64cf6dff88-6dslh\" (UID: \"247e3a05-aad1-438c-b5f5-537cd4fe1a1e\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-6dslh" Jan 22 06:00:17 crc kubenswrapper[4933]: I0122 06:00:17.723807 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/247e3a05-aad1-438c-b5f5-537cd4fe1a1e-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-6dslh\" (UID: \"247e3a05-aad1-438c-b5f5-537cd4fe1a1e\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-6dslh" Jan 22 06:00:17 crc kubenswrapper[4933]: I0122 06:00:17.753973 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xgxgz\" (UniqueName: \"kubernetes.io/projected/247e3a05-aad1-438c-b5f5-537cd4fe1a1e-kube-api-access-xgxgz\") pod \"cert-manager-operator-controller-manager-64cf6dff88-6dslh\" (UID: \"247e3a05-aad1-438c-b5f5-537cd4fe1a1e\") " 
pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-6dslh" Jan 22 06:00:17 crc kubenswrapper[4933]: I0122 06:00:17.754222 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-6dslh" Jan 22 06:00:18 crc kubenswrapper[4933]: I0122 06:00:18.250794 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-6dslh"] Jan 22 06:00:18 crc kubenswrapper[4933]: I0122 06:00:18.407412 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-6dslh" event={"ID":"247e3a05-aad1-438c-b5f5-537cd4fe1a1e","Type":"ContainerStarted","Data":"b0f1b25621d9280751f3e2e54e57e48fe552156a19d8774e12ca75798c1b0b76"} Jan 22 06:00:24 crc kubenswrapper[4933]: I0122 06:00:24.447988 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-6dslh" event={"ID":"247e3a05-aad1-438c-b5f5-537cd4fe1a1e","Type":"ContainerStarted","Data":"f69ae052873e51b2bb6faf7139a855825c63c771db00236f1ad1193a6af96efb"} Jan 22 06:00:24 crc kubenswrapper[4933]: I0122 06:00:24.473408 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-6dslh" podStartSLOduration=1.549151956 podStartE2EDuration="7.47339431s" podCreationTimestamp="2026-01-22 06:00:17 +0000 UTC" firstStartedPulling="2026-01-22 06:00:18.259537419 +0000 UTC m=+866.096662782" lastFinishedPulling="2026-01-22 06:00:24.183779763 +0000 UTC m=+872.020905136" observedRunningTime="2026-01-22 06:00:24.471588494 +0000 UTC m=+872.308713847" watchObservedRunningTime="2026-01-22 06:00:24.47339431 +0000 UTC m=+872.310519663" Jan 22 06:00:29 crc kubenswrapper[4933]: I0122 06:00:29.945915 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-mrwsn"] Jan 22 06:00:29 crc kubenswrapper[4933]: I0122 06:00:29.947347 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-cainjector-855d9ccff4-mrwsn" Jan 22 06:00:29 crc kubenswrapper[4933]: I0122 06:00:29.948787 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Jan 22 06:00:29 crc kubenswrapper[4933]: I0122 06:00:29.948906 4933 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-5q9gx" Jan 22 06:00:29 crc kubenswrapper[4933]: I0122 06:00:29.949007 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Jan 22 06:00:29 crc kubenswrapper[4933]: I0122 06:00:29.957352 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-mrwsn"] Jan 22 06:00:29 crc kubenswrapper[4933]: I0122 06:00:29.989991 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/06a56c7d-200d-4473-8766-ddb0f9f75cd4-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-mrwsn\" (UID: \"06a56c7d-200d-4473-8766-ddb0f9f75cd4\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-mrwsn" Jan 22 06:00:29 crc kubenswrapper[4933]: I0122 06:00:29.990163 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pdshx\" (UniqueName: \"kubernetes.io/projected/06a56c7d-200d-4473-8766-ddb0f9f75cd4-kube-api-access-pdshx\") pod \"cert-manager-cainjector-855d9ccff4-mrwsn\" (UID: \"06a56c7d-200d-4473-8766-ddb0f9f75cd4\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-mrwsn" Jan 22 06:00:30 crc kubenswrapper[4933]: I0122 06:00:30.091700 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pdshx\" (UniqueName: \"kubernetes.io/projected/06a56c7d-200d-4473-8766-ddb0f9f75cd4-kube-api-access-pdshx\") pod \"cert-manager-cainjector-855d9ccff4-mrwsn\" (UID: \"06a56c7d-200d-4473-8766-ddb0f9f75cd4\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-mrwsn" Jan 22 06:00:30 crc kubenswrapper[4933]: I0122 06:00:30.091773 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/06a56c7d-200d-4473-8766-ddb0f9f75cd4-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-mrwsn\" (UID: \"06a56c7d-200d-4473-8766-ddb0f9f75cd4\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-mrwsn" Jan 22 06:00:30 crc kubenswrapper[4933]: I0122 06:00:30.112898 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/06a56c7d-200d-4473-8766-ddb0f9f75cd4-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-mrwsn\" (UID: \"06a56c7d-200d-4473-8766-ddb0f9f75cd4\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-mrwsn" Jan 22 06:00:30 crc kubenswrapper[4933]: I0122 06:00:30.114296 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pdshx\" (UniqueName: \"kubernetes.io/projected/06a56c7d-200d-4473-8766-ddb0f9f75cd4-kube-api-access-pdshx\") pod \"cert-manager-cainjector-855d9ccff4-mrwsn\" (UID: \"06a56c7d-200d-4473-8766-ddb0f9f75cd4\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-mrwsn" Jan 22 06:00:30 crc kubenswrapper[4933]: I0122 06:00:30.263204 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-cainjector-855d9ccff4-mrwsn" Jan 22 06:00:30 crc kubenswrapper[4933]: I0122 06:00:30.699287 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-mrwsn"] Jan 22 06:00:30 crc kubenswrapper[4933]: W0122 06:00:30.713190 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod06a56c7d_200d_4473_8766_ddb0f9f75cd4.slice/crio-2d371d97bc3bafa63b40ffcbe5cba5ae7af4847b22327ef284c554e2a83be33f WatchSource:0}: Error finding container 2d371d97bc3bafa63b40ffcbe5cba5ae7af4847b22327ef284c554e2a83be33f: Status 404 returned error can't find the container with id 2d371d97bc3bafa63b40ffcbe5cba5ae7af4847b22327ef284c554e2a83be33f Jan 22 06:00:31 crc kubenswrapper[4933]: I0122 06:00:31.495534 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-mrwsn" event={"ID":"06a56c7d-200d-4473-8766-ddb0f9f75cd4","Type":"ContainerStarted","Data":"2d371d97bc3bafa63b40ffcbe5cba5ae7af4847b22327ef284c554e2a83be33f"} Jan 22 06:00:34 crc kubenswrapper[4933]: I0122 06:00:34.008989 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-m2xft"] Jan 22 06:00:34 crc kubenswrapper[4933]: I0122 06:00:34.010270 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-f4fb5df64-m2xft" Jan 22 06:00:34 crc kubenswrapper[4933]: I0122 06:00:34.012305 4933 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-zw9vj" Jan 22 06:00:34 crc kubenswrapper[4933]: I0122 06:00:34.054648 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-76xqs\" (UniqueName: \"kubernetes.io/projected/f81fa480-c38d-4a0e-8adc-51332ceab483-kube-api-access-76xqs\") pod \"cert-manager-webhook-f4fb5df64-m2xft\" (UID: \"f81fa480-c38d-4a0e-8adc-51332ceab483\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-m2xft" Jan 22 06:00:34 crc kubenswrapper[4933]: I0122 06:00:34.054717 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f81fa480-c38d-4a0e-8adc-51332ceab483-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-m2xft\" (UID: \"f81fa480-c38d-4a0e-8adc-51332ceab483\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-m2xft" Jan 22 06:00:34 crc kubenswrapper[4933]: I0122 06:00:34.057348 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-m2xft"] Jan 22 06:00:34 crc kubenswrapper[4933]: I0122 06:00:34.157370 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-76xqs\" (UniqueName: \"kubernetes.io/projected/f81fa480-c38d-4a0e-8adc-51332ceab483-kube-api-access-76xqs\") pod \"cert-manager-webhook-f4fb5df64-m2xft\" (UID: \"f81fa480-c38d-4a0e-8adc-51332ceab483\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-m2xft" Jan 22 06:00:34 crc kubenswrapper[4933]: I0122 06:00:34.157468 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f81fa480-c38d-4a0e-8adc-51332ceab483-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-m2xft\" (UID: \"f81fa480-c38d-4a0e-8adc-51332ceab483\") " 
pod="cert-manager/cert-manager-webhook-f4fb5df64-m2xft" Jan 22 06:00:34 crc kubenswrapper[4933]: I0122 06:00:34.174168 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-76xqs\" (UniqueName: \"kubernetes.io/projected/f81fa480-c38d-4a0e-8adc-51332ceab483-kube-api-access-76xqs\") pod \"cert-manager-webhook-f4fb5df64-m2xft\" (UID: \"f81fa480-c38d-4a0e-8adc-51332ceab483\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-m2xft" Jan 22 06:00:34 crc kubenswrapper[4933]: I0122 06:00:34.178854 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f81fa480-c38d-4a0e-8adc-51332ceab483-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-m2xft\" (UID: \"f81fa480-c38d-4a0e-8adc-51332ceab483\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-m2xft" Jan 22 06:00:34 crc kubenswrapper[4933]: I0122 06:00:34.367241 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-f4fb5df64-m2xft" Jan 22 06:00:38 crc kubenswrapper[4933]: I0122 06:00:38.353788 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-m2xft"] Jan 22 06:00:38 crc kubenswrapper[4933]: W0122 06:00:38.364763 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf81fa480_c38d_4a0e_8adc_51332ceab483.slice/crio-8f4f4b3a2ca63b01fde6c485a37780101fb5d3af2bd8a230fcb038c88556c96b WatchSource:0}: Error finding container 8f4f4b3a2ca63b01fde6c485a37780101fb5d3af2bd8a230fcb038c88556c96b: Status 404 returned error can't find the container with id 8f4f4b3a2ca63b01fde6c485a37780101fb5d3af2bd8a230fcb038c88556c96b Jan 22 06:00:38 crc kubenswrapper[4933]: I0122 06:00:38.541883 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-mrwsn" event={"ID":"06a56c7d-200d-4473-8766-ddb0f9f75cd4","Type":"ContainerStarted","Data":"4d6045a5632c1db157d3df188493e1eaf9720f57f73dbdcac36376df0bd5dc2d"} Jan 22 06:00:38 crc kubenswrapper[4933]: I0122 06:00:38.547790 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-f4fb5df64-m2xft" event={"ID":"f81fa480-c38d-4a0e-8adc-51332ceab483","Type":"ContainerStarted","Data":"b41c771a5f20895a8e588ff0979e8fcb5f73815fffac6c0378a19565594ca15f"} Jan 22 06:00:38 crc kubenswrapper[4933]: I0122 06:00:38.547827 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-f4fb5df64-m2xft" event={"ID":"f81fa480-c38d-4a0e-8adc-51332ceab483","Type":"ContainerStarted","Data":"8f4f4b3a2ca63b01fde6c485a37780101fb5d3af2bd8a230fcb038c88556c96b"} Jan 22 06:00:38 crc kubenswrapper[4933]: I0122 06:00:38.548431 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-f4fb5df64-m2xft" Jan 22 06:00:38 crc kubenswrapper[4933]: I0122 06:00:38.559139 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-855d9ccff4-mrwsn" podStartSLOduration=2.233419707 podStartE2EDuration="9.559122528s" podCreationTimestamp="2026-01-22 06:00:29 +0000 UTC" firstStartedPulling="2026-01-22 06:00:30.715180194 +0000 UTC m=+878.552305547" lastFinishedPulling="2026-01-22 06:00:38.040883005 +0000 UTC m=+885.878008368" observedRunningTime="2026-01-22 06:00:38.558510565 +0000 UTC m=+886.395635928" watchObservedRunningTime="2026-01-22 
06:00:38.559122528 +0000 UTC m=+886.396247881" Jan 22 06:00:38 crc kubenswrapper[4933]: I0122 06:00:38.582024 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-f4fb5df64-m2xft" podStartSLOduration=5.58200551 podStartE2EDuration="5.58200551s" podCreationTimestamp="2026-01-22 06:00:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:00:38.577178953 +0000 UTC m=+886.414304326" watchObservedRunningTime="2026-01-22 06:00:38.58200551 +0000 UTC m=+886.419130863" Jan 22 06:00:44 crc kubenswrapper[4933]: I0122 06:00:44.375775 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-f4fb5df64-m2xft" Jan 22 06:00:57 crc kubenswrapper[4933]: I0122 06:00:57.015484 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-86cb77c54b-fdmt6"] Jan 22 06:00:57 crc kubenswrapper[4933]: I0122 06:00:57.016963 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-86cb77c54b-fdmt6" Jan 22 06:00:57 crc kubenswrapper[4933]: I0122 06:00:57.022670 4933 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-lj2lh" Jan 22 06:00:57 crc kubenswrapper[4933]: I0122 06:00:57.037969 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-86cb77c54b-fdmt6"] Jan 22 06:00:57 crc kubenswrapper[4933]: I0122 06:00:57.086975 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4nzg4\" (UniqueName: \"kubernetes.io/projected/cfa84f31-ab21-494f-932c-77b809b656c0-kube-api-access-4nzg4\") pod \"cert-manager-86cb77c54b-fdmt6\" (UID: \"cfa84f31-ab21-494f-932c-77b809b656c0\") " pod="cert-manager/cert-manager-86cb77c54b-fdmt6" Jan 22 06:00:57 crc kubenswrapper[4933]: I0122 06:00:57.087414 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cfa84f31-ab21-494f-932c-77b809b656c0-bound-sa-token\") pod \"cert-manager-86cb77c54b-fdmt6\" (UID: \"cfa84f31-ab21-494f-932c-77b809b656c0\") " pod="cert-manager/cert-manager-86cb77c54b-fdmt6" Jan 22 06:00:57 crc kubenswrapper[4933]: I0122 06:00:57.188818 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4nzg4\" (UniqueName: \"kubernetes.io/projected/cfa84f31-ab21-494f-932c-77b809b656c0-kube-api-access-4nzg4\") pod \"cert-manager-86cb77c54b-fdmt6\" (UID: \"cfa84f31-ab21-494f-932c-77b809b656c0\") " pod="cert-manager/cert-manager-86cb77c54b-fdmt6" Jan 22 06:00:57 crc kubenswrapper[4933]: I0122 06:00:57.188878 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cfa84f31-ab21-494f-932c-77b809b656c0-bound-sa-token\") pod \"cert-manager-86cb77c54b-fdmt6\" (UID: \"cfa84f31-ab21-494f-932c-77b809b656c0\") " pod="cert-manager/cert-manager-86cb77c54b-fdmt6" Jan 22 06:00:57 crc kubenswrapper[4933]: I0122 06:00:57.214403 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4nzg4\" (UniqueName: \"kubernetes.io/projected/cfa84f31-ab21-494f-932c-77b809b656c0-kube-api-access-4nzg4\") pod \"cert-manager-86cb77c54b-fdmt6\" (UID: \"cfa84f31-ab21-494f-932c-77b809b656c0\") " pod="cert-manager/cert-manager-86cb77c54b-fdmt6" 
Jan 22 06:00:57 crc kubenswrapper[4933]: I0122 06:00:57.219018 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cfa84f31-ab21-494f-932c-77b809b656c0-bound-sa-token\") pod \"cert-manager-86cb77c54b-fdmt6\" (UID: \"cfa84f31-ab21-494f-932c-77b809b656c0\") " pod="cert-manager/cert-manager-86cb77c54b-fdmt6"
Jan 22 06:00:57 crc kubenswrapper[4933]: I0122 06:00:57.373635 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-86cb77c54b-fdmt6"
Jan 22 06:00:57 crc kubenswrapper[4933]: I0122 06:00:57.618051 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-86cb77c54b-fdmt6"]
Jan 22 06:00:57 crc kubenswrapper[4933]: I0122 06:00:57.679591 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-86cb77c54b-fdmt6" event={"ID":"cfa84f31-ab21-494f-932c-77b809b656c0","Type":"ContainerStarted","Data":"0b3f30bf067272e6a5fd51e92f4bae19b115a60eb38e6037c612875fa444d992"}
Jan 22 06:00:58 crc kubenswrapper[4933]: I0122 06:00:58.688722 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-86cb77c54b-fdmt6" event={"ID":"cfa84f31-ab21-494f-932c-77b809b656c0","Type":"ContainerStarted","Data":"3563d829ab8047ad789391ff3477c6a248a9ca5895929f527fd4372e872f5547"}
Jan 22 06:00:58 crc kubenswrapper[4933]: I0122 06:00:58.712566 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-86cb77c54b-fdmt6" podStartSLOduration=2.712550958 podStartE2EDuration="2.712550958s" podCreationTimestamp="2026-01-22 06:00:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:00:58.711536347 +0000 UTC m=+906.548661710" watchObservedRunningTime="2026-01-22 06:00:58.712550958 +0000 UTC m=+906.549676311"
Jan 22 06:01:08 crc kubenswrapper[4933]: I0122 06:01:08.587406 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-mrmf9"]
Jan 22 06:01:08 crc kubenswrapper[4933]: I0122 06:01:08.589302 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-mrmf9"
Jan 22 06:01:08 crc kubenswrapper[4933]: I0122 06:01:08.595028 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt"
Jan 22 06:01:08 crc kubenswrapper[4933]: I0122 06:01:08.595613 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt"
Jan 22 06:01:08 crc kubenswrapper[4933]: I0122 06:01:08.596418 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-t5jm7"
Jan 22 06:01:08 crc kubenswrapper[4933]: I0122 06:01:08.619486 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-mrmf9"]
Jan 22 06:01:08 crc kubenswrapper[4933]: I0122 06:01:08.661615 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-92jt6\" (UniqueName: \"kubernetes.io/projected/68e44743-525e-4db2-98dd-296dde5f4dd4-kube-api-access-92jt6\") pod \"openstack-operator-index-mrmf9\" (UID: \"68e44743-525e-4db2-98dd-296dde5f4dd4\") " pod="openstack-operators/openstack-operator-index-mrmf9"
Jan 22 06:01:08 crc kubenswrapper[4933]: I0122 06:01:08.762593 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-92jt6\" (UniqueName: \"kubernetes.io/projected/68e44743-525e-4db2-98dd-296dde5f4dd4-kube-api-access-92jt6\") pod \"openstack-operator-index-mrmf9\" (UID: \"68e44743-525e-4db2-98dd-296dde5f4dd4\") " pod="openstack-operators/openstack-operator-index-mrmf9"
Jan 22 06:01:08 crc kubenswrapper[4933]: I0122 06:01:08.779656 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-92jt6\" (UniqueName: \"kubernetes.io/projected/68e44743-525e-4db2-98dd-296dde5f4dd4-kube-api-access-92jt6\") pod \"openstack-operator-index-mrmf9\" (UID: \"68e44743-525e-4db2-98dd-296dde5f4dd4\") " pod="openstack-operators/openstack-operator-index-mrmf9"
Jan 22 06:01:08 crc kubenswrapper[4933]: I0122 06:01:08.918541 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-mrmf9"
Jan 22 06:01:09 crc kubenswrapper[4933]: I0122 06:01:09.121809 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-mrmf9"]
Jan 22 06:01:09 crc kubenswrapper[4933]: I0122 06:01:09.785465 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-mrmf9" event={"ID":"68e44743-525e-4db2-98dd-296dde5f4dd4","Type":"ContainerStarted","Data":"9fdc862f34cdcc6bedb8f84336b56f87425633879bab1eb20d9b3d74ab9f513e"}
Jan 22 06:01:12 crc kubenswrapper[4933]: I0122 06:01:12.150661 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-mrmf9"]
Jan 22 06:01:12 crc kubenswrapper[4933]: I0122 06:01:12.804117 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-mrmf9" event={"ID":"68e44743-525e-4db2-98dd-296dde5f4dd4","Type":"ContainerStarted","Data":"c5b0a6707bc97cfe5725d22fec2bf69e35509dfee59e6ed9486b30a0ca094674"}
Jan 22 06:01:12 crc kubenswrapper[4933]: I0122 06:01:12.804236 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-mrmf9" podUID="68e44743-525e-4db2-98dd-296dde5f4dd4" containerName="registry-server" containerID="cri-o://c5b0a6707bc97cfe5725d22fec2bf69e35509dfee59e6ed9486b30a0ca094674" gracePeriod=2
Jan 22 06:01:12 crc kubenswrapper[4933]: I0122 06:01:12.827752 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-mrmf9" podStartSLOduration=1.599449798 podStartE2EDuration="4.827735829s" podCreationTimestamp="2026-01-22 06:01:08 +0000 UTC" firstStartedPulling="2026-01-22 06:01:09.135677235 +0000 UTC m=+916.972802588" lastFinishedPulling="2026-01-22 06:01:12.363963226 +0000 UTC m=+920.201088619" observedRunningTime="2026-01-22 06:01:12.823112676 +0000 UTC m=+920.660238069" watchObservedRunningTime="2026-01-22 06:01:12.827735829 +0000 UTC m=+920.664861172"
Jan 22 06:01:12 crc kubenswrapper[4933]: I0122 06:01:12.956716 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-2cblb"]
Jan 22 06:01:12 crc kubenswrapper[4933]: I0122 06:01:12.957859 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-2cblb"
Jan 22 06:01:12 crc kubenswrapper[4933]: I0122 06:01:12.967660 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-2cblb"]
Jan 22 06:01:13 crc kubenswrapper[4933]: I0122 06:01:13.030030 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tfwsl\" (UniqueName: \"kubernetes.io/projected/283252a7-ff09-4856-9249-7c6cd70dff99-kube-api-access-tfwsl\") pod \"openstack-operator-index-2cblb\" (UID: \"283252a7-ff09-4856-9249-7c6cd70dff99\") " pod="openstack-operators/openstack-operator-index-2cblb"
Jan 22 06:01:13 crc kubenswrapper[4933]: I0122 06:01:13.131673 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tfwsl\" (UniqueName: \"kubernetes.io/projected/283252a7-ff09-4856-9249-7c6cd70dff99-kube-api-access-tfwsl\") pod \"openstack-operator-index-2cblb\" (UID: \"283252a7-ff09-4856-9249-7c6cd70dff99\") " pod="openstack-operators/openstack-operator-index-2cblb"
Jan 22 06:01:13 crc kubenswrapper[4933]: I0122 06:01:13.149624 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tfwsl\" (UniqueName: \"kubernetes.io/projected/283252a7-ff09-4856-9249-7c6cd70dff99-kube-api-access-tfwsl\") pod \"openstack-operator-index-2cblb\" (UID: \"283252a7-ff09-4856-9249-7c6cd70dff99\") " pod="openstack-operators/openstack-operator-index-2cblb"
Jan 22 06:01:13 crc kubenswrapper[4933]: I0122 06:01:13.174712 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-mrmf9"
Jan 22 06:01:13 crc kubenswrapper[4933]: I0122 06:01:13.232281 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-92jt6\" (UniqueName: \"kubernetes.io/projected/68e44743-525e-4db2-98dd-296dde5f4dd4-kube-api-access-92jt6\") pod \"68e44743-525e-4db2-98dd-296dde5f4dd4\" (UID: \"68e44743-525e-4db2-98dd-296dde5f4dd4\") "
Jan 22 06:01:13 crc kubenswrapper[4933]: I0122 06:01:13.235544 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/68e44743-525e-4db2-98dd-296dde5f4dd4-kube-api-access-92jt6" (OuterVolumeSpecName: "kube-api-access-92jt6") pod "68e44743-525e-4db2-98dd-296dde5f4dd4" (UID: "68e44743-525e-4db2-98dd-296dde5f4dd4"). InnerVolumeSpecName "kube-api-access-92jt6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 06:01:13 crc kubenswrapper[4933]: I0122 06:01:13.280801 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-2cblb"
Jan 22 06:01:13 crc kubenswrapper[4933]: I0122 06:01:13.334631 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-92jt6\" (UniqueName: \"kubernetes.io/projected/68e44743-525e-4db2-98dd-296dde5f4dd4-kube-api-access-92jt6\") on node \"crc\" DevicePath \"\""
Jan 22 06:01:13 crc kubenswrapper[4933]: I0122 06:01:13.462670 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-2cblb"]
Jan 22 06:01:13 crc kubenswrapper[4933]: W0122 06:01:13.463619 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod283252a7_ff09_4856_9249_7c6cd70dff99.slice/crio-972a4aac551fbb6ab1a02d90a81cf33ab513089c8f96dc6c17c96dac167dd482 WatchSource:0}: Error finding container 972a4aac551fbb6ab1a02d90a81cf33ab513089c8f96dc6c17c96dac167dd482: Status 404 returned error can't find the container with id 972a4aac551fbb6ab1a02d90a81cf33ab513089c8f96dc6c17c96dac167dd482
Jan 22 06:01:13 crc kubenswrapper[4933]: I0122 06:01:13.816964 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-2cblb" event={"ID":"283252a7-ff09-4856-9249-7c6cd70dff99","Type":"ContainerStarted","Data":"972a4aac551fbb6ab1a02d90a81cf33ab513089c8f96dc6c17c96dac167dd482"}
Jan 22 06:01:13 crc kubenswrapper[4933]: I0122 06:01:13.819253 4933 generic.go:334] "Generic (PLEG): container finished" podID="68e44743-525e-4db2-98dd-296dde5f4dd4" containerID="c5b0a6707bc97cfe5725d22fec2bf69e35509dfee59e6ed9486b30a0ca094674" exitCode=0
Jan 22 06:01:13 crc kubenswrapper[4933]: I0122 06:01:13.819350 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-mrmf9" event={"ID":"68e44743-525e-4db2-98dd-296dde5f4dd4","Type":"ContainerDied","Data":"c5b0a6707bc97cfe5725d22fec2bf69e35509dfee59e6ed9486b30a0ca094674"}
Jan 22 06:01:13 crc kubenswrapper[4933]: I0122 06:01:13.819391 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-mrmf9"
Jan 22 06:01:13 crc kubenswrapper[4933]: I0122 06:01:13.819434 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-mrmf9" event={"ID":"68e44743-525e-4db2-98dd-296dde5f4dd4","Type":"ContainerDied","Data":"9fdc862f34cdcc6bedb8f84336b56f87425633879bab1eb20d9b3d74ab9f513e"}
Jan 22 06:01:13 crc kubenswrapper[4933]: I0122 06:01:13.819466 4933 scope.go:117] "RemoveContainer" containerID="c5b0a6707bc97cfe5725d22fec2bf69e35509dfee59e6ed9486b30a0ca094674"
Jan 22 06:01:13 crc kubenswrapper[4933]: I0122 06:01:13.851813 4933 scope.go:117] "RemoveContainer" containerID="c5b0a6707bc97cfe5725d22fec2bf69e35509dfee59e6ed9486b30a0ca094674"
Jan 22 06:01:13 crc kubenswrapper[4933]: E0122 06:01:13.852257 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c5b0a6707bc97cfe5725d22fec2bf69e35509dfee59e6ed9486b30a0ca094674\": container with ID starting with c5b0a6707bc97cfe5725d22fec2bf69e35509dfee59e6ed9486b30a0ca094674 not found: ID does not exist" containerID="c5b0a6707bc97cfe5725d22fec2bf69e35509dfee59e6ed9486b30a0ca094674"
Jan 22 06:01:13 crc kubenswrapper[4933]: I0122 06:01:13.852314 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c5b0a6707bc97cfe5725d22fec2bf69e35509dfee59e6ed9486b30a0ca094674"} err="failed to get container status \"c5b0a6707bc97cfe5725d22fec2bf69e35509dfee59e6ed9486b30a0ca094674\": rpc error: code = NotFound desc = could not find container \"c5b0a6707bc97cfe5725d22fec2bf69e35509dfee59e6ed9486b30a0ca094674\": container with ID starting with c5b0a6707bc97cfe5725d22fec2bf69e35509dfee59e6ed9486b30a0ca094674 not found: ID does not exist"
Jan 22 06:01:13 crc kubenswrapper[4933]: I0122 06:01:13.874990 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-mrmf9"]
Jan 22 06:01:13 crc kubenswrapper[4933]: I0122 06:01:13.882086 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-mrmf9"]
Jan 22 06:01:14 crc kubenswrapper[4933]: I0122 06:01:14.506702 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="68e44743-525e-4db2-98dd-296dde5f4dd4" path="/var/lib/kubelet/pods/68e44743-525e-4db2-98dd-296dde5f4dd4/volumes"
Jan 22 06:01:14 crc kubenswrapper[4933]: I0122 06:01:14.828469 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-2cblb" event={"ID":"283252a7-ff09-4856-9249-7c6cd70dff99","Type":"ContainerStarted","Data":"1f50e45a091f69105064e1895a08c7c0b6b503e80b1c3d29c62cf5781e6f28de"}
Jan 22 06:01:14 crc kubenswrapper[4933]: I0122 06:01:14.849010 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-2cblb" podStartSLOduration=2.422772834 podStartE2EDuration="2.848988129s" podCreationTimestamp="2026-01-22 06:01:12 +0000 UTC" firstStartedPulling="2026-01-22 06:01:13.467568578 +0000 UTC m=+921.304693931" lastFinishedPulling="2026-01-22 06:01:13.893783823 +0000 UTC m=+921.730909226" observedRunningTime="2026-01-22 06:01:14.843144001 +0000 UTC m=+922.680269374" watchObservedRunningTime="2026-01-22 06:01:14.848988129 +0000 UTC m=+922.686113482"
Jan 22 06:01:15 crc kubenswrapper[4933]: I0122 06:01:15.558258 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-xbwpr"]
Jan 22 06:01:15 crc kubenswrapper[4933]: E0122 06:01:15.558504 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68e44743-525e-4db2-98dd-296dde5f4dd4" containerName="registry-server"
Jan 22 06:01:15 crc kubenswrapper[4933]: I0122 06:01:15.558519 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="68e44743-525e-4db2-98dd-296dde5f4dd4" containerName="registry-server"
Jan 22 06:01:15 crc kubenswrapper[4933]: I0122 06:01:15.558661 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="68e44743-525e-4db2-98dd-296dde5f4dd4" containerName="registry-server"
Jan 22 06:01:15 crc kubenswrapper[4933]: I0122 06:01:15.559614 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xbwpr"
Jan 22 06:01:15 crc kubenswrapper[4933]: I0122 06:01:15.571845 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xbwpr"]
Jan 22 06:01:15 crc kubenswrapper[4933]: I0122 06:01:15.671614 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d-utilities\") pod \"community-operators-xbwpr\" (UID: \"c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d\") " pod="openshift-marketplace/community-operators-xbwpr"
Jan 22 06:01:15 crc kubenswrapper[4933]: I0122 06:01:15.671690 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d-catalog-content\") pod \"community-operators-xbwpr\" (UID: \"c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d\") " pod="openshift-marketplace/community-operators-xbwpr"
Jan 22 06:01:15 crc kubenswrapper[4933]: I0122 06:01:15.671800 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-26zgc\" (UniqueName: \"kubernetes.io/projected/c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d-kube-api-access-26zgc\") pod \"community-operators-xbwpr\" (UID: \"c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d\") " pod="openshift-marketplace/community-operators-xbwpr"
Jan 22 06:01:15 crc kubenswrapper[4933]: I0122 06:01:15.773203 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-26zgc\" (UniqueName: \"kubernetes.io/projected/c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d-kube-api-access-26zgc\") pod \"community-operators-xbwpr\" (UID: \"c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d\") " pod="openshift-marketplace/community-operators-xbwpr"
Jan 22 06:01:15 crc kubenswrapper[4933]: I0122 06:01:15.773295 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d-utilities\") pod \"community-operators-xbwpr\" (UID: \"c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d\") " pod="openshift-marketplace/community-operators-xbwpr"
Jan 22 06:01:15 crc kubenswrapper[4933]: I0122 06:01:15.773349 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d-catalog-content\") pod \"community-operators-xbwpr\" (UID: \"c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d\") " pod="openshift-marketplace/community-operators-xbwpr"
Jan 22 06:01:15 crc kubenswrapper[4933]: I0122 06:01:15.773728 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d-utilities\") pod \"community-operators-xbwpr\" (UID: \"c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d\") " pod="openshift-marketplace/community-operators-xbwpr"
Jan 22 06:01:15 crc kubenswrapper[4933]: I0122 06:01:15.773840 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d-catalog-content\") pod \"community-operators-xbwpr\" (UID: \"c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d\") " pod="openshift-marketplace/community-operators-xbwpr"
Jan 22 06:01:15 crc kubenswrapper[4933]: I0122 06:01:15.789953 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-26zgc\" (UniqueName: \"kubernetes.io/projected/c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d-kube-api-access-26zgc\") pod \"community-operators-xbwpr\" (UID: \"c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d\") " pod="openshift-marketplace/community-operators-xbwpr"
Jan 22 06:01:15 crc kubenswrapper[4933]: I0122 06:01:15.879946 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xbwpr"
Jan 22 06:01:16 crc kubenswrapper[4933]: I0122 06:01:16.145226 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xbwpr"]
Jan 22 06:01:16 crc kubenswrapper[4933]: W0122 06:01:16.153219 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc5f1fce4_6a46_4783_a1b7_c2dcf0fb4d2d.slice/crio-7ff68658d52548e0570b95f996b6a7d0e236bff49153c285031fcfa45b24de12 WatchSource:0}: Error finding container 7ff68658d52548e0570b95f996b6a7d0e236bff49153c285031fcfa45b24de12: Status 404 returned error can't find the container with id 7ff68658d52548e0570b95f996b6a7d0e236bff49153c285031fcfa45b24de12
Jan 22 06:01:16 crc kubenswrapper[4933]: I0122 06:01:16.860592 4933 generic.go:334] "Generic (PLEG): container finished" podID="c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d" containerID="087f9cddfb71c44c8956fd945f415cd14db2bdef257c27ee46433f4a1ae52552" exitCode=0
Jan 22 06:01:16 crc kubenswrapper[4933]: I0122 06:01:16.860649 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xbwpr" event={"ID":"c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d","Type":"ContainerDied","Data":"087f9cddfb71c44c8956fd945f415cd14db2bdef257c27ee46433f4a1ae52552"}
Jan 22 06:01:16 crc kubenswrapper[4933]: I0122 06:01:16.860679 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xbwpr" event={"ID":"c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d","Type":"ContainerStarted","Data":"7ff68658d52548e0570b95f996b6a7d0e236bff49153c285031fcfa45b24de12"}
Jan 22 06:01:17 crc kubenswrapper[4933]: I0122 06:01:17.871005 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xbwpr" event={"ID":"c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d","Type":"ContainerStarted","Data":"fa008bcc5cce3205576e3397ec8914555c2b83753ef2529815b05d1c11cf6c0e"}
Jan 22 06:01:18 crc kubenswrapper[4933]: I0122 06:01:18.886807 4933 generic.go:334] "Generic (PLEG): container finished" podID="c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d" containerID="fa008bcc5cce3205576e3397ec8914555c2b83753ef2529815b05d1c11cf6c0e" exitCode=0
Jan 22 06:01:18 crc kubenswrapper[4933]: I0122 06:01:18.886874 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xbwpr" event={"ID":"c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d","Type":"ContainerDied","Data":"fa008bcc5cce3205576e3397ec8914555c2b83753ef2529815b05d1c11cf6c0e"}
Jan 22 06:01:19 crc kubenswrapper[4933]: I0122 06:01:19.895352 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xbwpr" event={"ID":"c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d","Type":"ContainerStarted","Data":"a000c5be5fb19ed7f4c92edf482de72c7dfca67f050e43d23dcab6cedeefbb06"}
Jan 22 06:01:19 crc kubenswrapper[4933]: I0122 06:01:19.921389 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-xbwpr" podStartSLOduration=2.460865484 podStartE2EDuration="4.921372133s" podCreationTimestamp="2026-01-22 06:01:15 +0000 UTC" firstStartedPulling="2026-01-22 06:01:16.862634746 +0000 UTC m=+924.699760149" lastFinishedPulling="2026-01-22 06:01:19.323141405 +0000 UTC m=+927.160266798" observedRunningTime="2026-01-22 06:01:19.91773161 +0000 UTC m=+927.754856993" watchObservedRunningTime="2026-01-22 06:01:19.921372133 +0000 UTC m=+927.758497486"
Jan 22 06:01:23 crc kubenswrapper[4933]: I0122 06:01:23.281593 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-2cblb"
Jan 22 06:01:23 crc kubenswrapper[4933]: I0122 06:01:23.281887 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-2cblb"
Jan 22 06:01:23 crc kubenswrapper[4933]: I0122 06:01:23.320177 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-2cblb"
Jan 22 06:01:23 crc kubenswrapper[4933]: I0122 06:01:23.963591 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-2cblb"
Jan 22 06:01:24 crc kubenswrapper[4933]: I0122 06:01:24.583133 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/3078a1b977323d6d8f95a4018ef06f377c198eb1741282ac05e933b603vrtpk"]
Jan 22 06:01:24 crc kubenswrapper[4933]: I0122 06:01:24.584388 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/3078a1b977323d6d8f95a4018ef06f377c198eb1741282ac05e933b603vrtpk"
Jan 22 06:01:24 crc kubenswrapper[4933]: I0122 06:01:24.586899 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-nggmd"
Jan 22 06:01:24 crc kubenswrapper[4933]: I0122 06:01:24.604137 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/3078a1b977323d6d8f95a4018ef06f377c198eb1741282ac05e933b603vrtpk"]
Jan 22 06:01:24 crc kubenswrapper[4933]: I0122 06:01:24.624980 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6a552e55-190e-4f4c-9234-ebd6d63ee4ad-bundle\") pod \"3078a1b977323d6d8f95a4018ef06f377c198eb1741282ac05e933b603vrtpk\" (UID: \"6a552e55-190e-4f4c-9234-ebd6d63ee4ad\") " pod="openstack-operators/3078a1b977323d6d8f95a4018ef06f377c198eb1741282ac05e933b603vrtpk"
Jan 22 06:01:24 crc kubenswrapper[4933]: I0122 06:01:24.625228 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2vvjb\" (UniqueName: \"kubernetes.io/projected/6a552e55-190e-4f4c-9234-ebd6d63ee4ad-kube-api-access-2vvjb\") pod \"3078a1b977323d6d8f95a4018ef06f377c198eb1741282ac05e933b603vrtpk\" (UID: \"6a552e55-190e-4f4c-9234-ebd6d63ee4ad\") " pod="openstack-operators/3078a1b977323d6d8f95a4018ef06f377c198eb1741282ac05e933b603vrtpk"
Jan 22 06:01:24 crc kubenswrapper[4933]: I0122 06:01:24.625254 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6a552e55-190e-4f4c-9234-ebd6d63ee4ad-util\") pod \"3078a1b977323d6d8f95a4018ef06f377c198eb1741282ac05e933b603vrtpk\" (UID: \"6a552e55-190e-4f4c-9234-ebd6d63ee4ad\") " pod="openstack-operators/3078a1b977323d6d8f95a4018ef06f377c198eb1741282ac05e933b603vrtpk"
Jan 22 06:01:24 crc kubenswrapper[4933]: I0122 06:01:24.726104 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6a552e55-190e-4f4c-9234-ebd6d63ee4ad-util\") pod \"3078a1b977323d6d8f95a4018ef06f377c198eb1741282ac05e933b603vrtpk\" (UID: \"6a552e55-190e-4f4c-9234-ebd6d63ee4ad\") " pod="openstack-operators/3078a1b977323d6d8f95a4018ef06f377c198eb1741282ac05e933b603vrtpk"
Jan 22 06:01:24 crc kubenswrapper[4933]: I0122 06:01:24.726153 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2vvjb\" (UniqueName: \"kubernetes.io/projected/6a552e55-190e-4f4c-9234-ebd6d63ee4ad-kube-api-access-2vvjb\") pod \"3078a1b977323d6d8f95a4018ef06f377c198eb1741282ac05e933b603vrtpk\" (UID: \"6a552e55-190e-4f4c-9234-ebd6d63ee4ad\") " pod="openstack-operators/3078a1b977323d6d8f95a4018ef06f377c198eb1741282ac05e933b603vrtpk"
Jan 22 06:01:24 crc kubenswrapper[4933]: I0122 06:01:24.726211 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6a552e55-190e-4f4c-9234-ebd6d63ee4ad-bundle\") pod \"3078a1b977323d6d8f95a4018ef06f377c198eb1741282ac05e933b603vrtpk\" (UID: \"6a552e55-190e-4f4c-9234-ebd6d63ee4ad\") " pod="openstack-operators/3078a1b977323d6d8f95a4018ef06f377c198eb1741282ac05e933b603vrtpk"
Jan 22 06:01:24 crc kubenswrapper[4933]: I0122 06:01:24.726602 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6a552e55-190e-4f4c-9234-ebd6d63ee4ad-util\") pod \"3078a1b977323d6d8f95a4018ef06f377c198eb1741282ac05e933b603vrtpk\" (UID: \"6a552e55-190e-4f4c-9234-ebd6d63ee4ad\") " pod="openstack-operators/3078a1b977323d6d8f95a4018ef06f377c198eb1741282ac05e933b603vrtpk"
Jan 22 06:01:24 crc kubenswrapper[4933]: I0122 06:01:24.726655 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6a552e55-190e-4f4c-9234-ebd6d63ee4ad-bundle\") pod \"3078a1b977323d6d8f95a4018ef06f377c198eb1741282ac05e933b603vrtpk\" (UID: \"6a552e55-190e-4f4c-9234-ebd6d63ee4ad\") " pod="openstack-operators/3078a1b977323d6d8f95a4018ef06f377c198eb1741282ac05e933b603vrtpk"
Jan 22 06:01:24 crc kubenswrapper[4933]: I0122 06:01:24.758675 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2vvjb\" (UniqueName: \"kubernetes.io/projected/6a552e55-190e-4f4c-9234-ebd6d63ee4ad-kube-api-access-2vvjb\") pod \"3078a1b977323d6d8f95a4018ef06f377c198eb1741282ac05e933b603vrtpk\" (UID: \"6a552e55-190e-4f4c-9234-ebd6d63ee4ad\") " pod="openstack-operators/3078a1b977323d6d8f95a4018ef06f377c198eb1741282ac05e933b603vrtpk"
Jan 22 06:01:24 crc kubenswrapper[4933]: I0122 06:01:24.903936 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/3078a1b977323d6d8f95a4018ef06f377c198eb1741282ac05e933b603vrtpk"
Jan 22 06:01:25 crc kubenswrapper[4933]: I0122 06:01:25.361571 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/3078a1b977323d6d8f95a4018ef06f377c198eb1741282ac05e933b603vrtpk"]
Jan 22 06:01:25 crc kubenswrapper[4933]: W0122 06:01:25.372402 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6a552e55_190e_4f4c_9234_ebd6d63ee4ad.slice/crio-556aca96b2954af4abda54046606ce0b60cf899aeca54a21395d79608b973a08 WatchSource:0}: Error finding container 556aca96b2954af4abda54046606ce0b60cf899aeca54a21395d79608b973a08: Status 404 returned error can't find the container with id 556aca96b2954af4abda54046606ce0b60cf899aeca54a21395d79608b973a08
Jan 22 06:01:25 crc kubenswrapper[4933]: I0122 06:01:25.880447 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-xbwpr"
Jan 22 06:01:25 crc kubenswrapper[4933]: I0122 06:01:25.881309 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-xbwpr"
Jan 22 06:01:25 crc kubenswrapper[4933]: I0122 06:01:25.929739 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-xbwpr"
Jan 22 06:01:25 crc kubenswrapper[4933]: I0122 06:01:25.938653 4933 generic.go:334] "Generic (PLEG): container finished" podID="6a552e55-190e-4f4c-9234-ebd6d63ee4ad" containerID="df3d8289e9b63dd173e876e3445468fe7e7a36874a0fd4ae2d2fe628d5432964" exitCode=0
Jan 22 06:01:25 crc kubenswrapper[4933]: I0122 06:01:25.938705 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/3078a1b977323d6d8f95a4018ef06f377c198eb1741282ac05e933b603vrtpk" event={"ID":"6a552e55-190e-4f4c-9234-ebd6d63ee4ad","Type":"ContainerDied","Data":"df3d8289e9b63dd173e876e3445468fe7e7a36874a0fd4ae2d2fe628d5432964"}
Jan 22 06:01:25 crc kubenswrapper[4933]: I0122 06:01:25.938733 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/3078a1b977323d6d8f95a4018ef06f377c198eb1741282ac05e933b603vrtpk" event={"ID":"6a552e55-190e-4f4c-9234-ebd6d63ee4ad","Type":"ContainerStarted","Data":"556aca96b2954af4abda54046606ce0b60cf899aeca54a21395d79608b973a08"}
Jan 22 06:01:25 crc kubenswrapper[4933]: I0122 06:01:25.986514 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-xbwpr"
Jan 22 06:01:26 crc kubenswrapper[4933]: I0122 06:01:26.949462 4933 generic.go:334] "Generic (PLEG): container finished" podID="6a552e55-190e-4f4c-9234-ebd6d63ee4ad" containerID="854ed70e35e47f5ee8aeafc38665dead565187afc20e1b25abe527e43e0c4958" exitCode=0
Jan 22 06:01:26 crc kubenswrapper[4933]: I0122 06:01:26.949558 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/3078a1b977323d6d8f95a4018ef06f377c198eb1741282ac05e933b603vrtpk" event={"ID":"6a552e55-190e-4f4c-9234-ebd6d63ee4ad","Type":"ContainerDied","Data":"854ed70e35e47f5ee8aeafc38665dead565187afc20e1b25abe527e43e0c4958"}
Jan 22 06:01:27 crc kubenswrapper[4933]: I0122 06:01:27.960441 4933 generic.go:334] "Generic (PLEG): container finished" podID="6a552e55-190e-4f4c-9234-ebd6d63ee4ad" containerID="26dac514554d557574319df7250c1c0474d6c61691fd494492372f834ca8ed27" exitCode=0
Jan 22 06:01:27 crc kubenswrapper[4933]: I0122 06:01:27.960495 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/3078a1b977323d6d8f95a4018ef06f377c198eb1741282ac05e933b603vrtpk" event={"ID":"6a552e55-190e-4f4c-9234-ebd6d63ee4ad","Type":"ContainerDied","Data":"26dac514554d557574319df7250c1c0474d6c61691fd494492372f834ca8ed27"}
Jan 22 06:01:29 crc kubenswrapper[4933]: I0122 06:01:29.272342 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/3078a1b977323d6d8f95a4018ef06f377c198eb1741282ac05e933b603vrtpk"
Jan 22 06:01:29 crc kubenswrapper[4933]: I0122 06:01:29.282590 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2vvjb\" (UniqueName: \"kubernetes.io/projected/6a552e55-190e-4f4c-9234-ebd6d63ee4ad-kube-api-access-2vvjb\") pod \"6a552e55-190e-4f4c-9234-ebd6d63ee4ad\" (UID: \"6a552e55-190e-4f4c-9234-ebd6d63ee4ad\") "
Jan 22 06:01:29 crc kubenswrapper[4933]: I0122 06:01:29.282678 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6a552e55-190e-4f4c-9234-ebd6d63ee4ad-bundle\") pod \"6a552e55-190e-4f4c-9234-ebd6d63ee4ad\" (UID: \"6a552e55-190e-4f4c-9234-ebd6d63ee4ad\") "
Jan 22 06:01:29 crc kubenswrapper[4933]: I0122 06:01:29.282779 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6a552e55-190e-4f4c-9234-ebd6d63ee4ad-util\") pod \"6a552e55-190e-4f4c-9234-ebd6d63ee4ad\" (UID: \"6a552e55-190e-4f4c-9234-ebd6d63ee4ad\") "
Jan 22 06:01:29 crc kubenswrapper[4933]: I0122 06:01:29.283649 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6a552e55-190e-4f4c-9234-ebd6d63ee4ad-bundle" (OuterVolumeSpecName: "bundle") pod "6a552e55-190e-4f4c-9234-ebd6d63ee4ad" (UID: "6a552e55-190e-4f4c-9234-ebd6d63ee4ad"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 06:01:29 crc kubenswrapper[4933]: I0122 06:01:29.290005 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a552e55-190e-4f4c-9234-ebd6d63ee4ad-kube-api-access-2vvjb" (OuterVolumeSpecName: "kube-api-access-2vvjb") pod "6a552e55-190e-4f4c-9234-ebd6d63ee4ad" (UID: "6a552e55-190e-4f4c-9234-ebd6d63ee4ad"). InnerVolumeSpecName "kube-api-access-2vvjb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 06:01:29 crc kubenswrapper[4933]: I0122 06:01:29.299360 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6a552e55-190e-4f4c-9234-ebd6d63ee4ad-util" (OuterVolumeSpecName: "util") pod "6a552e55-190e-4f4c-9234-ebd6d63ee4ad" (UID: "6a552e55-190e-4f4c-9234-ebd6d63ee4ad"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 06:01:29 crc kubenswrapper[4933]: I0122 06:01:29.384286 4933 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6a552e55-190e-4f4c-9234-ebd6d63ee4ad-util\") on node \"crc\" DevicePath \"\""
Jan 22 06:01:29 crc kubenswrapper[4933]: I0122 06:01:29.384326 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2vvjb\" (UniqueName: \"kubernetes.io/projected/6a552e55-190e-4f4c-9234-ebd6d63ee4ad-kube-api-access-2vvjb\") on node \"crc\" DevicePath \"\""
Jan 22 06:01:29 crc kubenswrapper[4933]: I0122 06:01:29.384339 4933 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6a552e55-190e-4f4c-9234-ebd6d63ee4ad-bundle\") on node \"crc\" DevicePath \"\""
Jan 22 06:01:29 crc kubenswrapper[4933]: I0122 06:01:29.540265 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xbwpr"]
Jan 22 06:01:29 crc kubenswrapper[4933]: I0122 06:01:29.540512 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-xbwpr" podUID="c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d" containerName="registry-server" containerID="cri-o://a000c5be5fb19ed7f4c92edf482de72c7dfca67f050e43d23dcab6cedeefbb06" gracePeriod=2
Jan 22 06:01:29 crc kubenswrapper[4933]: I0122 06:01:29.974942 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/3078a1b977323d6d8f95a4018ef06f377c198eb1741282ac05e933b603vrtpk" event={"ID":"6a552e55-190e-4f4c-9234-ebd6d63ee4ad","Type":"ContainerDied","Data":"556aca96b2954af4abda54046606ce0b60cf899aeca54a21395d79608b973a08"}
Jan 22 06:01:29 crc kubenswrapper[4933]: I0122 06:01:29.974970 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/3078a1b977323d6d8f95a4018ef06f377c198eb1741282ac05e933b603vrtpk"
Jan 22 06:01:29 crc kubenswrapper[4933]: I0122 06:01:29.974994 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="556aca96b2954af4abda54046606ce0b60cf899aeca54a21395d79608b973a08"
Jan 22 06:01:29 crc kubenswrapper[4933]: I0122 06:01:29.977284 4933 generic.go:334] "Generic (PLEG): container finished" podID="c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d" containerID="a000c5be5fb19ed7f4c92edf482de72c7dfca67f050e43d23dcab6cedeefbb06" exitCode=0
Jan 22 06:01:29 crc kubenswrapper[4933]: I0122 06:01:29.977360 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xbwpr" event={"ID":"c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d","Type":"ContainerDied","Data":"a000c5be5fb19ed7f4c92edf482de72c7dfca67f050e43d23dcab6cedeefbb06"}
Jan 22 06:01:30 crc kubenswrapper[4933]: I0122 06:01:30.446327 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xbwpr"
Jan 22 06:01:30 crc kubenswrapper[4933]: I0122 06:01:30.498323 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d-utilities\") pod \"c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d\" (UID: \"c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d\") "
Jan 22 06:01:30 crc kubenswrapper[4933]: I0122 06:01:30.498426 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d-catalog-content\") pod \"c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d\" (UID: \"c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d\") "
Jan 22 06:01:30 crc kubenswrapper[4933]: I0122 06:01:30.498475 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-26zgc\" (UniqueName: \"kubernetes.io/projected/c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d-kube-api-access-26zgc\") pod \"c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d\" (UID: \"c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d\") "
Jan 22 06:01:30 crc kubenswrapper[4933]: I0122 06:01:30.499292 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d-utilities" (OuterVolumeSpecName: "utilities") pod "c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d" (UID: "c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 06:01:30 crc kubenswrapper[4933]: I0122 06:01:30.502576 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d-kube-api-access-26zgc" (OuterVolumeSpecName: "kube-api-access-26zgc") pod "c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d" (UID: "c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d"). InnerVolumeSpecName "kube-api-access-26zgc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 06:01:30 crc kubenswrapper[4933]: I0122 06:01:30.545622 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d" (UID: "c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 06:01:30 crc kubenswrapper[4933]: I0122 06:01:30.599905 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 22 06:01:30 crc kubenswrapper[4933]: I0122 06:01:30.600138 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-26zgc\" (UniqueName: \"kubernetes.io/projected/c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d-kube-api-access-26zgc\") on node \"crc\" DevicePath \"\""
Jan 22 06:01:30 crc kubenswrapper[4933]: I0122 06:01:30.600213 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d-utilities\") on node \"crc\" DevicePath \"\""
Jan 22 06:01:30 crc kubenswrapper[4933]: I0122 06:01:30.986974 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xbwpr" event={"ID":"c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d","Type":"ContainerDied","Data":"7ff68658d52548e0570b95f996b6a7d0e236bff49153c285031fcfa45b24de12"}
Jan 22 06:01:30 crc kubenswrapper[4933]: I0122 06:01:30.987022 4933 scope.go:117] "RemoveContainer" containerID="a000c5be5fb19ed7f4c92edf482de72c7dfca67f050e43d23dcab6cedeefbb06"
Jan 22 06:01:30 crc kubenswrapper[4933]: I0122 06:01:30.987102 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xbwpr"
Jan 22 06:01:31 crc kubenswrapper[4933]: I0122 06:01:31.005276 4933 scope.go:117] "RemoveContainer" containerID="fa008bcc5cce3205576e3397ec8914555c2b83753ef2529815b05d1c11cf6c0e"
Jan 22 06:01:31 crc kubenswrapper[4933]: I0122 06:01:31.027876 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xbwpr"]
Jan 22 06:01:31 crc kubenswrapper[4933]: I0122 06:01:31.029331 4933 scope.go:117] "RemoveContainer" containerID="087f9cddfb71c44c8956fd945f415cd14db2bdef257c27ee46433f4a1ae52552"
Jan 22 06:01:31 crc kubenswrapper[4933]: I0122 06:01:31.030799 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-xbwpr"]
Jan 22 06:01:32 crc kubenswrapper[4933]: I0122 06:01:32.500381 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d" path="/var/lib/kubelet/pods/c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d/volumes"
Jan 22 06:01:33 crc kubenswrapper[4933]: I0122 06:01:33.015101 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-init-5cd76577f9-bqlhh"]
Jan 22 06:01:33 crc kubenswrapper[4933]: E0122 06:01:33.015718 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d" containerName="registry-server"
Jan 22 06:01:33 crc kubenswrapper[4933]: I0122 06:01:33.015739 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d" containerName="registry-server"
Jan 22 06:01:33 crc kubenswrapper[4933]: E0122 06:01:33.015756 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a552e55-190e-4f4c-9234-ebd6d63ee4ad" containerName="pull"
Jan 22 06:01:33 crc kubenswrapper[4933]: I0122 06:01:33.015763 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a552e55-190e-4f4c-9234-ebd6d63ee4ad" containerName="pull"
Jan 22 06:01:33 crc kubenswrapper[4933]: E0122 06:01:33.016148 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a552e55-190e-4f4c-9234-ebd6d63ee4ad" containerName="util"
Jan 22 06:01:33 crc kubenswrapper[4933]: I0122 06:01:33.016160 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a552e55-190e-4f4c-9234-ebd6d63ee4ad" containerName="util"
Jan 22 06:01:33 crc kubenswrapper[4933]: E0122 06:01:33.016173 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d" containerName="extract-content"
Jan 22 06:01:33 crc kubenswrapper[4933]: I0122 06:01:33.016180 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d" containerName="extract-content"
Jan 22 06:01:33 crc kubenswrapper[4933]: E0122 06:01:33.016191 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d" containerName="extract-utilities"
Jan 22 06:01:33 crc kubenswrapper[4933]: I0122 06:01:33.016198 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d" containerName="extract-utilities"
Jan 22 06:01:33 crc kubenswrapper[4933]: E0122 06:01:33.016208 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a552e55-190e-4f4c-9234-ebd6d63ee4ad" containerName="extract"
Jan 22 06:01:33 crc kubenswrapper[4933]: I0122 06:01:33.016216 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a552e55-190e-4f4c-9234-ebd6d63ee4ad" containerName="extract"
Jan 22 06:01:33 crc kubenswrapper[4933]: I0122 06:01:33.016367 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5f1fce4-6a46-4783-a1b7-c2dcf0fb4d2d" containerName="registry-server"
Jan 22 06:01:33 crc kubenswrapper[4933]: I0122 06:01:33.016391 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a552e55-190e-4f4c-9234-ebd6d63ee4ad" containerName="extract"
Jan 22 06:01:33 crc kubenswrapper[4933]: I0122 06:01:33.017061 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-5cd76577f9-bqlhh"
Jan 22 06:01:33 crc kubenswrapper[4933]: I0122 06:01:33.022323 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-init-dockercfg-9rfxk"
Jan 22 06:01:33 crc kubenswrapper[4933]: I0122 06:01:33.031012 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zxr57\" (UniqueName: \"kubernetes.io/projected/df54862e-d3c4-4068-9560-93833cf75eae-kube-api-access-zxr57\") pod \"openstack-operator-controller-init-5cd76577f9-bqlhh\" (UID: \"df54862e-d3c4-4068-9560-93833cf75eae\") " pod="openstack-operators/openstack-operator-controller-init-5cd76577f9-bqlhh"
Jan 22 06:01:33 crc kubenswrapper[4933]: I0122 06:01:33.039623 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-5cd76577f9-bqlhh"]
Jan 22 06:01:33 crc kubenswrapper[4933]: I0122 06:01:33.132383 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zxr57\" (UniqueName: \"kubernetes.io/projected/df54862e-d3c4-4068-9560-93833cf75eae-kube-api-access-zxr57\") pod \"openstack-operator-controller-init-5cd76577f9-bqlhh\" (UID: \"df54862e-d3c4-4068-9560-93833cf75eae\") " pod="openstack-operators/openstack-operator-controller-init-5cd76577f9-bqlhh"
Jan 22 06:01:33 crc kubenswrapper[4933]: I0122 06:01:33.155274 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zxr57\" (UniqueName: \"kubernetes.io/projected/df54862e-d3c4-4068-9560-93833cf75eae-kube-api-access-zxr57\") pod \"openstack-operator-controller-init-5cd76577f9-bqlhh\" (UID: \"df54862e-d3c4-4068-9560-93833cf75eae\") " pod="openstack-operators/openstack-operator-controller-init-5cd76577f9-bqlhh"
Jan 22 06:01:33 crc kubenswrapper[4933]: I0122 06:01:33.334103 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-5cd76577f9-bqlhh"
Jan 22 06:01:33 crc kubenswrapper[4933]: I0122 06:01:33.613181 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-5cd76577f9-bqlhh"]
Jan 22 06:01:34 crc kubenswrapper[4933]: I0122 06:01:34.018147 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-5cd76577f9-bqlhh" event={"ID":"df54862e-d3c4-4068-9560-93833cf75eae","Type":"ContainerStarted","Data":"34381f47b1645d304637dfa1f458a50356fc57e3efe0d35e1a35c2ffcd58475e"}
Jan 22 06:01:38 crc kubenswrapper[4933]: I0122 06:01:38.041782 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-5cd76577f9-bqlhh" event={"ID":"df54862e-d3c4-4068-9560-93833cf75eae","Type":"ContainerStarted","Data":"41e353d3dad0a75e53c59930718ed0f791474065e063bdf61f4cf1080405ba99"}
Jan 22 06:01:38 crc kubenswrapper[4933]: I0122 06:01:38.041938 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-init-5cd76577f9-bqlhh"
Jan 22 06:01:38 crc kubenswrapper[4933]: I0122 06:01:38.074545 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-init-5cd76577f9-bqlhh" podStartSLOduration=2.5460648949999998 podStartE2EDuration="6.074530056s" podCreationTimestamp="2026-01-22 06:01:32 +0000 UTC" firstStartedPulling="2026-01-22 06:01:33.593319798 +0000 UTC m=+941.430445151" lastFinishedPulling="2026-01-22 06:01:37.121784959 +0000 UTC m=+944.958910312" observedRunningTime="2026-01-22 06:01:38.072247389 +0000 UTC m=+945.909372752" watchObservedRunningTime="2026-01-22 06:01:38.074530056 +0000 UTC m=+945.911655409"
Jan 22 06:01:38 crc kubenswrapper[4933]: I0122 06:01:38.578495 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-hvdg8"]
Jan 22 06:01:38 crc kubenswrapper[4933]: I0122 06:01:38.580984 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hvdg8"
Jan 22 06:01:38 crc kubenswrapper[4933]: I0122 06:01:38.594426 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hvdg8"]
Jan 22 06:01:38 crc kubenswrapper[4933]: I0122 06:01:38.773707 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb460db5-1c41-495a-84d6-998309754d3c-utilities\") pod \"certified-operators-hvdg8\" (UID: \"fb460db5-1c41-495a-84d6-998309754d3c\") " pod="openshift-marketplace/certified-operators-hvdg8"
Jan 22 06:01:38 crc kubenswrapper[4933]: I0122 06:01:38.773766 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k7cf2\" (UniqueName: \"kubernetes.io/projected/fb460db5-1c41-495a-84d6-998309754d3c-kube-api-access-k7cf2\") pod \"certified-operators-hvdg8\" (UID: \"fb460db5-1c41-495a-84d6-998309754d3c\") " pod="openshift-marketplace/certified-operators-hvdg8"
Jan 22 06:01:38 crc kubenswrapper[4933]: I0122 06:01:38.773844 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb460db5-1c41-495a-84d6-998309754d3c-catalog-content\") pod \"certified-operators-hvdg8\" (UID: \"fb460db5-1c41-495a-84d6-998309754d3c\") " pod="openshift-marketplace/certified-operators-hvdg8"
Jan 22 06:01:38 crc kubenswrapper[4933]: I0122 06:01:38.875133 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb460db5-1c41-495a-84d6-998309754d3c-catalog-content\") pod \"certified-operators-hvdg8\" (UID: \"fb460db5-1c41-495a-84d6-998309754d3c\") " pod="openshift-marketplace/certified-operators-hvdg8"
Jan 22 06:01:38 crc kubenswrapper[4933]: I0122 06:01:38.875254 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb460db5-1c41-495a-84d6-998309754d3c-utilities\") pod \"certified-operators-hvdg8\" (UID: \"fb460db5-1c41-495a-84d6-998309754d3c\") " pod="openshift-marketplace/certified-operators-hvdg8"
Jan 22 06:01:38 crc kubenswrapper[4933]: I0122 06:01:38.875275 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k7cf2\" (UniqueName: \"kubernetes.io/projected/fb460db5-1c41-495a-84d6-998309754d3c-kube-api-access-k7cf2\") pod \"certified-operators-hvdg8\" (UID: \"fb460db5-1c41-495a-84d6-998309754d3c\") " pod="openshift-marketplace/certified-operators-hvdg8"
Jan 22 06:01:38 crc kubenswrapper[4933]: I0122 06:01:38.875882 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb460db5-1c41-495a-84d6-998309754d3c-catalog-content\") pod \"certified-operators-hvdg8\" (UID: \"fb460db5-1c41-495a-84d6-998309754d3c\") " pod="openshift-marketplace/certified-operators-hvdg8"
Jan 22 06:01:38 crc kubenswrapper[4933]: I0122 06:01:38.876007 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb460db5-1c41-495a-84d6-998309754d3c-utilities\") pod \"certified-operators-hvdg8\" (UID: \"fb460db5-1c41-495a-84d6-998309754d3c\") " pod="openshift-marketplace/certified-operators-hvdg8"
Jan 22 06:01:38 crc kubenswrapper[4933]: I0122 06:01:38.903171 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k7cf2\" (UniqueName: \"kubernetes.io/projected/fb460db5-1c41-495a-84d6-998309754d3c-kube-api-access-k7cf2\") pod \"certified-operators-hvdg8\" (UID: \"fb460db5-1c41-495a-84d6-998309754d3c\") " pod="openshift-marketplace/certified-operators-hvdg8"
Jan 22 06:01:38 crc kubenswrapper[4933]: I0122 06:01:38.911620 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hvdg8"
Jan 22 06:01:39 crc kubenswrapper[4933]: I0122 06:01:39.413062 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hvdg8"]
Jan 22 06:01:40 crc kubenswrapper[4933]: I0122 06:01:40.057265 4933 generic.go:334] "Generic (PLEG): container finished" podID="fb460db5-1c41-495a-84d6-998309754d3c" containerID="14b2f0a5fc946521889f8a1f767aff63664f41858d94004a8d1a1593fe8c3dce" exitCode=0
Jan 22 06:01:40 crc kubenswrapper[4933]: I0122 06:01:40.057340 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hvdg8" event={"ID":"fb460db5-1c41-495a-84d6-998309754d3c","Type":"ContainerDied","Data":"14b2f0a5fc946521889f8a1f767aff63664f41858d94004a8d1a1593fe8c3dce"}
Jan 22 06:01:40 crc kubenswrapper[4933]: I0122 06:01:40.057619 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hvdg8" event={"ID":"fb460db5-1c41-495a-84d6-998309754d3c","Type":"ContainerStarted","Data":"a379e11a863d8c1536cf5a583dabb3a343e45084b457da4eba37c17af23aed68"}
Jan 22 06:01:41 crc kubenswrapper[4933]: I0122 06:01:41.065631 4933 generic.go:334] "Generic (PLEG): container finished" podID="fb460db5-1c41-495a-84d6-998309754d3c" containerID="76f3df152ba7aa6d16c307349cd46b3960b3d7d25ca849db73431c69c3eeba3f" exitCode=0
Jan 22 06:01:41 crc kubenswrapper[4933]: I0122 06:01:41.065694 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hvdg8" event={"ID":"fb460db5-1c41-495a-84d6-998309754d3c","Type":"ContainerDied","Data":"76f3df152ba7aa6d16c307349cd46b3960b3d7d25ca849db73431c69c3eeba3f"}
Jan 22 06:01:42 crc kubenswrapper[4933]: I0122 06:01:42.076341 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hvdg8" event={"ID":"fb460db5-1c41-495a-84d6-998309754d3c","Type":"ContainerStarted","Data":"bbed29b04d3b122706df1d9b53bd1affe38644a1bdf5ec620fbeb09f03cef2c6"}
Jan 22 06:01:42 crc kubenswrapper[4933]: I0122 06:01:42.096303 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-hvdg8" podStartSLOduration=2.656953155 podStartE2EDuration="4.096285606s" podCreationTimestamp="2026-01-22 06:01:38 +0000 UTC" firstStartedPulling="2026-01-22 06:01:40.060269398 +0000 UTC m=+947.897394771" lastFinishedPulling="2026-01-22 06:01:41.499601859 +0000 UTC m=+949.336727222" observedRunningTime="2026-01-22 06:01:42.092350147 +0000 UTC m=+949.929475500" watchObservedRunningTime="2026-01-22 06:01:42.096285606 +0000 UTC m=+949.933410959"
Jan 22 06:01:43 crc kubenswrapper[4933]: I0122 06:01:43.337885 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-init-5cd76577f9-bqlhh"
Jan 22 06:01:48 crc kubenswrapper[4933]: I0122 06:01:48.912100 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-hvdg8"
Jan 22 06:01:48 crc kubenswrapper[4933]: I0122 06:01:48.912468 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-hvdg8"
Jan 22 06:01:48 crc kubenswrapper[4933]: I0122 06:01:48.973930 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-hvdg8"
Jan 22 06:01:49 crc kubenswrapper[4933]: I0122 06:01:49.161620 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-hvdg8"
Jan 22 06:01:49 crc kubenswrapper[4933]: I0122 06:01:49.245738 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hvdg8"]
Jan 22 06:01:51 crc kubenswrapper[4933]: I0122 06:01:51.128102 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-hvdg8" podUID="fb460db5-1c41-495a-84d6-998309754d3c" containerName="registry-server" containerID="cri-o://bbed29b04d3b122706df1d9b53bd1affe38644a1bdf5ec620fbeb09f03cef2c6" gracePeriod=2
Jan 22 06:01:53 crc kubenswrapper[4933]: I0122 06:01:53.141453 4933 generic.go:334] "Generic (PLEG): container finished" podID="fb460db5-1c41-495a-84d6-998309754d3c" containerID="bbed29b04d3b122706df1d9b53bd1affe38644a1bdf5ec620fbeb09f03cef2c6" exitCode=0
Jan 22 06:01:53 crc kubenswrapper[4933]: I0122 06:01:53.141523 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hvdg8" event={"ID":"fb460db5-1c41-495a-84d6-998309754d3c","Type":"ContainerDied","Data":"bbed29b04d3b122706df1d9b53bd1affe38644a1bdf5ec620fbeb09f03cef2c6"}
Jan 22 06:01:53 crc kubenswrapper[4933]: I0122 06:01:53.512531 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hvdg8"
Jan 22 06:01:53 crc kubenswrapper[4933]: I0122 06:01:53.706047 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb460db5-1c41-495a-84d6-998309754d3c-catalog-content\") pod \"fb460db5-1c41-495a-84d6-998309754d3c\" (UID: \"fb460db5-1c41-495a-84d6-998309754d3c\") "
Jan 22 06:01:53 crc kubenswrapper[4933]: I0122 06:01:53.706420 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb460db5-1c41-495a-84d6-998309754d3c-utilities\") pod \"fb460db5-1c41-495a-84d6-998309754d3c\" (UID: \"fb460db5-1c41-495a-84d6-998309754d3c\") "
Jan 22 06:01:53 crc kubenswrapper[4933]: I0122 06:01:53.707276 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fb460db5-1c41-495a-84d6-998309754d3c-utilities" (OuterVolumeSpecName: "utilities") pod "fb460db5-1c41-495a-84d6-998309754d3c" (UID: "fb460db5-1c41-495a-84d6-998309754d3c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 06:01:53 crc kubenswrapper[4933]: I0122 06:01:53.706452 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k7cf2\" (UniqueName: \"kubernetes.io/projected/fb460db5-1c41-495a-84d6-998309754d3c-kube-api-access-k7cf2\") pod \"fb460db5-1c41-495a-84d6-998309754d3c\" (UID: \"fb460db5-1c41-495a-84d6-998309754d3c\") "
Jan 22 06:01:53 crc kubenswrapper[4933]: I0122 06:01:53.707562 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb460db5-1c41-495a-84d6-998309754d3c-utilities\") on node \"crc\" DevicePath \"\""
Jan 22 06:01:53 crc kubenswrapper[4933]: I0122 06:01:53.714274 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb460db5-1c41-495a-84d6-998309754d3c-kube-api-access-k7cf2" (OuterVolumeSpecName: "kube-api-access-k7cf2") pod "fb460db5-1c41-495a-84d6-998309754d3c" (UID: "fb460db5-1c41-495a-84d6-998309754d3c"). InnerVolumeSpecName "kube-api-access-k7cf2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 06:01:53 crc kubenswrapper[4933]: I0122 06:01:53.750441 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fb460db5-1c41-495a-84d6-998309754d3c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fb460db5-1c41-495a-84d6-998309754d3c" (UID: "fb460db5-1c41-495a-84d6-998309754d3c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 06:01:53 crc kubenswrapper[4933]: I0122 06:01:53.808042 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb460db5-1c41-495a-84d6-998309754d3c-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 22 06:01:53 crc kubenswrapper[4933]: I0122 06:01:53.808095 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k7cf2\" (UniqueName: \"kubernetes.io/projected/fb460db5-1c41-495a-84d6-998309754d3c-kube-api-access-k7cf2\") on node \"crc\" DevicePath \"\""
Jan 22 06:01:54 crc kubenswrapper[4933]: I0122 06:01:54.149401 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hvdg8" event={"ID":"fb460db5-1c41-495a-84d6-998309754d3c","Type":"ContainerDied","Data":"a379e11a863d8c1536cf5a583dabb3a343e45084b457da4eba37c17af23aed68"}
Jan 22 06:01:54 crc kubenswrapper[4933]: I0122 06:01:54.149452 4933 scope.go:117] "RemoveContainer" containerID="bbed29b04d3b122706df1d9b53bd1affe38644a1bdf5ec620fbeb09f03cef2c6"
Jan 22 06:01:54 crc kubenswrapper[4933]: I0122 06:01:54.149453 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hvdg8"
Jan 22 06:01:54 crc kubenswrapper[4933]: I0122 06:01:54.172903 4933 scope.go:117] "RemoveContainer" containerID="76f3df152ba7aa6d16c307349cd46b3960b3d7d25ca849db73431c69c3eeba3f"
Jan 22 06:01:54 crc kubenswrapper[4933]: I0122 06:01:54.179801 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hvdg8"]
Jan 22 06:01:54 crc kubenswrapper[4933]: I0122 06:01:54.184172 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-hvdg8"]
Jan 22 06:01:54 crc kubenswrapper[4933]: I0122 06:01:54.202717 4933 scope.go:117] "RemoveContainer" containerID="14b2f0a5fc946521889f8a1f767aff63664f41858d94004a8d1a1593fe8c3dce"
Jan 22 06:01:54 crc kubenswrapper[4933]: I0122 06:01:54.499190 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fb460db5-1c41-495a-84d6-998309754d3c" path="/var/lib/kubelet/pods/fb460db5-1c41-495a-84d6-998309754d3c/volumes"
Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.724602 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-z6g5l"]
Jan 22 06:02:02 crc kubenswrapper[4933]: E0122 06:02:02.725473 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb460db5-1c41-495a-84d6-998309754d3c" containerName="extract-utilities"
Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.725492 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb460db5-1c41-495a-84d6-998309754d3c" containerName="extract-utilities"
Jan 22 06:02:02 crc kubenswrapper[4933]: E0122 06:02:02.725508 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb460db5-1c41-495a-84d6-998309754d3c" containerName="registry-server"
Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.725518 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb460db5-1c41-495a-84d6-998309754d3c" containerName="registry-server"
Jan 22 06:02:02 crc kubenswrapper[4933]: E0122 06:02:02.725535 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb460db5-1c41-495a-84d6-998309754d3c" containerName="extract-content"
Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.725544 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb460db5-1c41-495a-84d6-998309754d3c" containerName="extract-content"
Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.725714 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb460db5-1c41-495a-84d6-998309754d3c" containerName="registry-server"
Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.726273 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-z6g5l"
Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.729332 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-jjg4q"
Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.737521 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-69cf5d4557-6ktqg"]
Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.738787 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-6ktqg" Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.740174 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-prcgx" Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.743366 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-z6g5l"] Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.753041 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-b45d7bf98-sb8mm"] Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.753986 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-69cf5d4557-6ktqg"] Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.754109 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-sb8mm" Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.758599 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-f9g6k" Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.760282 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-b45d7bf98-sb8mm"] Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.765332 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-78fdd796fd-x7t54"] Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.766365 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-x7t54" Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.773576 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-594c8c9d5d-p6w9k"] Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.774291 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-rlrzc" Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.774447 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-p6w9k" Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.778145 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-594c8c9d5d-p6w9k"] Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.778503 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-sm4h6" Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.783304 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-78fdd796fd-x7t54"] Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.800047 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-77d5c5b54f-ghk9g"] Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.800789 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-ghk9g" Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.805343 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-h2pw2" Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.828173 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-77d5c5b54f-ghk9g"] Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.835105 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-54ccf4f85d-5vl5w"] Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.835879 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-5vl5w" Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.841802 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-5zhs4" Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.842295 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.853105 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-dmwlv"] Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.858039 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-dmwlv" Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.860421 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-9759j" Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.874267 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-54ccf4f85d-5vl5w"] Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.888144 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-dmwlv"] Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.914152 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-b8b6d4659-pkpcl"] Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.914928 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-pkpcl" Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.917205 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-k64n7" Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.918062 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rp6zv\" (UniqueName: \"kubernetes.io/projected/78b94bce-a7a5-471f-bab2-f57baeff12b6-kube-api-access-rp6zv\") pod \"barbican-operator-controller-manager-59dd8b7cbf-z6g5l\" (UID: \"78b94bce-a7a5-471f-bab2-f57baeff12b6\") " pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-z6g5l" Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.918112 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-85k2q\" (UniqueName: \"kubernetes.io/projected/10fc162d-8e83-4741-88be-c1e8dd9f291a-kube-api-access-85k2q\") pod \"heat-operator-controller-manager-594c8c9d5d-p6w9k\" (UID: \"10fc162d-8e83-4741-88be-c1e8dd9f291a\") " pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-p6w9k" Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.918131 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wqzv9\" (UniqueName: \"kubernetes.io/projected/53b0d937-20e6-4a4b-b61b-f172c672c43f-kube-api-access-wqzv9\") pod \"cinder-operator-controller-manager-69cf5d4557-6ktqg\" (UID: \"53b0d937-20e6-4a4b-b61b-f172c672c43f\") " pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-6ktqg" Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.918147 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nqvz2\" (UniqueName: \"kubernetes.io/projected/bee42408-9ab2-4e83-a06b-8cb123a853f9-kube-api-access-nqvz2\") pod \"infra-operator-controller-manager-54ccf4f85d-5vl5w\" (UID: \"bee42408-9ab2-4e83-a06b-8cb123a853f9\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-5vl5w" Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.918162 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6nqj8\" (UniqueName: \"kubernetes.io/projected/d3f5d746-ae61-4d36-bcce-4530c7f7a899-kube-api-access-6nqj8\") pod \"keystone-operator-controller-manager-b8b6d4659-pkpcl\" (UID: \"d3f5d746-ae61-4d36-bcce-4530c7f7a899\") " pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-pkpcl" Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.918183 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v9448\" (UniqueName: \"kubernetes.io/projected/ab3830be-ff45-443a-9089-29438fca5c75-kube-api-access-v9448\") pod \"ironic-operator-controller-manager-69d6c9f5b8-dmwlv\" (UID: \"ab3830be-ff45-443a-9089-29438fca5c75\") " pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-dmwlv" Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.918203 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nw5t5\" (UniqueName: \"kubernetes.io/projected/f4f610a0-0ede-4d86-b1d4-fcd4a70d9c1c-kube-api-access-nw5t5\") pod \"glance-operator-controller-manager-78fdd796fd-x7t54\" (UID: 
\"f4f610a0-0ede-4d86-b1d4-fcd4a70d9c1c\") " pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-x7t54" Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.918220 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/bee42408-9ab2-4e83-a06b-8cb123a853f9-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-5vl5w\" (UID: \"bee42408-9ab2-4e83-a06b-8cb123a853f9\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-5vl5w" Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.918237 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rhzrj\" (UniqueName: \"kubernetes.io/projected/c2a2b482-2d1c-4c3b-b1dc-7d3f291b665e-kube-api-access-rhzrj\") pod \"designate-operator-controller-manager-b45d7bf98-sb8mm\" (UID: \"c2a2b482-2d1c-4c3b-b1dc-7d3f291b665e\") " pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-sb8mm" Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.918269 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nqd58\" (UniqueName: \"kubernetes.io/projected/45469fd5-7d9d-44f4-82a1-61d82f8e2dc8-kube-api-access-nqd58\") pod \"horizon-operator-controller-manager-77d5c5b54f-ghk9g\" (UID: \"45469fd5-7d9d-44f4-82a1-61d82f8e2dc8\") " pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-ghk9g" Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.926474 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-78c6999f6f-dwrg6"] Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.927247 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-dwrg6" Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.933200 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-t52l8" Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.936011 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-b8b6d4659-pkpcl"] Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.947163 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-c87fff755-dtg9q"] Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.948119 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-dtg9q" Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.949654 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-jc8nm" Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.952457 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-78c6999f6f-dwrg6"] Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.957261 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5d8f59fb49-nkc7j"] Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.958098 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-nkc7j" Jan 22 06:02:02 crc kubenswrapper[4933]: I0122 06:02:02.964983 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-xncjg" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.004095 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-c87fff755-dtg9q"] Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.020724 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rp6zv\" (UniqueName: \"kubernetes.io/projected/78b94bce-a7a5-471f-bab2-f57baeff12b6-kube-api-access-rp6zv\") pod \"barbican-operator-controller-manager-59dd8b7cbf-z6g5l\" (UID: \"78b94bce-a7a5-471f-bab2-f57baeff12b6\") " pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-z6g5l" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.020768 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-85k2q\" (UniqueName: \"kubernetes.io/projected/10fc162d-8e83-4741-88be-c1e8dd9f291a-kube-api-access-85k2q\") pod \"heat-operator-controller-manager-594c8c9d5d-p6w9k\" (UID: \"10fc162d-8e83-4741-88be-c1e8dd9f291a\") " pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-p6w9k" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.020810 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wqzv9\" (UniqueName: \"kubernetes.io/projected/53b0d937-20e6-4a4b-b61b-f172c672c43f-kube-api-access-wqzv9\") pod \"cinder-operator-controller-manager-69cf5d4557-6ktqg\" (UID: \"53b0d937-20e6-4a4b-b61b-f172c672c43f\") " pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-6ktqg" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.020832 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nqvz2\" (UniqueName: \"kubernetes.io/projected/bee42408-9ab2-4e83-a06b-8cb123a853f9-kube-api-access-nqvz2\") pod \"infra-operator-controller-manager-54ccf4f85d-5vl5w\" (UID: \"bee42408-9ab2-4e83-a06b-8cb123a853f9\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-5vl5w" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.020855 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6nqj8\" (UniqueName: \"kubernetes.io/projected/d3f5d746-ae61-4d36-bcce-4530c7f7a899-kube-api-access-6nqj8\") pod \"keystone-operator-controller-manager-b8b6d4659-pkpcl\" (UID: \"d3f5d746-ae61-4d36-bcce-4530c7f7a899\") " pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-pkpcl" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.020926 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v9448\" (UniqueName: \"kubernetes.io/projected/ab3830be-ff45-443a-9089-29438fca5c75-kube-api-access-v9448\") pod \"ironic-operator-controller-manager-69d6c9f5b8-dmwlv\" (UID: \"ab3830be-ff45-443a-9089-29438fca5c75\") " pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-dmwlv" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.020986 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nw5t5\" (UniqueName: \"kubernetes.io/projected/f4f610a0-0ede-4d86-b1d4-fcd4a70d9c1c-kube-api-access-nw5t5\") pod 
\"glance-operator-controller-manager-78fdd796fd-x7t54\" (UID: \"f4f610a0-0ede-4d86-b1d4-fcd4a70d9c1c\") " pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-x7t54" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.021008 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/bee42408-9ab2-4e83-a06b-8cb123a853f9-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-5vl5w\" (UID: \"bee42408-9ab2-4e83-a06b-8cb123a853f9\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-5vl5w" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.021055 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rhzrj\" (UniqueName: \"kubernetes.io/projected/c2a2b482-2d1c-4c3b-b1dc-7d3f291b665e-kube-api-access-rhzrj\") pod \"designate-operator-controller-manager-b45d7bf98-sb8mm\" (UID: \"c2a2b482-2d1c-4c3b-b1dc-7d3f291b665e\") " pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-sb8mm" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.021108 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nqd58\" (UniqueName: \"kubernetes.io/projected/45469fd5-7d9d-44f4-82a1-61d82f8e2dc8-kube-api-access-nqd58\") pod \"horizon-operator-controller-manager-77d5c5b54f-ghk9g\" (UID: \"45469fd5-7d9d-44f4-82a1-61d82f8e2dc8\") " pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-ghk9g" Jan 22 06:02:03 crc kubenswrapper[4933]: E0122 06:02:03.022295 4933 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 22 06:02:03 crc kubenswrapper[4933]: E0122 06:02:03.023492 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bee42408-9ab2-4e83-a06b-8cb123a853f9-cert podName:bee42408-9ab2-4e83-a06b-8cb123a853f9 nodeName:}" failed. No retries permitted until 2026-01-22 06:02:03.523459666 +0000 UTC m=+971.360585019 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/bee42408-9ab2-4e83-a06b-8cb123a853f9-cert") pod "infra-operator-controller-manager-54ccf4f85d-5vl5w" (UID: "bee42408-9ab2-4e83-a06b-8cb123a853f9") : secret "infra-operator-webhook-server-cert" not found Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.041147 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5d8f59fb49-nkc7j"] Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.087922 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-6b8bc8d87d-czc9v"] Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.088940 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-czc9v" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.095508 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-85k2q\" (UniqueName: \"kubernetes.io/projected/10fc162d-8e83-4741-88be-c1e8dd9f291a-kube-api-access-85k2q\") pod \"heat-operator-controller-manager-594c8c9d5d-p6w9k\" (UID: \"10fc162d-8e83-4741-88be-c1e8dd9f291a\") " pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-p6w9k" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.096108 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v9448\" (UniqueName: \"kubernetes.io/projected/ab3830be-ff45-443a-9089-29438fca5c75-kube-api-access-v9448\") pod \"ironic-operator-controller-manager-69d6c9f5b8-dmwlv\" (UID: \"ab3830be-ff45-443a-9089-29438fca5c75\") " pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-dmwlv" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.098888 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rp6zv\" (UniqueName: \"kubernetes.io/projected/78b94bce-a7a5-471f-bab2-f57baeff12b6-kube-api-access-rp6zv\") pod \"barbican-operator-controller-manager-59dd8b7cbf-z6g5l\" (UID: \"78b94bce-a7a5-471f-bab2-f57baeff12b6\") " pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-z6g5l" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.101429 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-6thvd" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.106185 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6nqj8\" (UniqueName: \"kubernetes.io/projected/d3f5d746-ae61-4d36-bcce-4530c7f7a899-kube-api-access-6nqj8\") pod \"keystone-operator-controller-manager-b8b6d4659-pkpcl\" (UID: \"d3f5d746-ae61-4d36-bcce-4530c7f7a899\") " pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-pkpcl" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.110945 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rhzrj\" (UniqueName: \"kubernetes.io/projected/c2a2b482-2d1c-4c3b-b1dc-7d3f291b665e-kube-api-access-rhzrj\") pod \"designate-operator-controller-manager-b45d7bf98-sb8mm\" (UID: \"c2a2b482-2d1c-4c3b-b1dc-7d3f291b665e\") " pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-sb8mm" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.115033 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nw5t5\" (UniqueName: \"kubernetes.io/projected/f4f610a0-0ede-4d86-b1d4-fcd4a70d9c1c-kube-api-access-nw5t5\") pod \"glance-operator-controller-manager-78fdd796fd-x7t54\" (UID: \"f4f610a0-0ede-4d86-b1d4-fcd4a70d9c1c\") " pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-x7t54" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.116778 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nqd58\" (UniqueName: \"kubernetes.io/projected/45469fd5-7d9d-44f4-82a1-61d82f8e2dc8-kube-api-access-nqd58\") pod \"horizon-operator-controller-manager-77d5c5b54f-ghk9g\" (UID: \"45469fd5-7d9d-44f4-82a1-61d82f8e2dc8\") " pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-ghk9g" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.120698 4933 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nqvz2\" (UniqueName: \"kubernetes.io/projected/bee42408-9ab2-4e83-a06b-8cb123a853f9-kube-api-access-nqvz2\") pod \"infra-operator-controller-manager-54ccf4f85d-5vl5w\" (UID: \"bee42408-9ab2-4e83-a06b-8cb123a853f9\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-5vl5w" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.121949 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dptns\" (UniqueName: \"kubernetes.io/projected/21941523-226c-4a4c-a099-51df0766a712-kube-api-access-dptns\") pod \"neutron-operator-controller-manager-5d8f59fb49-nkc7j\" (UID: \"21941523-226c-4a4c-a099-51df0766a712\") " pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-nkc7j" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.122033 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fkgvc\" (UniqueName: \"kubernetes.io/projected/e0335df2-7bd1-4b61-8057-7663a730d2ff-kube-api-access-fkgvc\") pod \"mariadb-operator-controller-manager-c87fff755-dtg9q\" (UID: \"e0335df2-7bd1-4b61-8057-7663a730d2ff\") " pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-dtg9q" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.122100 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xv566\" (UniqueName: \"kubernetes.io/projected/34f0d75a-dc72-4dad-82a6-512c1351210b-kube-api-access-xv566\") pod \"manila-operator-controller-manager-78c6999f6f-dwrg6\" (UID: \"34f0d75a-dc72-4dad-82a6-512c1351210b\") " pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-dwrg6" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.132241 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-p6w9k" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.140800 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wqzv9\" (UniqueName: \"kubernetes.io/projected/53b0d937-20e6-4a4b-b61b-f172c672c43f-kube-api-access-wqzv9\") pod \"cinder-operator-controller-manager-69cf5d4557-6ktqg\" (UID: \"53b0d937-20e6-4a4b-b61b-f172c672c43f\") " pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-6ktqg" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.140911 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-6b8bc8d87d-czc9v"] Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.141170 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-ghk9g" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.180776 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-7bd9774b6-w25vn"] Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.181836 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-w25vn" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.190710 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-vqkdr" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.193614 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-7bd9774b6-w25vn"] Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.193937 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-dmwlv" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.223383 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dptns\" (UniqueName: \"kubernetes.io/projected/21941523-226c-4a4c-a099-51df0766a712-kube-api-access-dptns\") pod \"neutron-operator-controller-manager-5d8f59fb49-nkc7j\" (UID: \"21941523-226c-4a4c-a099-51df0766a712\") " pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-nkc7j" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.223464 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fkgvc\" (UniqueName: \"kubernetes.io/projected/e0335df2-7bd1-4b61-8057-7663a730d2ff-kube-api-access-fkgvc\") pod \"mariadb-operator-controller-manager-c87fff755-dtg9q\" (UID: \"e0335df2-7bd1-4b61-8057-7663a730d2ff\") " pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-dtg9q" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.223523 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xv566\" (UniqueName: \"kubernetes.io/projected/34f0d75a-dc72-4dad-82a6-512c1351210b-kube-api-access-xv566\") pod \"manila-operator-controller-manager-78c6999f6f-dwrg6\" (UID: \"34f0d75a-dc72-4dad-82a6-512c1351210b\") " pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-dwrg6" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.223552 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gmjz5\" (UniqueName: \"kubernetes.io/projected/300ef4d3-6c13-4a9d-96e6-a707abccca2c-kube-api-access-gmjz5\") pod \"nova-operator-controller-manager-6b8bc8d87d-czc9v\" (UID: \"300ef4d3-6c13-4a9d-96e6-a707abccca2c\") " pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-czc9v" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.252066 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-pkpcl" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.263995 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xv566\" (UniqueName: \"kubernetes.io/projected/34f0d75a-dc72-4dad-82a6-512c1351210b-kube-api-access-xv566\") pod \"manila-operator-controller-manager-78c6999f6f-dwrg6\" (UID: \"34f0d75a-dc72-4dad-82a6-512c1351210b\") " pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-dwrg6" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.264367 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-dwrg6" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.273809 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-55db956ddc-7q7hb"] Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.276627 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-7q7hb" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.277833 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dptns\" (UniqueName: \"kubernetes.io/projected/21941523-226c-4a4c-a099-51df0766a712-kube-api-access-dptns\") pod \"neutron-operator-controller-manager-5d8f59fb49-nkc7j\" (UID: \"21941523-226c-4a4c-a099-51df0766a712\") " pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-nkc7j" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.280846 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-pv6ps" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.282332 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fkgvc\" (UniqueName: \"kubernetes.io/projected/e0335df2-7bd1-4b61-8057-7663a730d2ff-kube-api-access-fkgvc\") pod \"mariadb-operator-controller-manager-c87fff755-dtg9q\" (UID: \"e0335df2-7bd1-4b61-8057-7663a730d2ff\") " pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-dtg9q" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.287498 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns"] Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.290788 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.293357 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-pj96x" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.293835 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.297027 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-nkc7j" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.313864 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-55db956ddc-7q7hb"] Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.328289 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gmjz5\" (UniqueName: \"kubernetes.io/projected/300ef4d3-6c13-4a9d-96e6-a707abccca2c-kube-api-access-gmjz5\") pod \"nova-operator-controller-manager-6b8bc8d87d-czc9v\" (UID: \"300ef4d3-6c13-4a9d-96e6-a707abccca2c\") " pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-czc9v" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.328629 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ww9ct\" (UniqueName: \"kubernetes.io/projected/04683325-6972-455c-9ca5-ddf1fd4b9862-kube-api-access-ww9ct\") pod \"octavia-operator-controller-manager-7bd9774b6-w25vn\" (UID: \"04683325-6972-455c-9ca5-ddf1fd4b9862\") " pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-w25vn" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.333326 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-5d646b7d76-m6g48"] Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.334131 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-m6g48" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.338392 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-bzfkm" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.338460 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns"] Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.351031 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5d646b7d76-m6g48"] Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.361612 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-z6g5l" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.363543 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-547cbdb99f-2wjhd"] Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.363817 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gmjz5\" (UniqueName: \"kubernetes.io/projected/300ef4d3-6c13-4a9d-96e6-a707abccca2c-kube-api-access-gmjz5\") pod \"nova-operator-controller-manager-6b8bc8d87d-czc9v\" (UID: \"300ef4d3-6c13-4a9d-96e6-a707abccca2c\") " pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-czc9v" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.364900 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-2wjhd" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.370517 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-547cbdb99f-2wjhd"] Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.372434 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-q6lqn" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.378364 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-85cd9769bb-556x5"] Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.379445 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-556x5" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.382142 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-q2csb" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.382350 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-6ktqg" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.390512 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-85cd9769bb-556x5"] Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.399140 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-sb8mm" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.411386 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-x7t54" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.422387 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-69797bbcbd-9rc7q"] Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.423308 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-9rc7q" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.427231 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-92rbx" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.431499 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ae3ccc66-eed1-4750-8af1-7f99673b1323-cert\") pod \"openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns\" (UID: \"ae3ccc66-eed1-4750-8af1-7f99673b1323\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.431553 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bc6g8\" (UniqueName: \"kubernetes.io/projected/289a66a7-9513-4b66-990a-3d9f11919531-kube-api-access-bc6g8\") pod \"ovn-operator-controller-manager-55db956ddc-7q7hb\" (UID: \"289a66a7-9513-4b66-990a-3d9f11919531\") " pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-7q7hb" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.431634 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ww9ct\" (UniqueName: \"kubernetes.io/projected/04683325-6972-455c-9ca5-ddf1fd4b9862-kube-api-access-ww9ct\") pod \"octavia-operator-controller-manager-7bd9774b6-w25vn\" (UID: \"04683325-6972-455c-9ca5-ddf1fd4b9862\") " pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-w25vn" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.431794 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5x52p\" (UniqueName: \"kubernetes.io/projected/ae3ccc66-eed1-4750-8af1-7f99673b1323-kube-api-access-5x52p\") pod \"openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns\" (UID: \"ae3ccc66-eed1-4750-8af1-7f99673b1323\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.431830 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tqbbt\" (UniqueName: \"kubernetes.io/projected/2892be27-6da5-4a19-a30e-36f5907f5d70-kube-api-access-tqbbt\") pod \"placement-operator-controller-manager-5d646b7d76-m6g48\" (UID: \"2892be27-6da5-4a19-a30e-36f5907f5d70\") " pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-m6g48" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.438036 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-69797bbcbd-9rc7q"] Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.482381 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-5ffb9c6597-gtcc5"] Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.483454 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-gtcc5" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.486787 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ww9ct\" (UniqueName: \"kubernetes.io/projected/04683325-6972-455c-9ca5-ddf1fd4b9862-kube-api-access-ww9ct\") pod \"octavia-operator-controller-manager-7bd9774b6-w25vn\" (UID: \"04683325-6972-455c-9ca5-ddf1fd4b9862\") " pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-w25vn" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.489390 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-5ffb9c6597-gtcc5"] Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.490935 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-mdr6h" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.533266 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-428wz\" (UniqueName: \"kubernetes.io/projected/48e1a8f3-00fd-48a6-be02-7c61f0425809-kube-api-access-428wz\") pod \"swift-operator-controller-manager-547cbdb99f-2wjhd\" (UID: \"48e1a8f3-00fd-48a6-be02-7c61f0425809\") " pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-2wjhd" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.533340 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/bee42408-9ab2-4e83-a06b-8cb123a853f9-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-5vl5w\" (UID: \"bee42408-9ab2-4e83-a06b-8cb123a853f9\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-5vl5w" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.533381 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5x52p\" (UniqueName: \"kubernetes.io/projected/ae3ccc66-eed1-4750-8af1-7f99673b1323-kube-api-access-5x52p\") pod \"openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns\" (UID: \"ae3ccc66-eed1-4750-8af1-7f99673b1323\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.533409 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tqbbt\" (UniqueName: \"kubernetes.io/projected/2892be27-6da5-4a19-a30e-36f5907f5d70-kube-api-access-tqbbt\") pod \"placement-operator-controller-manager-5d646b7d76-m6g48\" (UID: \"2892be27-6da5-4a19-a30e-36f5907f5d70\") " pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-m6g48" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.533451 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ae3ccc66-eed1-4750-8af1-7f99673b1323-cert\") pod \"openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns\" (UID: \"ae3ccc66-eed1-4750-8af1-7f99673b1323\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.533477 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zwrgg\" (UniqueName: \"kubernetes.io/projected/f5292b84-8cb2-4f43-96f9-6304705b15bc-kube-api-access-zwrgg\") pod 
\"telemetry-operator-controller-manager-85cd9769bb-556x5\" (UID: \"f5292b84-8cb2-4f43-96f9-6304705b15bc\") " pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-556x5" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.533499 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bc6g8\" (UniqueName: \"kubernetes.io/projected/289a66a7-9513-4b66-990a-3d9f11919531-kube-api-access-bc6g8\") pod \"ovn-operator-controller-manager-55db956ddc-7q7hb\" (UID: \"289a66a7-9513-4b66-990a-3d9f11919531\") " pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-7q7hb" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.533542 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b9hjk\" (UniqueName: \"kubernetes.io/projected/0a5c558d-f69d-4299-97e2-00326ec7e416-kube-api-access-b9hjk\") pod \"test-operator-controller-manager-69797bbcbd-9rc7q\" (UID: \"0a5c558d-f69d-4299-97e2-00326ec7e416\") " pod="openstack-operators/test-operator-controller-manager-69797bbcbd-9rc7q" Jan 22 06:02:03 crc kubenswrapper[4933]: E0122 06:02:03.533687 4933 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 22 06:02:03 crc kubenswrapper[4933]: E0122 06:02:03.533733 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bee42408-9ab2-4e83-a06b-8cb123a853f9-cert podName:bee42408-9ab2-4e83-a06b-8cb123a853f9 nodeName:}" failed. No retries permitted until 2026-01-22 06:02:04.533716628 +0000 UTC m=+972.370841981 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/bee42408-9ab2-4e83-a06b-8cb123a853f9-cert") pod "infra-operator-controller-manager-54ccf4f85d-5vl5w" (UID: "bee42408-9ab2-4e83-a06b-8cb123a853f9") : secret "infra-operator-webhook-server-cert" not found Jan 22 06:02:03 crc kubenswrapper[4933]: E0122 06:02:03.534388 4933 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 22 06:02:03 crc kubenswrapper[4933]: E0122 06:02:03.534469 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ae3ccc66-eed1-4750-8af1-7f99673b1323-cert podName:ae3ccc66-eed1-4750-8af1-7f99673b1323 nodeName:}" failed. No retries permitted until 2026-01-22 06:02:04.034451472 +0000 UTC m=+971.871576815 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/ae3ccc66-eed1-4750-8af1-7f99673b1323-cert") pod "openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns" (UID: "ae3ccc66-eed1-4750-8af1-7f99673b1323") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.559025 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5x52p\" (UniqueName: \"kubernetes.io/projected/ae3ccc66-eed1-4750-8af1-7f99673b1323-kube-api-access-5x52p\") pod \"openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns\" (UID: \"ae3ccc66-eed1-4750-8af1-7f99673b1323\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.574802 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-czc9v" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.574888 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-dtg9q" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.581872 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bc6g8\" (UniqueName: \"kubernetes.io/projected/289a66a7-9513-4b66-990a-3d9f11919531-kube-api-access-bc6g8\") pod \"ovn-operator-controller-manager-55db956ddc-7q7hb\" (UID: \"289a66a7-9513-4b66-990a-3d9f11919531\") " pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-7q7hb" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.582792 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tqbbt\" (UniqueName: \"kubernetes.io/projected/2892be27-6da5-4a19-a30e-36f5907f5d70-kube-api-access-tqbbt\") pod \"placement-operator-controller-manager-5d646b7d76-m6g48\" (UID: \"2892be27-6da5-4a19-a30e-36f5907f5d70\") " pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-m6g48" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.585600 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-647bb87bbd-xc9z6"] Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.586586 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-647bb87bbd-xc9z6" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.587819 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-w25vn" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.588339 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.588722 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-6d66v" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.588871 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.607154 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-647bb87bbd-xc9z6"] Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.629953 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rdbn6"] Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.631602 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rdbn6" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.634690 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b9hjk\" (UniqueName: \"kubernetes.io/projected/0a5c558d-f69d-4299-97e2-00326ec7e416-kube-api-access-b9hjk\") pod \"test-operator-controller-manager-69797bbcbd-9rc7q\" (UID: \"0a5c558d-f69d-4299-97e2-00326ec7e416\") " pod="openstack-operators/test-operator-controller-manager-69797bbcbd-9rc7q" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.634753 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p7l89\" (UniqueName: \"kubernetes.io/projected/b5af85fb-e3ef-41b7-8c6b-7afddd5200bd-kube-api-access-p7l89\") pod \"watcher-operator-controller-manager-5ffb9c6597-gtcc5\" (UID: \"b5af85fb-e3ef-41b7-8c6b-7afddd5200bd\") " pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-gtcc5" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.634795 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-428wz\" (UniqueName: \"kubernetes.io/projected/48e1a8f3-00fd-48a6-be02-7c61f0425809-kube-api-access-428wz\") pod \"swift-operator-controller-manager-547cbdb99f-2wjhd\" (UID: \"48e1a8f3-00fd-48a6-be02-7c61f0425809\") " pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-2wjhd" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.634926 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zwrgg\" (UniqueName: \"kubernetes.io/projected/f5292b84-8cb2-4f43-96f9-6304705b15bc-kube-api-access-zwrgg\") pod \"telemetry-operator-controller-manager-85cd9769bb-556x5\" (UID: \"f5292b84-8cb2-4f43-96f9-6304705b15bc\") " pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-556x5" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.635279 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-wrzxj" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.635670 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-7q7hb" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.636065 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rdbn6"] Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.653948 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zwrgg\" (UniqueName: \"kubernetes.io/projected/f5292b84-8cb2-4f43-96f9-6304705b15bc-kube-api-access-zwrgg\") pod \"telemetry-operator-controller-manager-85cd9769bb-556x5\" (UID: \"f5292b84-8cb2-4f43-96f9-6304705b15bc\") " pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-556x5" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.654497 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b9hjk\" (UniqueName: \"kubernetes.io/projected/0a5c558d-f69d-4299-97e2-00326ec7e416-kube-api-access-b9hjk\") pod \"test-operator-controller-manager-69797bbcbd-9rc7q\" (UID: \"0a5c558d-f69d-4299-97e2-00326ec7e416\") " pod="openstack-operators/test-operator-controller-manager-69797bbcbd-9rc7q" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.660140 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-428wz\" (UniqueName: \"kubernetes.io/projected/48e1a8f3-00fd-48a6-be02-7c61f0425809-kube-api-access-428wz\") pod \"swift-operator-controller-manager-547cbdb99f-2wjhd\" (UID: \"48e1a8f3-00fd-48a6-be02-7c61f0425809\") " pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-2wjhd" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.698874 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-m6g48" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.722348 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-2wjhd" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.740678 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fw878\" (UniqueName: \"kubernetes.io/projected/3028718b-d03f-414e-834d-93eb28eeb369-kube-api-access-fw878\") pod \"rabbitmq-cluster-operator-manager-668c99d594-rdbn6\" (UID: \"3028718b-d03f-414e-834d-93eb28eeb369\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rdbn6" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.740751 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p8mr9\" (UniqueName: \"kubernetes.io/projected/07b078fc-4665-4e58-934d-f606471d5942-kube-api-access-p8mr9\") pod \"openstack-operator-controller-manager-647bb87bbd-xc9z6\" (UID: \"07b078fc-4665-4e58-934d-f606471d5942\") " pod="openstack-operators/openstack-operator-controller-manager-647bb87bbd-xc9z6" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.740801 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-metrics-certs\") pod \"openstack-operator-controller-manager-647bb87bbd-xc9z6\" (UID: \"07b078fc-4665-4e58-934d-f606471d5942\") " pod="openstack-operators/openstack-operator-controller-manager-647bb87bbd-xc9z6" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.740853 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-webhook-certs\") pod \"openstack-operator-controller-manager-647bb87bbd-xc9z6\" (UID: \"07b078fc-4665-4e58-934d-f606471d5942\") " pod="openstack-operators/openstack-operator-controller-manager-647bb87bbd-xc9z6" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.740881 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p7l89\" (UniqueName: \"kubernetes.io/projected/b5af85fb-e3ef-41b7-8c6b-7afddd5200bd-kube-api-access-p7l89\") pod \"watcher-operator-controller-manager-5ffb9c6597-gtcc5\" (UID: \"b5af85fb-e3ef-41b7-8c6b-7afddd5200bd\") " pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-gtcc5" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.749149 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-556x5" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.769760 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p7l89\" (UniqueName: \"kubernetes.io/projected/b5af85fb-e3ef-41b7-8c6b-7afddd5200bd-kube-api-access-p7l89\") pod \"watcher-operator-controller-manager-5ffb9c6597-gtcc5\" (UID: \"b5af85fb-e3ef-41b7-8c6b-7afddd5200bd\") " pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-gtcc5" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.785529 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-9rc7q" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.842837 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-metrics-certs\") pod \"openstack-operator-controller-manager-647bb87bbd-xc9z6\" (UID: \"07b078fc-4665-4e58-934d-f606471d5942\") " pod="openstack-operators/openstack-operator-controller-manager-647bb87bbd-xc9z6" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.842948 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-webhook-certs\") pod \"openstack-operator-controller-manager-647bb87bbd-xc9z6\" (UID: \"07b078fc-4665-4e58-934d-f606471d5942\") " pod="openstack-operators/openstack-operator-controller-manager-647bb87bbd-xc9z6" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.842994 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fw878\" (UniqueName: \"kubernetes.io/projected/3028718b-d03f-414e-834d-93eb28eeb369-kube-api-access-fw878\") pod \"rabbitmq-cluster-operator-manager-668c99d594-rdbn6\" (UID: \"3028718b-d03f-414e-834d-93eb28eeb369\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rdbn6" Jan 22 06:02:03 crc kubenswrapper[4933]: E0122 06:02:03.843014 4933 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.843051 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p8mr9\" (UniqueName: \"kubernetes.io/projected/07b078fc-4665-4e58-934d-f606471d5942-kube-api-access-p8mr9\") pod \"openstack-operator-controller-manager-647bb87bbd-xc9z6\" (UID: \"07b078fc-4665-4e58-934d-f606471d5942\") " pod="openstack-operators/openstack-operator-controller-manager-647bb87bbd-xc9z6" Jan 22 06:02:03 crc kubenswrapper[4933]: E0122 06:02:03.843091 4933 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 22 06:02:03 crc kubenswrapper[4933]: E0122 06:02:03.843100 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-metrics-certs podName:07b078fc-4665-4e58-934d-f606471d5942 nodeName:}" failed. No retries permitted until 2026-01-22 06:02:04.343063934 +0000 UTC m=+972.180189287 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-metrics-certs") pod "openstack-operator-controller-manager-647bb87bbd-xc9z6" (UID: "07b078fc-4665-4e58-934d-f606471d5942") : secret "metrics-server-cert" not found Jan 22 06:02:03 crc kubenswrapper[4933]: E0122 06:02:03.843139 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-webhook-certs podName:07b078fc-4665-4e58-934d-f606471d5942 nodeName:}" failed. No retries permitted until 2026-01-22 06:02:04.343129045 +0000 UTC m=+972.180254398 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-webhook-certs") pod "openstack-operator-controller-manager-647bb87bbd-xc9z6" (UID: "07b078fc-4665-4e58-934d-f606471d5942") : secret "webhook-server-cert" not found Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.864273 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p8mr9\" (UniqueName: \"kubernetes.io/projected/07b078fc-4665-4e58-934d-f606471d5942-kube-api-access-p8mr9\") pod \"openstack-operator-controller-manager-647bb87bbd-xc9z6\" (UID: \"07b078fc-4665-4e58-934d-f606471d5942\") " pod="openstack-operators/openstack-operator-controller-manager-647bb87bbd-xc9z6" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.864624 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fw878\" (UniqueName: \"kubernetes.io/projected/3028718b-d03f-414e-834d-93eb28eeb369-kube-api-access-fw878\") pod \"rabbitmq-cluster-operator-manager-668c99d594-rdbn6\" (UID: \"3028718b-d03f-414e-834d-93eb28eeb369\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rdbn6" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.954338 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-gtcc5" Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.965109 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-dmwlv"] Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.970234 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-594c8c9d5d-p6w9k"] Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.983201 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-77d5c5b54f-ghk9g"] Jan 22 06:02:03 crc kubenswrapper[4933]: I0122 06:02:03.997456 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rdbn6" Jan 22 06:02:04 crc kubenswrapper[4933]: I0122 06:02:04.045992 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ae3ccc66-eed1-4750-8af1-7f99673b1323-cert\") pod \"openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns\" (UID: \"ae3ccc66-eed1-4750-8af1-7f99673b1323\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns" Jan 22 06:02:04 crc kubenswrapper[4933]: E0122 06:02:04.046172 4933 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 22 06:02:04 crc kubenswrapper[4933]: E0122 06:02:04.046242 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ae3ccc66-eed1-4750-8af1-7f99673b1323-cert podName:ae3ccc66-eed1-4750-8af1-7f99673b1323 nodeName:}" failed. No retries permitted until 2026-01-22 06:02:05.046224845 +0000 UTC m=+972.883350198 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/ae3ccc66-eed1-4750-8af1-7f99673b1323-cert") pod "openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns" (UID: "ae3ccc66-eed1-4750-8af1-7f99673b1323") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 22 06:02:04 crc kubenswrapper[4933]: I0122 06:02:04.310508 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-dmwlv" event={"ID":"ab3830be-ff45-443a-9089-29438fca5c75","Type":"ContainerStarted","Data":"78456050bdba7858c95600607e23d3011e9a66fc35cd467882a9188d8793471a"} Jan 22 06:02:04 crc kubenswrapper[4933]: I0122 06:02:04.316504 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-ghk9g" event={"ID":"45469fd5-7d9d-44f4-82a1-61d82f8e2dc8","Type":"ContainerStarted","Data":"a92bb39b1df49801b26bccb700a96c158b4988fe87ffa500b182f97d5717241c"} Jan 22 06:02:04 crc kubenswrapper[4933]: I0122 06:02:04.325182 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-p6w9k" event={"ID":"10fc162d-8e83-4741-88be-c1e8dd9f291a","Type":"ContainerStarted","Data":"ea34dcbe31730c526ac61ccca4b1abe97f192dece7b7c522c78ac1dac0e4b52d"} Jan 22 06:02:04 crc kubenswrapper[4933]: I0122 06:02:04.338164 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-b8b6d4659-pkpcl"] Jan 22 06:02:04 crc kubenswrapper[4933]: I0122 06:02:04.352094 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-metrics-certs\") pod \"openstack-operator-controller-manager-647bb87bbd-xc9z6\" (UID: \"07b078fc-4665-4e58-934d-f606471d5942\") " pod="openstack-operators/openstack-operator-controller-manager-647bb87bbd-xc9z6" Jan 22 06:02:04 crc kubenswrapper[4933]: I0122 06:02:04.352191 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-webhook-certs\") pod \"openstack-operator-controller-manager-647bb87bbd-xc9z6\" (UID: \"07b078fc-4665-4e58-934d-f606471d5942\") " pod="openstack-operators/openstack-operator-controller-manager-647bb87bbd-xc9z6" Jan 22 06:02:04 crc kubenswrapper[4933]: E0122 06:02:04.352415 4933 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 22 06:02:04 crc kubenswrapper[4933]: E0122 06:02:04.352480 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-webhook-certs podName:07b078fc-4665-4e58-934d-f606471d5942 nodeName:}" failed. No retries permitted until 2026-01-22 06:02:05.352460449 +0000 UTC m=+973.189585802 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-webhook-certs") pod "openstack-operator-controller-manager-647bb87bbd-xc9z6" (UID: "07b078fc-4665-4e58-934d-f606471d5942") : secret "webhook-server-cert" not found Jan 22 06:02:04 crc kubenswrapper[4933]: E0122 06:02:04.352536 4933 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 22 06:02:04 crc kubenswrapper[4933]: E0122 06:02:04.352563 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-metrics-certs podName:07b078fc-4665-4e58-934d-f606471d5942 nodeName:}" failed. No retries permitted until 2026-01-22 06:02:05.352554381 +0000 UTC m=+973.189679734 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-metrics-certs") pod "openstack-operator-controller-manager-647bb87bbd-xc9z6" (UID: "07b078fc-4665-4e58-934d-f606471d5942") : secret "metrics-server-cert" not found Jan 22 06:02:04 crc kubenswrapper[4933]: I0122 06:02:04.352938 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-78c6999f6f-dwrg6"] Jan 22 06:02:04 crc kubenswrapper[4933]: I0122 06:02:04.553787 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/bee42408-9ab2-4e83-a06b-8cb123a853f9-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-5vl5w\" (UID: \"bee42408-9ab2-4e83-a06b-8cb123a853f9\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-5vl5w" Jan 22 06:02:04 crc kubenswrapper[4933]: E0122 06:02:04.554203 4933 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 22 06:02:04 crc kubenswrapper[4933]: E0122 06:02:04.554423 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bee42408-9ab2-4e83-a06b-8cb123a853f9-cert podName:bee42408-9ab2-4e83-a06b-8cb123a853f9 nodeName:}" failed. No retries permitted until 2026-01-22 06:02:06.554407096 +0000 UTC m=+974.391532449 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/bee42408-9ab2-4e83-a06b-8cb123a853f9-cert") pod "infra-operator-controller-manager-54ccf4f85d-5vl5w" (UID: "bee42408-9ab2-4e83-a06b-8cb123a853f9") : secret "infra-operator-webhook-server-cert" not found Jan 22 06:02:04 crc kubenswrapper[4933]: I0122 06:02:04.812827 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-547cbdb99f-2wjhd"] Jan 22 06:02:04 crc kubenswrapper[4933]: I0122 06:02:04.814041 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-69cf5d4557-6ktqg"] Jan 22 06:02:04 crc kubenswrapper[4933]: I0122 06:02:04.818416 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-6b8bc8d87d-czc9v"] Jan 22 06:02:04 crc kubenswrapper[4933]: I0122 06:02:04.822851 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-7bd9774b6-w25vn"] Jan 22 06:02:04 crc kubenswrapper[4933]: I0122 06:02:04.828234 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-c87fff755-dtg9q"] Jan 22 06:02:04 crc kubenswrapper[4933]: I0122 06:02:04.833118 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5d8f59fb49-nkc7j"] Jan 22 06:02:04 crc kubenswrapper[4933]: I0122 06:02:04.847063 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-78fdd796fd-x7t54"] Jan 22 06:02:04 crc kubenswrapper[4933]: I0122 06:02:04.871792 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-b45d7bf98-sb8mm"] Jan 22 06:02:04 crc kubenswrapper[4933]: W0122 06:02:04.888266 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod48e1a8f3_00fd_48a6_be02_7c61f0425809.slice/crio-72059fdda9522dce59cdf0fc04ce6f8e4eda896ad6b623e0cb3e644f09c4de59 WatchSource:0}: Error finding container 72059fdda9522dce59cdf0fc04ce6f8e4eda896ad6b623e0cb3e644f09c4de59: Status 404 returned error can't find the container with id 72059fdda9522dce59cdf0fc04ce6f8e4eda896ad6b623e0cb3e644f09c4de59 Jan 22 06:02:04 crc kubenswrapper[4933]: I0122 06:02:04.889020 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-z6g5l"] Jan 22 06:02:04 crc kubenswrapper[4933]: I0122 06:02:04.912229 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-55db956ddc-7q7hb"] Jan 22 06:02:04 crc kubenswrapper[4933]: I0122 06:02:04.919330 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5d646b7d76-m6g48"] Jan 22 06:02:04 crc kubenswrapper[4933]: I0122 06:02:04.926086 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rdbn6"] Jan 22 06:02:04 crc kubenswrapper[4933]: W0122 06:02:04.926371 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod04683325_6972_455c_9ca5_ddf1fd4b9862.slice/crio-3770c80a5cba4303b7a3c7ac0b9ebd7411591198b72561ee3cf3bab08e7c41ba WatchSource:0}: Error finding container 
3770c80a5cba4303b7a3c7ac0b9ebd7411591198b72561ee3cf3bab08e7c41ba: Status 404 returned error can't find the container with id 3770c80a5cba4303b7a3c7ac0b9ebd7411591198b72561ee3cf3bab08e7c41ba Jan 22 06:02:04 crc kubenswrapper[4933]: I0122 06:02:04.933612 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-5ffb9c6597-gtcc5"] Jan 22 06:02:04 crc kubenswrapper[4933]: I0122 06:02:04.938210 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-69797bbcbd-9rc7q"] Jan 22 06:02:04 crc kubenswrapper[4933]: W0122 06:02:04.941153 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod78b94bce_a7a5_471f_bab2_f57baeff12b6.slice/crio-b48629f976840101daac78e6e74d442b399753ec269201931659525b7613efb8 WatchSource:0}: Error finding container b48629f976840101daac78e6e74d442b399753ec269201931659525b7613efb8: Status 404 returned error can't find the container with id b48629f976840101daac78e6e74d442b399753ec269201931659525b7613efb8 Jan 22 06:02:04 crc kubenswrapper[4933]: I0122 06:02:04.944006 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-85cd9769bb-556x5"] Jan 22 06:02:04 crc kubenswrapper[4933]: E0122 06:02:04.945360 4933 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/barbican-operator@sha256:e5e017be64edd679623ea1b7e6a1ae780fdcee4ef79be989b93d8c1d082da15b,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-rp6zv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-operator-controller-manager-59dd8b7cbf-z6g5l_openstack-operators(78b94bce-a7a5-471f-bab2-f57baeff12b6): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 22 06:02:04 crc kubenswrapper[4933]: E0122 06:02:04.946483 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-z6g5l" podUID="78b94bce-a7a5-471f-bab2-f57baeff12b6" Jan 22 06:02:04 crc kubenswrapper[4933]: E0122 06:02:04.959926 4933 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:c8dde42dafd41026ed2e4cfc26efc0fff63c4ba9d31326ae7dc644ccceaafa9d,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-b9hjk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-69797bbcbd-9rc7q_openstack-operators(0a5c558d-f69d-4299-97e2-00326ec7e416): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 22 06:02:04 crc kubenswrapper[4933]: E0122 06:02:04.960357 4933 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-fw878,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-rdbn6_openstack-operators(3028718b-d03f-414e-834d-93eb28eeb369): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 22 06:02:04 crc kubenswrapper[4933]: E0122 06:02:04.960462 4933 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:e02722d7581bfe1c5fc13e2fa6811d8665102ba86635c77547abf6b933cde127,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-zwrgg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-85cd9769bb-556x5_openstack-operators(f5292b84-8cb2-4f43-96f9-6304705b15bc): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 22 06:02:04 crc kubenswrapper[4933]: E0122 06:02:04.961173 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-9rc7q" podUID="0a5c558d-f69d-4299-97e2-00326ec7e416" Jan 22 06:02:04 crc kubenswrapper[4933]: E0122 06:02:04.961368 4933 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:2d6d13b3c28e45c6bec980b8808dda8da4723ae87e66d04f53d52c3b3c51612b,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-p7l89,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-5ffb9c6597-gtcc5_openstack-operators(b5af85fb-e3ef-41b7-8c6b-7afddd5200bd): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 22 06:02:04 crc kubenswrapper[4933]: E0122 06:02:04.961434 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rdbn6" podUID="3028718b-d03f-414e-834d-93eb28eeb369" Jan 22 06:02:04 crc kubenswrapper[4933]: E0122 06:02:04.961540 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-556x5" podUID="f5292b84-8cb2-4f43-96f9-6304705b15bc" Jan 22 06:02:04 crc kubenswrapper[4933]: E0122 06:02:04.962652 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-gtcc5" podUID="b5af85fb-e3ef-41b7-8c6b-7afddd5200bd" Jan 22 06:02:04 crc kubenswrapper[4933]: E0122 06:02:04.971514 4933 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:65cfe5b9d5b0571aaf8ff9840b12cc56e90ca4cef162dd260c3a9fa2b52c6dd0,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 
0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-tqbbt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-5d646b7d76-m6g48_openstack-operators(2892be27-6da5-4a19-a30e-36f5907f5d70): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 22 06:02:04 crc kubenswrapper[4933]: E0122 06:02:04.972800 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-m6g48" podUID="2892be27-6da5-4a19-a30e-36f5907f5d70" Jan 22 06:02:05 crc kubenswrapper[4933]: I0122 06:02:05.061402 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ae3ccc66-eed1-4750-8af1-7f99673b1323-cert\") pod \"openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns\" (UID: \"ae3ccc66-eed1-4750-8af1-7f99673b1323\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns" Jan 22 06:02:05 crc kubenswrapper[4933]: E0122 06:02:05.061537 4933 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 22 06:02:05 crc kubenswrapper[4933]: E0122 06:02:05.061582 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ae3ccc66-eed1-4750-8af1-7f99673b1323-cert podName:ae3ccc66-eed1-4750-8af1-7f99673b1323 nodeName:}" failed. No retries permitted until 2026-01-22 06:02:07.061568855 +0000 UTC m=+974.898694208 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/ae3ccc66-eed1-4750-8af1-7f99673b1323-cert") pod "openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns" (UID: "ae3ccc66-eed1-4750-8af1-7f99673b1323") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 22 06:02:05 crc kubenswrapper[4933]: I0122 06:02:05.347595 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-z6g5l" event={"ID":"78b94bce-a7a5-471f-bab2-f57baeff12b6","Type":"ContainerStarted","Data":"b48629f976840101daac78e6e74d442b399753ec269201931659525b7613efb8"} Jan 22 06:02:05 crc kubenswrapper[4933]: E0122 06:02:05.350061 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/barbican-operator@sha256:e5e017be64edd679623ea1b7e6a1ae780fdcee4ef79be989b93d8c1d082da15b\\\"\"" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-z6g5l" podUID="78b94bce-a7a5-471f-bab2-f57baeff12b6" Jan 22 06:02:05 crc kubenswrapper[4933]: I0122 06:02:05.351118 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-dtg9q" event={"ID":"e0335df2-7bd1-4b61-8057-7663a730d2ff","Type":"ContainerStarted","Data":"a28b8af7d02c3b01bdf096d277049f93c5a08545de02db6f5468e7d75100b28a"} Jan 22 06:02:05 crc kubenswrapper[4933]: I0122 06:02:05.353699 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-sb8mm" event={"ID":"c2a2b482-2d1c-4c3b-b1dc-7d3f291b665e","Type":"ContainerStarted","Data":"7330c7a0c0018530539d96069cc1e85f556653894864f0b49fa5b0343d498e52"} Jan 22 06:02:05 crc kubenswrapper[4933]: I0122 06:02:05.355390 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-czc9v" event={"ID":"300ef4d3-6c13-4a9d-96e6-a707abccca2c","Type":"ContainerStarted","Data":"e467750bd1ed97716111664219d29387140b63413379c0f898c5e74b6ec278e0"} Jan 22 06:02:05 crc kubenswrapper[4933]: I0122 06:02:05.357606 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-x7t54" event={"ID":"f4f610a0-0ede-4d86-b1d4-fcd4a70d9c1c","Type":"ContainerStarted","Data":"74827f7bd36e997624eaf201f489f4aa177f207d9383a3dc48321cbfd4ec9af3"} Jan 22 06:02:05 crc kubenswrapper[4933]: I0122 06:02:05.359880 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-9rc7q" event={"ID":"0a5c558d-f69d-4299-97e2-00326ec7e416","Type":"ContainerStarted","Data":"aa67cfc54c911e4640ecd86ff956ef3bb10b59e6cf6ea9b7c5000b4a439229f8"} Jan 22 06:02:05 crc kubenswrapper[4933]: E0122 06:02:05.369802 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:c8dde42dafd41026ed2e4cfc26efc0fff63c4ba9d31326ae7dc644ccceaafa9d\\\"\"" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-9rc7q" podUID="0a5c558d-f69d-4299-97e2-00326ec7e416" Jan 22 06:02:05 crc kubenswrapper[4933]: I0122 06:02:05.372142 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-dwrg6" 
event={"ID":"34f0d75a-dc72-4dad-82a6-512c1351210b","Type":"ContainerStarted","Data":"b764d820afa0b64dc17ff0bb93d143e9526099e29e7a85dd557b990eabdccb59"} Jan 22 06:02:05 crc kubenswrapper[4933]: I0122 06:02:05.372736 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-metrics-certs\") pod \"openstack-operator-controller-manager-647bb87bbd-xc9z6\" (UID: \"07b078fc-4665-4e58-934d-f606471d5942\") " pod="openstack-operators/openstack-operator-controller-manager-647bb87bbd-xc9z6" Jan 22 06:02:05 crc kubenswrapper[4933]: I0122 06:02:05.373241 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-webhook-certs\") pod \"openstack-operator-controller-manager-647bb87bbd-xc9z6\" (UID: \"07b078fc-4665-4e58-934d-f606471d5942\") " pod="openstack-operators/openstack-operator-controller-manager-647bb87bbd-xc9z6" Jan 22 06:02:05 crc kubenswrapper[4933]: E0122 06:02:05.373404 4933 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 22 06:02:05 crc kubenswrapper[4933]: E0122 06:02:05.373465 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-metrics-certs podName:07b078fc-4665-4e58-934d-f606471d5942 nodeName:}" failed. No retries permitted until 2026-01-22 06:02:07.373445543 +0000 UTC m=+975.210570896 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-metrics-certs") pod "openstack-operator-controller-manager-647bb87bbd-xc9z6" (UID: "07b078fc-4665-4e58-934d-f606471d5942") : secret "metrics-server-cert" not found Jan 22 06:02:05 crc kubenswrapper[4933]: E0122 06:02:05.373581 4933 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 22 06:02:05 crc kubenswrapper[4933]: E0122 06:02:05.373686 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-webhook-certs podName:07b078fc-4665-4e58-934d-f606471d5942 nodeName:}" failed. No retries permitted until 2026-01-22 06:02:07.373668807 +0000 UTC m=+975.210794150 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-webhook-certs") pod "openstack-operator-controller-manager-647bb87bbd-xc9z6" (UID: "07b078fc-4665-4e58-934d-f606471d5942") : secret "webhook-server-cert" not found Jan 22 06:02:05 crc kubenswrapper[4933]: I0122 06:02:05.375555 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-nkc7j" event={"ID":"21941523-226c-4a4c-a099-51df0766a712","Type":"ContainerStarted","Data":"52a8bcc93753e865955746208fd392813363062f103fdd6a11f4780c217cb4f1"} Jan 22 06:02:05 crc kubenswrapper[4933]: I0122 06:02:05.376939 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rdbn6" event={"ID":"3028718b-d03f-414e-834d-93eb28eeb369","Type":"ContainerStarted","Data":"1953748c854cf756744eb59eabc12b29bfb95ceda1e0be7f5f9b1a917ba4aa48"} Jan 22 06:02:05 crc kubenswrapper[4933]: E0122 06:02:05.378925 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rdbn6" podUID="3028718b-d03f-414e-834d-93eb28eeb369" Jan 22 06:02:05 crc kubenswrapper[4933]: I0122 06:02:05.379469 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-556x5" event={"ID":"f5292b84-8cb2-4f43-96f9-6304705b15bc","Type":"ContainerStarted","Data":"eee719c23c3af533a223b6a559d85c8ff1a9e4dbbdb9a1ff30dee0456d558dd2"} Jan 22 06:02:05 crc kubenswrapper[4933]: E0122 06:02:05.380777 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:e02722d7581bfe1c5fc13e2fa6811d8665102ba86635c77547abf6b933cde127\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-556x5" podUID="f5292b84-8cb2-4f43-96f9-6304705b15bc" Jan 22 06:02:05 crc kubenswrapper[4933]: I0122 06:02:05.387341 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-w25vn" event={"ID":"04683325-6972-455c-9ca5-ddf1fd4b9862","Type":"ContainerStarted","Data":"3770c80a5cba4303b7a3c7ac0b9ebd7411591198b72561ee3cf3bab08e7c41ba"} Jan 22 06:02:05 crc kubenswrapper[4933]: I0122 06:02:05.426719 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-2wjhd" event={"ID":"48e1a8f3-00fd-48a6-be02-7c61f0425809","Type":"ContainerStarted","Data":"72059fdda9522dce59cdf0fc04ce6f8e4eda896ad6b623e0cb3e644f09c4de59"} Jan 22 06:02:05 crc kubenswrapper[4933]: I0122 06:02:05.434588 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-m6g48" event={"ID":"2892be27-6da5-4a19-a30e-36f5907f5d70","Type":"ContainerStarted","Data":"7895c09d63dd36732c3eb10138de58dab1f206122f930d6a3c31d65d49e9ebdf"} Jan 22 06:02:05 crc kubenswrapper[4933]: E0122 06:02:05.436335 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off 
pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:65cfe5b9d5b0571aaf8ff9840b12cc56e90ca4cef162dd260c3a9fa2b52c6dd0\\\"\"" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-m6g48" podUID="2892be27-6da5-4a19-a30e-36f5907f5d70" Jan 22 06:02:05 crc kubenswrapper[4933]: I0122 06:02:05.438044 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-gtcc5" event={"ID":"b5af85fb-e3ef-41b7-8c6b-7afddd5200bd","Type":"ContainerStarted","Data":"ea15fd65c419bf5cfb01052e363c2045f1dc1e9a13b2eda5c768216f6f055100"} Jan 22 06:02:05 crc kubenswrapper[4933]: E0122 06:02:05.440643 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:2d6d13b3c28e45c6bec980b8808dda8da4723ae87e66d04f53d52c3b3c51612b\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-gtcc5" podUID="b5af85fb-e3ef-41b7-8c6b-7afddd5200bd" Jan 22 06:02:05 crc kubenswrapper[4933]: I0122 06:02:05.441907 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-pkpcl" event={"ID":"d3f5d746-ae61-4d36-bcce-4530c7f7a899","Type":"ContainerStarted","Data":"0215c7893487951756da422b12a0810209952d801fc4a839580749c9c2f36297"} Jan 22 06:02:05 crc kubenswrapper[4933]: I0122 06:02:05.445469 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-7q7hb" event={"ID":"289a66a7-9513-4b66-990a-3d9f11919531","Type":"ContainerStarted","Data":"8cbbb91f2350a2f873f3a0c2978f9625005bc8cfe377d6fa02a74445dcfc1a70"} Jan 22 06:02:05 crc kubenswrapper[4933]: I0122 06:02:05.447187 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-6ktqg" event={"ID":"53b0d937-20e6-4a4b-b61b-f172c672c43f","Type":"ContainerStarted","Data":"c944ee2aceb3db9e59a7d4569dda54f8ee4d57c389945dc7fb0b14555ca81ae4"} Jan 22 06:02:06 crc kubenswrapper[4933]: E0122 06:02:06.459822 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:c8dde42dafd41026ed2e4cfc26efc0fff63c4ba9d31326ae7dc644ccceaafa9d\\\"\"" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-9rc7q" podUID="0a5c558d-f69d-4299-97e2-00326ec7e416" Jan 22 06:02:06 crc kubenswrapper[4933]: E0122 06:02:06.461441 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/barbican-operator@sha256:e5e017be64edd679623ea1b7e6a1ae780fdcee4ef79be989b93d8c1d082da15b\\\"\"" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-z6g5l" podUID="78b94bce-a7a5-471f-bab2-f57baeff12b6" Jan 22 06:02:06 crc kubenswrapper[4933]: E0122 06:02:06.461501 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rdbn6" 
podUID="3028718b-d03f-414e-834d-93eb28eeb369" Jan 22 06:02:06 crc kubenswrapper[4933]: E0122 06:02:06.461650 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:2d6d13b3c28e45c6bec980b8808dda8da4723ae87e66d04f53d52c3b3c51612b\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-gtcc5" podUID="b5af85fb-e3ef-41b7-8c6b-7afddd5200bd" Jan 22 06:02:06 crc kubenswrapper[4933]: E0122 06:02:06.463086 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:e02722d7581bfe1c5fc13e2fa6811d8665102ba86635c77547abf6b933cde127\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-556x5" podUID="f5292b84-8cb2-4f43-96f9-6304705b15bc" Jan 22 06:02:06 crc kubenswrapper[4933]: E0122 06:02:06.472408 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:65cfe5b9d5b0571aaf8ff9840b12cc56e90ca4cef162dd260c3a9fa2b52c6dd0\\\"\"" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-m6g48" podUID="2892be27-6da5-4a19-a30e-36f5907f5d70" Jan 22 06:02:06 crc kubenswrapper[4933]: I0122 06:02:06.614946 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/bee42408-9ab2-4e83-a06b-8cb123a853f9-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-5vl5w\" (UID: \"bee42408-9ab2-4e83-a06b-8cb123a853f9\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-5vl5w" Jan 22 06:02:06 crc kubenswrapper[4933]: E0122 06:02:06.615554 4933 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 22 06:02:06 crc kubenswrapper[4933]: E0122 06:02:06.615599 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bee42408-9ab2-4e83-a06b-8cb123a853f9-cert podName:bee42408-9ab2-4e83-a06b-8cb123a853f9 nodeName:}" failed. No retries permitted until 2026-01-22 06:02:10.615586377 +0000 UTC m=+978.452711730 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/bee42408-9ab2-4e83-a06b-8cb123a853f9-cert") pod "infra-operator-controller-manager-54ccf4f85d-5vl5w" (UID: "bee42408-9ab2-4e83-a06b-8cb123a853f9") : secret "infra-operator-webhook-server-cert" not found Jan 22 06:02:07 crc kubenswrapper[4933]: I0122 06:02:07.122942 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ae3ccc66-eed1-4750-8af1-7f99673b1323-cert\") pod \"openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns\" (UID: \"ae3ccc66-eed1-4750-8af1-7f99673b1323\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns" Jan 22 06:02:07 crc kubenswrapper[4933]: E0122 06:02:07.123088 4933 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 22 06:02:07 crc kubenswrapper[4933]: E0122 06:02:07.123474 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ae3ccc66-eed1-4750-8af1-7f99673b1323-cert podName:ae3ccc66-eed1-4750-8af1-7f99673b1323 nodeName:}" failed. No retries permitted until 2026-01-22 06:02:11.123450492 +0000 UTC m=+978.960575845 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/ae3ccc66-eed1-4750-8af1-7f99673b1323-cert") pod "openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns" (UID: "ae3ccc66-eed1-4750-8af1-7f99673b1323") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 22 06:02:07 crc kubenswrapper[4933]: I0122 06:02:07.428678 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-webhook-certs\") pod \"openstack-operator-controller-manager-647bb87bbd-xc9z6\" (UID: \"07b078fc-4665-4e58-934d-f606471d5942\") " pod="openstack-operators/openstack-operator-controller-manager-647bb87bbd-xc9z6" Jan 22 06:02:07 crc kubenswrapper[4933]: I0122 06:02:07.428797 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-metrics-certs\") pod \"openstack-operator-controller-manager-647bb87bbd-xc9z6\" (UID: \"07b078fc-4665-4e58-934d-f606471d5942\") " pod="openstack-operators/openstack-operator-controller-manager-647bb87bbd-xc9z6" Jan 22 06:02:07 crc kubenswrapper[4933]: E0122 06:02:07.428928 4933 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 22 06:02:07 crc kubenswrapper[4933]: E0122 06:02:07.428973 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-metrics-certs podName:07b078fc-4665-4e58-934d-f606471d5942 nodeName:}" failed. No retries permitted until 2026-01-22 06:02:11.428959003 +0000 UTC m=+979.266084356 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-metrics-certs") pod "openstack-operator-controller-manager-647bb87bbd-xc9z6" (UID: "07b078fc-4665-4e58-934d-f606471d5942") : secret "metrics-server-cert" not found Jan 22 06:02:07 crc kubenswrapper[4933]: E0122 06:02:07.429300 4933 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 22 06:02:07 crc kubenswrapper[4933]: E0122 06:02:07.429330 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-webhook-certs podName:07b078fc-4665-4e58-934d-f606471d5942 nodeName:}" failed. No retries permitted until 2026-01-22 06:02:11.429321942 +0000 UTC m=+979.266447295 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-webhook-certs") pod "openstack-operator-controller-manager-647bb87bbd-xc9z6" (UID: "07b078fc-4665-4e58-934d-f606471d5942") : secret "webhook-server-cert" not found Jan 22 06:02:10 crc kubenswrapper[4933]: I0122 06:02:10.716962 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/bee42408-9ab2-4e83-a06b-8cb123a853f9-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-5vl5w\" (UID: \"bee42408-9ab2-4e83-a06b-8cb123a853f9\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-5vl5w" Jan 22 06:02:10 crc kubenswrapper[4933]: E0122 06:02:10.717187 4933 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 22 06:02:10 crc kubenswrapper[4933]: E0122 06:02:10.717496 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bee42408-9ab2-4e83-a06b-8cb123a853f9-cert podName:bee42408-9ab2-4e83-a06b-8cb123a853f9 nodeName:}" failed. No retries permitted until 2026-01-22 06:02:18.71747767 +0000 UTC m=+986.554603033 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/bee42408-9ab2-4e83-a06b-8cb123a853f9-cert") pod "infra-operator-controller-manager-54ccf4f85d-5vl5w" (UID: "bee42408-9ab2-4e83-a06b-8cb123a853f9") : secret "infra-operator-webhook-server-cert" not found Jan 22 06:02:11 crc kubenswrapper[4933]: I0122 06:02:11.125258 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ae3ccc66-eed1-4750-8af1-7f99673b1323-cert\") pod \"openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns\" (UID: \"ae3ccc66-eed1-4750-8af1-7f99673b1323\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns" Jan 22 06:02:11 crc kubenswrapper[4933]: E0122 06:02:11.125471 4933 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 22 06:02:11 crc kubenswrapper[4933]: E0122 06:02:11.125557 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ae3ccc66-eed1-4750-8af1-7f99673b1323-cert podName:ae3ccc66-eed1-4750-8af1-7f99673b1323 nodeName:}" failed. No retries permitted until 2026-01-22 06:02:19.125538417 +0000 UTC m=+986.962663770 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/ae3ccc66-eed1-4750-8af1-7f99673b1323-cert") pod "openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns" (UID: "ae3ccc66-eed1-4750-8af1-7f99673b1323") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 22 06:02:11 crc kubenswrapper[4933]: I0122 06:02:11.430414 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-webhook-certs\") pod \"openstack-operator-controller-manager-647bb87bbd-xc9z6\" (UID: \"07b078fc-4665-4e58-934d-f606471d5942\") " pod="openstack-operators/openstack-operator-controller-manager-647bb87bbd-xc9z6" Jan 22 06:02:11 crc kubenswrapper[4933]: I0122 06:02:11.430857 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-metrics-certs\") pod \"openstack-operator-controller-manager-647bb87bbd-xc9z6\" (UID: \"07b078fc-4665-4e58-934d-f606471d5942\") " pod="openstack-operators/openstack-operator-controller-manager-647bb87bbd-xc9z6" Jan 22 06:02:11 crc kubenswrapper[4933]: E0122 06:02:11.431011 4933 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 22 06:02:11 crc kubenswrapper[4933]: E0122 06:02:11.431095 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-metrics-certs podName:07b078fc-4665-4e58-934d-f606471d5942 nodeName:}" failed. No retries permitted until 2026-01-22 06:02:19.431056849 +0000 UTC m=+987.268182212 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-metrics-certs") pod "openstack-operator-controller-manager-647bb87bbd-xc9z6" (UID: "07b078fc-4665-4e58-934d-f606471d5942") : secret "metrics-server-cert" not found Jan 22 06:02:11 crc kubenswrapper[4933]: E0122 06:02:11.431485 4933 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 22 06:02:11 crc kubenswrapper[4933]: E0122 06:02:11.431542 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-webhook-certs podName:07b078fc-4665-4e58-934d-f606471d5942 nodeName:}" failed. No retries permitted until 2026-01-22 06:02:19.431530011 +0000 UTC m=+987.268655374 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-webhook-certs") pod "openstack-operator-controller-manager-647bb87bbd-xc9z6" (UID: "07b078fc-4665-4e58-934d-f606471d5942") : secret "webhook-server-cert" not found Jan 22 06:02:14 crc kubenswrapper[4933]: I0122 06:02:14.792334 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-7dnz7"] Jan 22 06:02:14 crc kubenswrapper[4933]: I0122 06:02:14.796860 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7dnz7" Jan 22 06:02:14 crc kubenswrapper[4933]: I0122 06:02:14.804872 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7dnz7"] Jan 22 06:02:14 crc kubenswrapper[4933]: I0122 06:02:14.902847 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w7hcc\" (UniqueName: \"kubernetes.io/projected/fe6b55e0-eef0-41a8-8645-1f664e64dee0-kube-api-access-w7hcc\") pod \"redhat-marketplace-7dnz7\" (UID: \"fe6b55e0-eef0-41a8-8645-1f664e64dee0\") " pod="openshift-marketplace/redhat-marketplace-7dnz7" Jan 22 06:02:14 crc kubenswrapper[4933]: I0122 06:02:14.902927 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe6b55e0-eef0-41a8-8645-1f664e64dee0-catalog-content\") pod \"redhat-marketplace-7dnz7\" (UID: \"fe6b55e0-eef0-41a8-8645-1f664e64dee0\") " pod="openshift-marketplace/redhat-marketplace-7dnz7" Jan 22 06:02:14 crc kubenswrapper[4933]: I0122 06:02:14.902984 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe6b55e0-eef0-41a8-8645-1f664e64dee0-utilities\") pod \"redhat-marketplace-7dnz7\" (UID: \"fe6b55e0-eef0-41a8-8645-1f664e64dee0\") " pod="openshift-marketplace/redhat-marketplace-7dnz7" Jan 22 06:02:15 crc kubenswrapper[4933]: I0122 06:02:15.004655 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe6b55e0-eef0-41a8-8645-1f664e64dee0-catalog-content\") pod \"redhat-marketplace-7dnz7\" (UID: \"fe6b55e0-eef0-41a8-8645-1f664e64dee0\") " pod="openshift-marketplace/redhat-marketplace-7dnz7" Jan 22 06:02:15 crc kubenswrapper[4933]: I0122 06:02:15.004726 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe6b55e0-eef0-41a8-8645-1f664e64dee0-utilities\") pod \"redhat-marketplace-7dnz7\" (UID: \"fe6b55e0-eef0-41a8-8645-1f664e64dee0\") " pod="openshift-marketplace/redhat-marketplace-7dnz7" Jan 22 06:02:15 crc kubenswrapper[4933]: I0122 06:02:15.004809 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w7hcc\" (UniqueName: \"kubernetes.io/projected/fe6b55e0-eef0-41a8-8645-1f664e64dee0-kube-api-access-w7hcc\") pod \"redhat-marketplace-7dnz7\" (UID: \"fe6b55e0-eef0-41a8-8645-1f664e64dee0\") " pod="openshift-marketplace/redhat-marketplace-7dnz7" Jan 22 06:02:15 crc kubenswrapper[4933]: I0122 06:02:15.005370 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe6b55e0-eef0-41a8-8645-1f664e64dee0-catalog-content\") pod \"redhat-marketplace-7dnz7\" (UID: \"fe6b55e0-eef0-41a8-8645-1f664e64dee0\") " pod="openshift-marketplace/redhat-marketplace-7dnz7" Jan 22 06:02:15 crc kubenswrapper[4933]: I0122 06:02:15.005468 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe6b55e0-eef0-41a8-8645-1f664e64dee0-utilities\") pod \"redhat-marketplace-7dnz7\" (UID: \"fe6b55e0-eef0-41a8-8645-1f664e64dee0\") " pod="openshift-marketplace/redhat-marketplace-7dnz7" Jan 22 06:02:15 crc kubenswrapper[4933]: I0122 06:02:15.030961 4933 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-w7hcc\" (UniqueName: \"kubernetes.io/projected/fe6b55e0-eef0-41a8-8645-1f664e64dee0-kube-api-access-w7hcc\") pod \"redhat-marketplace-7dnz7\" (UID: \"fe6b55e0-eef0-41a8-8645-1f664e64dee0\") " pod="openshift-marketplace/redhat-marketplace-7dnz7" Jan 22 06:02:15 crc kubenswrapper[4933]: I0122 06:02:15.131304 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7dnz7" Jan 22 06:02:16 crc kubenswrapper[4933]: E0122 06:02:16.920431 4933 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/swift-operator@sha256:445e951df2f21df6d33a466f75917e0f6103052ae751ae11887136e8ab165922" Jan 22 06:02:16 crc kubenswrapper[4933]: E0122 06:02:16.921231 4933 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:445e951df2f21df6d33a466f75917e0f6103052ae751ae11887136e8ab165922,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-428wz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-547cbdb99f-2wjhd_openstack-operators(48e1a8f3-00fd-48a6-be02-7c61f0425809): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 06:02:16 crc kubenswrapper[4933]: E0122 06:02:16.922425 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-2wjhd" podUID="48e1a8f3-00fd-48a6-be02-7c61f0425809" Jan 22 06:02:17 crc kubenswrapper[4933]: E0122 06:02:17.531575 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:445e951df2f21df6d33a466f75917e0f6103052ae751ae11887136e8ab165922\\\"\"" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-2wjhd" podUID="48e1a8f3-00fd-48a6-be02-7c61f0425809" Jan 22 06:02:17 crc kubenswrapper[4933]: E0122 06:02:17.635086 4933 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/octavia-operator@sha256:a8fc8f9d445b1232f446119015b226008b07c6a259f5bebc1fcbb39ec310afe5" Jan 22 06:02:17 crc kubenswrapper[4933]: E0122 06:02:17.635293 4933 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:a8fc8f9d445b1232f446119015b226008b07c6a259f5bebc1fcbb39ec310afe5,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ww9ct,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-7bd9774b6-w25vn_openstack-operators(04683325-6972-455c-9ca5-ddf1fd4b9862): ErrImagePull: rpc error: code = Canceled desc = 
copying config: context canceled" logger="UnhandledError" Jan 22 06:02:17 crc kubenswrapper[4933]: E0122 06:02:17.636620 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-w25vn" podUID="04683325-6972-455c-9ca5-ddf1fd4b9862" Jan 22 06:02:18 crc kubenswrapper[4933]: E0122 06:02:18.186817 4933 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/cinder-operator@sha256:e950ac2df7be78ae0cbcf62fe12ee7a06b628f1903da6fcb741609e857eb1a7f" Jan 22 06:02:18 crc kubenswrapper[4933]: E0122 06:02:18.187021 4933 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/cinder-operator@sha256:e950ac2df7be78ae0cbcf62fe12ee7a06b628f1903da6fcb741609e857eb1a7f,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wqzv9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-operator-controller-manager-69cf5d4557-6ktqg_openstack-operators(53b0d937-20e6-4a4b-b61b-f172c672c43f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 06:02:18 crc kubenswrapper[4933]: E0122 06:02:18.188174 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc 
error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-6ktqg" podUID="53b0d937-20e6-4a4b-b61b-f172c672c43f" Jan 22 06:02:18 crc kubenswrapper[4933]: E0122 06:02:18.540710 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/cinder-operator@sha256:e950ac2df7be78ae0cbcf62fe12ee7a06b628f1903da6fcb741609e857eb1a7f\\\"\"" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-6ktqg" podUID="53b0d937-20e6-4a4b-b61b-f172c672c43f" Jan 22 06:02:18 crc kubenswrapper[4933]: E0122 06:02:18.541484 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:a8fc8f9d445b1232f446119015b226008b07c6a259f5bebc1fcbb39ec310afe5\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-w25vn" podUID="04683325-6972-455c-9ca5-ddf1fd4b9862" Jan 22 06:02:18 crc kubenswrapper[4933]: I0122 06:02:18.760639 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/bee42408-9ab2-4e83-a06b-8cb123a853f9-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-5vl5w\" (UID: \"bee42408-9ab2-4e83-a06b-8cb123a853f9\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-5vl5w" Jan 22 06:02:18 crc kubenswrapper[4933]: E0122 06:02:18.760831 4933 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 22 06:02:18 crc kubenswrapper[4933]: E0122 06:02:18.760912 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bee42408-9ab2-4e83-a06b-8cb123a853f9-cert podName:bee42408-9ab2-4e83-a06b-8cb123a853f9 nodeName:}" failed. No retries permitted until 2026-01-22 06:02:34.760893728 +0000 UTC m=+1002.598019081 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/bee42408-9ab2-4e83-a06b-8cb123a853f9-cert") pod "infra-operator-controller-manager-54ccf4f85d-5vl5w" (UID: "bee42408-9ab2-4e83-a06b-8cb123a853f9") : secret "infra-operator-webhook-server-cert" not found Jan 22 06:02:19 crc kubenswrapper[4933]: I0122 06:02:19.167634 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ae3ccc66-eed1-4750-8af1-7f99673b1323-cert\") pod \"openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns\" (UID: \"ae3ccc66-eed1-4750-8af1-7f99673b1323\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns" Jan 22 06:02:19 crc kubenswrapper[4933]: E0122 06:02:19.167852 4933 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 22 06:02:19 crc kubenswrapper[4933]: E0122 06:02:19.167937 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ae3ccc66-eed1-4750-8af1-7f99673b1323-cert podName:ae3ccc66-eed1-4750-8af1-7f99673b1323 nodeName:}" failed. No retries permitted until 2026-01-22 06:02:35.16791501 +0000 UTC m=+1003.005040423 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/ae3ccc66-eed1-4750-8af1-7f99673b1323-cert") pod "openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns" (UID: "ae3ccc66-eed1-4750-8af1-7f99673b1323") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 22 06:02:19 crc kubenswrapper[4933]: I0122 06:02:19.471849 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-metrics-certs\") pod \"openstack-operator-controller-manager-647bb87bbd-xc9z6\" (UID: \"07b078fc-4665-4e58-934d-f606471d5942\") " pod="openstack-operators/openstack-operator-controller-manager-647bb87bbd-xc9z6" Jan 22 06:02:19 crc kubenswrapper[4933]: E0122 06:02:19.471993 4933 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 22 06:02:19 crc kubenswrapper[4933]: E0122 06:02:19.472995 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-metrics-certs podName:07b078fc-4665-4e58-934d-f606471d5942 nodeName:}" failed. No retries permitted until 2026-01-22 06:02:35.472979481 +0000 UTC m=+1003.310104834 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-metrics-certs") pod "openstack-operator-controller-manager-647bb87bbd-xc9z6" (UID: "07b078fc-4665-4e58-934d-f606471d5942") : secret "metrics-server-cert" not found Jan 22 06:02:19 crc kubenswrapper[4933]: I0122 06:02:19.473564 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-webhook-certs\") pod \"openstack-operator-controller-manager-647bb87bbd-xc9z6\" (UID: \"07b078fc-4665-4e58-934d-f606471d5942\") " pod="openstack-operators/openstack-operator-controller-manager-647bb87bbd-xc9z6" Jan 22 06:02:19 crc kubenswrapper[4933]: E0122 06:02:19.473662 4933 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 22 06:02:19 crc kubenswrapper[4933]: E0122 06:02:19.474123 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-webhook-certs podName:07b078fc-4665-4e58-934d-f606471d5942 nodeName:}" failed. No retries permitted until 2026-01-22 06:02:35.474059528 +0000 UTC m=+1003.311184911 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-webhook-certs") pod "openstack-operator-controller-manager-647bb87bbd-xc9z6" (UID: "07b078fc-4665-4e58-934d-f606471d5942") : secret "webhook-server-cert" not found Jan 22 06:02:19 crc kubenswrapper[4933]: E0122 06:02:19.872364 4933 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/nova-operator@sha256:4e995cfa360a9d595a01b9c0541ab934692f2374203cb5738127dd784f793831" Jan 22 06:02:19 crc kubenswrapper[4933]: E0122 06:02:19.872547 4933 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:4e995cfa360a9d595a01b9c0541ab934692f2374203cb5738127dd784f793831,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-gmjz5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-6b8bc8d87d-czc9v_openstack-operators(300ef4d3-6c13-4a9d-96e6-a707abccca2c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 06:02:19 crc kubenswrapper[4933]: E0122 06:02:19.873751 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-czc9v" 
podUID="300ef4d3-6c13-4a9d-96e6-a707abccca2c" Jan 22 06:02:20 crc kubenswrapper[4933]: E0122 06:02:20.510852 4933 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/keystone-operator@sha256:8e340ff11922b38e811261de96982e1aff5f4eb8f225d1d9f5973025a4fe8349" Jan 22 06:02:20 crc kubenswrapper[4933]: E0122 06:02:20.511011 4933 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:8e340ff11922b38e811261de96982e1aff5f4eb8f225d1d9f5973025a4fe8349,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6nqj8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-b8b6d4659-pkpcl_openstack-operators(d3f5d746-ae61-4d36-bcce-4530c7f7a899): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 06:02:20 crc kubenswrapper[4933]: E0122 06:02:20.513394 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-pkpcl" podUID="d3f5d746-ae61-4d36-bcce-4530c7f7a899" Jan 22 06:02:20 crc kubenswrapper[4933]: E0122 06:02:20.564137 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off 
pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:4e995cfa360a9d595a01b9c0541ab934692f2374203cb5738127dd784f793831\\\"\"" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-czc9v" podUID="300ef4d3-6c13-4a9d-96e6-a707abccca2c" Jan 22 06:02:20 crc kubenswrapper[4933]: E0122 06:02:20.564151 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/keystone-operator@sha256:8e340ff11922b38e811261de96982e1aff5f4eb8f225d1d9f5973025a4fe8349\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-pkpcl" podUID="d3f5d746-ae61-4d36-bcce-4530c7f7a899" Jan 22 06:02:20 crc kubenswrapper[4933]: I0122 06:02:20.963589 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7dnz7"] Jan 22 06:02:21 crc kubenswrapper[4933]: W0122 06:02:21.589404 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe6b55e0_eef0_41a8_8645_1f664e64dee0.slice/crio-64f857f4104edbe111af9a862a71d66e3e04dd1fdd639a29d25b6df7307dde5d WatchSource:0}: Error finding container 64f857f4104edbe111af9a862a71d66e3e04dd1fdd639a29d25b6df7307dde5d: Status 404 returned error can't find the container with id 64f857f4104edbe111af9a862a71d66e3e04dd1fdd639a29d25b6df7307dde5d Jan 22 06:02:22 crc kubenswrapper[4933]: I0122 06:02:22.587171 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-dmwlv" event={"ID":"ab3830be-ff45-443a-9089-29438fca5c75","Type":"ContainerStarted","Data":"de0da6e885d472945f2b2c3a6d1f4bc7376fe125a350f7cc74c5e1855c96c883"} Jan 22 06:02:22 crc kubenswrapper[4933]: I0122 06:02:22.587822 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-dmwlv" Jan 22 06:02:22 crc kubenswrapper[4933]: I0122 06:02:22.588827 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7dnz7" event={"ID":"fe6b55e0-eef0-41a8-8645-1f664e64dee0","Type":"ContainerStarted","Data":"64f857f4104edbe111af9a862a71d66e3e04dd1fdd639a29d25b6df7307dde5d"} Jan 22 06:02:22 crc kubenswrapper[4933]: I0122 06:02:22.627877 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-dmwlv" podStartSLOduration=4.322584956 podStartE2EDuration="20.627859627s" podCreationTimestamp="2026-01-22 06:02:02 +0000 UTC" firstStartedPulling="2026-01-22 06:02:04.227605418 +0000 UTC m=+972.064730771" lastFinishedPulling="2026-01-22 06:02:20.532880089 +0000 UTC m=+988.370005442" observedRunningTime="2026-01-22 06:02:22.603096801 +0000 UTC m=+990.440222164" watchObservedRunningTime="2026-01-22 06:02:22.627859627 +0000 UTC m=+990.464984980" Jan 22 06:02:31 crc kubenswrapper[4933]: E0122 06:02:31.287258 4933 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/watcher-operator@sha256:2d6d13b3c28e45c6bec980b8808dda8da4723ae87e66d04f53d52c3b3c51612b" Jan 22 06:02:31 crc kubenswrapper[4933]: E0122 06:02:31.287909 4933 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:2d6d13b3c28e45c6bec980b8808dda8da4723ae87e66d04f53d52c3b3c51612b,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-p7l89,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-5ffb9c6597-gtcc5_openstack-operators(b5af85fb-e3ef-41b7-8c6b-7afddd5200bd): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 06:02:31 crc kubenswrapper[4933]: E0122 06:02:31.289158 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-gtcc5" podUID="b5af85fb-e3ef-41b7-8c6b-7afddd5200bd" Jan 22 06:02:32 crc kubenswrapper[4933]: E0122 06:02:32.175270 4933 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/placement-operator@sha256:65cfe5b9d5b0571aaf8ff9840b12cc56e90ca4cef162dd260c3a9fa2b52c6dd0" Jan 22 06:02:32 crc kubenswrapper[4933]: E0122 06:02:32.176024 4933 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:65cfe5b9d5b0571aaf8ff9840b12cc56e90ca4cef162dd260c3a9fa2b52c6dd0,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-tqbbt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-5d646b7d76-m6g48_openstack-operators(2892be27-6da5-4a19-a30e-36f5907f5d70): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 06:02:32 crc kubenswrapper[4933]: E0122 06:02:32.177474 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-m6g48" podUID="2892be27-6da5-4a19-a30e-36f5907f5d70" Jan 22 06:02:32 crc kubenswrapper[4933]: I0122 06:02:32.676270 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-sb8mm" event={"ID":"c2a2b482-2d1c-4c3b-b1dc-7d3f291b665e","Type":"ContainerStarted","Data":"390f5e16a79448fc8f95deb94f80003631fffbf749ba413fc0d5d47e761ddf91"} Jan 22 06:02:32 crc kubenswrapper[4933]: I0122 06:02:32.676420 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-sb8mm" Jan 22 06:02:32 crc kubenswrapper[4933]: I0122 06:02:32.678493 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-dtg9q" event={"ID":"e0335df2-7bd1-4b61-8057-7663a730d2ff","Type":"ContainerStarted","Data":"1941b290fca10555f2717cd2196768068873eaf5d5d4f2a198735b96fd907047"} Jan 22 
06:02:32 crc kubenswrapper[4933]: I0122 06:02:32.678625 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-dtg9q" Jan 22 06:02:32 crc kubenswrapper[4933]: I0122 06:02:32.694214 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-sb8mm" podStartSLOduration=15.147109262 podStartE2EDuration="30.694195789s" podCreationTimestamp="2026-01-22 06:02:02 +0000 UTC" firstStartedPulling="2026-01-22 06:02:04.937909279 +0000 UTC m=+972.775034622" lastFinishedPulling="2026-01-22 06:02:20.484995796 +0000 UTC m=+988.322121149" observedRunningTime="2026-01-22 06:02:32.688935718 +0000 UTC m=+1000.526061071" watchObservedRunningTime="2026-01-22 06:02:32.694195789 +0000 UTC m=+1000.531321142" Jan 22 06:02:32 crc kubenswrapper[4933]: I0122 06:02:32.712815 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-dtg9q" podStartSLOduration=15.109814108 podStartE2EDuration="30.712798102s" podCreationTimestamp="2026-01-22 06:02:02 +0000 UTC" firstStartedPulling="2026-01-22 06:02:04.881764956 +0000 UTC m=+972.718890309" lastFinishedPulling="2026-01-22 06:02:20.48474895 +0000 UTC m=+988.321874303" observedRunningTime="2026-01-22 06:02:32.711586302 +0000 UTC m=+1000.548711655" watchObservedRunningTime="2026-01-22 06:02:32.712798102 +0000 UTC m=+1000.549923455" Jan 22 06:02:33 crc kubenswrapper[4933]: I0122 06:02:33.197188 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-dmwlv" Jan 22 06:02:34 crc kubenswrapper[4933]: E0122 06:02:34.413014 4933 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2" Jan 22 06:02:34 crc kubenswrapper[4933]: E0122 06:02:34.413544 4933 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-fw878,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-rdbn6_openstack-operators(3028718b-d03f-414e-834d-93eb28eeb369): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 06:02:34 crc kubenswrapper[4933]: E0122 06:02:34.414770 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rdbn6" podUID="3028718b-d03f-414e-834d-93eb28eeb369" Jan 22 06:02:34 crc kubenswrapper[4933]: I0122 06:02:34.704572 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7dnz7" event={"ID":"fe6b55e0-eef0-41a8-8645-1f664e64dee0","Type":"ContainerStarted","Data":"7ac1736bb37c6f1ea5a37bf847843c219b1db908f29e274a853f005a937d6043"} Jan 22 06:02:34 crc kubenswrapper[4933]: I0122 06:02:34.706780 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-7q7hb" event={"ID":"289a66a7-9513-4b66-990a-3d9f11919531","Type":"ContainerStarted","Data":"77cc0676aa68dc70fbbabef737755fce91471779587c00da19180e84e3e6c6d9"} Jan 22 06:02:34 crc kubenswrapper[4933]: I0122 06:02:34.707550 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-7q7hb" Jan 22 06:02:34 crc kubenswrapper[4933]: I0122 06:02:34.724321 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-p6w9k" event={"ID":"10fc162d-8e83-4741-88be-c1e8dd9f291a","Type":"ContainerStarted","Data":"797cc66d396cb730eb3147eeecc242c6465e1cfef48a867c2e3d71f87ae791dc"} Jan 22 06:02:34 crc kubenswrapper[4933]: I0122 06:02:34.724405 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-p6w9k" Jan 22 06:02:34 crc kubenswrapper[4933]: I0122 06:02:34.741342 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-7q7hb" podStartSLOduration=17.183239911 podStartE2EDuration="32.741319415s" podCreationTimestamp="2026-01-22 06:02:02 +0000 UTC" firstStartedPulling="2026-01-22 06:02:04.955400282 +0000 UTC m=+972.792525625" lastFinishedPulling="2026-01-22 06:02:20.513479776 +0000 UTC m=+988.350605129" observedRunningTime="2026-01-22 06:02:34.728550067 +0000 UTC m=+1002.565675430" watchObservedRunningTime="2026-01-22 06:02:34.741319415 +0000 
UTC m=+1002.578444778" Jan 22 06:02:34 crc kubenswrapper[4933]: I0122 06:02:34.813382 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/bee42408-9ab2-4e83-a06b-8cb123a853f9-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-5vl5w\" (UID: \"bee42408-9ab2-4e83-a06b-8cb123a853f9\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-5vl5w" Jan 22 06:02:34 crc kubenswrapper[4933]: I0122 06:02:34.820902 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/bee42408-9ab2-4e83-a06b-8cb123a853f9-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-5vl5w\" (UID: \"bee42408-9ab2-4e83-a06b-8cb123a853f9\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-5vl5w" Jan 22 06:02:34 crc kubenswrapper[4933]: I0122 06:02:34.954888 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-5vl5w" Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.221793 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ae3ccc66-eed1-4750-8af1-7f99673b1323-cert\") pod \"openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns\" (UID: \"ae3ccc66-eed1-4750-8af1-7f99673b1323\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns" Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.229942 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ae3ccc66-eed1-4750-8af1-7f99673b1323-cert\") pod \"openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns\" (UID: \"ae3ccc66-eed1-4750-8af1-7f99673b1323\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns" Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.470687 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns" Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.529256 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-webhook-certs\") pod \"openstack-operator-controller-manager-647bb87bbd-xc9z6\" (UID: \"07b078fc-4665-4e58-934d-f606471d5942\") " pod="openstack-operators/openstack-operator-controller-manager-647bb87bbd-xc9z6" Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.529365 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-metrics-certs\") pod \"openstack-operator-controller-manager-647bb87bbd-xc9z6\" (UID: \"07b078fc-4665-4e58-934d-f606471d5942\") " pod="openstack-operators/openstack-operator-controller-manager-647bb87bbd-xc9z6" Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.536796 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-metrics-certs\") pod \"openstack-operator-controller-manager-647bb87bbd-xc9z6\" (UID: \"07b078fc-4665-4e58-934d-f606471d5942\") " pod="openstack-operators/openstack-operator-controller-manager-647bb87bbd-xc9z6" Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.537824 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/07b078fc-4665-4e58-934d-f606471d5942-webhook-certs\") pod \"openstack-operator-controller-manager-647bb87bbd-xc9z6\" (UID: \"07b078fc-4665-4e58-934d-f606471d5942\") " pod="openstack-operators/openstack-operator-controller-manager-647bb87bbd-xc9z6" Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.591017 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-p6w9k" podStartSLOduration=17.223948798 podStartE2EDuration="33.590999755s" podCreationTimestamp="2026-01-22 06:02:02 +0000 UTC" firstStartedPulling="2026-01-22 06:02:04.146299385 +0000 UTC m=+971.983424728" lastFinishedPulling="2026-01-22 06:02:20.513350332 +0000 UTC m=+988.350475685" observedRunningTime="2026-01-22 06:02:34.751443157 +0000 UTC m=+1002.588568520" watchObservedRunningTime="2026-01-22 06:02:35.590999755 +0000 UTC m=+1003.428125108" Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.596296 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-54ccf4f85d-5vl5w"] Jan 22 06:02:35 crc kubenswrapper[4933]: W0122 06:02:35.602545 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbee42408_9ab2_4e83_a06b_8cb123a853f9.slice/crio-56de424e5f927ca1d689b54c05c801e244ec52aa6e04316df8861e37777afc14 WatchSource:0}: Error finding container 56de424e5f927ca1d689b54c05c801e244ec52aa6e04316df8861e37777afc14: Status 404 returned error can't find the container with id 56de424e5f927ca1d689b54c05c801e244ec52aa6e04316df8861e37777afc14 Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.734032 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-w25vn" 
event={"ID":"04683325-6972-455c-9ca5-ddf1fd4b9862","Type":"ContainerStarted","Data":"0e703935beb83e2ae07ac8d3e2a0e8a8158b4f0778b4c72c4777330287aa90b7"} Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.734887 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-w25vn" Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.737277 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-5vl5w" event={"ID":"bee42408-9ab2-4e83-a06b-8cb123a853f9","Type":"ContainerStarted","Data":"56de424e5f927ca1d689b54c05c801e244ec52aa6e04316df8861e37777afc14"} Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.749417 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-dwrg6" event={"ID":"34f0d75a-dc72-4dad-82a6-512c1351210b","Type":"ContainerStarted","Data":"817d8dcb2e1dcdee0f7beb517f843f9feea7f8165219b0bc8fa2a43e9fd79d53"} Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.750065 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-dwrg6" Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.751406 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-pkpcl" event={"ID":"d3f5d746-ae61-4d36-bcce-4530c7f7a899","Type":"ContainerStarted","Data":"ca0c20d8c52419f97e2227c8ce7aad7f44349b6682b69afb71ba14eb08b2ba12"} Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.751765 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-pkpcl" Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.752779 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-czc9v" event={"ID":"300ef4d3-6c13-4a9d-96e6-a707abccca2c","Type":"ContainerStarted","Data":"0825e709ababe601b37c319a91d573f6d56f55548f5e432280c7b28859283660"} Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.753170 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-czc9v" Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.754106 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-ghk9g" event={"ID":"45469fd5-7d9d-44f4-82a1-61d82f8e2dc8","Type":"ContainerStarted","Data":"be8ce866f909d95277b79781008b49dd27518b47bd4f284970b3dd28816ace5b"} Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.754463 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-ghk9g" Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.755507 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-556x5" event={"ID":"f5292b84-8cb2-4f43-96f9-6304705b15bc","Type":"ContainerStarted","Data":"cc8a5c9dcb19b509dd654b76b9cc2cd2db87421a3fa7b8d635f10443baa14491"} Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.755811 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-556x5" Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 
06:02:35.756768 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-2wjhd" event={"ID":"48e1a8f3-00fd-48a6-be02-7c61f0425809","Type":"ContainerStarted","Data":"a80c95bc8b2ecddeb27e8ae0ac9468a422087f1873625fad9315ae935f22ebdd"} Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.757175 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-2wjhd" Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.758307 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-9rc7q" event={"ID":"0a5c558d-f69d-4299-97e2-00326ec7e416","Type":"ContainerStarted","Data":"7a276f63b15fc4d7a6a1e8bad7d81ec1be10c2eb2623d0d88bd54ed05fc84366"} Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.758639 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-9rc7q" Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.769388 4933 generic.go:334] "Generic (PLEG): container finished" podID="fe6b55e0-eef0-41a8-8645-1f664e64dee0" containerID="7ac1736bb37c6f1ea5a37bf847843c219b1db908f29e274a853f005a937d6043" exitCode=0 Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.769935 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7dnz7" event={"ID":"fe6b55e0-eef0-41a8-8645-1f664e64dee0","Type":"ContainerDied","Data":"7ac1736bb37c6f1ea5a37bf847843c219b1db908f29e274a853f005a937d6043"} Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.770669 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-w25vn" podStartSLOduration=4.208712885 podStartE2EDuration="33.770654581s" podCreationTimestamp="2026-01-22 06:02:02 +0000 UTC" firstStartedPulling="2026-01-22 06:02:04.937122433 +0000 UTC m=+972.774247776" lastFinishedPulling="2026-01-22 06:02:34.499064119 +0000 UTC m=+1002.336189472" observedRunningTime="2026-01-22 06:02:35.766472497 +0000 UTC m=+1003.603597860" watchObservedRunningTime="2026-01-22 06:02:35.770654581 +0000 UTC m=+1003.607779934" Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.777208 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-nkc7j" event={"ID":"21941523-226c-4a4c-a099-51df0766a712","Type":"ContainerStarted","Data":"b1781d70e1e33ecbd78c6b9fa2d132d56371dd7fd285bc7cc18dc044b54b107e"} Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.777246 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-nkc7j" Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.777333 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-647bb87bbd-xc9z6" Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.786457 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-x7t54" event={"ID":"f4f610a0-0ede-4d86-b1d4-fcd4a70d9c1c","Type":"ContainerStarted","Data":"e8cb230eff9f4ba695921c657e60eea2909728034274657a83935f193566bbfe"} Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.787155 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-x7t54" Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.788886 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-czc9v" podStartSLOduration=4.245582239 podStartE2EDuration="33.788870965s" podCreationTimestamp="2026-01-22 06:02:02 +0000 UTC" firstStartedPulling="2026-01-22 06:02:04.895806709 +0000 UTC m=+972.732932062" lastFinishedPulling="2026-01-22 06:02:34.439095425 +0000 UTC m=+1002.276220788" observedRunningTime="2026-01-22 06:02:35.783246545 +0000 UTC m=+1003.620371898" watchObservedRunningTime="2026-01-22 06:02:35.788870965 +0000 UTC m=+1003.625996318" Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.810242 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-9rc7q" podStartSLOduration=4.063747342 podStartE2EDuration="32.810227817s" podCreationTimestamp="2026-01-22 06:02:03 +0000 UTC" firstStartedPulling="2026-01-22 06:02:04.959809851 +0000 UTC m=+972.796935204" lastFinishedPulling="2026-01-22 06:02:33.706290326 +0000 UTC m=+1001.543415679" observedRunningTime="2026-01-22 06:02:35.807381796 +0000 UTC m=+1003.644507149" watchObservedRunningTime="2026-01-22 06:02:35.810227817 +0000 UTC m=+1003.647353160" Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.822314 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-6ktqg" event={"ID":"53b0d937-20e6-4a4b-b61b-f172c672c43f","Type":"ContainerStarted","Data":"04c57d9de9b4d0de7b7b2a722fb62840279f509ac2ad81164d7c07740fb4043c"} Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.823063 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-6ktqg" Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.836807 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-z6g5l" event={"ID":"78b94bce-a7a5-471f-bab2-f57baeff12b6","Type":"ContainerStarted","Data":"843a5ba89db1d41168ffa36915029f2bf24c647045bd71019f32cbebbbccda49"} Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.837154 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-z6g5l" Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.856848 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-dwrg6" podStartSLOduration=17.740285788 podStartE2EDuration="33.856832819s" podCreationTimestamp="2026-01-22 06:02:02 +0000 UTC" firstStartedPulling="2026-01-22 06:02:04.367765078 +0000 UTC m=+972.204890431" lastFinishedPulling="2026-01-22 
06:02:20.484312109 +0000 UTC m=+988.321437462" observedRunningTime="2026-01-22 06:02:35.839002965 +0000 UTC m=+1003.676128318" watchObservedRunningTime="2026-01-22 06:02:35.856832819 +0000 UTC m=+1003.693958172" Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.892233 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-2wjhd" podStartSLOduration=3.298514555 podStartE2EDuration="32.89221743s" podCreationTimestamp="2026-01-22 06:02:03 +0000 UTC" firstStartedPulling="2026-01-22 06:02:04.938686354 +0000 UTC m=+972.775811707" lastFinishedPulling="2026-01-22 06:02:34.532389229 +0000 UTC m=+1002.369514582" observedRunningTime="2026-01-22 06:02:35.88742257 +0000 UTC m=+1003.724548083" watchObservedRunningTime="2026-01-22 06:02:35.89221743 +0000 UTC m=+1003.729342783" Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.896873 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns"] Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.949612 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-pkpcl" podStartSLOduration=3.128830713 podStartE2EDuration="33.94959177s" podCreationTimestamp="2026-01-22 06:02:02 +0000 UTC" firstStartedPulling="2026-01-22 06:02:04.36939101 +0000 UTC m=+972.206516363" lastFinishedPulling="2026-01-22 06:02:35.190152067 +0000 UTC m=+1003.027277420" observedRunningTime="2026-01-22 06:02:35.947059307 +0000 UTC m=+1003.784184660" watchObservedRunningTime="2026-01-22 06:02:35.94959177 +0000 UTC m=+1003.786717123" Jan 22 06:02:35 crc kubenswrapper[4933]: I0122 06:02:35.986809 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-ghk9g" podStartSLOduration=17.700598575 podStartE2EDuration="33.986791226s" podCreationTimestamp="2026-01-22 06:02:02 +0000 UTC" firstStartedPulling="2026-01-22 06:02:04.247714694 +0000 UTC m=+972.084840047" lastFinishedPulling="2026-01-22 06:02:20.533907345 +0000 UTC m=+988.371032698" observedRunningTime="2026-01-22 06:02:35.980280905 +0000 UTC m=+1003.817406258" watchObservedRunningTime="2026-01-22 06:02:35.986791226 +0000 UTC m=+1003.823916579" Jan 22 06:02:36 crc kubenswrapper[4933]: I0122 06:02:36.018799 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-556x5" podStartSLOduration=3.541887356 podStartE2EDuration="33.018781883s" podCreationTimestamp="2026-01-22 06:02:03 +0000 UTC" firstStartedPulling="2026-01-22 06:02:04.960401483 +0000 UTC m=+972.797526836" lastFinishedPulling="2026-01-22 06:02:34.43729601 +0000 UTC m=+1002.274421363" observedRunningTime="2026-01-22 06:02:36.017880422 +0000 UTC m=+1003.855005775" watchObservedRunningTime="2026-01-22 06:02:36.018781883 +0000 UTC m=+1003.855907236" Jan 22 06:02:36 crc kubenswrapper[4933]: I0122 06:02:36.112509 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-x7t54" podStartSLOduration=18.472524946 podStartE2EDuration="34.112490158s" podCreationTimestamp="2026-01-22 06:02:02 +0000 UTC" firstStartedPulling="2026-01-22 06:02:04.895679076 +0000 UTC m=+972.732804429" lastFinishedPulling="2026-01-22 06:02:20.535644288 +0000 UTC m=+988.372769641" 
observedRunningTime="2026-01-22 06:02:36.090337676 +0000 UTC m=+1003.927463029" watchObservedRunningTime="2026-01-22 06:02:36.112490158 +0000 UTC m=+1003.949615511" Jan 22 06:02:36 crc kubenswrapper[4933]: I0122 06:02:36.171263 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-z6g5l" podStartSLOduration=4.679177889 podStartE2EDuration="34.171237412s" podCreationTimestamp="2026-01-22 06:02:02 +0000 UTC" firstStartedPulling="2026-01-22 06:02:04.945253287 +0000 UTC m=+972.782378640" lastFinishedPulling="2026-01-22 06:02:34.43731281 +0000 UTC m=+1002.274438163" observedRunningTime="2026-01-22 06:02:36.170949705 +0000 UTC m=+1004.008075048" watchObservedRunningTime="2026-01-22 06:02:36.171237412 +0000 UTC m=+1004.008362855" Jan 22 06:02:36 crc kubenswrapper[4933]: I0122 06:02:36.240545 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-nkc7j" podStartSLOduration=18.614211298 podStartE2EDuration="34.240529009s" podCreationTimestamp="2026-01-22 06:02:02 +0000 UTC" firstStartedPulling="2026-01-22 06:02:04.9082419 +0000 UTC m=+972.745367253" lastFinishedPulling="2026-01-22 06:02:20.534559611 +0000 UTC m=+988.371684964" observedRunningTime="2026-01-22 06:02:36.23935557 +0000 UTC m=+1004.076480923" watchObservedRunningTime="2026-01-22 06:02:36.240529009 +0000 UTC m=+1004.077654362" Jan 22 06:02:36 crc kubenswrapper[4933]: I0122 06:02:36.275315 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-6ktqg" podStartSLOduration=4.689872862 podStartE2EDuration="34.275290954s" podCreationTimestamp="2026-01-22 06:02:02 +0000 UTC" firstStartedPulling="2026-01-22 06:02:04.914327713 +0000 UTC m=+972.751453056" lastFinishedPulling="2026-01-22 06:02:34.499745795 +0000 UTC m=+1002.336871148" observedRunningTime="2026-01-22 06:02:36.263469 +0000 UTC m=+1004.100594353" watchObservedRunningTime="2026-01-22 06:02:36.275290954 +0000 UTC m=+1004.112416327" Jan 22 06:02:36 crc kubenswrapper[4933]: I0122 06:02:36.567651 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-647bb87bbd-xc9z6"] Jan 22 06:02:36 crc kubenswrapper[4933]: W0122 06:02:36.572302 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod07b078fc_4665_4e58_934d_f606471d5942.slice/crio-95bf8a18a02263c5cdcfac52076eb2c5a20a0ccf60c434fc48735c108512db74 WatchSource:0}: Error finding container 95bf8a18a02263c5cdcfac52076eb2c5a20a0ccf60c434fc48735c108512db74: Status 404 returned error can't find the container with id 95bf8a18a02263c5cdcfac52076eb2c5a20a0ccf60c434fc48735c108512db74 Jan 22 06:02:36 crc kubenswrapper[4933]: I0122 06:02:36.850990 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-647bb87bbd-xc9z6" event={"ID":"07b078fc-4665-4e58-934d-f606471d5942","Type":"ContainerStarted","Data":"7842800ac5add323c8701c6af85c06d44ce26eaf01f54ee1151dfaecc4bb5291"} Jan 22 06:02:36 crc kubenswrapper[4933]: I0122 06:02:36.851035 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-647bb87bbd-xc9z6" 
event={"ID":"07b078fc-4665-4e58-934d-f606471d5942","Type":"ContainerStarted","Data":"95bf8a18a02263c5cdcfac52076eb2c5a20a0ccf60c434fc48735c108512db74"} Jan 22 06:02:36 crc kubenswrapper[4933]: I0122 06:02:36.851128 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-647bb87bbd-xc9z6" Jan 22 06:02:36 crc kubenswrapper[4933]: I0122 06:02:36.854236 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7dnz7" event={"ID":"fe6b55e0-eef0-41a8-8645-1f664e64dee0","Type":"ContainerStarted","Data":"abee0a345dd6c7a281996100969aae5c8a892db44175737a831b7bf8f0f5a0e5"} Jan 22 06:02:36 crc kubenswrapper[4933]: I0122 06:02:36.857020 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns" event={"ID":"ae3ccc66-eed1-4750-8af1-7f99673b1323","Type":"ContainerStarted","Data":"fad810b0f7c1d909e1fe8990f1ed93e0d49f3f9727427ba0ffe386b78f404653"} Jan 22 06:02:36 crc kubenswrapper[4933]: I0122 06:02:36.884562 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-647bb87bbd-xc9z6" podStartSLOduration=33.884538625 podStartE2EDuration="33.884538625s" podCreationTimestamp="2026-01-22 06:02:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:02:36.878263238 +0000 UTC m=+1004.715388591" watchObservedRunningTime="2026-01-22 06:02:36.884538625 +0000 UTC m=+1004.721663978" Jan 22 06:02:37 crc kubenswrapper[4933]: I0122 06:02:37.871139 4933 generic.go:334] "Generic (PLEG): container finished" podID="fe6b55e0-eef0-41a8-8645-1f664e64dee0" containerID="abee0a345dd6c7a281996100969aae5c8a892db44175737a831b7bf8f0f5a0e5" exitCode=0 Jan 22 06:02:37 crc kubenswrapper[4933]: I0122 06:02:37.871750 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7dnz7" event={"ID":"fe6b55e0-eef0-41a8-8645-1f664e64dee0","Type":"ContainerDied","Data":"abee0a345dd6c7a281996100969aae5c8a892db44175737a831b7bf8f0f5a0e5"} Jan 22 06:02:37 crc kubenswrapper[4933]: I0122 06:02:37.871806 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7dnz7" event={"ID":"fe6b55e0-eef0-41a8-8645-1f664e64dee0","Type":"ContainerStarted","Data":"d33c38d55d5edfa39ca2c4a3467ffafe06892a0f32260e4f37ea3de2c7a844fc"} Jan 22 06:02:37 crc kubenswrapper[4933]: I0122 06:02:37.895146 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-7dnz7" podStartSLOduration=22.379786858 podStartE2EDuration="23.895127954s" podCreationTimestamp="2026-01-22 06:02:14 +0000 UTC" firstStartedPulling="2026-01-22 06:02:35.771029731 +0000 UTC m=+1003.608155084" lastFinishedPulling="2026-01-22 06:02:37.286370827 +0000 UTC m=+1005.123496180" observedRunningTime="2026-01-22 06:02:37.890059369 +0000 UTC m=+1005.727184722" watchObservedRunningTime="2026-01-22 06:02:37.895127954 +0000 UTC m=+1005.732253297" Jan 22 06:02:39 crc kubenswrapper[4933]: I0122 06:02:39.892526 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns" event={"ID":"ae3ccc66-eed1-4750-8af1-7f99673b1323","Type":"ContainerStarted","Data":"4ee83bda931a163cf826c9e277416e374bb8c562bef35b81098fc7cee2310ce3"} Jan 
Jan 22 06:02:39 crc kubenswrapper[4933]: I0122 06:02:39.893456 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns"
Jan 22 06:02:39 crc kubenswrapper[4933]: I0122 06:02:39.895608 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-5vl5w" event={"ID":"bee42408-9ab2-4e83-a06b-8cb123a853f9","Type":"ContainerStarted","Data":"be940204735c0c04c8a751935451af4d5f84a93ea1f2220a2e26514f5210e030"}
Jan 22 06:02:39 crc kubenswrapper[4933]: I0122 06:02:39.895823 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-5vl5w"
Jan 22 06:02:39 crc kubenswrapper[4933]: I0122 06:02:39.944120 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns" podStartSLOduration=34.767613923 podStartE2EDuration="37.944103577s" podCreationTimestamp="2026-01-22 06:02:02 +0000 UTC" firstStartedPulling="2026-01-22 06:02:35.953456187 +0000 UTC m=+1003.790581540" lastFinishedPulling="2026-01-22 06:02:39.129945841 +0000 UTC m=+1006.967071194" observedRunningTime="2026-01-22 06:02:39.942596439 +0000 UTC m=+1007.779721812" watchObservedRunningTime="2026-01-22 06:02:39.944103577 +0000 UTC m=+1007.781228950"
Jan 22 06:02:39 crc kubenswrapper[4933]: I0122 06:02:39.973282 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-5vl5w" podStartSLOduration=34.45407833 podStartE2EDuration="37.973263694s" podCreationTimestamp="2026-01-22 06:02:02 +0000 UTC" firstStartedPulling="2026-01-22 06:02:35.60445393 +0000 UTC m=+1003.441579283" lastFinishedPulling="2026-01-22 06:02:39.123639284 +0000 UTC m=+1006.960764647" observedRunningTime="2026-01-22 06:02:39.970279208 +0000 UTC m=+1007.807404561" watchObservedRunningTime="2026-01-22 06:02:39.973263694 +0000 UTC m=+1007.810389047"
Jan 22 06:02:40 crc kubenswrapper[4933]: I0122 06:02:40.943041 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 06:02:40 crc kubenswrapper[4933]: I0122 06:02:40.943398 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 06:02:43 crc kubenswrapper[4933]: I0122 06:02:43.136244 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-p6w9k"
Jan 22 06:02:43 crc kubenswrapper[4933]: I0122 06:02:43.145279 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-ghk9g"
Jan 22 06:02:43 crc kubenswrapper[4933]: I0122 06:02:43.254973 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-pkpcl"
Jan 22 06:02:43 crc kubenswrapper[4933]: I0122 06:02:43.267128 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-dwrg6"
Jan 22 06:02:43 crc kubenswrapper[4933]: I0122 06:02:43.300576 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-nkc7j"
Jan 22 06:02:43 crc kubenswrapper[4933]: I0122 06:02:43.363784 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-z6g5l"
Jan 22 06:02:43 crc kubenswrapper[4933]: I0122 06:02:43.390866 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-6ktqg"
Jan 22 06:02:43 crc kubenswrapper[4933]: I0122 06:02:43.401628 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-sb8mm"
Jan 22 06:02:43 crc kubenswrapper[4933]: I0122 06:02:43.422414 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-x7t54"
Jan 22 06:02:43 crc kubenswrapper[4933]: E0122 06:02:43.493735 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:2d6d13b3c28e45c6bec980b8808dda8da4723ae87e66d04f53d52c3b3c51612b\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-gtcc5" podUID="b5af85fb-e3ef-41b7-8c6b-7afddd5200bd"
Jan 22 06:02:43 crc kubenswrapper[4933]: I0122 06:02:43.582891 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-dtg9q"
Jan 22 06:02:43 crc kubenswrapper[4933]: I0122 06:02:43.583132 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-czc9v"
Jan 22 06:02:43 crc kubenswrapper[4933]: I0122 06:02:43.590519 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-w25vn"
Jan 22 06:02:43 crc kubenswrapper[4933]: I0122 06:02:43.642367 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-7q7hb"
Jan 22 06:02:43 crc kubenswrapper[4933]: I0122 06:02:43.734334 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-2wjhd"
Jan 22 06:02:43 crc kubenswrapper[4933]: I0122 06:02:43.784114 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-556x5"
Jan 22 06:02:43 crc kubenswrapper[4933]: I0122 06:02:43.798051 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-9rc7q"
Jan 22 06:02:44 crc kubenswrapper[4933]: I0122 06:02:44.967713 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-5vl5w"
probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-7dnz7" Jan 22 06:02:45 crc kubenswrapper[4933]: I0122 06:02:45.131943 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-7dnz7" Jan 22 06:02:45 crc kubenswrapper[4933]: I0122 06:02:45.190534 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-7dnz7" Jan 22 06:02:45 crc kubenswrapper[4933]: I0122 06:02:45.478442 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns" Jan 22 06:02:45 crc kubenswrapper[4933]: E0122 06:02:45.491784 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:65cfe5b9d5b0571aaf8ff9840b12cc56e90ca4cef162dd260c3a9fa2b52c6dd0\\\"\"" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-m6g48" podUID="2892be27-6da5-4a19-a30e-36f5907f5d70" Jan 22 06:02:45 crc kubenswrapper[4933]: I0122 06:02:45.785503 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-647bb87bbd-xc9z6" Jan 22 06:02:45 crc kubenswrapper[4933]: I0122 06:02:45.987786 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-7dnz7" Jan 22 06:02:46 crc kubenswrapper[4933]: I0122 06:02:46.434526 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-7dnz7"] Jan 22 06:02:47 crc kubenswrapper[4933]: I0122 06:02:47.956613 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-7dnz7" podUID="fe6b55e0-eef0-41a8-8645-1f664e64dee0" containerName="registry-server" containerID="cri-o://d33c38d55d5edfa39ca2c4a3467ffafe06892a0f32260e4f37ea3de2c7a844fc" gracePeriod=2 Jan 22 06:02:48 crc kubenswrapper[4933]: E0122 06:02:48.491857 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rdbn6" podUID="3028718b-d03f-414e-834d-93eb28eeb369" Jan 22 06:02:48 crc kubenswrapper[4933]: I0122 06:02:48.892163 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7dnz7" Jan 22 06:02:48 crc kubenswrapper[4933]: I0122 06:02:48.964623 4933 generic.go:334] "Generic (PLEG): container finished" podID="fe6b55e0-eef0-41a8-8645-1f664e64dee0" containerID="d33c38d55d5edfa39ca2c4a3467ffafe06892a0f32260e4f37ea3de2c7a844fc" exitCode=0 Jan 22 06:02:48 crc kubenswrapper[4933]: I0122 06:02:48.964674 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7dnz7" event={"ID":"fe6b55e0-eef0-41a8-8645-1f664e64dee0","Type":"ContainerDied","Data":"d33c38d55d5edfa39ca2c4a3467ffafe06892a0f32260e4f37ea3de2c7a844fc"} Jan 22 06:02:48 crc kubenswrapper[4933]: I0122 06:02:48.964722 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7dnz7" event={"ID":"fe6b55e0-eef0-41a8-8645-1f664e64dee0","Type":"ContainerDied","Data":"64f857f4104edbe111af9a862a71d66e3e04dd1fdd639a29d25b6df7307dde5d"} Jan 22 06:02:48 crc kubenswrapper[4933]: I0122 06:02:48.964750 4933 scope.go:117] "RemoveContainer" containerID="d33c38d55d5edfa39ca2c4a3467ffafe06892a0f32260e4f37ea3de2c7a844fc" Jan 22 06:02:48 crc kubenswrapper[4933]: I0122 06:02:48.964755 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7dnz7" Jan 22 06:02:48 crc kubenswrapper[4933]: I0122 06:02:48.970273 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7hcc\" (UniqueName: \"kubernetes.io/projected/fe6b55e0-eef0-41a8-8645-1f664e64dee0-kube-api-access-w7hcc\") pod \"fe6b55e0-eef0-41a8-8645-1f664e64dee0\" (UID: \"fe6b55e0-eef0-41a8-8645-1f664e64dee0\") " Jan 22 06:02:48 crc kubenswrapper[4933]: I0122 06:02:48.970340 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe6b55e0-eef0-41a8-8645-1f664e64dee0-utilities\") pod \"fe6b55e0-eef0-41a8-8645-1f664e64dee0\" (UID: \"fe6b55e0-eef0-41a8-8645-1f664e64dee0\") " Jan 22 06:02:48 crc kubenswrapper[4933]: I0122 06:02:48.970412 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe6b55e0-eef0-41a8-8645-1f664e64dee0-catalog-content\") pod \"fe6b55e0-eef0-41a8-8645-1f664e64dee0\" (UID: \"fe6b55e0-eef0-41a8-8645-1f664e64dee0\") " Jan 22 06:02:48 crc kubenswrapper[4933]: I0122 06:02:48.971610 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fe6b55e0-eef0-41a8-8645-1f664e64dee0-utilities" (OuterVolumeSpecName: "utilities") pod "fe6b55e0-eef0-41a8-8645-1f664e64dee0" (UID: "fe6b55e0-eef0-41a8-8645-1f664e64dee0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:02:48 crc kubenswrapper[4933]: I0122 06:02:48.978245 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fe6b55e0-eef0-41a8-8645-1f664e64dee0-kube-api-access-w7hcc" (OuterVolumeSpecName: "kube-api-access-w7hcc") pod "fe6b55e0-eef0-41a8-8645-1f664e64dee0" (UID: "fe6b55e0-eef0-41a8-8645-1f664e64dee0"). InnerVolumeSpecName "kube-api-access-w7hcc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:02:48 crc kubenswrapper[4933]: I0122 06:02:48.990676 4933 scope.go:117] "RemoveContainer" containerID="abee0a345dd6c7a281996100969aae5c8a892db44175737a831b7bf8f0f5a0e5" Jan 22 06:02:49 crc kubenswrapper[4933]: I0122 06:02:49.000925 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fe6b55e0-eef0-41a8-8645-1f664e64dee0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fe6b55e0-eef0-41a8-8645-1f664e64dee0" (UID: "fe6b55e0-eef0-41a8-8645-1f664e64dee0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:02:49 crc kubenswrapper[4933]: I0122 06:02:49.009953 4933 scope.go:117] "RemoveContainer" containerID="7ac1736bb37c6f1ea5a37bf847843c219b1db908f29e274a853f005a937d6043" Jan 22 06:02:49 crc kubenswrapper[4933]: I0122 06:02:49.032678 4933 scope.go:117] "RemoveContainer" containerID="d33c38d55d5edfa39ca2c4a3467ffafe06892a0f32260e4f37ea3de2c7a844fc" Jan 22 06:02:49 crc kubenswrapper[4933]: E0122 06:02:49.036973 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d33c38d55d5edfa39ca2c4a3467ffafe06892a0f32260e4f37ea3de2c7a844fc\": container with ID starting with d33c38d55d5edfa39ca2c4a3467ffafe06892a0f32260e4f37ea3de2c7a844fc not found: ID does not exist" containerID="d33c38d55d5edfa39ca2c4a3467ffafe06892a0f32260e4f37ea3de2c7a844fc" Jan 22 06:02:49 crc kubenswrapper[4933]: I0122 06:02:49.037039 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d33c38d55d5edfa39ca2c4a3467ffafe06892a0f32260e4f37ea3de2c7a844fc"} err="failed to get container status \"d33c38d55d5edfa39ca2c4a3467ffafe06892a0f32260e4f37ea3de2c7a844fc\": rpc error: code = NotFound desc = could not find container \"d33c38d55d5edfa39ca2c4a3467ffafe06892a0f32260e4f37ea3de2c7a844fc\": container with ID starting with d33c38d55d5edfa39ca2c4a3467ffafe06892a0f32260e4f37ea3de2c7a844fc not found: ID does not exist" Jan 22 06:02:49 crc kubenswrapper[4933]: I0122 06:02:49.037143 4933 scope.go:117] "RemoveContainer" containerID="abee0a345dd6c7a281996100969aae5c8a892db44175737a831b7bf8f0f5a0e5" Jan 22 06:02:49 crc kubenswrapper[4933]: E0122 06:02:49.038010 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"abee0a345dd6c7a281996100969aae5c8a892db44175737a831b7bf8f0f5a0e5\": container with ID starting with abee0a345dd6c7a281996100969aae5c8a892db44175737a831b7bf8f0f5a0e5 not found: ID does not exist" containerID="abee0a345dd6c7a281996100969aae5c8a892db44175737a831b7bf8f0f5a0e5" Jan 22 06:02:49 crc kubenswrapper[4933]: I0122 06:02:49.038065 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"abee0a345dd6c7a281996100969aae5c8a892db44175737a831b7bf8f0f5a0e5"} err="failed to get container status \"abee0a345dd6c7a281996100969aae5c8a892db44175737a831b7bf8f0f5a0e5\": rpc error: code = NotFound desc = could not find container \"abee0a345dd6c7a281996100969aae5c8a892db44175737a831b7bf8f0f5a0e5\": container with ID starting with abee0a345dd6c7a281996100969aae5c8a892db44175737a831b7bf8f0f5a0e5 not found: ID does not exist" Jan 22 06:02:49 crc kubenswrapper[4933]: I0122 06:02:49.038114 4933 scope.go:117] "RemoveContainer" containerID="7ac1736bb37c6f1ea5a37bf847843c219b1db908f29e274a853f005a937d6043" Jan 22 06:02:49 crc kubenswrapper[4933]: 
E0122 06:02:49.038754 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7ac1736bb37c6f1ea5a37bf847843c219b1db908f29e274a853f005a937d6043\": container with ID starting with 7ac1736bb37c6f1ea5a37bf847843c219b1db908f29e274a853f005a937d6043 not found: ID does not exist" containerID="7ac1736bb37c6f1ea5a37bf847843c219b1db908f29e274a853f005a937d6043" Jan 22 06:02:49 crc kubenswrapper[4933]: I0122 06:02:49.038801 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7ac1736bb37c6f1ea5a37bf847843c219b1db908f29e274a853f005a937d6043"} err="failed to get container status \"7ac1736bb37c6f1ea5a37bf847843c219b1db908f29e274a853f005a937d6043\": rpc error: code = NotFound desc = could not find container \"7ac1736bb37c6f1ea5a37bf847843c219b1db908f29e274a853f005a937d6043\": container with ID starting with 7ac1736bb37c6f1ea5a37bf847843c219b1db908f29e274a853f005a937d6043 not found: ID does not exist" Jan 22 06:02:49 crc kubenswrapper[4933]: I0122 06:02:49.073063 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7hcc\" (UniqueName: \"kubernetes.io/projected/fe6b55e0-eef0-41a8-8645-1f664e64dee0-kube-api-access-w7hcc\") on node \"crc\" DevicePath \"\"" Jan 22 06:02:49 crc kubenswrapper[4933]: I0122 06:02:49.073139 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe6b55e0-eef0-41a8-8645-1f664e64dee0-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 06:02:49 crc kubenswrapper[4933]: I0122 06:02:49.073158 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe6b55e0-eef0-41a8-8645-1f664e64dee0-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 06:02:49 crc kubenswrapper[4933]: I0122 06:02:49.299023 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-7dnz7"] Jan 22 06:02:49 crc kubenswrapper[4933]: I0122 06:02:49.306348 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-7dnz7"] Jan 22 06:02:50 crc kubenswrapper[4933]: I0122 06:02:50.501811 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fe6b55e0-eef0-41a8-8645-1f664e64dee0" path="/var/lib/kubelet/pods/fe6b55e0-eef0-41a8-8645-1f664e64dee0/volumes" Jan 22 06:02:57 crc kubenswrapper[4933]: I0122 06:02:57.493658 4933 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 06:02:59 crc kubenswrapper[4933]: I0122 06:02:59.037584 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-gtcc5" event={"ID":"b5af85fb-e3ef-41b7-8c6b-7afddd5200bd","Type":"ContainerStarted","Data":"5948ef81ca0a06285490a65b6f967e49166f3630eab2c43d03e404d8e0461e55"} Jan 22 06:02:59 crc kubenswrapper[4933]: I0122 06:02:59.038133 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-gtcc5" Jan 22 06:02:59 crc kubenswrapper[4933]: I0122 06:02:59.058944 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-gtcc5" podStartSLOduration=2.704948521 podStartE2EDuration="56.058925429s" podCreationTimestamp="2026-01-22 06:02:03 +0000 UTC" firstStartedPulling="2026-01-22 06:02:04.961251091 +0000 UTC 
m=+972.798376444" lastFinishedPulling="2026-01-22 06:02:58.315227959 +0000 UTC m=+1026.152353352" observedRunningTime="2026-01-22 06:02:59.054228542 +0000 UTC m=+1026.891353965" watchObservedRunningTime="2026-01-22 06:02:59.058925429 +0000 UTC m=+1026.896050782" Jan 22 06:03:01 crc kubenswrapper[4933]: I0122 06:03:01.055189 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-m6g48" event={"ID":"2892be27-6da5-4a19-a30e-36f5907f5d70","Type":"ContainerStarted","Data":"7f082d673c75b9007e1bff16b67a08b95347f8a12f0eafe587da5e601b731284"} Jan 22 06:03:01 crc kubenswrapper[4933]: I0122 06:03:01.055879 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-m6g48" Jan 22 06:03:01 crc kubenswrapper[4933]: I0122 06:03:01.078148 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-m6g48" podStartSLOduration=4.122922956 podStartE2EDuration="59.078128699s" podCreationTimestamp="2026-01-22 06:02:02 +0000 UTC" firstStartedPulling="2026-01-22 06:02:04.971381075 +0000 UTC m=+972.808506428" lastFinishedPulling="2026-01-22 06:02:59.926586798 +0000 UTC m=+1027.763712171" observedRunningTime="2026-01-22 06:03:01.069969396 +0000 UTC m=+1028.907094769" watchObservedRunningTime="2026-01-22 06:03:01.078128699 +0000 UTC m=+1028.915254062" Jan 22 06:03:03 crc kubenswrapper[4933]: I0122 06:03:03.957970 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-gtcc5" Jan 22 06:03:04 crc kubenswrapper[4933]: I0122 06:03:04.076258 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rdbn6" event={"ID":"3028718b-d03f-414e-834d-93eb28eeb369","Type":"ContainerStarted","Data":"b9a6363474772a8206201c62795f5636924b12d9745dd036b963fa2ea3bc40f7"} Jan 22 06:03:04 crc kubenswrapper[4933]: I0122 06:03:04.096659 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-rdbn6" podStartSLOduration=3.045615143 podStartE2EDuration="1m1.096641539s" podCreationTimestamp="2026-01-22 06:02:03 +0000 UTC" firstStartedPulling="2026-01-22 06:02:04.960201159 +0000 UTC m=+972.797326512" lastFinishedPulling="2026-01-22 06:03:03.011227555 +0000 UTC m=+1030.848352908" observedRunningTime="2026-01-22 06:03:04.092032264 +0000 UTC m=+1031.929157637" watchObservedRunningTime="2026-01-22 06:03:04.096641539 +0000 UTC m=+1031.933766892" Jan 22 06:03:10 crc kubenswrapper[4933]: I0122 06:03:10.942940 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:03:10 crc kubenswrapper[4933]: I0122 06:03:10.943562 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:03:13 crc kubenswrapper[4933]: I0122 06:03:13.701769 4933 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-m6g48" Jan 22 06:03:30 crc kubenswrapper[4933]: I0122 06:03:30.218939 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-84bb9d8bd9-nwslr"] Jan 22 06:03:30 crc kubenswrapper[4933]: E0122 06:03:30.219841 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe6b55e0-eef0-41a8-8645-1f664e64dee0" containerName="registry-server" Jan 22 06:03:30 crc kubenswrapper[4933]: I0122 06:03:30.219860 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe6b55e0-eef0-41a8-8645-1f664e64dee0" containerName="registry-server" Jan 22 06:03:30 crc kubenswrapper[4933]: E0122 06:03:30.219899 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe6b55e0-eef0-41a8-8645-1f664e64dee0" containerName="extract-utilities" Jan 22 06:03:30 crc kubenswrapper[4933]: I0122 06:03:30.219907 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe6b55e0-eef0-41a8-8645-1f664e64dee0" containerName="extract-utilities" Jan 22 06:03:30 crc kubenswrapper[4933]: E0122 06:03:30.219915 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe6b55e0-eef0-41a8-8645-1f664e64dee0" containerName="extract-content" Jan 22 06:03:30 crc kubenswrapper[4933]: I0122 06:03:30.219922 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe6b55e0-eef0-41a8-8645-1f664e64dee0" containerName="extract-content" Jan 22 06:03:30 crc kubenswrapper[4933]: I0122 06:03:30.220071 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe6b55e0-eef0-41a8-8645-1f664e64dee0" containerName="registry-server" Jan 22 06:03:30 crc kubenswrapper[4933]: I0122 06:03:30.221014 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84bb9d8bd9-nwslr" Jan 22 06:03:30 crc kubenswrapper[4933]: I0122 06:03:30.228435 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Jan 22 06:03:30 crc kubenswrapper[4933]: I0122 06:03:30.231631 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-gq6q5" Jan 22 06:03:30 crc kubenswrapper[4933]: I0122 06:03:30.231949 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Jan 22 06:03:30 crc kubenswrapper[4933]: I0122 06:03:30.232034 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Jan 22 06:03:30 crc kubenswrapper[4933]: I0122 06:03:30.241171 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-84bb9d8bd9-nwslr"] Jan 22 06:03:30 crc kubenswrapper[4933]: I0122 06:03:30.274487 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5f854695bc-c844b"] Jan 22 06:03:30 crc kubenswrapper[4933]: I0122 06:03:30.276043 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5f854695bc-c844b" Jan 22 06:03:30 crc kubenswrapper[4933]: I0122 06:03:30.279242 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Jan 22 06:03:30 crc kubenswrapper[4933]: I0122 06:03:30.284660 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f854695bc-c844b"] Jan 22 06:03:30 crc kubenswrapper[4933]: I0122 06:03:30.293263 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/666ddd33-aab9-4348-b9b4-b81b48105cff-config\") pod \"dnsmasq-dns-5f854695bc-c844b\" (UID: \"666ddd33-aab9-4348-b9b4-b81b48105cff\") " pod="openstack/dnsmasq-dns-5f854695bc-c844b" Jan 22 06:03:30 crc kubenswrapper[4933]: I0122 06:03:30.293316 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/666ddd33-aab9-4348-b9b4-b81b48105cff-dns-svc\") pod \"dnsmasq-dns-5f854695bc-c844b\" (UID: \"666ddd33-aab9-4348-b9b4-b81b48105cff\") " pod="openstack/dnsmasq-dns-5f854695bc-c844b" Jan 22 06:03:30 crc kubenswrapper[4933]: I0122 06:03:30.293350 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z9jbp\" (UniqueName: \"kubernetes.io/projected/666ddd33-aab9-4348-b9b4-b81b48105cff-kube-api-access-z9jbp\") pod \"dnsmasq-dns-5f854695bc-c844b\" (UID: \"666ddd33-aab9-4348-b9b4-b81b48105cff\") " pod="openstack/dnsmasq-dns-5f854695bc-c844b" Jan 22 06:03:30 crc kubenswrapper[4933]: I0122 06:03:30.293373 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5aa66f09-6c7b-4bad-a9d8-ecd2d5f6b4e1-config\") pod \"dnsmasq-dns-84bb9d8bd9-nwslr\" (UID: \"5aa66f09-6c7b-4bad-a9d8-ecd2d5f6b4e1\") " pod="openstack/dnsmasq-dns-84bb9d8bd9-nwslr" Jan 22 06:03:30 crc kubenswrapper[4933]: I0122 06:03:30.293420 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mgn7l\" (UniqueName: \"kubernetes.io/projected/5aa66f09-6c7b-4bad-a9d8-ecd2d5f6b4e1-kube-api-access-mgn7l\") pod \"dnsmasq-dns-84bb9d8bd9-nwslr\" (UID: \"5aa66f09-6c7b-4bad-a9d8-ecd2d5f6b4e1\") " pod="openstack/dnsmasq-dns-84bb9d8bd9-nwslr" Jan 22 06:03:30 crc kubenswrapper[4933]: I0122 06:03:30.394037 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/666ddd33-aab9-4348-b9b4-b81b48105cff-config\") pod \"dnsmasq-dns-5f854695bc-c844b\" (UID: \"666ddd33-aab9-4348-b9b4-b81b48105cff\") " pod="openstack/dnsmasq-dns-5f854695bc-c844b" Jan 22 06:03:30 crc kubenswrapper[4933]: I0122 06:03:30.394094 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/666ddd33-aab9-4348-b9b4-b81b48105cff-dns-svc\") pod \"dnsmasq-dns-5f854695bc-c844b\" (UID: \"666ddd33-aab9-4348-b9b4-b81b48105cff\") " pod="openstack/dnsmasq-dns-5f854695bc-c844b" Jan 22 06:03:30 crc kubenswrapper[4933]: I0122 06:03:30.394119 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z9jbp\" (UniqueName: \"kubernetes.io/projected/666ddd33-aab9-4348-b9b4-b81b48105cff-kube-api-access-z9jbp\") pod \"dnsmasq-dns-5f854695bc-c844b\" (UID: \"666ddd33-aab9-4348-b9b4-b81b48105cff\") " 
pod="openstack/dnsmasq-dns-5f854695bc-c844b" Jan 22 06:03:30 crc kubenswrapper[4933]: I0122 06:03:30.394136 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5aa66f09-6c7b-4bad-a9d8-ecd2d5f6b4e1-config\") pod \"dnsmasq-dns-84bb9d8bd9-nwslr\" (UID: \"5aa66f09-6c7b-4bad-a9d8-ecd2d5f6b4e1\") " pod="openstack/dnsmasq-dns-84bb9d8bd9-nwslr" Jan 22 06:03:30 crc kubenswrapper[4933]: I0122 06:03:30.394173 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mgn7l\" (UniqueName: \"kubernetes.io/projected/5aa66f09-6c7b-4bad-a9d8-ecd2d5f6b4e1-kube-api-access-mgn7l\") pod \"dnsmasq-dns-84bb9d8bd9-nwslr\" (UID: \"5aa66f09-6c7b-4bad-a9d8-ecd2d5f6b4e1\") " pod="openstack/dnsmasq-dns-84bb9d8bd9-nwslr" Jan 22 06:03:30 crc kubenswrapper[4933]: I0122 06:03:30.395293 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/666ddd33-aab9-4348-b9b4-b81b48105cff-config\") pod \"dnsmasq-dns-5f854695bc-c844b\" (UID: \"666ddd33-aab9-4348-b9b4-b81b48105cff\") " pod="openstack/dnsmasq-dns-5f854695bc-c844b" Jan 22 06:03:30 crc kubenswrapper[4933]: I0122 06:03:30.395303 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5aa66f09-6c7b-4bad-a9d8-ecd2d5f6b4e1-config\") pod \"dnsmasq-dns-84bb9d8bd9-nwslr\" (UID: \"5aa66f09-6c7b-4bad-a9d8-ecd2d5f6b4e1\") " pod="openstack/dnsmasq-dns-84bb9d8bd9-nwslr" Jan 22 06:03:30 crc kubenswrapper[4933]: I0122 06:03:30.395544 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/666ddd33-aab9-4348-b9b4-b81b48105cff-dns-svc\") pod \"dnsmasq-dns-5f854695bc-c844b\" (UID: \"666ddd33-aab9-4348-b9b4-b81b48105cff\") " pod="openstack/dnsmasq-dns-5f854695bc-c844b" Jan 22 06:03:30 crc kubenswrapper[4933]: I0122 06:03:30.418261 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z9jbp\" (UniqueName: \"kubernetes.io/projected/666ddd33-aab9-4348-b9b4-b81b48105cff-kube-api-access-z9jbp\") pod \"dnsmasq-dns-5f854695bc-c844b\" (UID: \"666ddd33-aab9-4348-b9b4-b81b48105cff\") " pod="openstack/dnsmasq-dns-5f854695bc-c844b" Jan 22 06:03:30 crc kubenswrapper[4933]: I0122 06:03:30.419597 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mgn7l\" (UniqueName: \"kubernetes.io/projected/5aa66f09-6c7b-4bad-a9d8-ecd2d5f6b4e1-kube-api-access-mgn7l\") pod \"dnsmasq-dns-84bb9d8bd9-nwslr\" (UID: \"5aa66f09-6c7b-4bad-a9d8-ecd2d5f6b4e1\") " pod="openstack/dnsmasq-dns-84bb9d8bd9-nwslr" Jan 22 06:03:30 crc kubenswrapper[4933]: I0122 06:03:30.549878 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84bb9d8bd9-nwslr" Jan 22 06:03:30 crc kubenswrapper[4933]: I0122 06:03:30.593630 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5f854695bc-c844b" Jan 22 06:03:31 crc kubenswrapper[4933]: I0122 06:03:31.028111 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-84bb9d8bd9-nwslr"] Jan 22 06:03:31 crc kubenswrapper[4933]: I0122 06:03:31.101443 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f854695bc-c844b"] Jan 22 06:03:31 crc kubenswrapper[4933]: W0122 06:03:31.104197 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod666ddd33_aab9_4348_b9b4_b81b48105cff.slice/crio-db9d6c8615ea64fe1521eab72d2bf6d7f892efb8d9f5073286db8b6c38a566a7 WatchSource:0}: Error finding container db9d6c8615ea64fe1521eab72d2bf6d7f892efb8d9f5073286db8b6c38a566a7: Status 404 returned error can't find the container with id db9d6c8615ea64fe1521eab72d2bf6d7f892efb8d9f5073286db8b6c38a566a7 Jan 22 06:03:31 crc kubenswrapper[4933]: I0122 06:03:31.285733 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f854695bc-c844b" event={"ID":"666ddd33-aab9-4348-b9b4-b81b48105cff","Type":"ContainerStarted","Data":"db9d6c8615ea64fe1521eab72d2bf6d7f892efb8d9f5073286db8b6c38a566a7"} Jan 22 06:03:31 crc kubenswrapper[4933]: I0122 06:03:31.287286 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84bb9d8bd9-nwslr" event={"ID":"5aa66f09-6c7b-4bad-a9d8-ecd2d5f6b4e1","Type":"ContainerStarted","Data":"89c337f050fa9139bba99f8173fbcaffcde985abfa809666de0419ed708cd346"} Jan 22 06:03:33 crc kubenswrapper[4933]: I0122 06:03:33.247105 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5f854695bc-c844b"] Jan 22 06:03:33 crc kubenswrapper[4933]: I0122 06:03:33.275270 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-744ffd65bc-ps5xd"] Jan 22 06:03:33 crc kubenswrapper[4933]: I0122 06:03:33.276660 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-744ffd65bc-ps5xd" Jan 22 06:03:33 crc kubenswrapper[4933]: I0122 06:03:33.292343 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-744ffd65bc-ps5xd"] Jan 22 06:03:33 crc kubenswrapper[4933]: I0122 06:03:33.356062 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7a6531f0-8f5f-4e71-bb4c-af976bd38c68-config\") pod \"dnsmasq-dns-744ffd65bc-ps5xd\" (UID: \"7a6531f0-8f5f-4e71-bb4c-af976bd38c68\") " pod="openstack/dnsmasq-dns-744ffd65bc-ps5xd" Jan 22 06:03:33 crc kubenswrapper[4933]: I0122 06:03:33.356123 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jvlhh\" (UniqueName: \"kubernetes.io/projected/7a6531f0-8f5f-4e71-bb4c-af976bd38c68-kube-api-access-jvlhh\") pod \"dnsmasq-dns-744ffd65bc-ps5xd\" (UID: \"7a6531f0-8f5f-4e71-bb4c-af976bd38c68\") " pod="openstack/dnsmasq-dns-744ffd65bc-ps5xd" Jan 22 06:03:33 crc kubenswrapper[4933]: I0122 06:03:33.356172 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7a6531f0-8f5f-4e71-bb4c-af976bd38c68-dns-svc\") pod \"dnsmasq-dns-744ffd65bc-ps5xd\" (UID: \"7a6531f0-8f5f-4e71-bb4c-af976bd38c68\") " pod="openstack/dnsmasq-dns-744ffd65bc-ps5xd" Jan 22 06:03:33 crc kubenswrapper[4933]: I0122 06:03:33.457776 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7a6531f0-8f5f-4e71-bb4c-af976bd38c68-config\") pod \"dnsmasq-dns-744ffd65bc-ps5xd\" (UID: \"7a6531f0-8f5f-4e71-bb4c-af976bd38c68\") " pod="openstack/dnsmasq-dns-744ffd65bc-ps5xd" Jan 22 06:03:33 crc kubenswrapper[4933]: I0122 06:03:33.457836 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jvlhh\" (UniqueName: \"kubernetes.io/projected/7a6531f0-8f5f-4e71-bb4c-af976bd38c68-kube-api-access-jvlhh\") pod \"dnsmasq-dns-744ffd65bc-ps5xd\" (UID: \"7a6531f0-8f5f-4e71-bb4c-af976bd38c68\") " pod="openstack/dnsmasq-dns-744ffd65bc-ps5xd" Jan 22 06:03:33 crc kubenswrapper[4933]: I0122 06:03:33.457888 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7a6531f0-8f5f-4e71-bb4c-af976bd38c68-dns-svc\") pod \"dnsmasq-dns-744ffd65bc-ps5xd\" (UID: \"7a6531f0-8f5f-4e71-bb4c-af976bd38c68\") " pod="openstack/dnsmasq-dns-744ffd65bc-ps5xd" Jan 22 06:03:33 crc kubenswrapper[4933]: I0122 06:03:33.458971 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7a6531f0-8f5f-4e71-bb4c-af976bd38c68-dns-svc\") pod \"dnsmasq-dns-744ffd65bc-ps5xd\" (UID: \"7a6531f0-8f5f-4e71-bb4c-af976bd38c68\") " pod="openstack/dnsmasq-dns-744ffd65bc-ps5xd" Jan 22 06:03:33 crc kubenswrapper[4933]: I0122 06:03:33.460227 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7a6531f0-8f5f-4e71-bb4c-af976bd38c68-config\") pod \"dnsmasq-dns-744ffd65bc-ps5xd\" (UID: \"7a6531f0-8f5f-4e71-bb4c-af976bd38c68\") " pod="openstack/dnsmasq-dns-744ffd65bc-ps5xd" Jan 22 06:03:33 crc kubenswrapper[4933]: I0122 06:03:33.486650 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jvlhh\" (UniqueName: 
\"kubernetes.io/projected/7a6531f0-8f5f-4e71-bb4c-af976bd38c68-kube-api-access-jvlhh\") pod \"dnsmasq-dns-744ffd65bc-ps5xd\" (UID: \"7a6531f0-8f5f-4e71-bb4c-af976bd38c68\") " pod="openstack/dnsmasq-dns-744ffd65bc-ps5xd" Jan 22 06:03:33 crc kubenswrapper[4933]: I0122 06:03:33.543425 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-84bb9d8bd9-nwslr"] Jan 22 06:03:33 crc kubenswrapper[4933]: I0122 06:03:33.569781 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-95f5f6995-rhpgg"] Jan 22 06:03:33 crc kubenswrapper[4933]: I0122 06:03:33.570899 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-95f5f6995-rhpgg" Jan 22 06:03:33 crc kubenswrapper[4933]: I0122 06:03:33.597600 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-95f5f6995-rhpgg"] Jan 22 06:03:33 crc kubenswrapper[4933]: I0122 06:03:33.612033 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-744ffd65bc-ps5xd" Jan 22 06:03:33 crc kubenswrapper[4933]: I0122 06:03:33.761625 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ad99ca8b-cdda-4f3d-b0d0-9b4444e88225-config\") pod \"dnsmasq-dns-95f5f6995-rhpgg\" (UID: \"ad99ca8b-cdda-4f3d-b0d0-9b4444e88225\") " pod="openstack/dnsmasq-dns-95f5f6995-rhpgg" Jan 22 06:03:33 crc kubenswrapper[4933]: I0122 06:03:33.762313 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-phs6v\" (UniqueName: \"kubernetes.io/projected/ad99ca8b-cdda-4f3d-b0d0-9b4444e88225-kube-api-access-phs6v\") pod \"dnsmasq-dns-95f5f6995-rhpgg\" (UID: \"ad99ca8b-cdda-4f3d-b0d0-9b4444e88225\") " pod="openstack/dnsmasq-dns-95f5f6995-rhpgg" Jan 22 06:03:33 crc kubenswrapper[4933]: I0122 06:03:33.762534 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ad99ca8b-cdda-4f3d-b0d0-9b4444e88225-dns-svc\") pod \"dnsmasq-dns-95f5f6995-rhpgg\" (UID: \"ad99ca8b-cdda-4f3d-b0d0-9b4444e88225\") " pod="openstack/dnsmasq-dns-95f5f6995-rhpgg" Jan 22 06:03:33 crc kubenswrapper[4933]: I0122 06:03:33.864190 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ad99ca8b-cdda-4f3d-b0d0-9b4444e88225-config\") pod \"dnsmasq-dns-95f5f6995-rhpgg\" (UID: \"ad99ca8b-cdda-4f3d-b0d0-9b4444e88225\") " pod="openstack/dnsmasq-dns-95f5f6995-rhpgg" Jan 22 06:03:33 crc kubenswrapper[4933]: I0122 06:03:33.864304 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-phs6v\" (UniqueName: \"kubernetes.io/projected/ad99ca8b-cdda-4f3d-b0d0-9b4444e88225-kube-api-access-phs6v\") pod \"dnsmasq-dns-95f5f6995-rhpgg\" (UID: \"ad99ca8b-cdda-4f3d-b0d0-9b4444e88225\") " pod="openstack/dnsmasq-dns-95f5f6995-rhpgg" Jan 22 06:03:33 crc kubenswrapper[4933]: I0122 06:03:33.864386 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ad99ca8b-cdda-4f3d-b0d0-9b4444e88225-dns-svc\") pod \"dnsmasq-dns-95f5f6995-rhpgg\" (UID: \"ad99ca8b-cdda-4f3d-b0d0-9b4444e88225\") " pod="openstack/dnsmasq-dns-95f5f6995-rhpgg" Jan 22 06:03:33 crc kubenswrapper[4933]: I0122 06:03:33.865293 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"config\" (UniqueName: \"kubernetes.io/configmap/ad99ca8b-cdda-4f3d-b0d0-9b4444e88225-config\") pod \"dnsmasq-dns-95f5f6995-rhpgg\" (UID: \"ad99ca8b-cdda-4f3d-b0d0-9b4444e88225\") " pod="openstack/dnsmasq-dns-95f5f6995-rhpgg" Jan 22 06:03:33 crc kubenswrapper[4933]: I0122 06:03:33.865788 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ad99ca8b-cdda-4f3d-b0d0-9b4444e88225-dns-svc\") pod \"dnsmasq-dns-95f5f6995-rhpgg\" (UID: \"ad99ca8b-cdda-4f3d-b0d0-9b4444e88225\") " pod="openstack/dnsmasq-dns-95f5f6995-rhpgg" Jan 22 06:03:33 crc kubenswrapper[4933]: I0122 06:03:33.894314 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-phs6v\" (UniqueName: \"kubernetes.io/projected/ad99ca8b-cdda-4f3d-b0d0-9b4444e88225-kube-api-access-phs6v\") pod \"dnsmasq-dns-95f5f6995-rhpgg\" (UID: \"ad99ca8b-cdda-4f3d-b0d0-9b4444e88225\") " pod="openstack/dnsmasq-dns-95f5f6995-rhpgg" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.190821 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-95f5f6995-rhpgg" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.436479 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.437689 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.442834 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.442974 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.443127 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.444052 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.444132 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-8kl57" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.444427 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.444455 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.450736 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.573478 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/47299478-bcfd-4f21-a56c-efcf7b167999-config-data\") pod \"rabbitmq-server-0\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " pod="openstack/rabbitmq-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.573525 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/47299478-bcfd-4f21-a56c-efcf7b167999-server-conf\") pod \"rabbitmq-server-0\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " 
pod="openstack/rabbitmq-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.573603 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/47299478-bcfd-4f21-a56c-efcf7b167999-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " pod="openstack/rabbitmq-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.573699 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/47299478-bcfd-4f21-a56c-efcf7b167999-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " pod="openstack/rabbitmq-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.573815 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/47299478-bcfd-4f21-a56c-efcf7b167999-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " pod="openstack/rabbitmq-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.573855 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/47299478-bcfd-4f21-a56c-efcf7b167999-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " pod="openstack/rabbitmq-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.573875 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/47299478-bcfd-4f21-a56c-efcf7b167999-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " pod="openstack/rabbitmq-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.573902 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-blrb2\" (UniqueName: \"kubernetes.io/projected/47299478-bcfd-4f21-a56c-efcf7b167999-kube-api-access-blrb2\") pod \"rabbitmq-server-0\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " pod="openstack/rabbitmq-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.573931 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/47299478-bcfd-4f21-a56c-efcf7b167999-pod-info\") pod \"rabbitmq-server-0\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " pod="openstack/rabbitmq-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.573947 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " pod="openstack/rabbitmq-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.573965 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/47299478-bcfd-4f21-a56c-efcf7b167999-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " pod="openstack/rabbitmq-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: 
I0122 06:03:34.675602 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/47299478-bcfd-4f21-a56c-efcf7b167999-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " pod="openstack/rabbitmq-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.675711 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/47299478-bcfd-4f21-a56c-efcf7b167999-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " pod="openstack/rabbitmq-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.675732 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/47299478-bcfd-4f21-a56c-efcf7b167999-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " pod="openstack/rabbitmq-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.675772 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-blrb2\" (UniqueName: \"kubernetes.io/projected/47299478-bcfd-4f21-a56c-efcf7b167999-kube-api-access-blrb2\") pod \"rabbitmq-server-0\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " pod="openstack/rabbitmq-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.675810 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " pod="openstack/rabbitmq-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.675830 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/47299478-bcfd-4f21-a56c-efcf7b167999-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " pod="openstack/rabbitmq-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.675849 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/47299478-bcfd-4f21-a56c-efcf7b167999-pod-info\") pod \"rabbitmq-server-0\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " pod="openstack/rabbitmq-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.676160 4933 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/rabbitmq-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.676413 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/47299478-bcfd-4f21-a56c-efcf7b167999-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " pod="openstack/rabbitmq-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.676498 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/47299478-bcfd-4f21-a56c-efcf7b167999-config-data\") pod \"rabbitmq-server-0\" (UID: 
\"47299478-bcfd-4f21-a56c-efcf7b167999\") " pod="openstack/rabbitmq-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.676530 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/47299478-bcfd-4f21-a56c-efcf7b167999-server-conf\") pod \"rabbitmq-server-0\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " pod="openstack/rabbitmq-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.676577 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/47299478-bcfd-4f21-a56c-efcf7b167999-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " pod="openstack/rabbitmq-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.676600 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/47299478-bcfd-4f21-a56c-efcf7b167999-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " pod="openstack/rabbitmq-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.676858 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/47299478-bcfd-4f21-a56c-efcf7b167999-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " pod="openstack/rabbitmq-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.677431 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/47299478-bcfd-4f21-a56c-efcf7b167999-config-data\") pod \"rabbitmq-server-0\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " pod="openstack/rabbitmq-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.678037 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/47299478-bcfd-4f21-a56c-efcf7b167999-server-conf\") pod \"rabbitmq-server-0\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " pod="openstack/rabbitmq-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.681261 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/47299478-bcfd-4f21-a56c-efcf7b167999-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " pod="openstack/rabbitmq-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.692651 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/47299478-bcfd-4f21-a56c-efcf7b167999-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " pod="openstack/rabbitmq-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.719700 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/47299478-bcfd-4f21-a56c-efcf7b167999-pod-info\") pod \"rabbitmq-server-0\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " pod="openstack/rabbitmq-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.720275 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: 
\"kubernetes.io/projected/47299478-bcfd-4f21-a56c-efcf7b167999-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " pod="openstack/rabbitmq-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.720738 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/47299478-bcfd-4f21-a56c-efcf7b167999-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " pod="openstack/rabbitmq-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.720785 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-blrb2\" (UniqueName: \"kubernetes.io/projected/47299478-bcfd-4f21-a56c-efcf7b167999-kube-api-access-blrb2\") pod \"rabbitmq-server-0\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " pod="openstack/rabbitmq-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.742426 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.756931 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.757049 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.765922 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.766091 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " pod="openstack/rabbitmq-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.766275 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.774914 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.775042 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.775258 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-n6k24" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.775351 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.775507 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.881471 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4d712958-1ece-47de-9798-6e852b03c565-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.881546 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4d712958-1ece-47de-9798-6e852b03c565-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.881600 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4d712958-1ece-47de-9798-6e852b03c565-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.881643 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4d712958-1ece-47de-9798-6e852b03c565-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.881667 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c6ctq\" (UniqueName: \"kubernetes.io/projected/4d712958-1ece-47de-9798-6e852b03c565-kube-api-access-c6ctq\") pod \"rabbitmq-cell1-server-0\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.881693 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4d712958-1ece-47de-9798-6e852b03c565-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.881743 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.881771 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4d712958-1ece-47de-9798-6e852b03c565-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.881793 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4d712958-1ece-47de-9798-6e852b03c565-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.881817 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4d712958-1ece-47de-9798-6e852b03c565-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.881861 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4d712958-1ece-47de-9798-6e852b03c565-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.987966 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.988017 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4d712958-1ece-47de-9798-6e852b03c565-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.988036 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4d712958-1ece-47de-9798-6e852b03c565-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.988056 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4d712958-1ece-47de-9798-6e852b03c565-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.988087 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4d712958-1ece-47de-9798-6e852b03c565-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.988105 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4d712958-1ece-47de-9798-6e852b03c565-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.988130 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4d712958-1ece-47de-9798-6e852b03c565-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.988422 4933 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-cell1-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.988937 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4d712958-1ece-47de-9798-6e852b03c565-config-data\") pod 
\"rabbitmq-cell1-server-0\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.989464 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4d712958-1ece-47de-9798-6e852b03c565-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.989469 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4d712958-1ece-47de-9798-6e852b03c565-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.989541 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4d712958-1ece-47de-9798-6e852b03c565-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.989569 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c6ctq\" (UniqueName: \"kubernetes.io/projected/4d712958-1ece-47de-9798-6e852b03c565-kube-api-access-c6ctq\") pod \"rabbitmq-cell1-server-0\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.989587 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4d712958-1ece-47de-9798-6e852b03c565-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.989597 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4d712958-1ece-47de-9798-6e852b03c565-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.989878 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4d712958-1ece-47de-9798-6e852b03c565-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 06:03:34 crc kubenswrapper[4933]: I0122 06:03:34.990647 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4d712958-1ece-47de-9798-6e852b03c565-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 06:03:35 crc kubenswrapper[4933]: I0122 06:03:35.001373 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4d712958-1ece-47de-9798-6e852b03c565-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 06:03:35 crc kubenswrapper[4933]: I0122 06:03:35.001888 
4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4d712958-1ece-47de-9798-6e852b03c565-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 06:03:35 crc kubenswrapper[4933]: I0122 06:03:35.003322 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4d712958-1ece-47de-9798-6e852b03c565-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 06:03:35 crc kubenswrapper[4933]: I0122 06:03:35.004272 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4d712958-1ece-47de-9798-6e852b03c565-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 06:03:35 crc kubenswrapper[4933]: I0122 06:03:35.010035 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c6ctq\" (UniqueName: \"kubernetes.io/projected/4d712958-1ece-47de-9798-6e852b03c565-kube-api-access-c6ctq\") pod \"rabbitmq-cell1-server-0\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 06:03:35 crc kubenswrapper[4933]: I0122 06:03:35.015733 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 06:03:35 crc kubenswrapper[4933]: I0122 06:03:35.062401 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 22 06:03:35 crc kubenswrapper[4933]: I0122 06:03:35.101596 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 22 06:03:35 crc kubenswrapper[4933]: I0122 06:03:35.861755 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Jan 22 06:03:35 crc kubenswrapper[4933]: I0122 06:03:35.862965 4933 util.go:30] "No sandbox for pod can be found. 
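
rabbitmq-cell1-server-0 goes from "SyncLoop ADD" at 06:03:34.742426 to its last MountVolume.SetUp at 06:03:35.015733 above, i.e. all ten volumes mounted in roughly 273 ms. Measuring such gaps requires parsing the klog header timestamps; a sketch (klog headers carry no year, so only differences between entries are meaningful):

package main

import (
	"fmt"
	"time"
)

// parseKlog parses the timestamp of a klog header such as "0122 06:03:35.015733"
// (severity letter stripped). The parsed time lands in year 0, which is fine
// for computing durations within one log.
func parseKlog(ts string) (time.Time, error) {
	return time.Parse("0102 15:04:05.000000", ts)
}

func main() {
	add, _ := parseKlog("0122 06:03:34.742426")  // "SyncLoop ADD" for rabbitmq-cell1-server-0
	done, _ := parseKlog("0122 06:03:35.015733") // last MountVolume.SetUp for that pod
	fmt.Println(done.Sub(add))                   // 273.307ms from ADD to all volumes mounted
}
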
Jan 22 06:03:35 crc kubenswrapper[4933]: I0122 06:03:35.865549 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc"
Jan 22 06:03:35 crc kubenswrapper[4933]: I0122 06:03:35.868018 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts"
Jan 22 06:03:35 crc kubenswrapper[4933]: I0122 06:03:35.868098 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-q9vmz"
Jan 22 06:03:35 crc kubenswrapper[4933]: I0122 06:03:35.869494 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data"
Jan 22 06:03:35 crc kubenswrapper[4933]: I0122 06:03:35.869667 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"]
Jan 22 06:03:35 crc kubenswrapper[4933]: I0122 06:03:35.873032 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle"
Jan 22 06:03:36 crc kubenswrapper[4933]: I0122 06:03:36.004457 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-kolla-config\") pod \"openstack-galera-0\" (UID: \"1270da6b-1c9e-41b6-b628-c2eaef5d9daf\") " pod="openstack/openstack-galera-0"
Jan 22 06:03:36 crc kubenswrapper[4933]: I0122 06:03:36.004527 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-operator-scripts\") pod \"openstack-galera-0\" (UID: \"1270da6b-1c9e-41b6-b628-c2eaef5d9daf\") " pod="openstack/openstack-galera-0"
Jan 22 06:03:36 crc kubenswrapper[4933]: I0122 06:03:36.004691 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-config-data-generated\") pod \"openstack-galera-0\" (UID: \"1270da6b-1c9e-41b6-b628-c2eaef5d9daf\") " pod="openstack/openstack-galera-0"
Jan 22 06:03:36 crc kubenswrapper[4933]: I0122 06:03:36.004807 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-galera-0\" (UID: \"1270da6b-1c9e-41b6-b628-c2eaef5d9daf\") " pod="openstack/openstack-galera-0"
Jan 22 06:03:36 crc kubenswrapper[4933]: I0122 06:03:36.004846 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"1270da6b-1c9e-41b6-b628-c2eaef5d9daf\") " pod="openstack/openstack-galera-0"
Jan 22 06:03:36 crc kubenswrapper[4933]: I0122 06:03:36.004949 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9ttcs\" (UniqueName: \"kubernetes.io/projected/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-kube-api-access-9ttcs\") pod \"openstack-galera-0\" (UID: \"1270da6b-1c9e-41b6-b628-c2eaef5d9daf\") " pod="openstack/openstack-galera-0"
Jan 22 06:03:36 crc kubenswrapper[4933]: I0122 06:03:36.005020 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"1270da6b-1c9e-41b6-b628-c2eaef5d9daf\") " pod="openstack/openstack-galera-0"
Jan 22 06:03:36 crc kubenswrapper[4933]: I0122 06:03:36.005058 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-config-data-default\") pod \"openstack-galera-0\" (UID: \"1270da6b-1c9e-41b6-b628-c2eaef5d9daf\") " pod="openstack/openstack-galera-0"
Jan 22 06:03:36 crc kubenswrapper[4933]: I0122 06:03:36.106357 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-kolla-config\") pod \"openstack-galera-0\" (UID: \"1270da6b-1c9e-41b6-b628-c2eaef5d9daf\") " pod="openstack/openstack-galera-0"
Jan 22 06:03:36 crc kubenswrapper[4933]: I0122 06:03:36.106427 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-operator-scripts\") pod \"openstack-galera-0\" (UID: \"1270da6b-1c9e-41b6-b628-c2eaef5d9daf\") " pod="openstack/openstack-galera-0"
Jan 22 06:03:36 crc kubenswrapper[4933]: I0122 06:03:36.107354 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-kolla-config\") pod \"openstack-galera-0\" (UID: \"1270da6b-1c9e-41b6-b628-c2eaef5d9daf\") " pod="openstack/openstack-galera-0"
Jan 22 06:03:36 crc kubenswrapper[4933]: I0122 06:03:36.107762 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-operator-scripts\") pod \"openstack-galera-0\" (UID: \"1270da6b-1c9e-41b6-b628-c2eaef5d9daf\") " pod="openstack/openstack-galera-0"
Jan 22 06:03:36 crc kubenswrapper[4933]: I0122 06:03:36.107921 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-config-data-generated\") pod \"openstack-galera-0\" (UID: \"1270da6b-1c9e-41b6-b628-c2eaef5d9daf\") " pod="openstack/openstack-galera-0"
Jan 22 06:03:36 crc kubenswrapper[4933]: I0122 06:03:36.108154 4933 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-galera-0\" (UID: \"1270da6b-1c9e-41b6-b628-c2eaef5d9daf\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/openstack-galera-0"
Jan 22 06:03:36 crc kubenswrapper[4933]: I0122 06:03:36.108227 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-config-data-generated\") pod \"openstack-galera-0\" (UID: \"1270da6b-1c9e-41b6-b628-c2eaef5d9daf\") " pod="openstack/openstack-galera-0"
Jan 22 06:03:36 crc kubenswrapper[4933]: I0122 06:03:36.108264 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-galera-0\" (UID: \"1270da6b-1c9e-41b6-b628-c2eaef5d9daf\") " pod="openstack/openstack-galera-0"
Jan 22 06:03:36 crc kubenswrapper[4933]: I0122 06:03:36.108298 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"1270da6b-1c9e-41b6-b628-c2eaef5d9daf\") " pod="openstack/openstack-galera-0"
Jan 22 06:03:36 crc kubenswrapper[4933]: I0122 06:03:36.108689 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9ttcs\" (UniqueName: \"kubernetes.io/projected/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-kube-api-access-9ttcs\") pod \"openstack-galera-0\" (UID: \"1270da6b-1c9e-41b6-b628-c2eaef5d9daf\") " pod="openstack/openstack-galera-0"
Jan 22 06:03:36 crc kubenswrapper[4933]: I0122 06:03:36.108723 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"1270da6b-1c9e-41b6-b628-c2eaef5d9daf\") " pod="openstack/openstack-galera-0"
Jan 22 06:03:36 crc kubenswrapper[4933]: I0122 06:03:36.108742 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-config-data-default\") pod \"openstack-galera-0\" (UID: \"1270da6b-1c9e-41b6-b628-c2eaef5d9daf\") " pod="openstack/openstack-galera-0"
Jan 22 06:03:36 crc kubenswrapper[4933]: I0122 06:03:36.109729 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-config-data-default\") pod \"openstack-galera-0\" (UID: \"1270da6b-1c9e-41b6-b628-c2eaef5d9daf\") " pod="openstack/openstack-galera-0"
Jan 22 06:03:36 crc kubenswrapper[4933]: I0122 06:03:36.113946 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"1270da6b-1c9e-41b6-b628-c2eaef5d9daf\") " pod="openstack/openstack-galera-0"
Jan 22 06:03:36 crc kubenswrapper[4933]: I0122 06:03:36.131560 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9ttcs\" (UniqueName: \"kubernetes.io/projected/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-kube-api-access-9ttcs\") pod \"openstack-galera-0\" (UID: \"1270da6b-1c9e-41b6-b628-c2eaef5d9daf\") " pod="openstack/openstack-galera-0"
Jan 22 06:03:36 crc kubenswrapper[4933]: I0122 06:03:36.132230 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"1270da6b-1c9e-41b6-b628-c2eaef5d9daf\") " pod="openstack/openstack-galera-0"
Jan 22 06:03:36 crc kubenswrapper[4933]: I0122 06:03:36.138219 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-galera-0\" (UID: \"1270da6b-1c9e-41b6-b628-c2eaef5d9daf\") " pod="openstack/openstack-galera-0"
Jan 22 06:03:36 crc kubenswrapper[4933]: I0122 06:03:36.184428 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.207156 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"]
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.208575 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.210566 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.210736 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.210982 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.217006 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-7z6wt"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.223062 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.329835 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/943da5ba-d325-4686-871d-802b7730d02a-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"943da5ba-d325-4686-871d-802b7730d02a\") " pod="openstack/openstack-cell1-galera-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.329897 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/943da5ba-d325-4686-871d-802b7730d02a-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"943da5ba-d325-4686-871d-802b7730d02a\") " pod="openstack/openstack-cell1-galera-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.329950 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/943da5ba-d325-4686-871d-802b7730d02a-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"943da5ba-d325-4686-871d-802b7730d02a\") " pod="openstack/openstack-cell1-galera-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.329969 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/943da5ba-d325-4686-871d-802b7730d02a-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"943da5ba-d325-4686-871d-802b7730d02a\") " pod="openstack/openstack-cell1-galera-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.329991 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wprwh\" (UniqueName: \"kubernetes.io/projected/943da5ba-d325-4686-871d-802b7730d02a-kube-api-access-wprwh\") pod \"openstack-cell1-galera-0\" (UID: \"943da5ba-d325-4686-871d-802b7730d02a\") " pod="openstack/openstack-cell1-galera-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.330017 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"openstack-cell1-galera-0\" (UID: \"943da5ba-d325-4686-871d-802b7730d02a\") " pod="openstack/openstack-cell1-galera-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.330309 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/943da5ba-d325-4686-871d-802b7730d02a-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"943da5ba-d325-4686-871d-802b7730d02a\") " pod="openstack/openstack-cell1-galera-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.330353 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/943da5ba-d325-4686-871d-802b7730d02a-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"943da5ba-d325-4686-871d-802b7730d02a\") " pod="openstack/openstack-cell1-galera-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.421627 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"]
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.422474 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.423993 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-j44gj"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.424576 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.425100 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.435580 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/943da5ba-d325-4686-871d-802b7730d02a-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"943da5ba-d325-4686-871d-802b7730d02a\") " pod="openstack/openstack-cell1-galera-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.435861 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/943da5ba-d325-4686-871d-802b7730d02a-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"943da5ba-d325-4686-871d-802b7730d02a\") " pod="openstack/openstack-cell1-galera-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.435922 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/943da5ba-d325-4686-871d-802b7730d02a-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"943da5ba-d325-4686-871d-802b7730d02a\") " pod="openstack/openstack-cell1-galera-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.436175 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/943da5ba-d325-4686-871d-802b7730d02a-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"943da5ba-d325-4686-871d-802b7730d02a\") " pod="openstack/openstack-cell1-galera-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.436223 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wprwh\" (UniqueName: \"kubernetes.io/projected/943da5ba-d325-4686-871d-802b7730d02a-kube-api-access-wprwh\") pod \"openstack-cell1-galera-0\" (UID: \"943da5ba-d325-4686-871d-802b7730d02a\") " pod="openstack/openstack-cell1-galera-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.436272 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"openstack-cell1-galera-0\" (UID: \"943da5ba-d325-4686-871d-802b7730d02a\") " pod="openstack/openstack-cell1-galera-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.438024 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/943da5ba-d325-4686-871d-802b7730d02a-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"943da5ba-d325-4686-871d-802b7730d02a\") " pod="openstack/openstack-cell1-galera-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.438054 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/943da5ba-d325-4686-871d-802b7730d02a-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"943da5ba-d325-4686-871d-802b7730d02a\") " pod="openstack/openstack-cell1-galera-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.439104 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/943da5ba-d325-4686-871d-802b7730d02a-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"943da5ba-d325-4686-871d-802b7730d02a\") " pod="openstack/openstack-cell1-galera-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.443472 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/943da5ba-d325-4686-871d-802b7730d02a-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"943da5ba-d325-4686-871d-802b7730d02a\") " pod="openstack/openstack-cell1-galera-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.444514 4933 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"openstack-cell1-galera-0\" (UID: \"943da5ba-d325-4686-871d-802b7730d02a\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/openstack-cell1-galera-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.445659 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/943da5ba-d325-4686-871d-802b7730d02a-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"943da5ba-d325-4686-871d-802b7730d02a\") " pod="openstack/openstack-cell1-galera-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.446410 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/943da5ba-d325-4686-871d-802b7730d02a-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"943da5ba-d325-4686-871d-802b7730d02a\") " pod="openstack/openstack-cell1-galera-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.459182 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"]
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.487834 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/943da5ba-d325-4686-871d-802b7730d02a-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"943da5ba-d325-4686-871d-802b7730d02a\") " pod="openstack/openstack-cell1-galera-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.488031 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/943da5ba-d325-4686-871d-802b7730d02a-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"943da5ba-d325-4686-871d-802b7730d02a\") " pod="openstack/openstack-cell1-galera-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.500566 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"openstack-cell1-galera-0\" (UID: \"943da5ba-d325-4686-871d-802b7730d02a\") " pod="openstack/openstack-cell1-galera-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.501184 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wprwh\" (UniqueName: \"kubernetes.io/projected/943da5ba-d325-4686-871d-802b7730d02a-kube-api-access-wprwh\") pod \"openstack-cell1-galera-0\" (UID: \"943da5ba-d325-4686-871d-802b7730d02a\") " pod="openstack/openstack-cell1-galera-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.528995 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.542015 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/e0d0b42d-5a68-46ba-a0de-f26c8dab1af8-kolla-config\") pod \"memcached-0\" (UID: \"e0d0b42d-5a68-46ba-a0de-f26c8dab1af8\") " pod="openstack/memcached-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.542116 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/e0d0b42d-5a68-46ba-a0de-f26c8dab1af8-memcached-tls-certs\") pod \"memcached-0\" (UID: \"e0d0b42d-5a68-46ba-a0de-f26c8dab1af8\") " pod="openstack/memcached-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.542149 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e0d0b42d-5a68-46ba-a0de-f26c8dab1af8-config-data\") pod \"memcached-0\" (UID: \"e0d0b42d-5a68-46ba-a0de-f26c8dab1af8\") " pod="openstack/memcached-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.542196 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0d0b42d-5a68-46ba-a0de-f26c8dab1af8-combined-ca-bundle\") pod \"memcached-0\" (UID: \"e0d0b42d-5a68-46ba-a0de-f26c8dab1af8\") " pod="openstack/memcached-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.542229 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l2h8w\" (UniqueName: \"kubernetes.io/projected/e0d0b42d-5a68-46ba-a0de-f26c8dab1af8-kube-api-access-l2h8w\") pod \"memcached-0\" (UID: \"e0d0b42d-5a68-46ba-a0de-f26c8dab1af8\") " pod="openstack/memcached-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.644165 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0d0b42d-5a68-46ba-a0de-f26c8dab1af8-combined-ca-bundle\") pod \"memcached-0\" (UID: \"e0d0b42d-5a68-46ba-a0de-f26c8dab1af8\") " pod="openstack/memcached-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.644257 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l2h8w\" (UniqueName: \"kubernetes.io/projected/e0d0b42d-5a68-46ba-a0de-f26c8dab1af8-kube-api-access-l2h8w\") pod \"memcached-0\" (UID: \"e0d0b42d-5a68-46ba-a0de-f26c8dab1af8\") " pod="openstack/memcached-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.644316 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/e0d0b42d-5a68-46ba-a0de-f26c8dab1af8-kolla-config\") pod \"memcached-0\" (UID: \"e0d0b42d-5a68-46ba-a0de-f26c8dab1af8\") " pod="openstack/memcached-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.644354 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/e0d0b42d-5a68-46ba-a0de-f26c8dab1af8-memcached-tls-certs\") pod \"memcached-0\" (UID: \"e0d0b42d-5a68-46ba-a0de-f26c8dab1af8\") " pod="openstack/memcached-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.644378 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e0d0b42d-5a68-46ba-a0de-f26c8dab1af8-config-data\") pod \"memcached-0\" (UID: \"e0d0b42d-5a68-46ba-a0de-f26c8dab1af8\") " pod="openstack/memcached-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.645234 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e0d0b42d-5a68-46ba-a0de-f26c8dab1af8-config-data\") pod \"memcached-0\" (UID: \"e0d0b42d-5a68-46ba-a0de-f26c8dab1af8\") " pod="openstack/memcached-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.645386 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/e0d0b42d-5a68-46ba-a0de-f26c8dab1af8-kolla-config\") pod \"memcached-0\" (UID: \"e0d0b42d-5a68-46ba-a0de-f26c8dab1af8\") " pod="openstack/memcached-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.648834 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/e0d0b42d-5a68-46ba-a0de-f26c8dab1af8-memcached-tls-certs\") pod \"memcached-0\" (UID: \"e0d0b42d-5a68-46ba-a0de-f26c8dab1af8\") " pod="openstack/memcached-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.649119 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0d0b42d-5a68-46ba-a0de-f26c8dab1af8-combined-ca-bundle\") pod \"memcached-0\" (UID: \"e0d0b42d-5a68-46ba-a0de-f26c8dab1af8\") " pod="openstack/memcached-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.664281 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l2h8w\" (UniqueName: \"kubernetes.io/projected/e0d0b42d-5a68-46ba-a0de-f26c8dab1af8-kube-api-access-l2h8w\") pod \"memcached-0\" (UID: \"e0d0b42d-5a68-46ba-a0de-f26c8dab1af8\") " pod="openstack/memcached-0"
Jan 22 06:03:37 crc kubenswrapper[4933]: I0122 06:03:37.748012 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0"
Need to start a new one" pod="openstack/memcached-0" Jan 22 06:03:39 crc kubenswrapper[4933]: I0122 06:03:39.432953 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Jan 22 06:03:39 crc kubenswrapper[4933]: I0122 06:03:39.434493 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 22 06:03:39 crc kubenswrapper[4933]: I0122 06:03:39.436512 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-qtmxh" Jan 22 06:03:39 crc kubenswrapper[4933]: I0122 06:03:39.446717 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 22 06:03:39 crc kubenswrapper[4933]: I0122 06:03:39.577884 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h8wcz\" (UniqueName: \"kubernetes.io/projected/3070f00c-a8be-4606-bf64-53d3e321b329-kube-api-access-h8wcz\") pod \"kube-state-metrics-0\" (UID: \"3070f00c-a8be-4606-bf64-53d3e321b329\") " pod="openstack/kube-state-metrics-0" Jan 22 06:03:39 crc kubenswrapper[4933]: I0122 06:03:39.679902 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h8wcz\" (UniqueName: \"kubernetes.io/projected/3070f00c-a8be-4606-bf64-53d3e321b329-kube-api-access-h8wcz\") pod \"kube-state-metrics-0\" (UID: \"3070f00c-a8be-4606-bf64-53d3e321b329\") " pod="openstack/kube-state-metrics-0" Jan 22 06:03:39 crc kubenswrapper[4933]: I0122 06:03:39.699567 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h8wcz\" (UniqueName: \"kubernetes.io/projected/3070f00c-a8be-4606-bf64-53d3e321b329-kube-api-access-h8wcz\") pod \"kube-state-metrics-0\" (UID: \"3070f00c-a8be-4606-bf64-53d3e321b329\") " pod="openstack/kube-state-metrics-0" Jan 22 06:03:39 crc kubenswrapper[4933]: I0122 06:03:39.788395 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 22 06:03:39 crc kubenswrapper[4933]: I0122 06:03:39.836107 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-744ffd65bc-ps5xd"] Jan 22 06:03:40 crc kubenswrapper[4933]: I0122 06:03:40.942776 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:03:40 crc kubenswrapper[4933]: I0122 06:03:40.943175 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:03:40 crc kubenswrapper[4933]: I0122 06:03:40.943227 4933 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" Jan 22 06:03:40 crc kubenswrapper[4933]: I0122 06:03:40.943807 4933 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8400a254f9521bb3bb5af6c86bda35345893f7a13920ab409abe36fdefec266d"} pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 06:03:40 crc kubenswrapper[4933]: I0122 06:03:40.943851 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" containerID="cri-o://8400a254f9521bb3bb5af6c86bda35345893f7a13920ab409abe36fdefec266d" gracePeriod=600 Jan 22 06:03:41 crc kubenswrapper[4933]: I0122 06:03:41.402738 4933 generic.go:334] "Generic (PLEG): container finished" podID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerID="8400a254f9521bb3bb5af6c86bda35345893f7a13920ab409abe36fdefec266d" exitCode=0 Jan 22 06:03:41 crc kubenswrapper[4933]: I0122 06:03:41.402782 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerDied","Data":"8400a254f9521bb3bb5af6c86bda35345893f7a13920ab409abe36fdefec266d"} Jan 22 06:03:41 crc kubenswrapper[4933]: I0122 06:03:41.402836 4933 scope.go:117] "RemoveContainer" containerID="828e74f34c881560f38ae267428103e3c4e3e91319d786584ec0777e00c67304" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.686448 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-phtjz"] Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.688285 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-phtjz" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.690849 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.690962 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.690893 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-lx96x" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.694585 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-phtjz"] Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.714880 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-rwb6s"] Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.719009 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-rwb6s" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.736045 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-rwb6s"] Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.837912 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/12629e2f-7d6e-417c-a8df-c15b7a3e794e-etc-ovs\") pod \"ovn-controller-ovs-rwb6s\" (UID: \"12629e2f-7d6e-417c-a8df-c15b7a3e794e\") " pod="openstack/ovn-controller-ovs-rwb6s" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.838031 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2160e11a-468c-4bf7-9fdc-e579f3ecf896-scripts\") pod \"ovn-controller-phtjz\" (UID: \"2160e11a-468c-4bf7-9fdc-e579f3ecf896\") " pod="openstack/ovn-controller-phtjz" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.838097 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2160e11a-468c-4bf7-9fdc-e579f3ecf896-var-run\") pod \"ovn-controller-phtjz\" (UID: \"2160e11a-468c-4bf7-9fdc-e579f3ecf896\") " pod="openstack/ovn-controller-phtjz" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.838207 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b5g5d\" (UniqueName: \"kubernetes.io/projected/2160e11a-468c-4bf7-9fdc-e579f3ecf896-kube-api-access-b5g5d\") pod \"ovn-controller-phtjz\" (UID: \"2160e11a-468c-4bf7-9fdc-e579f3ecf896\") " pod="openstack/ovn-controller-phtjz" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.838244 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/12629e2f-7d6e-417c-a8df-c15b7a3e794e-var-run\") pod \"ovn-controller-ovs-rwb6s\" (UID: \"12629e2f-7d6e-417c-a8df-c15b7a3e794e\") " pod="openstack/ovn-controller-ovs-rwb6s" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.838295 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/2160e11a-468c-4bf7-9fdc-e579f3ecf896-var-log-ovn\") pod \"ovn-controller-phtjz\" (UID: \"2160e11a-468c-4bf7-9fdc-e579f3ecf896\") " 
pod="openstack/ovn-controller-phtjz" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.838334 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/2160e11a-468c-4bf7-9fdc-e579f3ecf896-ovn-controller-tls-certs\") pod \"ovn-controller-phtjz\" (UID: \"2160e11a-468c-4bf7-9fdc-e579f3ecf896\") " pod="openstack/ovn-controller-phtjz" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.838361 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/12629e2f-7d6e-417c-a8df-c15b7a3e794e-var-lib\") pod \"ovn-controller-ovs-rwb6s\" (UID: \"12629e2f-7d6e-417c-a8df-c15b7a3e794e\") " pod="openstack/ovn-controller-ovs-rwb6s" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.838425 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2160e11a-468c-4bf7-9fdc-e579f3ecf896-combined-ca-bundle\") pod \"ovn-controller-phtjz\" (UID: \"2160e11a-468c-4bf7-9fdc-e579f3ecf896\") " pod="openstack/ovn-controller-phtjz" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.838477 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/2160e11a-468c-4bf7-9fdc-e579f3ecf896-var-run-ovn\") pod \"ovn-controller-phtjz\" (UID: \"2160e11a-468c-4bf7-9fdc-e579f3ecf896\") " pod="openstack/ovn-controller-phtjz" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.838511 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/12629e2f-7d6e-417c-a8df-c15b7a3e794e-var-log\") pod \"ovn-controller-ovs-rwb6s\" (UID: \"12629e2f-7d6e-417c-a8df-c15b7a3e794e\") " pod="openstack/ovn-controller-ovs-rwb6s" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.838528 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9hf85\" (UniqueName: \"kubernetes.io/projected/12629e2f-7d6e-417c-a8df-c15b7a3e794e-kube-api-access-9hf85\") pod \"ovn-controller-ovs-rwb6s\" (UID: \"12629e2f-7d6e-417c-a8df-c15b7a3e794e\") " pod="openstack/ovn-controller-ovs-rwb6s" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.838553 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/12629e2f-7d6e-417c-a8df-c15b7a3e794e-scripts\") pod \"ovn-controller-ovs-rwb6s\" (UID: \"12629e2f-7d6e-417c-a8df-c15b7a3e794e\") " pod="openstack/ovn-controller-ovs-rwb6s" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.939615 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/12629e2f-7d6e-417c-a8df-c15b7a3e794e-var-log\") pod \"ovn-controller-ovs-rwb6s\" (UID: \"12629e2f-7d6e-417c-a8df-c15b7a3e794e\") " pod="openstack/ovn-controller-ovs-rwb6s" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.939678 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9hf85\" (UniqueName: \"kubernetes.io/projected/12629e2f-7d6e-417c-a8df-c15b7a3e794e-kube-api-access-9hf85\") pod \"ovn-controller-ovs-rwb6s\" (UID: \"12629e2f-7d6e-417c-a8df-c15b7a3e794e\") " 
pod="openstack/ovn-controller-ovs-rwb6s" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.939732 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/12629e2f-7d6e-417c-a8df-c15b7a3e794e-scripts\") pod \"ovn-controller-ovs-rwb6s\" (UID: \"12629e2f-7d6e-417c-a8df-c15b7a3e794e\") " pod="openstack/ovn-controller-ovs-rwb6s" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.939768 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/12629e2f-7d6e-417c-a8df-c15b7a3e794e-etc-ovs\") pod \"ovn-controller-ovs-rwb6s\" (UID: \"12629e2f-7d6e-417c-a8df-c15b7a3e794e\") " pod="openstack/ovn-controller-ovs-rwb6s" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.939825 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2160e11a-468c-4bf7-9fdc-e579f3ecf896-scripts\") pod \"ovn-controller-phtjz\" (UID: \"2160e11a-468c-4bf7-9fdc-e579f3ecf896\") " pod="openstack/ovn-controller-phtjz" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.939845 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2160e11a-468c-4bf7-9fdc-e579f3ecf896-var-run\") pod \"ovn-controller-phtjz\" (UID: \"2160e11a-468c-4bf7-9fdc-e579f3ecf896\") " pod="openstack/ovn-controller-phtjz" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.939910 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b5g5d\" (UniqueName: \"kubernetes.io/projected/2160e11a-468c-4bf7-9fdc-e579f3ecf896-kube-api-access-b5g5d\") pod \"ovn-controller-phtjz\" (UID: \"2160e11a-468c-4bf7-9fdc-e579f3ecf896\") " pod="openstack/ovn-controller-phtjz" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.939936 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/12629e2f-7d6e-417c-a8df-c15b7a3e794e-var-run\") pod \"ovn-controller-ovs-rwb6s\" (UID: \"12629e2f-7d6e-417c-a8df-c15b7a3e794e\") " pod="openstack/ovn-controller-ovs-rwb6s" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.940000 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/2160e11a-468c-4bf7-9fdc-e579f3ecf896-var-log-ovn\") pod \"ovn-controller-phtjz\" (UID: \"2160e11a-468c-4bf7-9fdc-e579f3ecf896\") " pod="openstack/ovn-controller-phtjz" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.940041 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/2160e11a-468c-4bf7-9fdc-e579f3ecf896-ovn-controller-tls-certs\") pod \"ovn-controller-phtjz\" (UID: \"2160e11a-468c-4bf7-9fdc-e579f3ecf896\") " pod="openstack/ovn-controller-phtjz" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.940066 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/12629e2f-7d6e-417c-a8df-c15b7a3e794e-var-lib\") pod \"ovn-controller-ovs-rwb6s\" (UID: \"12629e2f-7d6e-417c-a8df-c15b7a3e794e\") " pod="openstack/ovn-controller-ovs-rwb6s" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.940127 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/2160e11a-468c-4bf7-9fdc-e579f3ecf896-combined-ca-bundle\") pod \"ovn-controller-phtjz\" (UID: \"2160e11a-468c-4bf7-9fdc-e579f3ecf896\") " pod="openstack/ovn-controller-phtjz" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.940184 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/2160e11a-468c-4bf7-9fdc-e579f3ecf896-var-run-ovn\") pod \"ovn-controller-phtjz\" (UID: \"2160e11a-468c-4bf7-9fdc-e579f3ecf896\") " pod="openstack/ovn-controller-phtjz" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.940205 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2160e11a-468c-4bf7-9fdc-e579f3ecf896-var-run\") pod \"ovn-controller-phtjz\" (UID: \"2160e11a-468c-4bf7-9fdc-e579f3ecf896\") " pod="openstack/ovn-controller-phtjz" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.940272 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/12629e2f-7d6e-417c-a8df-c15b7a3e794e-var-log\") pod \"ovn-controller-ovs-rwb6s\" (UID: \"12629e2f-7d6e-417c-a8df-c15b7a3e794e\") " pod="openstack/ovn-controller-ovs-rwb6s" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.940472 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/12629e2f-7d6e-417c-a8df-c15b7a3e794e-etc-ovs\") pod \"ovn-controller-ovs-rwb6s\" (UID: \"12629e2f-7d6e-417c-a8df-c15b7a3e794e\") " pod="openstack/ovn-controller-ovs-rwb6s" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.940672 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/12629e2f-7d6e-417c-a8df-c15b7a3e794e-var-lib\") pod \"ovn-controller-ovs-rwb6s\" (UID: \"12629e2f-7d6e-417c-a8df-c15b7a3e794e\") " pod="openstack/ovn-controller-ovs-rwb6s" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.941355 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/12629e2f-7d6e-417c-a8df-c15b7a3e794e-var-run\") pod \"ovn-controller-ovs-rwb6s\" (UID: \"12629e2f-7d6e-417c-a8df-c15b7a3e794e\") " pod="openstack/ovn-controller-ovs-rwb6s" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.941539 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/2160e11a-468c-4bf7-9fdc-e579f3ecf896-var-run-ovn\") pod \"ovn-controller-phtjz\" (UID: \"2160e11a-468c-4bf7-9fdc-e579f3ecf896\") " pod="openstack/ovn-controller-phtjz" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.941563 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/2160e11a-468c-4bf7-9fdc-e579f3ecf896-var-log-ovn\") pod \"ovn-controller-phtjz\" (UID: \"2160e11a-468c-4bf7-9fdc-e579f3ecf896\") " pod="openstack/ovn-controller-phtjz" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.942257 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2160e11a-468c-4bf7-9fdc-e579f3ecf896-scripts\") pod \"ovn-controller-phtjz\" (UID: \"2160e11a-468c-4bf7-9fdc-e579f3ecf896\") " pod="openstack/ovn-controller-phtjz" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.942617 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/12629e2f-7d6e-417c-a8df-c15b7a3e794e-scripts\") pod \"ovn-controller-ovs-rwb6s\" (UID: \"12629e2f-7d6e-417c-a8df-c15b7a3e794e\") " pod="openstack/ovn-controller-ovs-rwb6s" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.950157 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/2160e11a-468c-4bf7-9fdc-e579f3ecf896-ovn-controller-tls-certs\") pod \"ovn-controller-phtjz\" (UID: \"2160e11a-468c-4bf7-9fdc-e579f3ecf896\") " pod="openstack/ovn-controller-phtjz" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.951277 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2160e11a-468c-4bf7-9fdc-e579f3ecf896-combined-ca-bundle\") pod \"ovn-controller-phtjz\" (UID: \"2160e11a-468c-4bf7-9fdc-e579f3ecf896\") " pod="openstack/ovn-controller-phtjz" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.956435 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9hf85\" (UniqueName: \"kubernetes.io/projected/12629e2f-7d6e-417c-a8df-c15b7a3e794e-kube-api-access-9hf85\") pod \"ovn-controller-ovs-rwb6s\" (UID: \"12629e2f-7d6e-417c-a8df-c15b7a3e794e\") " pod="openstack/ovn-controller-ovs-rwb6s" Jan 22 06:03:42 crc kubenswrapper[4933]: I0122 06:03:42.959188 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b5g5d\" (UniqueName: \"kubernetes.io/projected/2160e11a-468c-4bf7-9fdc-e579f3ecf896-kube-api-access-b5g5d\") pod \"ovn-controller-phtjz\" (UID: \"2160e11a-468c-4bf7-9fdc-e579f3ecf896\") " pod="openstack/ovn-controller-phtjz" Jan 22 06:03:43 crc kubenswrapper[4933]: I0122 06:03:43.005704 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-phtjz" Jan 22 06:03:43 crc kubenswrapper[4933]: I0122 06:03:43.041828 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-rwb6s" Jan 22 06:03:43 crc kubenswrapper[4933]: I0122 06:03:43.581193 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 22 06:03:43 crc kubenswrapper[4933]: I0122 06:03:43.582505 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 22 06:03:43 crc kubenswrapper[4933]: I0122 06:03:43.586283 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-wnnbb" Jan 22 06:03:43 crc kubenswrapper[4933]: I0122 06:03:43.586561 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Jan 22 06:03:43 crc kubenswrapper[4933]: I0122 06:03:43.586851 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Jan 22 06:03:43 crc kubenswrapper[4933]: I0122 06:03:43.587273 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Jan 22 06:03:43 crc kubenswrapper[4933]: I0122 06:03:43.594714 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 22 06:03:43 crc kubenswrapper[4933]: I0122 06:03:43.595282 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Jan 22 06:03:43 crc kubenswrapper[4933]: I0122 06:03:43.651034 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/a4b88c60-2edd-436c-996f-b8f07311f5ef-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"a4b88c60-2edd-436c-996f-b8f07311f5ef\") " pod="openstack/ovsdbserver-nb-0" Jan 22 06:03:43 crc kubenswrapper[4933]: I0122 06:03:43.651094 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hqhks\" (UniqueName: \"kubernetes.io/projected/a4b88c60-2edd-436c-996f-b8f07311f5ef-kube-api-access-hqhks\") pod \"ovsdbserver-nb-0\" (UID: \"a4b88c60-2edd-436c-996f-b8f07311f5ef\") " pod="openstack/ovsdbserver-nb-0" Jan 22 06:03:43 crc kubenswrapper[4933]: I0122 06:03:43.651136 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4b88c60-2edd-436c-996f-b8f07311f5ef-config\") pod \"ovsdbserver-nb-0\" (UID: \"a4b88c60-2edd-436c-996f-b8f07311f5ef\") " pod="openstack/ovsdbserver-nb-0" Jan 22 06:03:43 crc kubenswrapper[4933]: I0122 06:03:43.651220 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"ovsdbserver-nb-0\" (UID: \"a4b88c60-2edd-436c-996f-b8f07311f5ef\") " pod="openstack/ovsdbserver-nb-0" Jan 22 06:03:43 crc kubenswrapper[4933]: I0122 06:03:43.652521 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/a4b88c60-2edd-436c-996f-b8f07311f5ef-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"a4b88c60-2edd-436c-996f-b8f07311f5ef\") " pod="openstack/ovsdbserver-nb-0" Jan 22 06:03:43 crc kubenswrapper[4933]: I0122 06:03:43.652667 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4b88c60-2edd-436c-996f-b8f07311f5ef-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"a4b88c60-2edd-436c-996f-b8f07311f5ef\") " pod="openstack/ovsdbserver-nb-0" Jan 22 06:03:43 crc kubenswrapper[4933]: I0122 06:03:43.652729 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a4b88c60-2edd-436c-996f-b8f07311f5ef-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"a4b88c60-2edd-436c-996f-b8f07311f5ef\") " pod="openstack/ovsdbserver-nb-0" Jan 22 06:03:43 crc kubenswrapper[4933]: I0122 06:03:43.652786 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a4b88c60-2edd-436c-996f-b8f07311f5ef-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"a4b88c60-2edd-436c-996f-b8f07311f5ef\") " pod="openstack/ovsdbserver-nb-0" Jan 22 06:03:43 crc kubenswrapper[4933]: I0122 06:03:43.753971 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4b88c60-2edd-436c-996f-b8f07311f5ef-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"a4b88c60-2edd-436c-996f-b8f07311f5ef\") " pod="openstack/ovsdbserver-nb-0" Jan 22 06:03:43 crc kubenswrapper[4933]: I0122 06:03:43.754040 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a4b88c60-2edd-436c-996f-b8f07311f5ef-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"a4b88c60-2edd-436c-996f-b8f07311f5ef\") " pod="openstack/ovsdbserver-nb-0" Jan 22 06:03:43 crc kubenswrapper[4933]: I0122 06:03:43.754067 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a4b88c60-2edd-436c-996f-b8f07311f5ef-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"a4b88c60-2edd-436c-996f-b8f07311f5ef\") " pod="openstack/ovsdbserver-nb-0" Jan 22 06:03:43 crc kubenswrapper[4933]: I0122 06:03:43.754120 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/a4b88c60-2edd-436c-996f-b8f07311f5ef-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"a4b88c60-2edd-436c-996f-b8f07311f5ef\") " pod="openstack/ovsdbserver-nb-0" Jan 22 06:03:43 crc kubenswrapper[4933]: I0122 06:03:43.754144 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hqhks\" (UniqueName: \"kubernetes.io/projected/a4b88c60-2edd-436c-996f-b8f07311f5ef-kube-api-access-hqhks\") pod \"ovsdbserver-nb-0\" (UID: \"a4b88c60-2edd-436c-996f-b8f07311f5ef\") " pod="openstack/ovsdbserver-nb-0" Jan 22 06:03:43 crc kubenswrapper[4933]: I0122 06:03:43.754181 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4b88c60-2edd-436c-996f-b8f07311f5ef-config\") pod \"ovsdbserver-nb-0\" (UID: \"a4b88c60-2edd-436c-996f-b8f07311f5ef\") " pod="openstack/ovsdbserver-nb-0" Jan 22 06:03:43 crc kubenswrapper[4933]: I0122 06:03:43.754238 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"ovsdbserver-nb-0\" (UID: \"a4b88c60-2edd-436c-996f-b8f07311f5ef\") " pod="openstack/ovsdbserver-nb-0" Jan 22 06:03:43 crc kubenswrapper[4933]: I0122 06:03:43.754279 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/a4b88c60-2edd-436c-996f-b8f07311f5ef-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"a4b88c60-2edd-436c-996f-b8f07311f5ef\") " pod="openstack/ovsdbserver-nb-0" Jan 22 06:03:43 crc 
kubenswrapper[4933]: I0122 06:03:43.754761 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/a4b88c60-2edd-436c-996f-b8f07311f5ef-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"a4b88c60-2edd-436c-996f-b8f07311f5ef\") " pod="openstack/ovsdbserver-nb-0" Jan 22 06:03:43 crc kubenswrapper[4933]: I0122 06:03:43.754858 4933 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"ovsdbserver-nb-0\" (UID: \"a4b88c60-2edd-436c-996f-b8f07311f5ef\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/ovsdbserver-nb-0" Jan 22 06:03:43 crc kubenswrapper[4933]: I0122 06:03:43.755234 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4b88c60-2edd-436c-996f-b8f07311f5ef-config\") pod \"ovsdbserver-nb-0\" (UID: \"a4b88c60-2edd-436c-996f-b8f07311f5ef\") " pod="openstack/ovsdbserver-nb-0" Jan 22 06:03:43 crc kubenswrapper[4933]: I0122 06:03:43.755379 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a4b88c60-2edd-436c-996f-b8f07311f5ef-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"a4b88c60-2edd-436c-996f-b8f07311f5ef\") " pod="openstack/ovsdbserver-nb-0" Jan 22 06:03:43 crc kubenswrapper[4933]: I0122 06:03:43.760812 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4b88c60-2edd-436c-996f-b8f07311f5ef-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"a4b88c60-2edd-436c-996f-b8f07311f5ef\") " pod="openstack/ovsdbserver-nb-0" Jan 22 06:03:43 crc kubenswrapper[4933]: I0122 06:03:43.761441 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a4b88c60-2edd-436c-996f-b8f07311f5ef-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"a4b88c60-2edd-436c-996f-b8f07311f5ef\") " pod="openstack/ovsdbserver-nb-0" Jan 22 06:03:43 crc kubenswrapper[4933]: I0122 06:03:43.761744 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/a4b88c60-2edd-436c-996f-b8f07311f5ef-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"a4b88c60-2edd-436c-996f-b8f07311f5ef\") " pod="openstack/ovsdbserver-nb-0" Jan 22 06:03:43 crc kubenswrapper[4933]: I0122 06:03:43.774821 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"ovsdbserver-nb-0\" (UID: \"a4b88c60-2edd-436c-996f-b8f07311f5ef\") " pod="openstack/ovsdbserver-nb-0" Jan 22 06:03:43 crc kubenswrapper[4933]: I0122 06:03:43.777687 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hqhks\" (UniqueName: \"kubernetes.io/projected/a4b88c60-2edd-436c-996f-b8f07311f5ef-kube-api-access-hqhks\") pod \"ovsdbserver-nb-0\" (UID: \"a4b88c60-2edd-436c-996f-b8f07311f5ef\") " pod="openstack/ovsdbserver-nb-0" Jan 22 06:03:43 crc kubenswrapper[4933]: I0122 06:03:43.923502 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 22 06:03:44 crc kubenswrapper[4933]: W0122 06:03:44.999489 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7a6531f0_8f5f_4e71_bb4c_af976bd38c68.slice/crio-2981d094a68da0f86ea2c54892bdc1f2a51b999da2fd4d20c17633ec35348e6f WatchSource:0}: Error finding container 2981d094a68da0f86ea2c54892bdc1f2a51b999da2fd4d20c17633ec35348e6f: Status 404 returned error can't find the container with id 2981d094a68da0f86ea2c54892bdc1f2a51b999da2fd4d20c17633ec35348e6f Jan 22 06:03:45 crc kubenswrapper[4933]: I0122 06:03:45.443912 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-744ffd65bc-ps5xd" event={"ID":"7a6531f0-8f5f-4e71-bb4c-af976bd38c68","Type":"ContainerStarted","Data":"2981d094a68da0f86ea2c54892bdc1f2a51b999da2fd4d20c17633ec35348e6f"} Jan 22 06:03:45 crc kubenswrapper[4933]: E0122 06:03:45.857212 4933 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33" Jan 22 06:03:45 crc kubenswrapper[4933]: E0122 06:03:45.857359 4933 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-mgn7l,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-84bb9d8bd9-nwslr_openstack(5aa66f09-6c7b-4bad-a9d8-ecd2d5f6b4e1): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" 
logger="UnhandledError" Jan 22 06:03:45 crc kubenswrapper[4933]: E0122 06:03:45.860352 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-84bb9d8bd9-nwslr" podUID="5aa66f09-6c7b-4bad-a9d8-ecd2d5f6b4e1" Jan 22 06:03:45 crc kubenswrapper[4933]: E0122 06:03:45.893489 4933 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33" Jan 22 06:03:45 crc kubenswrapper[4933]: E0122 06:03:45.895886 4933 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-z9jbp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-5f854695bc-c844b_openstack(666ddd33-aab9-4348-b9b4-b81b48105cff): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 06:03:45 crc kubenswrapper[4933]: E0122 06:03:45.902163 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-5f854695bc-c844b" podUID="666ddd33-aab9-4348-b9b4-b81b48105cff" Jan 22 06:03:46 crc 
kubenswrapper[4933]: I0122 06:03:46.454123 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerStarted","Data":"55d6c6293cbc3ae4b2571461dfbc5b504ef2bc855f8799fa252e05302735e076"} Jan 22 06:03:46 crc kubenswrapper[4933]: I0122 06:03:46.462863 4933 generic.go:334] "Generic (PLEG): container finished" podID="7a6531f0-8f5f-4e71-bb4c-af976bd38c68" containerID="80632fa009d1eddf22e1c4aa302e0ea872fee779368498d53f42e047db31fb2d" exitCode=0 Jan 22 06:03:46 crc kubenswrapper[4933]: I0122 06:03:46.462970 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-744ffd65bc-ps5xd" event={"ID":"7a6531f0-8f5f-4e71-bb4c-af976bd38c68","Type":"ContainerDied","Data":"80632fa009d1eddf22e1c4aa302e0ea872fee779368498d53f42e047db31fb2d"} Jan 22 06:03:46 crc kubenswrapper[4933]: E0122 06:03:46.658201 4933 log.go:32] "CreateContainer in sandbox from runtime service failed" err=< Jan 22 06:03:46 crc kubenswrapper[4933]: rpc error: code = Unknown desc = container create failed: mount `/var/lib/kubelet/pods/7a6531f0-8f5f-4e71-bb4c-af976bd38c68/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Jan 22 06:03:46 crc kubenswrapper[4933]: > podSandboxID="2981d094a68da0f86ea2c54892bdc1f2a51b999da2fd4d20c17633ec35348e6f" Jan 22 06:03:46 crc kubenswrapper[4933]: E0122 06:03:46.658611 4933 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 22 06:03:46 crc kubenswrapper[4933]: container &Container{Name:dnsmasq-dns,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jvlhh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 
},Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-744ffd65bc-ps5xd_openstack(7a6531f0-8f5f-4e71-bb4c-af976bd38c68): CreateContainerError: container create failed: mount `/var/lib/kubelet/pods/7a6531f0-8f5f-4e71-bb4c-af976bd38c68/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Jan 22 06:03:46 crc kubenswrapper[4933]: > logger="UnhandledError" Jan 22 06:03:46 crc kubenswrapper[4933]: E0122 06:03:46.659770 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dnsmasq-dns\" with CreateContainerError: \"container create failed: mount `/var/lib/kubelet/pods/7a6531f0-8f5f-4e71-bb4c-af976bd38c68/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory\\n\"" pod="openstack/dnsmasq-dns-744ffd65bc-ps5xd" podUID="7a6531f0-8f5f-4e71-bb4c-af976bd38c68" Jan 22 06:03:46 crc kubenswrapper[4933]: I0122 06:03:46.766554 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 22 06:03:46 crc kubenswrapper[4933]: I0122 06:03:46.787614 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 22 06:03:46 crc kubenswrapper[4933]: I0122 06:03:46.807204 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 22 06:03:46 crc kubenswrapper[4933]: I0122 06:03:46.814803 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-95f5f6995-rhpgg"] Jan 22 06:03:46 crc kubenswrapper[4933]: I0122 06:03:46.818955 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 22 06:03:46 crc kubenswrapper[4933]: W0122 06:03:46.834963 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4d712958_1ece_47de_9798_6e852b03c565.slice/crio-7e93c9faa209124fdc4fc8894fd120b5e20dc803c2d608ce5d6c1cc292ca8b16 WatchSource:0}: Error finding container 7e93c9faa209124fdc4fc8894fd120b5e20dc803c2d608ce5d6c1cc292ca8b16: Status 404 returned error can't find the container with id 7e93c9faa209124fdc4fc8894fd120b5e20dc803c2d608ce5d6c1cc292ca8b16 Jan 22 06:03:46 crc kubenswrapper[4933]: W0122 06:03:46.841212 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod47299478_bcfd_4f21_a56c_efcf7b167999.slice/crio-f4435327f556a1535a4d5a954a3602c39782b2ebe8dfcef70552e445db24c9c0 WatchSource:0}: Error finding container f4435327f556a1535a4d5a954a3602c39782b2ebe8dfcef70552e445db24c9c0: Status 404 returned error can't find the container with id f4435327f556a1535a4d5a954a3602c39782b2ebe8dfcef70552e445db24c9c0 Jan 22 06:03:46 crc 
kubenswrapper[4933]: I0122 06:03:46.867064 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 22 06:03:46 crc kubenswrapper[4933]: I0122 06:03:46.872216 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 22 06:03:46 crc kubenswrapper[4933]: I0122 06:03:46.874239 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Jan 22 06:03:46 crc kubenswrapper[4933]: I0122 06:03:46.874435 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Jan 22 06:03:46 crc kubenswrapper[4933]: I0122 06:03:46.874566 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-br5rf" Jan 22 06:03:46 crc kubenswrapper[4933]: I0122 06:03:46.884373 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Jan 22 06:03:46 crc kubenswrapper[4933]: I0122 06:03:46.911530 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 22 06:03:46 crc kubenswrapper[4933]: I0122 06:03:46.942537 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/f4b41ac3-d05d-4bec-952f-c362cb5aad64-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"f4b41ac3-d05d-4bec-952f-c362cb5aad64\") " pod="openstack/ovsdbserver-sb-0" Jan 22 06:03:46 crc kubenswrapper[4933]: I0122 06:03:46.942594 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7l8nh\" (UniqueName: \"kubernetes.io/projected/f4b41ac3-d05d-4bec-952f-c362cb5aad64-kube-api-access-7l8nh\") pod \"ovsdbserver-sb-0\" (UID: \"f4b41ac3-d05d-4bec-952f-c362cb5aad64\") " pod="openstack/ovsdbserver-sb-0" Jan 22 06:03:46 crc kubenswrapper[4933]: I0122 06:03:46.942634 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f4b41ac3-d05d-4bec-952f-c362cb5aad64-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"f4b41ac3-d05d-4bec-952f-c362cb5aad64\") " pod="openstack/ovsdbserver-sb-0" Jan 22 06:03:46 crc kubenswrapper[4933]: I0122 06:03:46.942676 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-sb-0\" (UID: \"f4b41ac3-d05d-4bec-952f-c362cb5aad64\") " pod="openstack/ovsdbserver-sb-0" Jan 22 06:03:46 crc kubenswrapper[4933]: I0122 06:03:46.942705 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4b41ac3-d05d-4bec-952f-c362cb5aad64-config\") pod \"ovsdbserver-sb-0\" (UID: \"f4b41ac3-d05d-4bec-952f-c362cb5aad64\") " pod="openstack/ovsdbserver-sb-0" Jan 22 06:03:46 crc kubenswrapper[4933]: I0122 06:03:46.942742 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4b41ac3-d05d-4bec-952f-c362cb5aad64-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"f4b41ac3-d05d-4bec-952f-c362cb5aad64\") " pod="openstack/ovsdbserver-sb-0" Jan 22 06:03:46 crc kubenswrapper[4933]: I0122 06:03:46.942767 4933 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4b41ac3-d05d-4bec-952f-c362cb5aad64-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"f4b41ac3-d05d-4bec-952f-c362cb5aad64\") " pod="openstack/ovsdbserver-sb-0" Jan 22 06:03:46 crc kubenswrapper[4933]: I0122 06:03:46.942804 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4b41ac3-d05d-4bec-952f-c362cb5aad64-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"f4b41ac3-d05d-4bec-952f-c362cb5aad64\") " pod="openstack/ovsdbserver-sb-0" Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.024638 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 22 06:03:47 crc kubenswrapper[4933]: W0122 06:03:47.043725 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3070f00c_a8be_4606_bf64_53d3e321b329.slice/crio-3968ebf4a8c88e1084d1555dc30a5668e20fb9c746eaad03f4f86caf6cb2efba WatchSource:0}: Error finding container 3968ebf4a8c88e1084d1555dc30a5668e20fb9c746eaad03f4f86caf6cb2efba: Status 404 returned error can't find the container with id 3968ebf4a8c88e1084d1555dc30a5668e20fb9c746eaad03f4f86caf6cb2efba Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.046271 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4b41ac3-d05d-4bec-952f-c362cb5aad64-config\") pod \"ovsdbserver-sb-0\" (UID: \"f4b41ac3-d05d-4bec-952f-c362cb5aad64\") " pod="openstack/ovsdbserver-sb-0" Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.046337 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4b41ac3-d05d-4bec-952f-c362cb5aad64-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"f4b41ac3-d05d-4bec-952f-c362cb5aad64\") " pod="openstack/ovsdbserver-sb-0" Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.046357 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4b41ac3-d05d-4bec-952f-c362cb5aad64-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"f4b41ac3-d05d-4bec-952f-c362cb5aad64\") " pod="openstack/ovsdbserver-sb-0" Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.046393 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4b41ac3-d05d-4bec-952f-c362cb5aad64-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"f4b41ac3-d05d-4bec-952f-c362cb5aad64\") " pod="openstack/ovsdbserver-sb-0" Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.046413 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/f4b41ac3-d05d-4bec-952f-c362cb5aad64-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"f4b41ac3-d05d-4bec-952f-c362cb5aad64\") " pod="openstack/ovsdbserver-sb-0" Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.046436 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7l8nh\" (UniqueName: \"kubernetes.io/projected/f4b41ac3-d05d-4bec-952f-c362cb5aad64-kube-api-access-7l8nh\") pod \"ovsdbserver-sb-0\" (UID: 
\"f4b41ac3-d05d-4bec-952f-c362cb5aad64\") " pod="openstack/ovsdbserver-sb-0" Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.046463 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f4b41ac3-d05d-4bec-952f-c362cb5aad64-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"f4b41ac3-d05d-4bec-952f-c362cb5aad64\") " pod="openstack/ovsdbserver-sb-0" Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.046499 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-sb-0\" (UID: \"f4b41ac3-d05d-4bec-952f-c362cb5aad64\") " pod="openstack/ovsdbserver-sb-0" Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.046759 4933 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-sb-0\" (UID: \"f4b41ac3-d05d-4bec-952f-c362cb5aad64\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/ovsdbserver-sb-0" Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.047700 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/f4b41ac3-d05d-4bec-952f-c362cb5aad64-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"f4b41ac3-d05d-4bec-952f-c362cb5aad64\") " pod="openstack/ovsdbserver-sb-0" Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.048254 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4b41ac3-d05d-4bec-952f-c362cb5aad64-config\") pod \"ovsdbserver-sb-0\" (UID: \"f4b41ac3-d05d-4bec-952f-c362cb5aad64\") " pod="openstack/ovsdbserver-sb-0" Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.049451 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f4b41ac3-d05d-4bec-952f-c362cb5aad64-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"f4b41ac3-d05d-4bec-952f-c362cb5aad64\") " pod="openstack/ovsdbserver-sb-0" Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.051308 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4b41ac3-d05d-4bec-952f-c362cb5aad64-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"f4b41ac3-d05d-4bec-952f-c362cb5aad64\") " pod="openstack/ovsdbserver-sb-0" Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.052187 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4b41ac3-d05d-4bec-952f-c362cb5aad64-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"f4b41ac3-d05d-4bec-952f-c362cb5aad64\") " pod="openstack/ovsdbserver-sb-0" Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.053672 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4b41ac3-d05d-4bec-952f-c362cb5aad64-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"f4b41ac3-d05d-4bec-952f-c362cb5aad64\") " pod="openstack/ovsdbserver-sb-0" Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.059392 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.067701 4933 kubelet.go:2428] "SyncLoop 
UPDATE" source="api" pods=["openstack/ovn-controller-phtjz"] Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.068434 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7l8nh\" (UniqueName: \"kubernetes.io/projected/f4b41ac3-d05d-4bec-952f-c362cb5aad64-kube-api-access-7l8nh\") pod \"ovsdbserver-sb-0\" (UID: \"f4b41ac3-d05d-4bec-952f-c362cb5aad64\") " pod="openstack/ovsdbserver-sb-0" Jan 22 06:03:47 crc kubenswrapper[4933]: W0122 06:03:47.071633 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod943da5ba_d325_4686_871d_802b7730d02a.slice/crio-2a7006cf082e7bc070441078492a82c87a212d57dd7e3883a7c78d26a958b0a5 WatchSource:0}: Error finding container 2a7006cf082e7bc070441078492a82c87a212d57dd7e3883a7c78d26a958b0a5: Status 404 returned error can't find the container with id 2a7006cf082e7bc070441078492a82c87a212d57dd7e3883a7c78d26a958b0a5 Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.077835 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-sb-0\" (UID: \"f4b41ac3-d05d-4bec-952f-c362cb5aad64\") " pod="openstack/ovsdbserver-sb-0" Jan 22 06:03:47 crc kubenswrapper[4933]: W0122 06:03:47.078615 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2160e11a_468c_4bf7_9fdc_e579f3ecf896.slice/crio-a2d9ba2da68dc8e442d3212284c30975b51e5c7cbdc4c47cf97d090f71657af7 WatchSource:0}: Error finding container a2d9ba2da68dc8e442d3212284c30975b51e5c7cbdc4c47cf97d090f71657af7: Status 404 returned error can't find the container with id a2d9ba2da68dc8e442d3212284c30975b51e5c7cbdc4c47cf97d090f71657af7 Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.120968 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f854695bc-c844b" Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.146260 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84bb9d8bd9-nwslr" Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.248940 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5aa66f09-6c7b-4bad-a9d8-ecd2d5f6b4e1-config\") pod \"5aa66f09-6c7b-4bad-a9d8-ecd2d5f6b4e1\" (UID: \"5aa66f09-6c7b-4bad-a9d8-ecd2d5f6b4e1\") " Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.249425 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/666ddd33-aab9-4348-b9b4-b81b48105cff-dns-svc\") pod \"666ddd33-aab9-4348-b9b4-b81b48105cff\" (UID: \"666ddd33-aab9-4348-b9b4-b81b48105cff\") " Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.249443 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5aa66f09-6c7b-4bad-a9d8-ecd2d5f6b4e1-config" (OuterVolumeSpecName: "config") pod "5aa66f09-6c7b-4bad-a9d8-ecd2d5f6b4e1" (UID: "5aa66f09-6c7b-4bad-a9d8-ecd2d5f6b4e1"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.249509 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mgn7l\" (UniqueName: \"kubernetes.io/projected/5aa66f09-6c7b-4bad-a9d8-ecd2d5f6b4e1-kube-api-access-mgn7l\") pod \"5aa66f09-6c7b-4bad-a9d8-ecd2d5f6b4e1\" (UID: \"5aa66f09-6c7b-4bad-a9d8-ecd2d5f6b4e1\") " Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.249899 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.249925 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/666ddd33-aab9-4348-b9b4-b81b48105cff-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "666ddd33-aab9-4348-b9b4-b81b48105cff" (UID: "666ddd33-aab9-4348-b9b4-b81b48105cff"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.249996 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/666ddd33-aab9-4348-b9b4-b81b48105cff-config\") pod \"666ddd33-aab9-4348-b9b4-b81b48105cff\" (UID: \"666ddd33-aab9-4348-b9b4-b81b48105cff\") " Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.250170 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z9jbp\" (UniqueName: \"kubernetes.io/projected/666ddd33-aab9-4348-b9b4-b81b48105cff-kube-api-access-z9jbp\") pod \"666ddd33-aab9-4348-b9b4-b81b48105cff\" (UID: \"666ddd33-aab9-4348-b9b4-b81b48105cff\") " Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.250467 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/666ddd33-aab9-4348-b9b4-b81b48105cff-config" (OuterVolumeSpecName: "config") pod "666ddd33-aab9-4348-b9b4-b81b48105cff" (UID: "666ddd33-aab9-4348-b9b4-b81b48105cff"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.250808 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5aa66f09-6c7b-4bad-a9d8-ecd2d5f6b4e1-config\") on node \"crc\" DevicePath \"\"" Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.250833 4933 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/666ddd33-aab9-4348-b9b4-b81b48105cff-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.250846 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/666ddd33-aab9-4348-b9b4-b81b48105cff-config\") on node \"crc\" DevicePath \"\"" Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.256779 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5aa66f09-6c7b-4bad-a9d8-ecd2d5f6b4e1-kube-api-access-mgn7l" (OuterVolumeSpecName: "kube-api-access-mgn7l") pod "5aa66f09-6c7b-4bad-a9d8-ecd2d5f6b4e1" (UID: "5aa66f09-6c7b-4bad-a9d8-ecd2d5f6b4e1"). InnerVolumeSpecName "kube-api-access-mgn7l". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.257067 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/666ddd33-aab9-4348-b9b4-b81b48105cff-kube-api-access-z9jbp" (OuterVolumeSpecName: "kube-api-access-z9jbp") pod "666ddd33-aab9-4348-b9b4-b81b48105cff" (UID: "666ddd33-aab9-4348-b9b4-b81b48105cff"). InnerVolumeSpecName "kube-api-access-z9jbp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.322589 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-rwb6s"] Jan 22 06:03:47 crc kubenswrapper[4933]: W0122 06:03:47.329725 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod12629e2f_7d6e_417c_a8df_c15b7a3e794e.slice/crio-b5c0882c058c1f2711772d0b5426c672a9bb2da60241edfde7264d83aeb76b5b WatchSource:0}: Error finding container b5c0882c058c1f2711772d0b5426c672a9bb2da60241edfde7264d83aeb76b5b: Status 404 returned error can't find the container with id b5c0882c058c1f2711772d0b5426c672a9bb2da60241edfde7264d83aeb76b5b Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.352721 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mgn7l\" (UniqueName: \"kubernetes.io/projected/5aa66f09-6c7b-4bad-a9d8-ecd2d5f6b4e1-kube-api-access-mgn7l\") on node \"crc\" DevicePath \"\"" Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.352788 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z9jbp\" (UniqueName: \"kubernetes.io/projected/666ddd33-aab9-4348-b9b4-b81b48105cff-kube-api-access-z9jbp\") on node \"crc\" DevicePath \"\"" Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.470649 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-rwb6s" event={"ID":"12629e2f-7d6e-417c-a8df-c15b7a3e794e","Type":"ContainerStarted","Data":"b5c0882c058c1f2711772d0b5426c672a9bb2da60241edfde7264d83aeb76b5b"} Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.471714 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-phtjz" event={"ID":"2160e11a-468c-4bf7-9fdc-e579f3ecf896","Type":"ContainerStarted","Data":"a2d9ba2da68dc8e442d3212284c30975b51e5c7cbdc4c47cf97d090f71657af7"} Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.472687 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95f5f6995-rhpgg" event={"ID":"ad99ca8b-cdda-4f3d-b0d0-9b4444e88225","Type":"ContainerStarted","Data":"348de274d02bcdb1084a6de81f6e5d2d9946649af0a50b9e06cfcd86105728e5"} Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.474164 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84bb9d8bd9-nwslr" event={"ID":"5aa66f09-6c7b-4bad-a9d8-ecd2d5f6b4e1","Type":"ContainerDied","Data":"89c337f050fa9139bba99f8173fbcaffcde985abfa809666de0419ed708cd346"} Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.474199 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-84bb9d8bd9-nwslr" Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.479363 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"47299478-bcfd-4f21-a56c-efcf7b167999","Type":"ContainerStarted","Data":"f4435327f556a1535a4d5a954a3602c39782b2ebe8dfcef70552e445db24c9c0"} Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.480208 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"e0d0b42d-5a68-46ba-a0de-f26c8dab1af8","Type":"ContainerStarted","Data":"ceaee3bb0ae50b783c3f6dc6b46ed72fd8c6ba129c276ba7e0aa397688638198"} Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.481293 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f854695bc-c844b" event={"ID":"666ddd33-aab9-4348-b9b4-b81b48105cff","Type":"ContainerDied","Data":"db9d6c8615ea64fe1521eab72d2bf6d7f892efb8d9f5073286db8b6c38a566a7"} Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.481326 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f854695bc-c844b" Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.482151 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"943da5ba-d325-4686-871d-802b7730d02a","Type":"ContainerStarted","Data":"2a7006cf082e7bc070441078492a82c87a212d57dd7e3883a7c78d26a958b0a5"} Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.484340 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"4d712958-1ece-47de-9798-6e852b03c565","Type":"ContainerStarted","Data":"7e93c9faa209124fdc4fc8894fd120b5e20dc803c2d608ce5d6c1cc292ca8b16"} Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.485103 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"3070f00c-a8be-4606-bf64-53d3e321b329","Type":"ContainerStarted","Data":"3968ebf4a8c88e1084d1555dc30a5668e20fb9c746eaad03f4f86caf6cb2efba"} Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.486209 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"1270da6b-1c9e-41b6-b628-c2eaef5d9daf","Type":"ContainerStarted","Data":"670f97d2d0f9cb510deb369cedda881b1b1cec9255583ae6dea27865a4173a3f"} Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.542179 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-84bb9d8bd9-nwslr"] Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.548307 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-84bb9d8bd9-nwslr"] Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.578350 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5f854695bc-c844b"] Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.584068 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5f854695bc-c844b"] Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.787256 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 22 06:03:47 crc kubenswrapper[4933]: W0122 06:03:47.793198 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf4b41ac3_d05d_4bec_952f_c362cb5aad64.slice/crio-46368c2f9984c912eaf46a5911397d7d9f98e2ecfcea66ba3b5a05fdeed1b4e8 WatchSource:0}: Error 
finding container 46368c2f9984c912eaf46a5911397d7d9f98e2ecfcea66ba3b5a05fdeed1b4e8: Status 404 returned error can't find the container with id 46368c2f9984c912eaf46a5911397d7d9f98e2ecfcea66ba3b5a05fdeed1b4e8 Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.963596 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-szr8s"] Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.964785 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-szr8s" Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.970732 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Jan 22 06:03:47 crc kubenswrapper[4933]: I0122 06:03:47.983074 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-szr8s"] Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.052400 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.067989 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-szr8s\" (UID: \"eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a\") " pod="openstack/ovn-controller-metrics-szr8s" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.068129 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a-ovn-rundir\") pod \"ovn-controller-metrics-szr8s\" (UID: \"eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a\") " pod="openstack/ovn-controller-metrics-szr8s" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.068301 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a-config\") pod \"ovn-controller-metrics-szr8s\" (UID: \"eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a\") " pod="openstack/ovn-controller-metrics-szr8s" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.068447 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a-combined-ca-bundle\") pod \"ovn-controller-metrics-szr8s\" (UID: \"eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a\") " pod="openstack/ovn-controller-metrics-szr8s" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.068479 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a-ovs-rundir\") pod \"ovn-controller-metrics-szr8s\" (UID: \"eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a\") " pod="openstack/ovn-controller-metrics-szr8s" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.068538 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2xr9j\" (UniqueName: \"kubernetes.io/projected/eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a-kube-api-access-2xr9j\") pod \"ovn-controller-metrics-szr8s\" (UID: \"eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a\") " pod="openstack/ovn-controller-metrics-szr8s" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 
06:03:48.137417 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-744ffd65bc-ps5xd"] Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.170604 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a-config\") pod \"ovn-controller-metrics-szr8s\" (UID: \"eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a\") " pod="openstack/ovn-controller-metrics-szr8s" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.170808 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a-combined-ca-bundle\") pod \"ovn-controller-metrics-szr8s\" (UID: \"eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a\") " pod="openstack/ovn-controller-metrics-szr8s" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.170886 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a-ovs-rundir\") pod \"ovn-controller-metrics-szr8s\" (UID: \"eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a\") " pod="openstack/ovn-controller-metrics-szr8s" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.170984 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2xr9j\" (UniqueName: \"kubernetes.io/projected/eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a-kube-api-access-2xr9j\") pod \"ovn-controller-metrics-szr8s\" (UID: \"eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a\") " pod="openstack/ovn-controller-metrics-szr8s" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.171066 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-szr8s\" (UID: \"eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a\") " pod="openstack/ovn-controller-metrics-szr8s" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.171159 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a-ovn-rundir\") pod \"ovn-controller-metrics-szr8s\" (UID: \"eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a\") " pod="openstack/ovn-controller-metrics-szr8s" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.171541 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a-ovn-rundir\") pod \"ovn-controller-metrics-szr8s\" (UID: \"eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a\") " pod="openstack/ovn-controller-metrics-szr8s" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.172262 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a-config\") pod \"ovn-controller-metrics-szr8s\" (UID: \"eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a\") " pod="openstack/ovn-controller-metrics-szr8s" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.174662 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-794868bd45-bxqnt"] Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.176600 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-794868bd45-bxqnt" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.177775 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a-ovs-rundir\") pod \"ovn-controller-metrics-szr8s\" (UID: \"eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a\") " pod="openstack/ovn-controller-metrics-szr8s" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.179114 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a-combined-ca-bundle\") pod \"ovn-controller-metrics-szr8s\" (UID: \"eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a\") " pod="openstack/ovn-controller-metrics-szr8s" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.180040 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.184514 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-szr8s\" (UID: \"eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a\") " pod="openstack/ovn-controller-metrics-szr8s" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.191666 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-794868bd45-bxqnt"] Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.208722 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2xr9j\" (UniqueName: \"kubernetes.io/projected/eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a-kube-api-access-2xr9j\") pod \"ovn-controller-metrics-szr8s\" (UID: \"eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a\") " pod="openstack/ovn-controller-metrics-szr8s" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.271965 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/27008f77-8d32-4eab-9051-2c22ac2655b6-ovsdbserver-sb\") pod \"dnsmasq-dns-794868bd45-bxqnt\" (UID: \"27008f77-8d32-4eab-9051-2c22ac2655b6\") " pod="openstack/dnsmasq-dns-794868bd45-bxqnt" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.272024 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/27008f77-8d32-4eab-9051-2c22ac2655b6-dns-svc\") pod \"dnsmasq-dns-794868bd45-bxqnt\" (UID: \"27008f77-8d32-4eab-9051-2c22ac2655b6\") " pod="openstack/dnsmasq-dns-794868bd45-bxqnt" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.272511 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/27008f77-8d32-4eab-9051-2c22ac2655b6-config\") pod \"dnsmasq-dns-794868bd45-bxqnt\" (UID: \"27008f77-8d32-4eab-9051-2c22ac2655b6\") " pod="openstack/dnsmasq-dns-794868bd45-bxqnt" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.272627 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sfv9g\" (UniqueName: \"kubernetes.io/projected/27008f77-8d32-4eab-9051-2c22ac2655b6-kube-api-access-sfv9g\") pod \"dnsmasq-dns-794868bd45-bxqnt\" (UID: \"27008f77-8d32-4eab-9051-2c22ac2655b6\") " 
pod="openstack/dnsmasq-dns-794868bd45-bxqnt" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.315116 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-szr8s" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.374500 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/27008f77-8d32-4eab-9051-2c22ac2655b6-config\") pod \"dnsmasq-dns-794868bd45-bxqnt\" (UID: \"27008f77-8d32-4eab-9051-2c22ac2655b6\") " pod="openstack/dnsmasq-dns-794868bd45-bxqnt" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.374591 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sfv9g\" (UniqueName: \"kubernetes.io/projected/27008f77-8d32-4eab-9051-2c22ac2655b6-kube-api-access-sfv9g\") pod \"dnsmasq-dns-794868bd45-bxqnt\" (UID: \"27008f77-8d32-4eab-9051-2c22ac2655b6\") " pod="openstack/dnsmasq-dns-794868bd45-bxqnt" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.374651 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/27008f77-8d32-4eab-9051-2c22ac2655b6-ovsdbserver-sb\") pod \"dnsmasq-dns-794868bd45-bxqnt\" (UID: \"27008f77-8d32-4eab-9051-2c22ac2655b6\") " pod="openstack/dnsmasq-dns-794868bd45-bxqnt" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.374685 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/27008f77-8d32-4eab-9051-2c22ac2655b6-dns-svc\") pod \"dnsmasq-dns-794868bd45-bxqnt\" (UID: \"27008f77-8d32-4eab-9051-2c22ac2655b6\") " pod="openstack/dnsmasq-dns-794868bd45-bxqnt" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.375488 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/27008f77-8d32-4eab-9051-2c22ac2655b6-dns-svc\") pod \"dnsmasq-dns-794868bd45-bxqnt\" (UID: \"27008f77-8d32-4eab-9051-2c22ac2655b6\") " pod="openstack/dnsmasq-dns-794868bd45-bxqnt" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.376006 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/27008f77-8d32-4eab-9051-2c22ac2655b6-config\") pod \"dnsmasq-dns-794868bd45-bxqnt\" (UID: \"27008f77-8d32-4eab-9051-2c22ac2655b6\") " pod="openstack/dnsmasq-dns-794868bd45-bxqnt" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.376761 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/27008f77-8d32-4eab-9051-2c22ac2655b6-ovsdbserver-sb\") pod \"dnsmasq-dns-794868bd45-bxqnt\" (UID: \"27008f77-8d32-4eab-9051-2c22ac2655b6\") " pod="openstack/dnsmasq-dns-794868bd45-bxqnt" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.419359 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sfv9g\" (UniqueName: \"kubernetes.io/projected/27008f77-8d32-4eab-9051-2c22ac2655b6-kube-api-access-sfv9g\") pod \"dnsmasq-dns-794868bd45-bxqnt\" (UID: \"27008f77-8d32-4eab-9051-2c22ac2655b6\") " pod="openstack/dnsmasq-dns-794868bd45-bxqnt" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.469432 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-95f5f6995-rhpgg"] Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.509895 4933 kubelet_volumes.go:163] "Cleaned up orphaned 
pod volumes dir" podUID="5aa66f09-6c7b-4bad-a9d8-ecd2d5f6b4e1" path="/var/lib/kubelet/pods/5aa66f09-6c7b-4bad-a9d8-ecd2d5f6b4e1/volumes" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.510368 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="666ddd33-aab9-4348-b9b4-b81b48105cff" path="/var/lib/kubelet/pods/666ddd33-aab9-4348-b9b4-b81b48105cff/volumes" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.510721 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-757dc6fff9-gl29p"] Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.515424 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"a4b88c60-2edd-436c-996f-b8f07311f5ef","Type":"ContainerStarted","Data":"8338bb35015dc88d234fc367a4444750fee638880a211c6b3c47d3ec80313356"} Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.515488 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"f4b41ac3-d05d-4bec-952f-c362cb5aad64","Type":"ContainerStarted","Data":"46368c2f9984c912eaf46a5911397d7d9f98e2ecfcea66ba3b5a05fdeed1b4e8"} Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.515614 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-757dc6fff9-gl29p" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.524270 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.526167 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-757dc6fff9-gl29p"] Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.574494 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-794868bd45-bxqnt" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.581004 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7e17f8d0-6f1d-4831-bb0e-7a8f673a080d-ovsdbserver-nb\") pod \"dnsmasq-dns-757dc6fff9-gl29p\" (UID: \"7e17f8d0-6f1d-4831-bb0e-7a8f673a080d\") " pod="openstack/dnsmasq-dns-757dc6fff9-gl29p" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.581075 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mhnth\" (UniqueName: \"kubernetes.io/projected/7e17f8d0-6f1d-4831-bb0e-7a8f673a080d-kube-api-access-mhnth\") pod \"dnsmasq-dns-757dc6fff9-gl29p\" (UID: \"7e17f8d0-6f1d-4831-bb0e-7a8f673a080d\") " pod="openstack/dnsmasq-dns-757dc6fff9-gl29p" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.581164 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7e17f8d0-6f1d-4831-bb0e-7a8f673a080d-dns-svc\") pod \"dnsmasq-dns-757dc6fff9-gl29p\" (UID: \"7e17f8d0-6f1d-4831-bb0e-7a8f673a080d\") " pod="openstack/dnsmasq-dns-757dc6fff9-gl29p" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.581223 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7e17f8d0-6f1d-4831-bb0e-7a8f673a080d-ovsdbserver-sb\") pod \"dnsmasq-dns-757dc6fff9-gl29p\" (UID: \"7e17f8d0-6f1d-4831-bb0e-7a8f673a080d\") " pod="openstack/dnsmasq-dns-757dc6fff9-gl29p" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.581253 4933 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e17f8d0-6f1d-4831-bb0e-7a8f673a080d-config\") pod \"dnsmasq-dns-757dc6fff9-gl29p\" (UID: \"7e17f8d0-6f1d-4831-bb0e-7a8f673a080d\") " pod="openstack/dnsmasq-dns-757dc6fff9-gl29p" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.682961 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7e17f8d0-6f1d-4831-bb0e-7a8f673a080d-ovsdbserver-nb\") pod \"dnsmasq-dns-757dc6fff9-gl29p\" (UID: \"7e17f8d0-6f1d-4831-bb0e-7a8f673a080d\") " pod="openstack/dnsmasq-dns-757dc6fff9-gl29p" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.683032 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mhnth\" (UniqueName: \"kubernetes.io/projected/7e17f8d0-6f1d-4831-bb0e-7a8f673a080d-kube-api-access-mhnth\") pod \"dnsmasq-dns-757dc6fff9-gl29p\" (UID: \"7e17f8d0-6f1d-4831-bb0e-7a8f673a080d\") " pod="openstack/dnsmasq-dns-757dc6fff9-gl29p" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.683122 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7e17f8d0-6f1d-4831-bb0e-7a8f673a080d-dns-svc\") pod \"dnsmasq-dns-757dc6fff9-gl29p\" (UID: \"7e17f8d0-6f1d-4831-bb0e-7a8f673a080d\") " pod="openstack/dnsmasq-dns-757dc6fff9-gl29p" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.683175 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7e17f8d0-6f1d-4831-bb0e-7a8f673a080d-ovsdbserver-sb\") pod \"dnsmasq-dns-757dc6fff9-gl29p\" (UID: \"7e17f8d0-6f1d-4831-bb0e-7a8f673a080d\") " pod="openstack/dnsmasq-dns-757dc6fff9-gl29p" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.683234 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e17f8d0-6f1d-4831-bb0e-7a8f673a080d-config\") pod \"dnsmasq-dns-757dc6fff9-gl29p\" (UID: \"7e17f8d0-6f1d-4831-bb0e-7a8f673a080d\") " pod="openstack/dnsmasq-dns-757dc6fff9-gl29p" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.684253 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e17f8d0-6f1d-4831-bb0e-7a8f673a080d-config\") pod \"dnsmasq-dns-757dc6fff9-gl29p\" (UID: \"7e17f8d0-6f1d-4831-bb0e-7a8f673a080d\") " pod="openstack/dnsmasq-dns-757dc6fff9-gl29p" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.684720 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7e17f8d0-6f1d-4831-bb0e-7a8f673a080d-ovsdbserver-nb\") pod \"dnsmasq-dns-757dc6fff9-gl29p\" (UID: \"7e17f8d0-6f1d-4831-bb0e-7a8f673a080d\") " pod="openstack/dnsmasq-dns-757dc6fff9-gl29p" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.684926 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7e17f8d0-6f1d-4831-bb0e-7a8f673a080d-dns-svc\") pod \"dnsmasq-dns-757dc6fff9-gl29p\" (UID: \"7e17f8d0-6f1d-4831-bb0e-7a8f673a080d\") " pod="openstack/dnsmasq-dns-757dc6fff9-gl29p" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.685390 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/7e17f8d0-6f1d-4831-bb0e-7a8f673a080d-ovsdbserver-sb\") pod \"dnsmasq-dns-757dc6fff9-gl29p\" (UID: \"7e17f8d0-6f1d-4831-bb0e-7a8f673a080d\") " pod="openstack/dnsmasq-dns-757dc6fff9-gl29p" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.701238 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mhnth\" (UniqueName: \"kubernetes.io/projected/7e17f8d0-6f1d-4831-bb0e-7a8f673a080d-kube-api-access-mhnth\") pod \"dnsmasq-dns-757dc6fff9-gl29p\" (UID: \"7e17f8d0-6f1d-4831-bb0e-7a8f673a080d\") " pod="openstack/dnsmasq-dns-757dc6fff9-gl29p" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.845272 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-757dc6fff9-gl29p" Jan 22 06:03:48 crc kubenswrapper[4933]: I0122 06:03:48.930665 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-szr8s"] Jan 22 06:03:49 crc kubenswrapper[4933]: I0122 06:03:49.035582 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-794868bd45-bxqnt"] Jan 22 06:03:49 crc kubenswrapper[4933]: W0122 06:03:49.076749 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podeb8b9caf_9b8a_4fc5_b9cd_6704c9fb2e8a.slice/crio-914be5bfee9e203b71c6416243d7c2b9d913edeb5d4b3d64cb1dcad13384d3be WatchSource:0}: Error finding container 914be5bfee9e203b71c6416243d7c2b9d913edeb5d4b3d64cb1dcad13384d3be: Status 404 returned error can't find the container with id 914be5bfee9e203b71c6416243d7c2b9d913edeb5d4b3d64cb1dcad13384d3be Jan 22 06:03:49 crc kubenswrapper[4933]: I0122 06:03:49.303305 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-757dc6fff9-gl29p"] Jan 22 06:03:49 crc kubenswrapper[4933]: I0122 06:03:49.523053 4933 generic.go:334] "Generic (PLEG): container finished" podID="ad99ca8b-cdda-4f3d-b0d0-9b4444e88225" containerID="7c8ad7daf6d5809d51769d3fb46e279fe05030add71089a036ab76ec39ac28ad" exitCode=0 Jan 22 06:03:49 crc kubenswrapper[4933]: I0122 06:03:49.523131 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95f5f6995-rhpgg" event={"ID":"ad99ca8b-cdda-4f3d-b0d0-9b4444e88225","Type":"ContainerDied","Data":"7c8ad7daf6d5809d51769d3fb46e279fe05030add71089a036ab76ec39ac28ad"} Jan 22 06:03:49 crc kubenswrapper[4933]: I0122 06:03:49.529020 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757dc6fff9-gl29p" event={"ID":"7e17f8d0-6f1d-4831-bb0e-7a8f673a080d","Type":"ContainerStarted","Data":"91748a5090dace86365d53df724b19f26ca88465068608c09b068efb0b1af661"} Jan 22 06:03:49 crc kubenswrapper[4933]: I0122 06:03:49.532163 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-szr8s" event={"ID":"eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a","Type":"ContainerStarted","Data":"914be5bfee9e203b71c6416243d7c2b9d913edeb5d4b3d64cb1dcad13384d3be"} Jan 22 06:03:49 crc kubenswrapper[4933]: I0122 06:03:49.535841 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-744ffd65bc-ps5xd" event={"ID":"7a6531f0-8f5f-4e71-bb4c-af976bd38c68","Type":"ContainerStarted","Data":"61904fd16d6fea9cc0a92cf827634befd96a5126374297505f7a3c8543abdd1b"} Jan 22 06:03:49 crc kubenswrapper[4933]: I0122 06:03:49.535957 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-744ffd65bc-ps5xd" 
podUID="7a6531f0-8f5f-4e71-bb4c-af976bd38c68" containerName="dnsmasq-dns" containerID="cri-o://61904fd16d6fea9cc0a92cf827634befd96a5126374297505f7a3c8543abdd1b" gracePeriod=10 Jan 22 06:03:49 crc kubenswrapper[4933]: I0122 06:03:49.535993 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-744ffd65bc-ps5xd" Jan 22 06:03:49 crc kubenswrapper[4933]: I0122 06:03:49.537760 4933 generic.go:334] "Generic (PLEG): container finished" podID="27008f77-8d32-4eab-9051-2c22ac2655b6" containerID="04920aa3341efb42fbb04bf075d825f992a3dc873995e1ba4c065a8f06ef201c" exitCode=0 Jan 22 06:03:49 crc kubenswrapper[4933]: I0122 06:03:49.537809 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-794868bd45-bxqnt" event={"ID":"27008f77-8d32-4eab-9051-2c22ac2655b6","Type":"ContainerDied","Data":"04920aa3341efb42fbb04bf075d825f992a3dc873995e1ba4c065a8f06ef201c"} Jan 22 06:03:49 crc kubenswrapper[4933]: I0122 06:03:49.537830 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-794868bd45-bxqnt" event={"ID":"27008f77-8d32-4eab-9051-2c22ac2655b6","Type":"ContainerStarted","Data":"f858589a15698ad3f81fd5050d5828e03fcddff2bfbb1dccffd1770ea549f6e1"} Jan 22 06:03:49 crc kubenswrapper[4933]: I0122 06:03:49.567849 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-744ffd65bc-ps5xd" podStartSLOduration=15.501591646 podStartE2EDuration="16.567832073s" podCreationTimestamp="2026-01-22 06:03:33 +0000 UTC" firstStartedPulling="2026-01-22 06:03:45.004998066 +0000 UTC m=+1072.842123419" lastFinishedPulling="2026-01-22 06:03:46.071238483 +0000 UTC m=+1073.908363846" observedRunningTime="2026-01-22 06:03:49.566139531 +0000 UTC m=+1077.403264894" watchObservedRunningTime="2026-01-22 06:03:49.567832073 +0000 UTC m=+1077.404957426" Jan 22 06:03:50 crc kubenswrapper[4933]: I0122 06:03:50.194501 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-95f5f6995-rhpgg" Jan 22 06:03:50 crc kubenswrapper[4933]: I0122 06:03:50.351950 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ad99ca8b-cdda-4f3d-b0d0-9b4444e88225-dns-svc\") pod \"ad99ca8b-cdda-4f3d-b0d0-9b4444e88225\" (UID: \"ad99ca8b-cdda-4f3d-b0d0-9b4444e88225\") " Jan 22 06:03:50 crc kubenswrapper[4933]: I0122 06:03:50.352026 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ad99ca8b-cdda-4f3d-b0d0-9b4444e88225-config\") pod \"ad99ca8b-cdda-4f3d-b0d0-9b4444e88225\" (UID: \"ad99ca8b-cdda-4f3d-b0d0-9b4444e88225\") " Jan 22 06:03:50 crc kubenswrapper[4933]: I0122 06:03:50.352090 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-phs6v\" (UniqueName: \"kubernetes.io/projected/ad99ca8b-cdda-4f3d-b0d0-9b4444e88225-kube-api-access-phs6v\") pod \"ad99ca8b-cdda-4f3d-b0d0-9b4444e88225\" (UID: \"ad99ca8b-cdda-4f3d-b0d0-9b4444e88225\") " Jan 22 06:03:50 crc kubenswrapper[4933]: I0122 06:03:50.373298 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad99ca8b-cdda-4f3d-b0d0-9b4444e88225-kube-api-access-phs6v" (OuterVolumeSpecName: "kube-api-access-phs6v") pod "ad99ca8b-cdda-4f3d-b0d0-9b4444e88225" (UID: "ad99ca8b-cdda-4f3d-b0d0-9b4444e88225"). InnerVolumeSpecName "kube-api-access-phs6v". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:03:50 crc kubenswrapper[4933]: I0122 06:03:50.377060 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ad99ca8b-cdda-4f3d-b0d0-9b4444e88225-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ad99ca8b-cdda-4f3d-b0d0-9b4444e88225" (UID: "ad99ca8b-cdda-4f3d-b0d0-9b4444e88225"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:03:50 crc kubenswrapper[4933]: I0122 06:03:50.398600 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ad99ca8b-cdda-4f3d-b0d0-9b4444e88225-config" (OuterVolumeSpecName: "config") pod "ad99ca8b-cdda-4f3d-b0d0-9b4444e88225" (UID: "ad99ca8b-cdda-4f3d-b0d0-9b4444e88225"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:03:50 crc kubenswrapper[4933]: I0122 06:03:50.454738 4933 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ad99ca8b-cdda-4f3d-b0d0-9b4444e88225-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 06:03:50 crc kubenswrapper[4933]: I0122 06:03:50.454771 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ad99ca8b-cdda-4f3d-b0d0-9b4444e88225-config\") on node \"crc\" DevicePath \"\"" Jan 22 06:03:50 crc kubenswrapper[4933]: I0122 06:03:50.454782 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-phs6v\" (UniqueName: \"kubernetes.io/projected/ad99ca8b-cdda-4f3d-b0d0-9b4444e88225-kube-api-access-phs6v\") on node \"crc\" DevicePath \"\"" Jan 22 06:03:50 crc kubenswrapper[4933]: I0122 06:03:50.550655 4933 generic.go:334] "Generic (PLEG): container finished" podID="7e17f8d0-6f1d-4831-bb0e-7a8f673a080d" containerID="8d81fe725ecefe9c300c0a92464f3ebdf078b44ea154cfac18da9e240aaa37b0" exitCode=0 Jan 22 06:03:50 crc kubenswrapper[4933]: I0122 06:03:50.550712 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757dc6fff9-gl29p" event={"ID":"7e17f8d0-6f1d-4831-bb0e-7a8f673a080d","Type":"ContainerDied","Data":"8d81fe725ecefe9c300c0a92464f3ebdf078b44ea154cfac18da9e240aaa37b0"} Jan 22 06:03:50 crc kubenswrapper[4933]: I0122 06:03:50.553880 4933 generic.go:334] "Generic (PLEG): container finished" podID="7a6531f0-8f5f-4e71-bb4c-af976bd38c68" containerID="61904fd16d6fea9cc0a92cf827634befd96a5126374297505f7a3c8543abdd1b" exitCode=0 Jan 22 06:03:50 crc kubenswrapper[4933]: I0122 06:03:50.553931 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-744ffd65bc-ps5xd" event={"ID":"7a6531f0-8f5f-4e71-bb4c-af976bd38c68","Type":"ContainerDied","Data":"61904fd16d6fea9cc0a92cf827634befd96a5126374297505f7a3c8543abdd1b"} Jan 22 06:03:50 crc kubenswrapper[4933]: I0122 06:03:50.573184 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-794868bd45-bxqnt" event={"ID":"27008f77-8d32-4eab-9051-2c22ac2655b6","Type":"ContainerStarted","Data":"dffb74aafb35e2991d47a704b2035461b8ce0a8b71356eec82ee84a839eb8778"} Jan 22 06:03:50 crc kubenswrapper[4933]: I0122 06:03:50.573284 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-794868bd45-bxqnt" Jan 22 06:03:50 crc kubenswrapper[4933]: I0122 06:03:50.577671 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95f5f6995-rhpgg" 
event={"ID":"ad99ca8b-cdda-4f3d-b0d0-9b4444e88225","Type":"ContainerDied","Data":"348de274d02bcdb1084a6de81f6e5d2d9946649af0a50b9e06cfcd86105728e5"} Jan 22 06:03:50 crc kubenswrapper[4933]: I0122 06:03:50.577720 4933 scope.go:117] "RemoveContainer" containerID="7c8ad7daf6d5809d51769d3fb46e279fe05030add71089a036ab76ec39ac28ad" Jan 22 06:03:50 crc kubenswrapper[4933]: I0122 06:03:50.577843 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-95f5f6995-rhpgg" Jan 22 06:03:50 crc kubenswrapper[4933]: I0122 06:03:50.597357 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-794868bd45-bxqnt" podStartSLOduration=2.597337204 podStartE2EDuration="2.597337204s" podCreationTimestamp="2026-01-22 06:03:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:03:50.592888323 +0000 UTC m=+1078.430013676" watchObservedRunningTime="2026-01-22 06:03:50.597337204 +0000 UTC m=+1078.434462557" Jan 22 06:03:50 crc kubenswrapper[4933]: I0122 06:03:50.633918 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-95f5f6995-rhpgg"] Jan 22 06:03:50 crc kubenswrapper[4933]: I0122 06:03:50.643413 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-95f5f6995-rhpgg"] Jan 22 06:03:50 crc kubenswrapper[4933]: I0122 06:03:50.845576 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-744ffd65bc-ps5xd" Jan 22 06:03:50 crc kubenswrapper[4933]: I0122 06:03:50.964610 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7a6531f0-8f5f-4e71-bb4c-af976bd38c68-dns-svc\") pod \"7a6531f0-8f5f-4e71-bb4c-af976bd38c68\" (UID: \"7a6531f0-8f5f-4e71-bb4c-af976bd38c68\") " Jan 22 06:03:50 crc kubenswrapper[4933]: I0122 06:03:50.964735 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jvlhh\" (UniqueName: \"kubernetes.io/projected/7a6531f0-8f5f-4e71-bb4c-af976bd38c68-kube-api-access-jvlhh\") pod \"7a6531f0-8f5f-4e71-bb4c-af976bd38c68\" (UID: \"7a6531f0-8f5f-4e71-bb4c-af976bd38c68\") " Jan 22 06:03:50 crc kubenswrapper[4933]: I0122 06:03:50.964843 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7a6531f0-8f5f-4e71-bb4c-af976bd38c68-config\") pod \"7a6531f0-8f5f-4e71-bb4c-af976bd38c68\" (UID: \"7a6531f0-8f5f-4e71-bb4c-af976bd38c68\") " Jan 22 06:03:50 crc kubenswrapper[4933]: I0122 06:03:50.968861 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a6531f0-8f5f-4e71-bb4c-af976bd38c68-kube-api-access-jvlhh" (OuterVolumeSpecName: "kube-api-access-jvlhh") pod "7a6531f0-8f5f-4e71-bb4c-af976bd38c68" (UID: "7a6531f0-8f5f-4e71-bb4c-af976bd38c68"). InnerVolumeSpecName "kube-api-access-jvlhh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:03:51 crc kubenswrapper[4933]: I0122 06:03:51.005032 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7a6531f0-8f5f-4e71-bb4c-af976bd38c68-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7a6531f0-8f5f-4e71-bb4c-af976bd38c68" (UID: "7a6531f0-8f5f-4e71-bb4c-af976bd38c68"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:03:51 crc kubenswrapper[4933]: I0122 06:03:51.007093 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7a6531f0-8f5f-4e71-bb4c-af976bd38c68-config" (OuterVolumeSpecName: "config") pod "7a6531f0-8f5f-4e71-bb4c-af976bd38c68" (UID: "7a6531f0-8f5f-4e71-bb4c-af976bd38c68"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:03:51 crc kubenswrapper[4933]: I0122 06:03:51.066610 4933 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7a6531f0-8f5f-4e71-bb4c-af976bd38c68-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 06:03:51 crc kubenswrapper[4933]: I0122 06:03:51.066652 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jvlhh\" (UniqueName: \"kubernetes.io/projected/7a6531f0-8f5f-4e71-bb4c-af976bd38c68-kube-api-access-jvlhh\") on node \"crc\" DevicePath \"\"" Jan 22 06:03:51 crc kubenswrapper[4933]: I0122 06:03:51.066668 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7a6531f0-8f5f-4e71-bb4c-af976bd38c68-config\") on node \"crc\" DevicePath \"\"" Jan 22 06:03:51 crc kubenswrapper[4933]: I0122 06:03:51.588921 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-744ffd65bc-ps5xd" event={"ID":"7a6531f0-8f5f-4e71-bb4c-af976bd38c68","Type":"ContainerDied","Data":"2981d094a68da0f86ea2c54892bdc1f2a51b999da2fd4d20c17633ec35348e6f"} Jan 22 06:03:51 crc kubenswrapper[4933]: I0122 06:03:51.588942 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-744ffd65bc-ps5xd" Jan 22 06:03:51 crc kubenswrapper[4933]: I0122 06:03:51.616020 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-744ffd65bc-ps5xd"] Jan 22 06:03:51 crc kubenswrapper[4933]: I0122 06:03:51.621851 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-744ffd65bc-ps5xd"] Jan 22 06:03:52 crc kubenswrapper[4933]: I0122 06:03:52.503622 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7a6531f0-8f5f-4e71-bb4c-af976bd38c68" path="/var/lib/kubelet/pods/7a6531f0-8f5f-4e71-bb4c-af976bd38c68/volumes" Jan 22 06:03:52 crc kubenswrapper[4933]: I0122 06:03:52.504376 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ad99ca8b-cdda-4f3d-b0d0-9b4444e88225" path="/var/lib/kubelet/pods/ad99ca8b-cdda-4f3d-b0d0-9b4444e88225/volumes" Jan 22 06:03:53 crc kubenswrapper[4933]: I0122 06:03:53.345814 4933 scope.go:117] "RemoveContainer" containerID="61904fd16d6fea9cc0a92cf827634befd96a5126374297505f7a3c8543abdd1b" Jan 22 06:03:58 crc kubenswrapper[4933]: I0122 06:03:58.577454 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-794868bd45-bxqnt" Jan 22 06:03:58 crc kubenswrapper[4933]: I0122 06:03:58.730344 4933 scope.go:117] "RemoveContainer" containerID="80632fa009d1eddf22e1c4aa302e0ea872fee779368498d53f42e047db31fb2d" Jan 22 06:03:59 crc kubenswrapper[4933]: I0122 06:03:59.829365 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"e0d0b42d-5a68-46ba-a0de-f26c8dab1af8","Type":"ContainerStarted","Data":"c8b3c82c4b888183709e4c809da692746a4eb74efe1e21ce5e8fbd16fb29ade5"} Jan 22 06:03:59 crc kubenswrapper[4933]: I0122 06:03:59.829724 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openstack/memcached-0" Jan 22 06:03:59 crc kubenswrapper[4933]: I0122 06:03:59.831864 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757dc6fff9-gl29p" event={"ID":"7e17f8d0-6f1d-4831-bb0e-7a8f673a080d","Type":"ContainerStarted","Data":"2d7dcc55c52f0332979086d141c4ed233b35909cc999cb2c04861c8692cc503f"} Jan 22 06:03:59 crc kubenswrapper[4933]: I0122 06:03:59.832309 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-757dc6fff9-gl29p" Jan 22 06:03:59 crc kubenswrapper[4933]: I0122 06:03:59.857933 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=11.675953961 podStartE2EDuration="22.857911629s" podCreationTimestamp="2026-01-22 06:03:37 +0000 UTC" firstStartedPulling="2026-01-22 06:03:46.86139676 +0000 UTC m=+1074.698522113" lastFinishedPulling="2026-01-22 06:03:58.043354408 +0000 UTC m=+1085.880479781" observedRunningTime="2026-01-22 06:03:59.849498029 +0000 UTC m=+1087.686623422" watchObservedRunningTime="2026-01-22 06:03:59.857911629 +0000 UTC m=+1087.695036992" Jan 22 06:03:59 crc kubenswrapper[4933]: I0122 06:03:59.874731 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-757dc6fff9-gl29p" podStartSLOduration=11.874708958 podStartE2EDuration="11.874708958s" podCreationTimestamp="2026-01-22 06:03:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:03:59.86955127 +0000 UTC m=+1087.706676653" watchObservedRunningTime="2026-01-22 06:03:59.874708958 +0000 UTC m=+1087.711834311" Jan 22 06:04:00 crc kubenswrapper[4933]: I0122 06:04:00.846858 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-szr8s" event={"ID":"eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a","Type":"ContainerStarted","Data":"49be386c17bf17cbf852a7ced425f8a40d45b629b580c910b7c1ed1881aef46f"} Jan 22 06:04:00 crc kubenswrapper[4933]: I0122 06:04:00.852592 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"1270da6b-1c9e-41b6-b628-c2eaef5d9daf","Type":"ContainerStarted","Data":"48a217a83e3da85312b55ef33ee0ad86d71c50c2b1e74dfbffefed794bc3c01e"} Jan 22 06:04:00 crc kubenswrapper[4933]: I0122 06:04:00.858359 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"f4b41ac3-d05d-4bec-952f-c362cb5aad64","Type":"ContainerStarted","Data":"1e48373d2f9642eae495cf2b2ade3933c39166aaf3df09df17b1f26537b81222"} Jan 22 06:04:00 crc kubenswrapper[4933]: I0122 06:04:00.863310 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"943da5ba-d325-4686-871d-802b7730d02a","Type":"ContainerStarted","Data":"b956f6b2dbe48cf74ff7e20f1605a13bc4acfeb9f49ccdf24ca9c5172e1373b4"} Jan 22 06:04:00 crc kubenswrapper[4933]: I0122 06:04:00.868831 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-szr8s" podStartSLOduration=3.6387595470000003 podStartE2EDuration="13.868813927s" podCreationTimestamp="2026-01-22 06:03:47 +0000 UTC" firstStartedPulling="2026-01-22 06:03:49.08486911 +0000 UTC m=+1076.921994463" lastFinishedPulling="2026-01-22 06:03:59.31492349 +0000 UTC m=+1087.152048843" observedRunningTime="2026-01-22 06:04:00.867423902 +0000 UTC m=+1088.704549275" watchObservedRunningTime="2026-01-22 06:04:00.868813927 +0000 
UTC m=+1088.705939280" Jan 22 06:04:00 crc kubenswrapper[4933]: I0122 06:04:00.876191 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-rwb6s" event={"ID":"12629e2f-7d6e-417c-a8df-c15b7a3e794e","Type":"ContainerStarted","Data":"97dd8c1857fe2cca4317c1796aab3e3c32a4e333c89ca50415ee631196e34bde"} Jan 22 06:04:00 crc kubenswrapper[4933]: I0122 06:04:00.881262 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"a4b88c60-2edd-436c-996f-b8f07311f5ef","Type":"ContainerStarted","Data":"61aa1634a7490f2c19d0a61583367b3b595c012d49db045a0f91f868015c9e92"} Jan 22 06:04:00 crc kubenswrapper[4933]: I0122 06:04:00.888568 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-phtjz" event={"ID":"2160e11a-468c-4bf7-9fdc-e579f3ecf896","Type":"ContainerStarted","Data":"cffd7868d1d8343c9ebe115ec9c5afd392b4e675691b973c2e8edd358ac529ec"} Jan 22 06:04:00 crc kubenswrapper[4933]: I0122 06:04:00.888610 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-phtjz" Jan 22 06:04:00 crc kubenswrapper[4933]: I0122 06:04:00.973474 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-phtjz" podStartSLOduration=6.871152686 podStartE2EDuration="18.973452804s" podCreationTimestamp="2026-01-22 06:03:42 +0000 UTC" firstStartedPulling="2026-01-22 06:03:47.080630373 +0000 UTC m=+1074.917755726" lastFinishedPulling="2026-01-22 06:03:59.182930491 +0000 UTC m=+1087.020055844" observedRunningTime="2026-01-22 06:04:00.948226865 +0000 UTC m=+1088.785352228" watchObservedRunningTime="2026-01-22 06:04:00.973452804 +0000 UTC m=+1088.810578157" Jan 22 06:04:01 crc kubenswrapper[4933]: I0122 06:04:01.897523 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"f4b41ac3-d05d-4bec-952f-c362cb5aad64","Type":"ContainerStarted","Data":"b8fa72a1d737aaba2c6ef382aac2a61139f81989233737acfd4fd0708eec386a"} Jan 22 06:04:01 crc kubenswrapper[4933]: I0122 06:04:01.898904 4933 generic.go:334] "Generic (PLEG): container finished" podID="12629e2f-7d6e-417c-a8df-c15b7a3e794e" containerID="97dd8c1857fe2cca4317c1796aab3e3c32a4e333c89ca50415ee631196e34bde" exitCode=0 Jan 22 06:04:01 crc kubenswrapper[4933]: I0122 06:04:01.898976 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-rwb6s" event={"ID":"12629e2f-7d6e-417c-a8df-c15b7a3e794e","Type":"ContainerDied","Data":"97dd8c1857fe2cca4317c1796aab3e3c32a4e333c89ca50415ee631196e34bde"} Jan 22 06:04:01 crc kubenswrapper[4933]: I0122 06:04:01.903264 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"a4b88c60-2edd-436c-996f-b8f07311f5ef","Type":"ContainerStarted","Data":"7561e4048595cee77f6047174945f8a81c575b242d3b3be183508c84bf12d15d"} Jan 22 06:04:01 crc kubenswrapper[4933]: I0122 06:04:01.905942 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"4d712958-1ece-47de-9798-6e852b03c565","Type":"ContainerStarted","Data":"452184a465d8d1be0e80a527dbc5f992b7ee47b495271d69908b57e67655f195"} Jan 22 06:04:01 crc kubenswrapper[4933]: I0122 06:04:01.911894 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"47299478-bcfd-4f21-a56c-efcf7b167999","Type":"ContainerStarted","Data":"83e14ee02b552e375ddb43f6d79d9fe6adc343bd3efcacc2ece24e2451dc5275"} Jan 22 06:04:01 crc 
kubenswrapper[4933]: I0122 06:04:01.914052 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"3070f00c-a8be-4606-bf64-53d3e321b329","Type":"ContainerStarted","Data":"cf6f2f928d77621dd900f7415a2f45d2131a39b6c456968dd6db4e3f7965e128"} Jan 22 06:04:01 crc kubenswrapper[4933]: I0122 06:04:01.924336 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Jan 22 06:04:01 crc kubenswrapper[4933]: I0122 06:04:01.944704 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=5.561202394 podStartE2EDuration="16.944688233s" podCreationTimestamp="2026-01-22 06:03:45 +0000 UTC" firstStartedPulling="2026-01-22 06:03:47.799380551 +0000 UTC m=+1075.636505914" lastFinishedPulling="2026-01-22 06:03:59.18286636 +0000 UTC m=+1087.019991753" observedRunningTime="2026-01-22 06:04:01.943717319 +0000 UTC m=+1089.780842732" watchObservedRunningTime="2026-01-22 06:04:01.944688233 +0000 UTC m=+1089.781813586" Jan 22 06:04:02 crc kubenswrapper[4933]: I0122 06:04:02.051516 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=9.929590181 podStartE2EDuration="23.051493334s" podCreationTimestamp="2026-01-22 06:03:39 +0000 UTC" firstStartedPulling="2026-01-22 06:03:47.05404362 +0000 UTC m=+1074.891168973" lastFinishedPulling="2026-01-22 06:04:00.175946773 +0000 UTC m=+1088.013072126" observedRunningTime="2026-01-22 06:04:02.030289066 +0000 UTC m=+1089.867414459" watchObservedRunningTime="2026-01-22 06:04:02.051493334 +0000 UTC m=+1089.888618707" Jan 22 06:04:02 crc kubenswrapper[4933]: I0122 06:04:02.092357 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=9.452255496 podStartE2EDuration="20.092332102s" podCreationTimestamp="2026-01-22 06:03:42 +0000 UTC" firstStartedPulling="2026-01-22 06:03:48.07625646 +0000 UTC m=+1075.913381823" lastFinishedPulling="2026-01-22 06:03:58.716333086 +0000 UTC m=+1086.553458429" observedRunningTime="2026-01-22 06:04:02.084524498 +0000 UTC m=+1089.921649861" watchObservedRunningTime="2026-01-22 06:04:02.092332102 +0000 UTC m=+1089.929457455" Jan 22 06:04:02 crc kubenswrapper[4933]: I0122 06:04:02.250474 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Jan 22 06:04:02 crc kubenswrapper[4933]: I0122 06:04:02.250770 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Jan 22 06:04:02 crc kubenswrapper[4933]: I0122 06:04:02.934371 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-rwb6s" event={"ID":"12629e2f-7d6e-417c-a8df-c15b7a3e794e","Type":"ContainerStarted","Data":"fdbc1e77ca1a1d82a896e3c18520e64cbf781bfdafe1433a48e8546525073c90"} Jan 22 06:04:02 crc kubenswrapper[4933]: I0122 06:04:02.934458 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-rwb6s" event={"ID":"12629e2f-7d6e-417c-a8df-c15b7a3e794e","Type":"ContainerStarted","Data":"4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe"} Jan 22 06:04:02 crc kubenswrapper[4933]: I0122 06:04:02.934615 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 22 06:04:02 crc kubenswrapper[4933]: I0122 06:04:02.965863 4933 pod_startup_latency_tracker.go:104] "Observed pod 
startup duration" pod="openstack/ovn-controller-ovs-rwb6s" podStartSLOduration=9.487852682 podStartE2EDuration="20.965845826s" podCreationTimestamp="2026-01-22 06:03:42 +0000 UTC" firstStartedPulling="2026-01-22 06:03:47.331616796 +0000 UTC m=+1075.168742169" lastFinishedPulling="2026-01-22 06:03:58.80960996 +0000 UTC m=+1086.646735313" observedRunningTime="2026-01-22 06:04:02.956928644 +0000 UTC m=+1090.794054007" watchObservedRunningTime="2026-01-22 06:04:02.965845826 +0000 UTC m=+1090.802971179" Jan 22 06:04:03 crc kubenswrapper[4933]: I0122 06:04:03.041994 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-rwb6s" Jan 22 06:04:03 crc kubenswrapper[4933]: I0122 06:04:03.042168 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-rwb6s" Jan 22 06:04:03 crc kubenswrapper[4933]: I0122 06:04:03.924608 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Jan 22 06:04:04 crc kubenswrapper[4933]: I0122 06:04:04.956595 4933 generic.go:334] "Generic (PLEG): container finished" podID="943da5ba-d325-4686-871d-802b7730d02a" containerID="b956f6b2dbe48cf74ff7e20f1605a13bc4acfeb9f49ccdf24ca9c5172e1373b4" exitCode=0 Jan 22 06:04:04 crc kubenswrapper[4933]: I0122 06:04:04.956656 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"943da5ba-d325-4686-871d-802b7730d02a","Type":"ContainerDied","Data":"b956f6b2dbe48cf74ff7e20f1605a13bc4acfeb9f49ccdf24ca9c5172e1373b4"} Jan 22 06:04:04 crc kubenswrapper[4933]: I0122 06:04:04.962877 4933 generic.go:334] "Generic (PLEG): container finished" podID="1270da6b-1c9e-41b6-b628-c2eaef5d9daf" containerID="48a217a83e3da85312b55ef33ee0ad86d71c50c2b1e74dfbffefed794bc3c01e" exitCode=0 Jan 22 06:04:04 crc kubenswrapper[4933]: I0122 06:04:04.964382 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"1270da6b-1c9e-41b6-b628-c2eaef5d9daf","Type":"ContainerDied","Data":"48a217a83e3da85312b55ef33ee0ad86d71c50c2b1e74dfbffefed794bc3c01e"} Jan 22 06:04:04 crc kubenswrapper[4933]: I0122 06:04:04.994904 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Jan 22 06:04:05 crc kubenswrapper[4933]: I0122 06:04:05.295227 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Jan 22 06:04:05 crc kubenswrapper[4933]: I0122 06:04:05.351437 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Jan 22 06:04:05 crc kubenswrapper[4933]: I0122 06:04:05.972307 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"1270da6b-1c9e-41b6-b628-c2eaef5d9daf","Type":"ContainerStarted","Data":"728628962bfae73d3592f2a149b8ee7e86c1b9649701dc3ff1d779b1a0028722"} Jan 22 06:04:05 crc kubenswrapper[4933]: I0122 06:04:05.974952 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"943da5ba-d325-4686-871d-802b7730d02a","Type":"ContainerStarted","Data":"4205f5d3155c766e46db2874af6cdb743c6e1ecc121652ab6d216950365cf512"} Jan 22 06:04:06 crc kubenswrapper[4933]: I0122 06:04:06.003263 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=19.81923074 podStartE2EDuration="32.003237525s" podCreationTimestamp="2026-01-22 06:03:34 +0000 UTC" 
firstStartedPulling="2026-01-22 06:03:46.826780497 +0000 UTC m=+1074.663905850" lastFinishedPulling="2026-01-22 06:03:59.010787272 +0000 UTC m=+1086.847912635" observedRunningTime="2026-01-22 06:04:05.998222071 +0000 UTC m=+1093.835347434" watchObservedRunningTime="2026-01-22 06:04:06.003237525 +0000 UTC m=+1093.840362918" Jan 22 06:04:06 crc kubenswrapper[4933]: I0122 06:04:06.014651 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Jan 22 06:04:06 crc kubenswrapper[4933]: I0122 06:04:06.032361 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=17.80962736 podStartE2EDuration="30.03233632s" podCreationTimestamp="2026-01-22 06:03:36 +0000 UTC" firstStartedPulling="2026-01-22 06:03:47.072893789 +0000 UTC m=+1074.910019132" lastFinishedPulling="2026-01-22 06:03:59.295602729 +0000 UTC m=+1087.132728092" observedRunningTime="2026-01-22 06:04:06.02506727 +0000 UTC m=+1093.862192663" watchObservedRunningTime="2026-01-22 06:04:06.03233632 +0000 UTC m=+1093.869461683" Jan 22 06:04:06 crc kubenswrapper[4933]: I0122 06:04:06.164176 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Jan 22 06:04:06 crc kubenswrapper[4933]: E0122 06:04:06.164548 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a6531f0-8f5f-4e71-bb4c-af976bd38c68" containerName="init" Jan 22 06:04:06 crc kubenswrapper[4933]: I0122 06:04:06.164569 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a6531f0-8f5f-4e71-bb4c-af976bd38c68" containerName="init" Jan 22 06:04:06 crc kubenswrapper[4933]: E0122 06:04:06.164613 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a6531f0-8f5f-4e71-bb4c-af976bd38c68" containerName="dnsmasq-dns" Jan 22 06:04:06 crc kubenswrapper[4933]: I0122 06:04:06.164622 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a6531f0-8f5f-4e71-bb4c-af976bd38c68" containerName="dnsmasq-dns" Jan 22 06:04:06 crc kubenswrapper[4933]: E0122 06:04:06.164640 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad99ca8b-cdda-4f3d-b0d0-9b4444e88225" containerName="init" Jan 22 06:04:06 crc kubenswrapper[4933]: I0122 06:04:06.164648 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad99ca8b-cdda-4f3d-b0d0-9b4444e88225" containerName="init" Jan 22 06:04:06 crc kubenswrapper[4933]: I0122 06:04:06.164824 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a6531f0-8f5f-4e71-bb4c-af976bd38c68" containerName="dnsmasq-dns" Jan 22 06:04:06 crc kubenswrapper[4933]: I0122 06:04:06.164846 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad99ca8b-cdda-4f3d-b0d0-9b4444e88225" containerName="init" Jan 22 06:04:06 crc kubenswrapper[4933]: I0122 06:04:06.165804 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Jan 22 06:04:06 crc kubenswrapper[4933]: I0122 06:04:06.168316 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Jan 22 06:04:06 crc kubenswrapper[4933]: I0122 06:04:06.168758 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Jan 22 06:04:06 crc kubenswrapper[4933]: I0122 06:04:06.168909 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Jan 22 06:04:06 crc kubenswrapper[4933]: I0122 06:04:06.171488 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-pxjz8" Jan 22 06:04:06 crc kubenswrapper[4933]: I0122 06:04:06.185133 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Jan 22 06:04:06 crc kubenswrapper[4933]: I0122 06:04:06.187160 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Jan 22 06:04:06 crc kubenswrapper[4933]: I0122 06:04:06.225285 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Jan 22 06:04:06 crc kubenswrapper[4933]: I0122 06:04:06.255820 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9445b2f3-83ea-4e79-8312-ceffa2208f77-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"9445b2f3-83ea-4e79-8312-ceffa2208f77\") " pod="openstack/ovn-northd-0" Jan 22 06:04:06 crc kubenswrapper[4933]: I0122 06:04:06.255888 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-shsz5\" (UniqueName: \"kubernetes.io/projected/9445b2f3-83ea-4e79-8312-ceffa2208f77-kube-api-access-shsz5\") pod \"ovn-northd-0\" (UID: \"9445b2f3-83ea-4e79-8312-ceffa2208f77\") " pod="openstack/ovn-northd-0" Jan 22 06:04:06 crc kubenswrapper[4933]: I0122 06:04:06.255928 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9445b2f3-83ea-4e79-8312-ceffa2208f77-scripts\") pod \"ovn-northd-0\" (UID: \"9445b2f3-83ea-4e79-8312-ceffa2208f77\") " pod="openstack/ovn-northd-0" Jan 22 06:04:06 crc kubenswrapper[4933]: I0122 06:04:06.255952 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/9445b2f3-83ea-4e79-8312-ceffa2208f77-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"9445b2f3-83ea-4e79-8312-ceffa2208f77\") " pod="openstack/ovn-northd-0" Jan 22 06:04:06 crc kubenswrapper[4933]: I0122 06:04:06.256226 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9445b2f3-83ea-4e79-8312-ceffa2208f77-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"9445b2f3-83ea-4e79-8312-ceffa2208f77\") " pod="openstack/ovn-northd-0" Jan 22 06:04:06 crc kubenswrapper[4933]: I0122 06:04:06.256291 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9445b2f3-83ea-4e79-8312-ceffa2208f77-config\") pod \"ovn-northd-0\" (UID: \"9445b2f3-83ea-4e79-8312-ceffa2208f77\") " pod="openstack/ovn-northd-0" Jan 22 06:04:06 crc kubenswrapper[4933]: I0122 06:04:06.256333 4933 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/9445b2f3-83ea-4e79-8312-ceffa2208f77-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"9445b2f3-83ea-4e79-8312-ceffa2208f77\") " pod="openstack/ovn-northd-0" Jan 22 06:04:06 crc kubenswrapper[4933]: I0122 06:04:06.357446 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9445b2f3-83ea-4e79-8312-ceffa2208f77-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"9445b2f3-83ea-4e79-8312-ceffa2208f77\") " pod="openstack/ovn-northd-0" Jan 22 06:04:06 crc kubenswrapper[4933]: I0122 06:04:06.357719 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-shsz5\" (UniqueName: \"kubernetes.io/projected/9445b2f3-83ea-4e79-8312-ceffa2208f77-kube-api-access-shsz5\") pod \"ovn-northd-0\" (UID: \"9445b2f3-83ea-4e79-8312-ceffa2208f77\") " pod="openstack/ovn-northd-0" Jan 22 06:04:06 crc kubenswrapper[4933]: I0122 06:04:06.357815 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9445b2f3-83ea-4e79-8312-ceffa2208f77-scripts\") pod \"ovn-northd-0\" (UID: \"9445b2f3-83ea-4e79-8312-ceffa2208f77\") " pod="openstack/ovn-northd-0" Jan 22 06:04:06 crc kubenswrapper[4933]: I0122 06:04:06.357901 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/9445b2f3-83ea-4e79-8312-ceffa2208f77-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"9445b2f3-83ea-4e79-8312-ceffa2208f77\") " pod="openstack/ovn-northd-0" Jan 22 06:04:06 crc kubenswrapper[4933]: I0122 06:04:06.357999 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9445b2f3-83ea-4e79-8312-ceffa2208f77-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"9445b2f3-83ea-4e79-8312-ceffa2208f77\") " pod="openstack/ovn-northd-0" Jan 22 06:04:06 crc kubenswrapper[4933]: I0122 06:04:06.358085 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9445b2f3-83ea-4e79-8312-ceffa2208f77-config\") pod \"ovn-northd-0\" (UID: \"9445b2f3-83ea-4e79-8312-ceffa2208f77\") " pod="openstack/ovn-northd-0" Jan 22 06:04:06 crc kubenswrapper[4933]: I0122 06:04:06.358160 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/9445b2f3-83ea-4e79-8312-ceffa2208f77-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"9445b2f3-83ea-4e79-8312-ceffa2208f77\") " pod="openstack/ovn-northd-0" Jan 22 06:04:06 crc kubenswrapper[4933]: I0122 06:04:06.359249 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9445b2f3-83ea-4e79-8312-ceffa2208f77-scripts\") pod \"ovn-northd-0\" (UID: \"9445b2f3-83ea-4e79-8312-ceffa2208f77\") " pod="openstack/ovn-northd-0" Jan 22 06:04:06 crc kubenswrapper[4933]: I0122 06:04:06.359271 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/9445b2f3-83ea-4e79-8312-ceffa2208f77-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"9445b2f3-83ea-4e79-8312-ceffa2208f77\") " pod="openstack/ovn-northd-0" Jan 22 06:04:06 crc kubenswrapper[4933]: I0122 
06:04:06.359611 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9445b2f3-83ea-4e79-8312-ceffa2208f77-config\") pod \"ovn-northd-0\" (UID: \"9445b2f3-83ea-4e79-8312-ceffa2208f77\") " pod="openstack/ovn-northd-0" Jan 22 06:04:06 crc kubenswrapper[4933]: I0122 06:04:06.362787 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9445b2f3-83ea-4e79-8312-ceffa2208f77-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"9445b2f3-83ea-4e79-8312-ceffa2208f77\") " pod="openstack/ovn-northd-0" Jan 22 06:04:06 crc kubenswrapper[4933]: I0122 06:04:06.362793 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/9445b2f3-83ea-4e79-8312-ceffa2208f77-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"9445b2f3-83ea-4e79-8312-ceffa2208f77\") " pod="openstack/ovn-northd-0" Jan 22 06:04:06 crc kubenswrapper[4933]: I0122 06:04:06.363314 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9445b2f3-83ea-4e79-8312-ceffa2208f77-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"9445b2f3-83ea-4e79-8312-ceffa2208f77\") " pod="openstack/ovn-northd-0" Jan 22 06:04:06 crc kubenswrapper[4933]: I0122 06:04:06.379166 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-shsz5\" (UniqueName: \"kubernetes.io/projected/9445b2f3-83ea-4e79-8312-ceffa2208f77-kube-api-access-shsz5\") pod \"ovn-northd-0\" (UID: \"9445b2f3-83ea-4e79-8312-ceffa2208f77\") " pod="openstack/ovn-northd-0" Jan 22 06:04:06 crc kubenswrapper[4933]: I0122 06:04:06.494791 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Jan 22 06:04:07 crc kubenswrapper[4933]: I0122 06:04:07.011459 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Jan 22 06:04:07 crc kubenswrapper[4933]: I0122 06:04:07.531027 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Jan 22 06:04:07 crc kubenswrapper[4933]: I0122 06:04:07.531303 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Jan 22 06:04:07 crc kubenswrapper[4933]: I0122 06:04:07.748977 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Jan 22 06:04:07 crc kubenswrapper[4933]: I0122 06:04:07.993234 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"9445b2f3-83ea-4e79-8312-ceffa2208f77","Type":"ContainerStarted","Data":"a02dd2e2dbbd38834a52749a21bc07d14bccdd49bfd8071c336dac65fb9b12f4"} Jan 22 06:04:08 crc kubenswrapper[4933]: E0122 06:04:08.236959 4933 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.163:55584->38.102.83.163:45627: write tcp 38.102.83.163:55584->38.102.83.163:45627: write: broken pipe Jan 22 06:04:08 crc kubenswrapper[4933]: I0122 06:04:08.847264 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-757dc6fff9-gl29p" Jan 22 06:04:08 crc kubenswrapper[4933]: I0122 06:04:08.905306 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-794868bd45-bxqnt"] Jan 22 06:04:08 crc kubenswrapper[4933]: I0122 06:04:08.905806 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-794868bd45-bxqnt" podUID="27008f77-8d32-4eab-9051-2c22ac2655b6" containerName="dnsmasq-dns" containerID="cri-o://dffb74aafb35e2991d47a704b2035461b8ce0a8b71356eec82ee84a839eb8778" gracePeriod=10 Jan 22 06:04:09 crc kubenswrapper[4933]: I0122 06:04:09.004104 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"9445b2f3-83ea-4e79-8312-ceffa2208f77","Type":"ContainerStarted","Data":"bb6a7eef37a247e6e0eaaa17e2b10e4afc7b1b1032f493709b0fed75abf884df"} Jan 22 06:04:09 crc kubenswrapper[4933]: I0122 06:04:09.004159 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"9445b2f3-83ea-4e79-8312-ceffa2208f77","Type":"ContainerStarted","Data":"480d1b66c4dcca2c067fc5041e62b5b6640e85f510d6d67079fc834d3bbb885a"} Jan 22 06:04:09 crc kubenswrapper[4933]: I0122 06:04:09.004409 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Jan 22 06:04:09 crc kubenswrapper[4933]: I0122 06:04:09.043922 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.008319824 podStartE2EDuration="3.043902796s" podCreationTimestamp="2026-01-22 06:04:06 +0000 UTC" firstStartedPulling="2026-01-22 06:04:07.008837761 +0000 UTC m=+1094.845963114" lastFinishedPulling="2026-01-22 06:04:08.044420733 +0000 UTC m=+1095.881546086" observedRunningTime="2026-01-22 06:04:09.042497771 +0000 UTC m=+1096.879623154" watchObservedRunningTime="2026-01-22 06:04:09.043902796 +0000 UTC m=+1096.881028149" Jan 22 06:04:09 crc kubenswrapper[4933]: I0122 06:04:09.444963 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-794868bd45-bxqnt" Jan 22 06:04:09 crc kubenswrapper[4933]: I0122 06:04:09.612699 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/27008f77-8d32-4eab-9051-2c22ac2655b6-config\") pod \"27008f77-8d32-4eab-9051-2c22ac2655b6\" (UID: \"27008f77-8d32-4eab-9051-2c22ac2655b6\") " Jan 22 06:04:09 crc kubenswrapper[4933]: I0122 06:04:09.612779 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sfv9g\" (UniqueName: \"kubernetes.io/projected/27008f77-8d32-4eab-9051-2c22ac2655b6-kube-api-access-sfv9g\") pod \"27008f77-8d32-4eab-9051-2c22ac2655b6\" (UID: \"27008f77-8d32-4eab-9051-2c22ac2655b6\") " Jan 22 06:04:09 crc kubenswrapper[4933]: I0122 06:04:09.612971 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/27008f77-8d32-4eab-9051-2c22ac2655b6-ovsdbserver-sb\") pod \"27008f77-8d32-4eab-9051-2c22ac2655b6\" (UID: \"27008f77-8d32-4eab-9051-2c22ac2655b6\") " Jan 22 06:04:09 crc kubenswrapper[4933]: I0122 06:04:09.613109 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/27008f77-8d32-4eab-9051-2c22ac2655b6-dns-svc\") pod \"27008f77-8d32-4eab-9051-2c22ac2655b6\" (UID: \"27008f77-8d32-4eab-9051-2c22ac2655b6\") " Jan 22 06:04:09 crc kubenswrapper[4933]: I0122 06:04:09.621655 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/27008f77-8d32-4eab-9051-2c22ac2655b6-kube-api-access-sfv9g" (OuterVolumeSpecName: "kube-api-access-sfv9g") pod "27008f77-8d32-4eab-9051-2c22ac2655b6" (UID: "27008f77-8d32-4eab-9051-2c22ac2655b6"). InnerVolumeSpecName "kube-api-access-sfv9g". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:04:09 crc kubenswrapper[4933]: I0122 06:04:09.663967 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/27008f77-8d32-4eab-9051-2c22ac2655b6-config" (OuterVolumeSpecName: "config") pod "27008f77-8d32-4eab-9051-2c22ac2655b6" (UID: "27008f77-8d32-4eab-9051-2c22ac2655b6"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:04:09 crc kubenswrapper[4933]: E0122 06:04:09.688119 4933 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.163:55596->38.102.83.163:45627: write tcp 38.102.83.163:55596->38.102.83.163:45627: write: broken pipe Jan 22 06:04:09 crc kubenswrapper[4933]: I0122 06:04:09.713887 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/27008f77-8d32-4eab-9051-2c22ac2655b6-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "27008f77-8d32-4eab-9051-2c22ac2655b6" (UID: "27008f77-8d32-4eab-9051-2c22ac2655b6"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:04:09 crc kubenswrapper[4933]: I0122 06:04:09.714906 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/27008f77-8d32-4eab-9051-2c22ac2655b6-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 06:04:09 crc kubenswrapper[4933]: I0122 06:04:09.714941 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/27008f77-8d32-4eab-9051-2c22ac2655b6-config\") on node \"crc\" DevicePath \"\"" Jan 22 06:04:09 crc kubenswrapper[4933]: I0122 06:04:09.714951 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sfv9g\" (UniqueName: \"kubernetes.io/projected/27008f77-8d32-4eab-9051-2c22ac2655b6-kube-api-access-sfv9g\") on node \"crc\" DevicePath \"\"" Jan 22 06:04:09 crc kubenswrapper[4933]: I0122 06:04:09.731511 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/27008f77-8d32-4eab-9051-2c22ac2655b6-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "27008f77-8d32-4eab-9051-2c22ac2655b6" (UID: "27008f77-8d32-4eab-9051-2c22ac2655b6"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:04:09 crc kubenswrapper[4933]: I0122 06:04:09.795902 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 22 06:04:09 crc kubenswrapper[4933]: I0122 06:04:09.809989 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6cb545bd4c-r4w7m"] Jan 22 06:04:09 crc kubenswrapper[4933]: E0122 06:04:09.810330 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27008f77-8d32-4eab-9051-2c22ac2655b6" containerName="init" Jan 22 06:04:09 crc kubenswrapper[4933]: I0122 06:04:09.810348 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="27008f77-8d32-4eab-9051-2c22ac2655b6" containerName="init" Jan 22 06:04:09 crc kubenswrapper[4933]: E0122 06:04:09.810372 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27008f77-8d32-4eab-9051-2c22ac2655b6" containerName="dnsmasq-dns" Jan 22 06:04:09 crc kubenswrapper[4933]: I0122 06:04:09.810379 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="27008f77-8d32-4eab-9051-2c22ac2655b6" containerName="dnsmasq-dns" Jan 22 06:04:09 crc kubenswrapper[4933]: I0122 06:04:09.810525 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="27008f77-8d32-4eab-9051-2c22ac2655b6" containerName="dnsmasq-dns" Jan 22 06:04:09 crc kubenswrapper[4933]: I0122 06:04:09.811348 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6cb545bd4c-r4w7m" Jan 22 06:04:09 crc kubenswrapper[4933]: I0122 06:04:09.815999 4933 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/27008f77-8d32-4eab-9051-2c22ac2655b6-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 06:04:09 crc kubenswrapper[4933]: I0122 06:04:09.818685 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6cb545bd4c-r4w7m"] Jan 22 06:04:09 crc kubenswrapper[4933]: I0122 06:04:09.917545 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852-dns-svc\") pod \"dnsmasq-dns-6cb545bd4c-r4w7m\" (UID: \"a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852\") " pod="openstack/dnsmasq-dns-6cb545bd4c-r4w7m" Jan 22 06:04:09 crc kubenswrapper[4933]: I0122 06:04:09.917661 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-thbl5\" (UniqueName: \"kubernetes.io/projected/a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852-kube-api-access-thbl5\") pod \"dnsmasq-dns-6cb545bd4c-r4w7m\" (UID: \"a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852\") " pod="openstack/dnsmasq-dns-6cb545bd4c-r4w7m" Jan 22 06:04:09 crc kubenswrapper[4933]: I0122 06:04:09.917866 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852-config\") pod \"dnsmasq-dns-6cb545bd4c-r4w7m\" (UID: \"a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852\") " pod="openstack/dnsmasq-dns-6cb545bd4c-r4w7m" Jan 22 06:04:09 crc kubenswrapper[4933]: I0122 06:04:09.917902 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852-ovsdbserver-nb\") pod \"dnsmasq-dns-6cb545bd4c-r4w7m\" (UID: \"a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852\") " pod="openstack/dnsmasq-dns-6cb545bd4c-r4w7m" Jan 22 06:04:09 crc kubenswrapper[4933]: I0122 06:04:09.918064 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852-ovsdbserver-sb\") pod \"dnsmasq-dns-6cb545bd4c-r4w7m\" (UID: \"a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852\") " pod="openstack/dnsmasq-dns-6cb545bd4c-r4w7m" Jan 22 06:04:10 crc kubenswrapper[4933]: I0122 06:04:10.012765 4933 generic.go:334] "Generic (PLEG): container finished" podID="27008f77-8d32-4eab-9051-2c22ac2655b6" containerID="dffb74aafb35e2991d47a704b2035461b8ce0a8b71356eec82ee84a839eb8778" exitCode=0 Jan 22 06:04:10 crc kubenswrapper[4933]: I0122 06:04:10.012822 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-794868bd45-bxqnt" Jan 22 06:04:10 crc kubenswrapper[4933]: I0122 06:04:10.012878 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-794868bd45-bxqnt" event={"ID":"27008f77-8d32-4eab-9051-2c22ac2655b6","Type":"ContainerDied","Data":"dffb74aafb35e2991d47a704b2035461b8ce0a8b71356eec82ee84a839eb8778"} Jan 22 06:04:10 crc kubenswrapper[4933]: I0122 06:04:10.012903 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-794868bd45-bxqnt" event={"ID":"27008f77-8d32-4eab-9051-2c22ac2655b6","Type":"ContainerDied","Data":"f858589a15698ad3f81fd5050d5828e03fcddff2bfbb1dccffd1770ea549f6e1"} Jan 22 06:04:10 crc kubenswrapper[4933]: I0122 06:04:10.012919 4933 scope.go:117] "RemoveContainer" containerID="dffb74aafb35e2991d47a704b2035461b8ce0a8b71356eec82ee84a839eb8778" Jan 22 06:04:10 crc kubenswrapper[4933]: I0122 06:04:10.019224 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852-config\") pod \"dnsmasq-dns-6cb545bd4c-r4w7m\" (UID: \"a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852\") " pod="openstack/dnsmasq-dns-6cb545bd4c-r4w7m" Jan 22 06:04:10 crc kubenswrapper[4933]: I0122 06:04:10.019259 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852-ovsdbserver-nb\") pod \"dnsmasq-dns-6cb545bd4c-r4w7m\" (UID: \"a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852\") " pod="openstack/dnsmasq-dns-6cb545bd4c-r4w7m" Jan 22 06:04:10 crc kubenswrapper[4933]: I0122 06:04:10.019285 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852-ovsdbserver-sb\") pod \"dnsmasq-dns-6cb545bd4c-r4w7m\" (UID: \"a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852\") " pod="openstack/dnsmasq-dns-6cb545bd4c-r4w7m" Jan 22 06:04:10 crc kubenswrapper[4933]: I0122 06:04:10.019333 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852-dns-svc\") pod \"dnsmasq-dns-6cb545bd4c-r4w7m\" (UID: \"a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852\") " pod="openstack/dnsmasq-dns-6cb545bd4c-r4w7m" Jan 22 06:04:10 crc kubenswrapper[4933]: I0122 06:04:10.019362 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-thbl5\" (UniqueName: \"kubernetes.io/projected/a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852-kube-api-access-thbl5\") pod \"dnsmasq-dns-6cb545bd4c-r4w7m\" (UID: \"a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852\") " pod="openstack/dnsmasq-dns-6cb545bd4c-r4w7m" Jan 22 06:04:10 crc kubenswrapper[4933]: I0122 06:04:10.020174 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852-config\") pod \"dnsmasq-dns-6cb545bd4c-r4w7m\" (UID: \"a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852\") " pod="openstack/dnsmasq-dns-6cb545bd4c-r4w7m" Jan 22 06:04:10 crc kubenswrapper[4933]: I0122 06:04:10.020666 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852-ovsdbserver-nb\") pod \"dnsmasq-dns-6cb545bd4c-r4w7m\" (UID: \"a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852\") " pod="openstack/dnsmasq-dns-6cb545bd4c-r4w7m" Jan 22 
06:04:10 crc kubenswrapper[4933]: I0122 06:04:10.021162 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852-ovsdbserver-sb\") pod \"dnsmasq-dns-6cb545bd4c-r4w7m\" (UID: \"a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852\") " pod="openstack/dnsmasq-dns-6cb545bd4c-r4w7m" Jan 22 06:04:10 crc kubenswrapper[4933]: I0122 06:04:10.021661 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852-dns-svc\") pod \"dnsmasq-dns-6cb545bd4c-r4w7m\" (UID: \"a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852\") " pod="openstack/dnsmasq-dns-6cb545bd4c-r4w7m" Jan 22 06:04:10 crc kubenswrapper[4933]: I0122 06:04:10.044337 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-thbl5\" (UniqueName: \"kubernetes.io/projected/a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852-kube-api-access-thbl5\") pod \"dnsmasq-dns-6cb545bd4c-r4w7m\" (UID: \"a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852\") " pod="openstack/dnsmasq-dns-6cb545bd4c-r4w7m" Jan 22 06:04:10 crc kubenswrapper[4933]: I0122 06:04:10.050411 4933 scope.go:117] "RemoveContainer" containerID="04920aa3341efb42fbb04bf075d825f992a3dc873995e1ba4c065a8f06ef201c" Jan 22 06:04:10 crc kubenswrapper[4933]: I0122 06:04:10.056399 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-794868bd45-bxqnt"] Jan 22 06:04:10 crc kubenswrapper[4933]: I0122 06:04:10.061346 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-794868bd45-bxqnt"] Jan 22 06:04:10 crc kubenswrapper[4933]: I0122 06:04:10.095916 4933 scope.go:117] "RemoveContainer" containerID="dffb74aafb35e2991d47a704b2035461b8ce0a8b71356eec82ee84a839eb8778" Jan 22 06:04:10 crc kubenswrapper[4933]: E0122 06:04:10.096309 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dffb74aafb35e2991d47a704b2035461b8ce0a8b71356eec82ee84a839eb8778\": container with ID starting with dffb74aafb35e2991d47a704b2035461b8ce0a8b71356eec82ee84a839eb8778 not found: ID does not exist" containerID="dffb74aafb35e2991d47a704b2035461b8ce0a8b71356eec82ee84a839eb8778" Jan 22 06:04:10 crc kubenswrapper[4933]: I0122 06:04:10.096349 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dffb74aafb35e2991d47a704b2035461b8ce0a8b71356eec82ee84a839eb8778"} err="failed to get container status \"dffb74aafb35e2991d47a704b2035461b8ce0a8b71356eec82ee84a839eb8778\": rpc error: code = NotFound desc = could not find container \"dffb74aafb35e2991d47a704b2035461b8ce0a8b71356eec82ee84a839eb8778\": container with ID starting with dffb74aafb35e2991d47a704b2035461b8ce0a8b71356eec82ee84a839eb8778 not found: ID does not exist" Jan 22 06:04:10 crc kubenswrapper[4933]: I0122 06:04:10.096375 4933 scope.go:117] "RemoveContainer" containerID="04920aa3341efb42fbb04bf075d825f992a3dc873995e1ba4c065a8f06ef201c" Jan 22 06:04:10 crc kubenswrapper[4933]: E0122 06:04:10.096830 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"04920aa3341efb42fbb04bf075d825f992a3dc873995e1ba4c065a8f06ef201c\": container with ID starting with 04920aa3341efb42fbb04bf075d825f992a3dc873995e1ba4c065a8f06ef201c not found: ID does not exist" containerID="04920aa3341efb42fbb04bf075d825f992a3dc873995e1ba4c065a8f06ef201c" Jan 22 06:04:10 crc 
kubenswrapper[4933]: I0122 06:04:10.096860 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"04920aa3341efb42fbb04bf075d825f992a3dc873995e1ba4c065a8f06ef201c"} err="failed to get container status \"04920aa3341efb42fbb04bf075d825f992a3dc873995e1ba4c065a8f06ef201c\": rpc error: code = NotFound desc = could not find container \"04920aa3341efb42fbb04bf075d825f992a3dc873995e1ba4c065a8f06ef201c\": container with ID starting with 04920aa3341efb42fbb04bf075d825f992a3dc873995e1ba4c065a8f06ef201c not found: ID does not exist"
Jan 22 06:04:10 crc kubenswrapper[4933]: I0122 06:04:10.127613 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0"
Jan 22 06:04:10 crc kubenswrapper[4933]: I0122 06:04:10.140210 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6cb545bd4c-r4w7m"
Jan 22 06:04:10 crc kubenswrapper[4933]: I0122 06:04:10.226805 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0"
Jan 22 06:04:10 crc kubenswrapper[4933]: I0122 06:04:10.535876 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="27008f77-8d32-4eab-9051-2c22ac2655b6" path="/var/lib/kubelet/pods/27008f77-8d32-4eab-9051-2c22ac2655b6/volumes"
Jan 22 06:04:10 crc kubenswrapper[4933]: I0122 06:04:10.623177 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6cb545bd4c-r4w7m"]
Jan 22 06:04:11 crc kubenswrapper[4933]: I0122 06:04:11.000916 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"]
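The scope.go RemoveContainer calls above fail with NotFound because those containers were already deleted along with the old dnsmasq-dns-794868bd45-bxqnt pod; the kubelet logs the gRPC error and carries on, since the end state it wanted (container gone) already holds. A minimal sketch of that NotFound-tolerant cleanup pattern, with removeContainer as a hypothetical stand-in for the CRI RemoveContainer call:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// removeContainer is a hypothetical stand-in for a CRI RemoveContainer call;
// here it always reports the container as already gone, like the entries above.
func removeContainer(id string) error {
	return status.Errorf(codes.NotFound, "could not find container %q", id)
}

// cleanup treats NotFound as success: deleting something that is already
// deleted leaves the system in the desired state, so the operation is idempotent.
func cleanup(id string) error {
	err := removeContainer(id)
	if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
		return nil
	}
	return err
}

func main() {
	fmt.Println(cleanup("dffb74aafb35e299")) // <nil>: already-gone counts as done
}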
Need to start a new one" pod="openstack/swift-storage-0" Jan 22 06:04:11 crc kubenswrapper[4933]: I0122 06:04:11.011683 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Jan 22 06:04:11 crc kubenswrapper[4933]: I0122 06:04:11.012270 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Jan 22 06:04:11 crc kubenswrapper[4933]: I0122 06:04:11.012286 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-cz5lz" Jan 22 06:04:11 crc kubenswrapper[4933]: I0122 06:04:11.018204 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Jan 22 06:04:11 crc kubenswrapper[4933]: I0122 06:04:11.022401 4933 generic.go:334] "Generic (PLEG): container finished" podID="a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852" containerID="dd212c2f54dda27e5adf5806d0bbfdd671706e39d841f72d2ebc94dbe62fba15" exitCode=0 Jan 22 06:04:11 crc kubenswrapper[4933]: I0122 06:04:11.022489 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cb545bd4c-r4w7m" event={"ID":"a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852","Type":"ContainerDied","Data":"dd212c2f54dda27e5adf5806d0bbfdd671706e39d841f72d2ebc94dbe62fba15"} Jan 22 06:04:11 crc kubenswrapper[4933]: I0122 06:04:11.022520 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cb545bd4c-r4w7m" event={"ID":"a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852","Type":"ContainerStarted","Data":"95de7478a6c34d7821ff8677a21d8dd635741436e44f393713453c39a5dd8a48"} Jan 22 06:04:11 crc kubenswrapper[4933]: I0122 06:04:11.026360 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Jan 22 06:04:11 crc kubenswrapper[4933]: I0122 06:04:11.146792 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016\") " pod="openstack/swift-storage-0" Jan 22 06:04:11 crc kubenswrapper[4933]: I0122 06:04:11.146844 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wdl2r\" (UniqueName: \"kubernetes.io/projected/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-kube-api-access-wdl2r\") pod \"swift-storage-0\" (UID: \"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016\") " pod="openstack/swift-storage-0" Jan 22 06:04:11 crc kubenswrapper[4933]: I0122 06:04:11.147088 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-cache\") pod \"swift-storage-0\" (UID: \"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016\") " pod="openstack/swift-storage-0" Jan 22 06:04:11 crc kubenswrapper[4933]: I0122 06:04:11.147133 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-lock\") pod \"swift-storage-0\" (UID: \"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016\") " pod="openstack/swift-storage-0" Jan 22 06:04:11 crc kubenswrapper[4933]: I0122 06:04:11.147270 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"swift-storage-0\" (UID: 
\"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016\") " pod="openstack/swift-storage-0" Jan 22 06:04:11 crc kubenswrapper[4933]: I0122 06:04:11.147303 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-etc-swift\") pod \"swift-storage-0\" (UID: \"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016\") " pod="openstack/swift-storage-0" Jan 22 06:04:11 crc kubenswrapper[4933]: I0122 06:04:11.248459 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-lock\") pod \"swift-storage-0\" (UID: \"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016\") " pod="openstack/swift-storage-0" Jan 22 06:04:11 crc kubenswrapper[4933]: I0122 06:04:11.248542 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"swift-storage-0\" (UID: \"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016\") " pod="openstack/swift-storage-0" Jan 22 06:04:11 crc kubenswrapper[4933]: I0122 06:04:11.248562 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-etc-swift\") pod \"swift-storage-0\" (UID: \"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016\") " pod="openstack/swift-storage-0" Jan 22 06:04:11 crc kubenswrapper[4933]: I0122 06:04:11.248619 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016\") " pod="openstack/swift-storage-0" Jan 22 06:04:11 crc kubenswrapper[4933]: I0122 06:04:11.248642 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wdl2r\" (UniqueName: \"kubernetes.io/projected/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-kube-api-access-wdl2r\") pod \"swift-storage-0\" (UID: \"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016\") " pod="openstack/swift-storage-0" Jan 22 06:04:11 crc kubenswrapper[4933]: I0122 06:04:11.248678 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-cache\") pod \"swift-storage-0\" (UID: \"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016\") " pod="openstack/swift-storage-0" Jan 22 06:04:11 crc kubenswrapper[4933]: E0122 06:04:11.248800 4933 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 22 06:04:11 crc kubenswrapper[4933]: E0122 06:04:11.248834 4933 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 22 06:04:11 crc kubenswrapper[4933]: E0122 06:04:11.248886 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-etc-swift podName:4d7c7a06-59b1-4cc5-88dd-87bc9bccd016 nodeName:}" failed. No retries permitted until 2026-01-22 06:04:11.748865355 +0000 UTC m=+1099.585990718 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-etc-swift") pod "swift-storage-0" (UID: "4d7c7a06-59b1-4cc5-88dd-87bc9bccd016") : configmap "swift-ring-files" not found Jan 22 06:04:11 crc kubenswrapper[4933]: I0122 06:04:11.248986 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-lock\") pod \"swift-storage-0\" (UID: \"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016\") " pod="openstack/swift-storage-0" Jan 22 06:04:11 crc kubenswrapper[4933]: I0122 06:04:11.249048 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-cache\") pod \"swift-storage-0\" (UID: \"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016\") " pod="openstack/swift-storage-0" Jan 22 06:04:11 crc kubenswrapper[4933]: I0122 06:04:11.249451 4933 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"swift-storage-0\" (UID: \"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/swift-storage-0" Jan 22 06:04:11 crc kubenswrapper[4933]: I0122 06:04:11.260247 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016\") " pod="openstack/swift-storage-0" Jan 22 06:04:11 crc kubenswrapper[4933]: I0122 06:04:11.264901 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wdl2r\" (UniqueName: \"kubernetes.io/projected/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-kube-api-access-wdl2r\") pod \"swift-storage-0\" (UID: \"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016\") " pod="openstack/swift-storage-0" Jan 22 06:04:11 crc kubenswrapper[4933]: I0122 06:04:11.278438 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"swift-storage-0\" (UID: \"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016\") " pod="openstack/swift-storage-0" Jan 22 06:04:11 crc kubenswrapper[4933]: I0122 06:04:11.762191 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-etc-swift\") pod \"swift-storage-0\" (UID: \"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016\") " pod="openstack/swift-storage-0" Jan 22 06:04:11 crc kubenswrapper[4933]: E0122 06:04:11.762404 4933 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 22 06:04:11 crc kubenswrapper[4933]: E0122 06:04:11.762455 4933 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 22 06:04:11 crc kubenswrapper[4933]: E0122 06:04:11.762546 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-etc-swift podName:4d7c7a06-59b1-4cc5-88dd-87bc9bccd016 nodeName:}" failed. No retries permitted until 2026-01-22 06:04:12.762513963 +0000 UTC m=+1100.599639346 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-etc-swift") pod "swift-storage-0" (UID: "4d7c7a06-59b1-4cc5-88dd-87bc9bccd016") : configmap "swift-ring-files" not found Jan 22 06:04:12 crc kubenswrapper[4933]: I0122 06:04:12.034625 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cb545bd4c-r4w7m" event={"ID":"a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852","Type":"ContainerStarted","Data":"7392029f2f82ccea4bcf90f56fd558047283f0e7a8c30fc09dbe263b2c626149"} Jan 22 06:04:12 crc kubenswrapper[4933]: I0122 06:04:12.034756 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6cb545bd4c-r4w7m" Jan 22 06:04:12 crc kubenswrapper[4933]: I0122 06:04:12.054228 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6cb545bd4c-r4w7m" podStartSLOduration=3.054208101 podStartE2EDuration="3.054208101s" podCreationTimestamp="2026-01-22 06:04:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:04:12.053049742 +0000 UTC m=+1099.890175115" watchObservedRunningTime="2026-01-22 06:04:12.054208101 +0000 UTC m=+1099.891333454" Jan 22 06:04:12 crc kubenswrapper[4933]: I0122 06:04:12.301188 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Jan 22 06:04:12 crc kubenswrapper[4933]: I0122 06:04:12.395385 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Jan 22 06:04:12 crc kubenswrapper[4933]: I0122 06:04:12.780923 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-etc-swift\") pod \"swift-storage-0\" (UID: \"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016\") " pod="openstack/swift-storage-0" Jan 22 06:04:12 crc kubenswrapper[4933]: E0122 06:04:12.781284 4933 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 22 06:04:12 crc kubenswrapper[4933]: E0122 06:04:12.781327 4933 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 22 06:04:12 crc kubenswrapper[4933]: E0122 06:04:12.781421 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-etc-swift podName:4d7c7a06-59b1-4cc5-88dd-87bc9bccd016 nodeName:}" failed. No retries permitted until 2026-01-22 06:04:14.781388659 +0000 UTC m=+1102.618514032 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-etc-swift") pod "swift-storage-0" (UID: "4d7c7a06-59b1-4cc5-88dd-87bc9bccd016") : configmap "swift-ring-files" not found
Jan 22 06:04:14 crc kubenswrapper[4933]: I0122 06:04:14.813555 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-etc-swift\") pod \"swift-storage-0\" (UID: \"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016\") " pod="openstack/swift-storage-0"
Jan 22 06:04:14 crc kubenswrapper[4933]: E0122 06:04:14.813887 4933 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Jan 22 06:04:14 crc kubenswrapper[4933]: E0122 06:04:14.814205 4933 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Jan 22 06:04:14 crc kubenswrapper[4933]: E0122 06:04:14.814312 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-etc-swift podName:4d7c7a06-59b1-4cc5-88dd-87bc9bccd016 nodeName:}" failed. No retries permitted until 2026-01-22 06:04:18.81427772 +0000 UTC m=+1106.651403113 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-etc-swift") pod "swift-storage-0" (UID: "4d7c7a06-59b1-4cc5-88dd-87bc9bccd016") : configmap "swift-ring-files" not found
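Each failed SetUp above reschedules the mount with a doubled delay: durationBeforeRetry goes 500ms, 1s, 2s, 4s, and the failures stop only once the swift-ring-files ConfigMap is published by the swift-ring-rebalance job admitted just below. A minimal sketch of that doubling backoff; the cap is an assumption for the sketch, since this excerpt only shows the first four delays:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Doubling retry delay as seen in the nestedpendingoperations entries above:
	// 500ms -> 1s -> 2s -> 4s -> ...
	delay := 500 * time.Millisecond
	const maxDelay = 32 * time.Second // assumed cap, not shown in this log
	for attempt := 1; attempt <= 6; attempt++ {
		fmt.Printf("attempt %d: no retries permitted for %v\n", attempt, delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}

Doubling keeps a missing dependency from hammering the volume reconciler on every sync while still converging quickly once the ConfigMap appears.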
Need to start a new one" pod="openstack/swift-ring-rebalance-r4zfq" Jan 22 06:04:14 crc kubenswrapper[4933]: W0122 06:04:14.956214 4933 reflector.go:561] object-"openstack"/"swift-ring-scripts": failed to list *v1.ConfigMap: configmaps "swift-ring-scripts" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openstack": no relationship found between node 'crc' and this object Jan 22 06:04:14 crc kubenswrapper[4933]: E0122 06:04:14.956266 4933 reflector.go:158] "Unhandled Error" err="object-\"openstack\"/\"swift-ring-scripts\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"swift-ring-scripts\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openstack\": no relationship found between node 'crc' and this object" logger="UnhandledError" Jan 22 06:04:14 crc kubenswrapper[4933]: W0122 06:04:14.958194 4933 reflector.go:561] object-"openstack"/"swift-proxy-config-data": failed to list *v1.Secret: secrets "swift-proxy-config-data" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openstack": no relationship found between node 'crc' and this object Jan 22 06:04:14 crc kubenswrapper[4933]: E0122 06:04:14.958238 4933 reflector.go:158] "Unhandled Error" err="object-\"openstack\"/\"swift-proxy-config-data\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"swift-proxy-config-data\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openstack\": no relationship found between node 'crc' and this object" logger="UnhandledError" Jan 22 06:04:14 crc kubenswrapper[4933]: W0122 06:04:14.958203 4933 reflector.go:561] object-"openstack"/"swift-ring-config-data": failed to list *v1.ConfigMap: configmaps "swift-ring-config-data" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openstack": no relationship found between node 'crc' and this object Jan 22 06:04:14 crc kubenswrapper[4933]: E0122 06:04:14.958270 4933 reflector.go:158] "Unhandled Error" err="object-\"openstack\"/\"swift-ring-config-data\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"swift-ring-config-data\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openstack\": no relationship found between node 'crc' and this object" logger="UnhandledError" Jan 22 06:04:14 crc kubenswrapper[4933]: I0122 06:04:14.977104 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-r4zfq"] Jan 22 06:04:15 crc kubenswrapper[4933]: I0122 06:04:15.015121 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-nz8bk"] Jan 22 06:04:15 crc kubenswrapper[4933]: I0122 06:04:15.016106 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-nz8bk" Jan 22 06:04:15 crc kubenswrapper[4933]: I0122 06:04:15.017312 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/76f30c05-b9b8-4439-aab6-b2c7e948a75f-dispersionconf\") pod \"swift-ring-rebalance-r4zfq\" (UID: \"76f30c05-b9b8-4439-aab6-b2c7e948a75f\") " pod="openstack/swift-ring-rebalance-r4zfq" Jan 22 06:04:15 crc kubenswrapper[4933]: I0122 06:04:15.017346 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/76f30c05-b9b8-4439-aab6-b2c7e948a75f-ring-data-devices\") pod \"swift-ring-rebalance-r4zfq\" (UID: \"76f30c05-b9b8-4439-aab6-b2c7e948a75f\") " pod="openstack/swift-ring-rebalance-r4zfq" Jan 22 06:04:15 crc kubenswrapper[4933]: I0122 06:04:15.017375 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vzk5x\" (UniqueName: \"kubernetes.io/projected/76f30c05-b9b8-4439-aab6-b2c7e948a75f-kube-api-access-vzk5x\") pod \"swift-ring-rebalance-r4zfq\" (UID: \"76f30c05-b9b8-4439-aab6-b2c7e948a75f\") " pod="openstack/swift-ring-rebalance-r4zfq" Jan 22 06:04:15 crc kubenswrapper[4933]: I0122 06:04:15.017390 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/76f30c05-b9b8-4439-aab6-b2c7e948a75f-scripts\") pod \"swift-ring-rebalance-r4zfq\" (UID: \"76f30c05-b9b8-4439-aab6-b2c7e948a75f\") " pod="openstack/swift-ring-rebalance-r4zfq" Jan 22 06:04:15 crc kubenswrapper[4933]: I0122 06:04:15.017428 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/76f30c05-b9b8-4439-aab6-b2c7e948a75f-swiftconf\") pod \"swift-ring-rebalance-r4zfq\" (UID: \"76f30c05-b9b8-4439-aab6-b2c7e948a75f\") " pod="openstack/swift-ring-rebalance-r4zfq" Jan 22 06:04:15 crc kubenswrapper[4933]: I0122 06:04:15.017453 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/76f30c05-b9b8-4439-aab6-b2c7e948a75f-etc-swift\") pod \"swift-ring-rebalance-r4zfq\" (UID: \"76f30c05-b9b8-4439-aab6-b2c7e948a75f\") " pod="openstack/swift-ring-rebalance-r4zfq" Jan 22 06:04:15 crc kubenswrapper[4933]: I0122 06:04:15.017487 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76f30c05-b9b8-4439-aab6-b2c7e948a75f-combined-ca-bundle\") pod \"swift-ring-rebalance-r4zfq\" (UID: \"76f30c05-b9b8-4439-aab6-b2c7e948a75f\") " pod="openstack/swift-ring-rebalance-r4zfq" Jan 22 06:04:15 crc kubenswrapper[4933]: I0122 06:04:15.021670 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret" Jan 22 06:04:15 crc kubenswrapper[4933]: I0122 06:04:15.063925 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-nz8bk"] Jan 22 06:04:15 crc kubenswrapper[4933]: I0122 06:04:15.119270 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vzk5x\" (UniqueName: \"kubernetes.io/projected/76f30c05-b9b8-4439-aab6-b2c7e948a75f-kube-api-access-vzk5x\") pod \"swift-ring-rebalance-r4zfq\" (UID: 
\"76f30c05-b9b8-4439-aab6-b2c7e948a75f\") " pod="openstack/swift-ring-rebalance-r4zfq" Jan 22 06:04:15 crc kubenswrapper[4933]: I0122 06:04:15.119318 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/76f30c05-b9b8-4439-aab6-b2c7e948a75f-scripts\") pod \"swift-ring-rebalance-r4zfq\" (UID: \"76f30c05-b9b8-4439-aab6-b2c7e948a75f\") " pod="openstack/swift-ring-rebalance-r4zfq" Jan 22 06:04:15 crc kubenswrapper[4933]: I0122 06:04:15.119370 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/76f30c05-b9b8-4439-aab6-b2c7e948a75f-swiftconf\") pod \"swift-ring-rebalance-r4zfq\" (UID: \"76f30c05-b9b8-4439-aab6-b2c7e948a75f\") " pod="openstack/swift-ring-rebalance-r4zfq" Jan 22 06:04:15 crc kubenswrapper[4933]: I0122 06:04:15.119400 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/76f30c05-b9b8-4439-aab6-b2c7e948a75f-etc-swift\") pod \"swift-ring-rebalance-r4zfq\" (UID: \"76f30c05-b9b8-4439-aab6-b2c7e948a75f\") " pod="openstack/swift-ring-rebalance-r4zfq" Jan 22 06:04:15 crc kubenswrapper[4933]: I0122 06:04:15.119463 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76f30c05-b9b8-4439-aab6-b2c7e948a75f-combined-ca-bundle\") pod \"swift-ring-rebalance-r4zfq\" (UID: \"76f30c05-b9b8-4439-aab6-b2c7e948a75f\") " pod="openstack/swift-ring-rebalance-r4zfq" Jan 22 06:04:15 crc kubenswrapper[4933]: I0122 06:04:15.119500 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9r8p4\" (UniqueName: \"kubernetes.io/projected/6d9ccac2-9b9e-4db1-b062-0ae165c0a0af-kube-api-access-9r8p4\") pod \"root-account-create-update-nz8bk\" (UID: \"6d9ccac2-9b9e-4db1-b062-0ae165c0a0af\") " pod="openstack/root-account-create-update-nz8bk" Jan 22 06:04:15 crc kubenswrapper[4933]: I0122 06:04:15.119540 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6d9ccac2-9b9e-4db1-b062-0ae165c0a0af-operator-scripts\") pod \"root-account-create-update-nz8bk\" (UID: \"6d9ccac2-9b9e-4db1-b062-0ae165c0a0af\") " pod="openstack/root-account-create-update-nz8bk" Jan 22 06:04:15 crc kubenswrapper[4933]: I0122 06:04:15.119568 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/76f30c05-b9b8-4439-aab6-b2c7e948a75f-ring-data-devices\") pod \"swift-ring-rebalance-r4zfq\" (UID: \"76f30c05-b9b8-4439-aab6-b2c7e948a75f\") " pod="openstack/swift-ring-rebalance-r4zfq" Jan 22 06:04:15 crc kubenswrapper[4933]: I0122 06:04:15.119585 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/76f30c05-b9b8-4439-aab6-b2c7e948a75f-dispersionconf\") pod \"swift-ring-rebalance-r4zfq\" (UID: \"76f30c05-b9b8-4439-aab6-b2c7e948a75f\") " pod="openstack/swift-ring-rebalance-r4zfq" Jan 22 06:04:15 crc kubenswrapper[4933]: I0122 06:04:15.119912 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/76f30c05-b9b8-4439-aab6-b2c7e948a75f-etc-swift\") pod \"swift-ring-rebalance-r4zfq\" (UID: \"76f30c05-b9b8-4439-aab6-b2c7e948a75f\") " 
pod="openstack/swift-ring-rebalance-r4zfq" Jan 22 06:04:15 crc kubenswrapper[4933]: I0122 06:04:15.127668 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76f30c05-b9b8-4439-aab6-b2c7e948a75f-combined-ca-bundle\") pod \"swift-ring-rebalance-r4zfq\" (UID: \"76f30c05-b9b8-4439-aab6-b2c7e948a75f\") " pod="openstack/swift-ring-rebalance-r4zfq" Jan 22 06:04:15 crc kubenswrapper[4933]: I0122 06:04:15.132787 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vzk5x\" (UniqueName: \"kubernetes.io/projected/76f30c05-b9b8-4439-aab6-b2c7e948a75f-kube-api-access-vzk5x\") pod \"swift-ring-rebalance-r4zfq\" (UID: \"76f30c05-b9b8-4439-aab6-b2c7e948a75f\") " pod="openstack/swift-ring-rebalance-r4zfq" Jan 22 06:04:15 crc kubenswrapper[4933]: I0122 06:04:15.133179 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/76f30c05-b9b8-4439-aab6-b2c7e948a75f-swiftconf\") pod \"swift-ring-rebalance-r4zfq\" (UID: \"76f30c05-b9b8-4439-aab6-b2c7e948a75f\") " pod="openstack/swift-ring-rebalance-r4zfq" Jan 22 06:04:15 crc kubenswrapper[4933]: I0122 06:04:15.220362 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9r8p4\" (UniqueName: \"kubernetes.io/projected/6d9ccac2-9b9e-4db1-b062-0ae165c0a0af-kube-api-access-9r8p4\") pod \"root-account-create-update-nz8bk\" (UID: \"6d9ccac2-9b9e-4db1-b062-0ae165c0a0af\") " pod="openstack/root-account-create-update-nz8bk" Jan 22 06:04:15 crc kubenswrapper[4933]: I0122 06:04:15.220465 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6d9ccac2-9b9e-4db1-b062-0ae165c0a0af-operator-scripts\") pod \"root-account-create-update-nz8bk\" (UID: \"6d9ccac2-9b9e-4db1-b062-0ae165c0a0af\") " pod="openstack/root-account-create-update-nz8bk" Jan 22 06:04:15 crc kubenswrapper[4933]: I0122 06:04:15.221282 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6d9ccac2-9b9e-4db1-b062-0ae165c0a0af-operator-scripts\") pod \"root-account-create-update-nz8bk\" (UID: \"6d9ccac2-9b9e-4db1-b062-0ae165c0a0af\") " pod="openstack/root-account-create-update-nz8bk" Jan 22 06:04:15 crc kubenswrapper[4933]: I0122 06:04:15.235306 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9r8p4\" (UniqueName: \"kubernetes.io/projected/6d9ccac2-9b9e-4db1-b062-0ae165c0a0af-kube-api-access-9r8p4\") pod \"root-account-create-update-nz8bk\" (UID: \"6d9ccac2-9b9e-4db1-b062-0ae165c0a0af\") " pod="openstack/root-account-create-update-nz8bk" Jan 22 06:04:15 crc kubenswrapper[4933]: I0122 06:04:15.360670 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-nz8bk" Jan 22 06:04:15 crc kubenswrapper[4933]: I0122 06:04:15.866285 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Jan 22 06:04:15 crc kubenswrapper[4933]: I0122 06:04:15.869839 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-nz8bk"] Jan 22 06:04:15 crc kubenswrapper[4933]: I0122 06:04:15.875566 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/76f30c05-b9b8-4439-aab6-b2c7e948a75f-dispersionconf\") pod \"swift-ring-rebalance-r4zfq\" (UID: \"76f30c05-b9b8-4439-aab6-b2c7e948a75f\") " pod="openstack/swift-ring-rebalance-r4zfq" Jan 22 06:04:15 crc kubenswrapper[4933]: I0122 06:04:15.919189 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Jan 22 06:04:15 crc kubenswrapper[4933]: I0122 06:04:15.921763 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/76f30c05-b9b8-4439-aab6-b2c7e948a75f-ring-data-devices\") pod \"swift-ring-rebalance-r4zfq\" (UID: \"76f30c05-b9b8-4439-aab6-b2c7e948a75f\") " pod="openstack/swift-ring-rebalance-r4zfq" Jan 22 06:04:16 crc kubenswrapper[4933]: I0122 06:04:16.077692 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-nz8bk" event={"ID":"6d9ccac2-9b9e-4db1-b062-0ae165c0a0af","Type":"ContainerStarted","Data":"468fc8e02b7f288a8bcc1dab89870a2b352773b266594ef342104d47cd3a421f"} Jan 22 06:04:16 crc kubenswrapper[4933]: E0122 06:04:16.119830 4933 configmap.go:193] Couldn't get configMap openstack/swift-ring-scripts: failed to sync configmap cache: timed out waiting for the condition Jan 22 06:04:16 crc kubenswrapper[4933]: E0122 06:04:16.119982 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/76f30c05-b9b8-4439-aab6-b2c7e948a75f-scripts podName:76f30c05-b9b8-4439-aab6-b2c7e948a75f nodeName:}" failed. No retries permitted until 2026-01-22 06:04:16.619931962 +0000 UTC m=+1104.457057345 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "scripts" (UniqueName: "kubernetes.io/configmap/76f30c05-b9b8-4439-aab6-b2c7e948a75f-scripts") pod "swift-ring-rebalance-r4zfq" (UID: "76f30c05-b9b8-4439-aab6-b2c7e948a75f") : failed to sync configmap cache: timed out waiting for the condition Jan 22 06:04:16 crc kubenswrapper[4933]: I0122 06:04:16.285427 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Jan 22 06:04:16 crc kubenswrapper[4933]: I0122 06:04:16.646475 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/76f30c05-b9b8-4439-aab6-b2c7e948a75f-scripts\") pod \"swift-ring-rebalance-r4zfq\" (UID: \"76f30c05-b9b8-4439-aab6-b2c7e948a75f\") " pod="openstack/swift-ring-rebalance-r4zfq" Jan 22 06:04:16 crc kubenswrapper[4933]: I0122 06:04:16.647172 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/76f30c05-b9b8-4439-aab6-b2c7e948a75f-scripts\") pod \"swift-ring-rebalance-r4zfq\" (UID: \"76f30c05-b9b8-4439-aab6-b2c7e948a75f\") " pod="openstack/swift-ring-rebalance-r4zfq" Jan 22 06:04:16 crc kubenswrapper[4933]: I0122 06:04:16.786257 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-r4zfq" Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.092184 4933 generic.go:334] "Generic (PLEG): container finished" podID="6d9ccac2-9b9e-4db1-b062-0ae165c0a0af" containerID="421e0d822af799084d1641f728bef335a9738d0d9d662fea4443a2ec830d3aa6" exitCode=0 Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.092523 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-nz8bk" event={"ID":"6d9ccac2-9b9e-4db1-b062-0ae165c0a0af","Type":"ContainerDied","Data":"421e0d822af799084d1641f728bef335a9738d0d9d662fea4443a2ec830d3aa6"} Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.356050 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-r4zfq"] Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.373688 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-7wf66"] Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.392217 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-7wf66" Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.402858 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-7wf66"] Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.451638 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-dc32-account-create-update-nhclt"] Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.452966 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-dc32-account-create-update-nhclt" Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.456533 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.472799 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-dc32-account-create-update-nhclt"] Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.475818 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd2884cc-3d1a-4865-a232-16c459d0372d-operator-scripts\") pod \"keystone-db-create-7wf66\" (UID: \"bd2884cc-3d1a-4865-a232-16c459d0372d\") " pod="openstack/keystone-db-create-7wf66" Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.475890 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cr87l\" (UniqueName: \"kubernetes.io/projected/bd2884cc-3d1a-4865-a232-16c459d0372d-kube-api-access-cr87l\") pod \"keystone-db-create-7wf66\" (UID: \"bd2884cc-3d1a-4865-a232-16c459d0372d\") " pod="openstack/keystone-db-create-7wf66" Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.577621 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cr87l\" (UniqueName: \"kubernetes.io/projected/bd2884cc-3d1a-4865-a232-16c459d0372d-kube-api-access-cr87l\") pod \"keystone-db-create-7wf66\" (UID: \"bd2884cc-3d1a-4865-a232-16c459d0372d\") " pod="openstack/keystone-db-create-7wf66" Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.577669 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tqfh6\" (UniqueName: \"kubernetes.io/projected/eb88ea31-d557-48b6-82b9-7e3843f9935c-kube-api-access-tqfh6\") pod 
\"keystone-dc32-account-create-update-nhclt\" (UID: \"eb88ea31-d557-48b6-82b9-7e3843f9935c\") " pod="openstack/keystone-dc32-account-create-update-nhclt" Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.578120 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eb88ea31-d557-48b6-82b9-7e3843f9935c-operator-scripts\") pod \"keystone-dc32-account-create-update-nhclt\" (UID: \"eb88ea31-d557-48b6-82b9-7e3843f9935c\") " pod="openstack/keystone-dc32-account-create-update-nhclt" Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.578211 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd2884cc-3d1a-4865-a232-16c459d0372d-operator-scripts\") pod \"keystone-db-create-7wf66\" (UID: \"bd2884cc-3d1a-4865-a232-16c459d0372d\") " pod="openstack/keystone-db-create-7wf66" Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.578963 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd2884cc-3d1a-4865-a232-16c459d0372d-operator-scripts\") pod \"keystone-db-create-7wf66\" (UID: \"bd2884cc-3d1a-4865-a232-16c459d0372d\") " pod="openstack/keystone-db-create-7wf66" Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.608589 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cr87l\" (UniqueName: \"kubernetes.io/projected/bd2884cc-3d1a-4865-a232-16c459d0372d-kube-api-access-cr87l\") pod \"keystone-db-create-7wf66\" (UID: \"bd2884cc-3d1a-4865-a232-16c459d0372d\") " pod="openstack/keystone-db-create-7wf66" Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.641273 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-brkgn"] Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.642566 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-brkgn" Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.650024 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-brkgn"] Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.679110 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tqfh6\" (UniqueName: \"kubernetes.io/projected/eb88ea31-d557-48b6-82b9-7e3843f9935c-kube-api-access-tqfh6\") pod \"keystone-dc32-account-create-update-nhclt\" (UID: \"eb88ea31-d557-48b6-82b9-7e3843f9935c\") " pod="openstack/keystone-dc32-account-create-update-nhclt" Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.679242 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eb88ea31-d557-48b6-82b9-7e3843f9935c-operator-scripts\") pod \"keystone-dc32-account-create-update-nhclt\" (UID: \"eb88ea31-d557-48b6-82b9-7e3843f9935c\") " pod="openstack/keystone-dc32-account-create-update-nhclt" Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.679995 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eb88ea31-d557-48b6-82b9-7e3843f9935c-operator-scripts\") pod \"keystone-dc32-account-create-update-nhclt\" (UID: \"eb88ea31-d557-48b6-82b9-7e3843f9935c\") " pod="openstack/keystone-dc32-account-create-update-nhclt" Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.694326 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tqfh6\" (UniqueName: \"kubernetes.io/projected/eb88ea31-d557-48b6-82b9-7e3843f9935c-kube-api-access-tqfh6\") pod \"keystone-dc32-account-create-update-nhclt\" (UID: \"eb88ea31-d557-48b6-82b9-7e3843f9935c\") " pod="openstack/keystone-dc32-account-create-update-nhclt" Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.739674 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-7wf66" Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.749207 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-9002-account-create-update-l62qv"] Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.750392 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-9002-account-create-update-l62qv" Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.751955 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.763354 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-9002-account-create-update-l62qv"] Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.770800 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-dc32-account-create-update-nhclt" Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.781843 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/10081d7b-4a85-4d4a-8b06-d12d59eac231-operator-scripts\") pod \"placement-db-create-brkgn\" (UID: \"10081d7b-4a85-4d4a-8b06-d12d59eac231\") " pod="openstack/placement-db-create-brkgn" Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.781965 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hq7v4\" (UniqueName: \"kubernetes.io/projected/10081d7b-4a85-4d4a-8b06-d12d59eac231-kube-api-access-hq7v4\") pod \"placement-db-create-brkgn\" (UID: \"10081d7b-4a85-4d4a-8b06-d12d59eac231\") " pod="openstack/placement-db-create-brkgn" Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.878630 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-2tsdt"] Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.880215 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-2tsdt" Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.883328 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hq7v4\" (UniqueName: \"kubernetes.io/projected/10081d7b-4a85-4d4a-8b06-d12d59eac231-kube-api-access-hq7v4\") pod \"placement-db-create-brkgn\" (UID: \"10081d7b-4a85-4d4a-8b06-d12d59eac231\") " pod="openstack/placement-db-create-brkgn" Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.883448 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/10081d7b-4a85-4d4a-8b06-d12d59eac231-operator-scripts\") pod \"placement-db-create-brkgn\" (UID: \"10081d7b-4a85-4d4a-8b06-d12d59eac231\") " pod="openstack/placement-db-create-brkgn" Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.883473 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hn2l6\" (UniqueName: \"kubernetes.io/projected/6242cc75-a86f-488b-bf60-47cc855a330c-kube-api-access-hn2l6\") pod \"placement-9002-account-create-update-l62qv\" (UID: \"6242cc75-a86f-488b-bf60-47cc855a330c\") " pod="openstack/placement-9002-account-create-update-l62qv" Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.883514 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6242cc75-a86f-488b-bf60-47cc855a330c-operator-scripts\") pod \"placement-9002-account-create-update-l62qv\" (UID: \"6242cc75-a86f-488b-bf60-47cc855a330c\") " pod="openstack/placement-9002-account-create-update-l62qv" Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.884879 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/10081d7b-4a85-4d4a-8b06-d12d59eac231-operator-scripts\") pod \"placement-db-create-brkgn\" (UID: \"10081d7b-4a85-4d4a-8b06-d12d59eac231\") " pod="openstack/placement-db-create-brkgn" Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.892982 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-2tsdt"] Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.905188 4933 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-hq7v4\" (UniqueName: \"kubernetes.io/projected/10081d7b-4a85-4d4a-8b06-d12d59eac231-kube-api-access-hq7v4\") pod \"placement-db-create-brkgn\" (UID: \"10081d7b-4a85-4d4a-8b06-d12d59eac231\") " pod="openstack/placement-db-create-brkgn" Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.970877 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-f2b9-account-create-update-rhljf"] Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.972037 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-f2b9-account-create-update-rhljf" Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.976425 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.978322 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-f2b9-account-create-update-rhljf"] Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.985124 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/878b8e95-58ae-4e22-82c2-14e2d85a230c-operator-scripts\") pod \"glance-db-create-2tsdt\" (UID: \"878b8e95-58ae-4e22-82c2-14e2d85a230c\") " pod="openstack/glance-db-create-2tsdt" Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.985217 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hn2l6\" (UniqueName: \"kubernetes.io/projected/6242cc75-a86f-488b-bf60-47cc855a330c-kube-api-access-hn2l6\") pod \"placement-9002-account-create-update-l62qv\" (UID: \"6242cc75-a86f-488b-bf60-47cc855a330c\") " pod="openstack/placement-9002-account-create-update-l62qv" Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.985584 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-brkgn" Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.986040 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6242cc75-a86f-488b-bf60-47cc855a330c-operator-scripts\") pod \"placement-9002-account-create-update-l62qv\" (UID: \"6242cc75-a86f-488b-bf60-47cc855a330c\") " pod="openstack/placement-9002-account-create-update-l62qv" Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.986166 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kzpf2\" (UniqueName: \"kubernetes.io/projected/878b8e95-58ae-4e22-82c2-14e2d85a230c-kube-api-access-kzpf2\") pod \"glance-db-create-2tsdt\" (UID: \"878b8e95-58ae-4e22-82c2-14e2d85a230c\") " pod="openstack/glance-db-create-2tsdt" Jan 22 06:04:17 crc kubenswrapper[4933]: I0122 06:04:17.988401 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6242cc75-a86f-488b-bf60-47cc855a330c-operator-scripts\") pod \"placement-9002-account-create-update-l62qv\" (UID: \"6242cc75-a86f-488b-bf60-47cc855a330c\") " pod="openstack/placement-9002-account-create-update-l62qv" Jan 22 06:04:18 crc kubenswrapper[4933]: I0122 06:04:18.003681 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hn2l6\" (UniqueName: \"kubernetes.io/projected/6242cc75-a86f-488b-bf60-47cc855a330c-kube-api-access-hn2l6\") pod \"placement-9002-account-create-update-l62qv\" (UID: \"6242cc75-a86f-488b-bf60-47cc855a330c\") " pod="openstack/placement-9002-account-create-update-l62qv" Jan 22 06:04:18 crc kubenswrapper[4933]: I0122 06:04:18.087912 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/878b8e95-58ae-4e22-82c2-14e2d85a230c-operator-scripts\") pod \"glance-db-create-2tsdt\" (UID: \"878b8e95-58ae-4e22-82c2-14e2d85a230c\") " pod="openstack/glance-db-create-2tsdt" Jan 22 06:04:18 crc kubenswrapper[4933]: I0122 06:04:18.087982 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qpnjv\" (UniqueName: \"kubernetes.io/projected/b9516c75-f573-46d8-b9b1-036db5eee52f-kube-api-access-qpnjv\") pod \"glance-f2b9-account-create-update-rhljf\" (UID: \"b9516c75-f573-46d8-b9b1-036db5eee52f\") " pod="openstack/glance-f2b9-account-create-update-rhljf" Jan 22 06:04:18 crc kubenswrapper[4933]: I0122 06:04:18.088102 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b9516c75-f573-46d8-b9b1-036db5eee52f-operator-scripts\") pod \"glance-f2b9-account-create-update-rhljf\" (UID: \"b9516c75-f573-46d8-b9b1-036db5eee52f\") " pod="openstack/glance-f2b9-account-create-update-rhljf" Jan 22 06:04:18 crc kubenswrapper[4933]: I0122 06:04:18.088180 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kzpf2\" (UniqueName: \"kubernetes.io/projected/878b8e95-58ae-4e22-82c2-14e2d85a230c-kube-api-access-kzpf2\") pod \"glance-db-create-2tsdt\" (UID: \"878b8e95-58ae-4e22-82c2-14e2d85a230c\") " pod="openstack/glance-db-create-2tsdt" Jan 22 06:04:18 crc kubenswrapper[4933]: I0122 06:04:18.089292 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/878b8e95-58ae-4e22-82c2-14e2d85a230c-operator-scripts\") pod \"glance-db-create-2tsdt\" (UID: \"878b8e95-58ae-4e22-82c2-14e2d85a230c\") " pod="openstack/glance-db-create-2tsdt" Jan 22 06:04:18 crc kubenswrapper[4933]: I0122 06:04:18.104091 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kzpf2\" (UniqueName: \"kubernetes.io/projected/878b8e95-58ae-4e22-82c2-14e2d85a230c-kube-api-access-kzpf2\") pod \"glance-db-create-2tsdt\" (UID: \"878b8e95-58ae-4e22-82c2-14e2d85a230c\") " pod="openstack/glance-db-create-2tsdt" Jan 22 06:04:18 crc kubenswrapper[4933]: I0122 06:04:18.105885 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-r4zfq" event={"ID":"76f30c05-b9b8-4439-aab6-b2c7e948a75f","Type":"ContainerStarted","Data":"072e256824ac03cfa7a9c1168eeda1a49c5b4cafdbcf2b56c80005d7c3b9e637"} Jan 22 06:04:18 crc kubenswrapper[4933]: I0122 06:04:18.140896 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-9002-account-create-update-l62qv" Jan 22 06:04:18 crc kubenswrapper[4933]: I0122 06:04:18.199455 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b9516c75-f573-46d8-b9b1-036db5eee52f-operator-scripts\") pod \"glance-f2b9-account-create-update-rhljf\" (UID: \"b9516c75-f573-46d8-b9b1-036db5eee52f\") " pod="openstack/glance-f2b9-account-create-update-rhljf" Jan 22 06:04:18 crc kubenswrapper[4933]: I0122 06:04:18.199571 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qpnjv\" (UniqueName: \"kubernetes.io/projected/b9516c75-f573-46d8-b9b1-036db5eee52f-kube-api-access-qpnjv\") pod \"glance-f2b9-account-create-update-rhljf\" (UID: \"b9516c75-f573-46d8-b9b1-036db5eee52f\") " pod="openstack/glance-f2b9-account-create-update-rhljf" Jan 22 06:04:18 crc kubenswrapper[4933]: I0122 06:04:18.201750 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b9516c75-f573-46d8-b9b1-036db5eee52f-operator-scripts\") pod \"glance-f2b9-account-create-update-rhljf\" (UID: \"b9516c75-f573-46d8-b9b1-036db5eee52f\") " pod="openstack/glance-f2b9-account-create-update-rhljf" Jan 22 06:04:18 crc kubenswrapper[4933]: I0122 06:04:18.207008 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-2tsdt" Jan 22 06:04:18 crc kubenswrapper[4933]: I0122 06:04:18.218405 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qpnjv\" (UniqueName: \"kubernetes.io/projected/b9516c75-f573-46d8-b9b1-036db5eee52f-kube-api-access-qpnjv\") pod \"glance-f2b9-account-create-update-rhljf\" (UID: \"b9516c75-f573-46d8-b9b1-036db5eee52f\") " pod="openstack/glance-f2b9-account-create-update-rhljf" Jan 22 06:04:18 crc kubenswrapper[4933]: I0122 06:04:18.228395 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-7wf66"] Jan 22 06:04:18 crc kubenswrapper[4933]: W0122 06:04:18.244888 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbd2884cc_3d1a_4865_a232_16c459d0372d.slice/crio-cd116f7c0e392200166f7fda4795926c037b84845ab3bf1f3e30719894ff91aa WatchSource:0}: Error finding container cd116f7c0e392200166f7fda4795926c037b84845ab3bf1f3e30719894ff91aa: Status 404 returned error can't find the container with id cd116f7c0e392200166f7fda4795926c037b84845ab3bf1f3e30719894ff91aa Jan 22 06:04:18 crc kubenswrapper[4933]: I0122 06:04:18.291654 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-f2b9-account-create-update-rhljf" Jan 22 06:04:18 crc kubenswrapper[4933]: I0122 06:04:18.360677 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-dc32-account-create-update-nhclt"] Jan 22 06:04:18 crc kubenswrapper[4933]: I0122 06:04:18.576369 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-brkgn"] Jan 22 06:04:18 crc kubenswrapper[4933]: W0122 06:04:18.674661 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod10081d7b_4a85_4d4a_8b06_d12d59eac231.slice/crio-a7bb710dfe0b5c8dc16c8792bbddd130f5784748aa9dd67af8950645f50aa97a WatchSource:0}: Error finding container a7bb710dfe0b5c8dc16c8792bbddd130f5784748aa9dd67af8950645f50aa97a: Status 404 returned error can't find the container with id a7bb710dfe0b5c8dc16c8792bbddd130f5784748aa9dd67af8950645f50aa97a Jan 22 06:04:18 crc kubenswrapper[4933]: I0122 06:04:18.763595 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-nz8bk" Jan 22 06:04:18 crc kubenswrapper[4933]: I0122 06:04:18.835400 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6d9ccac2-9b9e-4db1-b062-0ae165c0a0af-operator-scripts\") pod \"6d9ccac2-9b9e-4db1-b062-0ae165c0a0af\" (UID: \"6d9ccac2-9b9e-4db1-b062-0ae165c0a0af\") " Jan 22 06:04:18 crc kubenswrapper[4933]: I0122 06:04:18.836357 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9r8p4\" (UniqueName: \"kubernetes.io/projected/6d9ccac2-9b9e-4db1-b062-0ae165c0a0af-kube-api-access-9r8p4\") pod \"6d9ccac2-9b9e-4db1-b062-0ae165c0a0af\" (UID: \"6d9ccac2-9b9e-4db1-b062-0ae165c0a0af\") " Jan 22 06:04:18 crc kubenswrapper[4933]: I0122 06:04:18.836859 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6d9ccac2-9b9e-4db1-b062-0ae165c0a0af-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6d9ccac2-9b9e-4db1-b062-0ae165c0a0af" (UID: "6d9ccac2-9b9e-4db1-b062-0ae165c0a0af"). 
InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:04:18 crc kubenswrapper[4933]: I0122 06:04:18.839016 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-etc-swift\") pod \"swift-storage-0\" (UID: \"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016\") " pod="openstack/swift-storage-0" Jan 22 06:04:18 crc kubenswrapper[4933]: E0122 06:04:18.839301 4933 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 22 06:04:18 crc kubenswrapper[4933]: E0122 06:04:18.839339 4933 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 22 06:04:18 crc kubenswrapper[4933]: E0122 06:04:18.839429 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-etc-swift podName:4d7c7a06-59b1-4cc5-88dd-87bc9bccd016 nodeName:}" failed. No retries permitted until 2026-01-22 06:04:26.83940347 +0000 UTC m=+1114.676528823 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-etc-swift") pod "swift-storage-0" (UID: "4d7c7a06-59b1-4cc5-88dd-87bc9bccd016") : configmap "swift-ring-files" not found Jan 22 06:04:18 crc kubenswrapper[4933]: I0122 06:04:18.839533 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6d9ccac2-9b9e-4db1-b062-0ae165c0a0af-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:04:18 crc kubenswrapper[4933]: I0122 06:04:18.842690 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d9ccac2-9b9e-4db1-b062-0ae165c0a0af-kube-api-access-9r8p4" (OuterVolumeSpecName: "kube-api-access-9r8p4") pod "6d9ccac2-9b9e-4db1-b062-0ae165c0a0af" (UID: "6d9ccac2-9b9e-4db1-b062-0ae165c0a0af"). InnerVolumeSpecName "kube-api-access-9r8p4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:04:18 crc kubenswrapper[4933]: I0122 06:04:18.843631 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-9002-account-create-update-l62qv"] Jan 22 06:04:18 crc kubenswrapper[4933]: I0122 06:04:18.943791 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9r8p4\" (UniqueName: \"kubernetes.io/projected/6d9ccac2-9b9e-4db1-b062-0ae165c0a0af-kube-api-access-9r8p4\") on node \"crc\" DevicePath \"\"" Jan 22 06:04:18 crc kubenswrapper[4933]: I0122 06:04:18.955180 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-f2b9-account-create-update-rhljf"] Jan 22 06:04:18 crc kubenswrapper[4933]: I0122 06:04:18.964859 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-2tsdt"] Jan 22 06:04:18 crc kubenswrapper[4933]: W0122 06:04:18.969657 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod878b8e95_58ae_4e22_82c2_14e2d85a230c.slice/crio-1eddcf004a58a9d1f029f4c7cb7bd13cc482d06ef274bf6baac6091d90ea86ea WatchSource:0}: Error finding container 1eddcf004a58a9d1f029f4c7cb7bd13cc482d06ef274bf6baac6091d90ea86ea: Status 404 returned error can't find the container with id 1eddcf004a58a9d1f029f4c7cb7bd13cc482d06ef274bf6baac6091d90ea86ea Jan 22 06:04:18 crc kubenswrapper[4933]: W0122 06:04:18.972867 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb9516c75_f573_46d8_b9b1_036db5eee52f.slice/crio-81016717fca80d8d742500695ef94aca802ed529dcf9bd7046348c9b9f1e2c61 WatchSource:0}: Error finding container 81016717fca80d8d742500695ef94aca802ed529dcf9bd7046348c9b9f1e2c61: Status 404 returned error can't find the container with id 81016717fca80d8d742500695ef94aca802ed529dcf9bd7046348c9b9f1e2c61 Jan 22 06:04:19 crc kubenswrapper[4933]: I0122 06:04:19.112779 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-2tsdt" event={"ID":"878b8e95-58ae-4e22-82c2-14e2d85a230c","Type":"ContainerStarted","Data":"1eddcf004a58a9d1f029f4c7cb7bd13cc482d06ef274bf6baac6091d90ea86ea"} Jan 22 06:04:19 crc kubenswrapper[4933]: I0122 06:04:19.115283 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-dc32-account-create-update-nhclt" event={"ID":"eb88ea31-d557-48b6-82b9-7e3843f9935c","Type":"ContainerStarted","Data":"283201d0dac347556f10fda9e200794264538dec6a45a3058176fa979f2100c1"} Jan 22 06:04:19 crc kubenswrapper[4933]: I0122 06:04:19.116290 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-brkgn" event={"ID":"10081d7b-4a85-4d4a-8b06-d12d59eac231","Type":"ContainerStarted","Data":"a7bb710dfe0b5c8dc16c8792bbddd130f5784748aa9dd67af8950645f50aa97a"} Jan 22 06:04:19 crc kubenswrapper[4933]: I0122 06:04:19.117819 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-nz8bk" event={"ID":"6d9ccac2-9b9e-4db1-b062-0ae165c0a0af","Type":"ContainerDied","Data":"468fc8e02b7f288a8bcc1dab89870a2b352773b266594ef342104d47cd3a421f"} Jan 22 06:04:19 crc kubenswrapper[4933]: I0122 06:04:19.117839 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-nz8bk" Jan 22 06:04:19 crc kubenswrapper[4933]: I0122 06:04:19.117845 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="468fc8e02b7f288a8bcc1dab89870a2b352773b266594ef342104d47cd3a421f" Jan 22 06:04:19 crc kubenswrapper[4933]: I0122 06:04:19.119790 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-9002-account-create-update-l62qv" event={"ID":"6242cc75-a86f-488b-bf60-47cc855a330c","Type":"ContainerStarted","Data":"09abebda098214235f4824c1b9d51a0096782ceb3cf4cc1644c735bc4eb24c5d"} Jan 22 06:04:19 crc kubenswrapper[4933]: I0122 06:04:19.120870 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-f2b9-account-create-update-rhljf" event={"ID":"b9516c75-f573-46d8-b9b1-036db5eee52f","Type":"ContainerStarted","Data":"81016717fca80d8d742500695ef94aca802ed529dcf9bd7046348c9b9f1e2c61"} Jan 22 06:04:19 crc kubenswrapper[4933]: I0122 06:04:19.121694 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-7wf66" event={"ID":"bd2884cc-3d1a-4865-a232-16c459d0372d","Type":"ContainerStarted","Data":"cd116f7c0e392200166f7fda4795926c037b84845ab3bf1f3e30719894ff91aa"} Jan 22 06:04:20 crc kubenswrapper[4933]: I0122 06:04:20.131114 4933 generic.go:334] "Generic (PLEG): container finished" podID="eb88ea31-d557-48b6-82b9-7e3843f9935c" containerID="54f1e8e12f4686cbd8996c3d96c7684dac1c79b18f44175f59423edaa54dd578" exitCode=0 Jan 22 06:04:20 crc kubenswrapper[4933]: I0122 06:04:20.131316 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-dc32-account-create-update-nhclt" event={"ID":"eb88ea31-d557-48b6-82b9-7e3843f9935c","Type":"ContainerDied","Data":"54f1e8e12f4686cbd8996c3d96c7684dac1c79b18f44175f59423edaa54dd578"} Jan 22 06:04:20 crc kubenswrapper[4933]: I0122 06:04:20.133020 4933 generic.go:334] "Generic (PLEG): container finished" podID="10081d7b-4a85-4d4a-8b06-d12d59eac231" containerID="705920cc0848428f66fda9619ac71f2c3ab6f952b4da897d8d49e095b329f07e" exitCode=0 Jan 22 06:04:20 crc kubenswrapper[4933]: I0122 06:04:20.133168 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-brkgn" event={"ID":"10081d7b-4a85-4d4a-8b06-d12d59eac231","Type":"ContainerDied","Data":"705920cc0848428f66fda9619ac71f2c3ab6f952b4da897d8d49e095b329f07e"} Jan 22 06:04:20 crc kubenswrapper[4933]: I0122 06:04:20.135183 4933 generic.go:334] "Generic (PLEG): container finished" podID="6242cc75-a86f-488b-bf60-47cc855a330c" containerID="44f70ca56a2c6d50df7e87607b5f575a8b422a9efb31712590aa445609e19ded" exitCode=0 Jan 22 06:04:20 crc kubenswrapper[4933]: I0122 06:04:20.135231 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-9002-account-create-update-l62qv" event={"ID":"6242cc75-a86f-488b-bf60-47cc855a330c","Type":"ContainerDied","Data":"44f70ca56a2c6d50df7e87607b5f575a8b422a9efb31712590aa445609e19ded"} Jan 22 06:04:20 crc kubenswrapper[4933]: I0122 06:04:20.137208 4933 generic.go:334] "Generic (PLEG): container finished" podID="b9516c75-f573-46d8-b9b1-036db5eee52f" containerID="753b48659cd3b925e9189ae0fd8ddc7e983a3dbd19ced7ee6277a2f14332223f" exitCode=0 Jan 22 06:04:20 crc kubenswrapper[4933]: I0122 06:04:20.137269 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-f2b9-account-create-update-rhljf" 
event={"ID":"b9516c75-f573-46d8-b9b1-036db5eee52f","Type":"ContainerDied","Data":"753b48659cd3b925e9189ae0fd8ddc7e983a3dbd19ced7ee6277a2f14332223f"} Jan 22 06:04:20 crc kubenswrapper[4933]: I0122 06:04:20.138917 4933 generic.go:334] "Generic (PLEG): container finished" podID="bd2884cc-3d1a-4865-a232-16c459d0372d" containerID="2f98e446bcaa25591ece8baecf26888aeebdd5837b713037e026429681829807" exitCode=0 Jan 22 06:04:20 crc kubenswrapper[4933]: I0122 06:04:20.138973 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-7wf66" event={"ID":"bd2884cc-3d1a-4865-a232-16c459d0372d","Type":"ContainerDied","Data":"2f98e446bcaa25591ece8baecf26888aeebdd5837b713037e026429681829807"} Jan 22 06:04:20 crc kubenswrapper[4933]: I0122 06:04:20.140134 4933 generic.go:334] "Generic (PLEG): container finished" podID="878b8e95-58ae-4e22-82c2-14e2d85a230c" containerID="03c7665e47c0f6f18ba8efbeca0e9ebdf6cae6b198b21b1db681e429a4a20130" exitCode=0 Jan 22 06:04:20 crc kubenswrapper[4933]: I0122 06:04:20.140176 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-2tsdt" event={"ID":"878b8e95-58ae-4e22-82c2-14e2d85a230c","Type":"ContainerDied","Data":"03c7665e47c0f6f18ba8efbeca0e9ebdf6cae6b198b21b1db681e429a4a20130"} Jan 22 06:04:20 crc kubenswrapper[4933]: I0122 06:04:20.144430 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6cb545bd4c-r4w7m" Jan 22 06:04:20 crc kubenswrapper[4933]: I0122 06:04:20.252435 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-757dc6fff9-gl29p"] Jan 22 06:04:20 crc kubenswrapper[4933]: I0122 06:04:20.252751 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-757dc6fff9-gl29p" podUID="7e17f8d0-6f1d-4831-bb0e-7a8f673a080d" containerName="dnsmasq-dns" containerID="cri-o://2d7dcc55c52f0332979086d141c4ed233b35909cc999cb2c04861c8692cc503f" gracePeriod=10 Jan 22 06:04:21 crc kubenswrapper[4933]: I0122 06:04:21.153202 4933 generic.go:334] "Generic (PLEG): container finished" podID="7e17f8d0-6f1d-4831-bb0e-7a8f673a080d" containerID="2d7dcc55c52f0332979086d141c4ed233b35909cc999cb2c04861c8692cc503f" exitCode=0 Jan 22 06:04:21 crc kubenswrapper[4933]: I0122 06:04:21.154034 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757dc6fff9-gl29p" event={"ID":"7e17f8d0-6f1d-4831-bb0e-7a8f673a080d","Type":"ContainerDied","Data":"2d7dcc55c52f0332979086d141c4ed233b35909cc999cb2c04861c8692cc503f"} Jan 22 06:04:21 crc kubenswrapper[4933]: I0122 06:04:21.188439 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-nz8bk"] Jan 22 06:04:21 crc kubenswrapper[4933]: I0122 06:04:21.195825 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-nz8bk"] Jan 22 06:04:21 crc kubenswrapper[4933]: I0122 06:04:21.594755 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.181143 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-f2b9-account-create-update-rhljf" event={"ID":"b9516c75-f573-46d8-b9b1-036db5eee52f","Type":"ContainerDied","Data":"81016717fca80d8d742500695ef94aca802ed529dcf9bd7046348c9b9f1e2c61"} Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.181427 4933 pod_container_deletor.go:80] "Container not found in pod's containers" 
containerID="81016717fca80d8d742500695ef94aca802ed529dcf9bd7046348c9b9f1e2c61" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.182868 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-7wf66" event={"ID":"bd2884cc-3d1a-4865-a232-16c459d0372d","Type":"ContainerDied","Data":"cd116f7c0e392200166f7fda4795926c037b84845ab3bf1f3e30719894ff91aa"} Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.182890 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cd116f7c0e392200166f7fda4795926c037b84845ab3bf1f3e30719894ff91aa" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.184783 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-2tsdt" event={"ID":"878b8e95-58ae-4e22-82c2-14e2d85a230c","Type":"ContainerDied","Data":"1eddcf004a58a9d1f029f4c7cb7bd13cc482d06ef274bf6baac6091d90ea86ea"} Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.184800 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1eddcf004a58a9d1f029f4c7cb7bd13cc482d06ef274bf6baac6091d90ea86ea" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.185737 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-dc32-account-create-update-nhclt" event={"ID":"eb88ea31-d557-48b6-82b9-7e3843f9935c","Type":"ContainerDied","Data":"283201d0dac347556f10fda9e200794264538dec6a45a3058176fa979f2100c1"} Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.185753 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="283201d0dac347556f10fda9e200794264538dec6a45a3058176fa979f2100c1" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.186733 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-brkgn" event={"ID":"10081d7b-4a85-4d4a-8b06-d12d59eac231","Type":"ContainerDied","Data":"a7bb710dfe0b5c8dc16c8792bbddd130f5784748aa9dd67af8950645f50aa97a"} Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.186748 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a7bb710dfe0b5c8dc16c8792bbddd130f5784748aa9dd67af8950645f50aa97a" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.187680 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-9002-account-create-update-l62qv" event={"ID":"6242cc75-a86f-488b-bf60-47cc855a330c","Type":"ContainerDied","Data":"09abebda098214235f4824c1b9d51a0096782ceb3cf4cc1644c735bc4eb24c5d"} Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.187700 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="09abebda098214235f4824c1b9d51a0096782ceb3cf4cc1644c735bc4eb24c5d" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.365765 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-dc32-account-create-update-nhclt" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.391617 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-brkgn" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.401860 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-7wf66" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.426133 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-f2b9-account-create-update-rhljf" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.430984 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-2tsdt" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.447445 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-9002-account-create-update-l62qv" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.457547 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-757dc6fff9-gl29p" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.503447 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d9ccac2-9b9e-4db1-b062-0ae165c0a0af" path="/var/lib/kubelet/pods/6d9ccac2-9b9e-4db1-b062-0ae165c0a0af/volumes" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.533702 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hn2l6\" (UniqueName: \"kubernetes.io/projected/6242cc75-a86f-488b-bf60-47cc855a330c-kube-api-access-hn2l6\") pod \"6242cc75-a86f-488b-bf60-47cc855a330c\" (UID: \"6242cc75-a86f-488b-bf60-47cc855a330c\") " Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.533768 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hq7v4\" (UniqueName: \"kubernetes.io/projected/10081d7b-4a85-4d4a-8b06-d12d59eac231-kube-api-access-hq7v4\") pod \"10081d7b-4a85-4d4a-8b06-d12d59eac231\" (UID: \"10081d7b-4a85-4d4a-8b06-d12d59eac231\") " Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.533833 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/10081d7b-4a85-4d4a-8b06-d12d59eac231-operator-scripts\") pod \"10081d7b-4a85-4d4a-8b06-d12d59eac231\" (UID: \"10081d7b-4a85-4d4a-8b06-d12d59eac231\") " Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.533870 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/878b8e95-58ae-4e22-82c2-14e2d85a230c-operator-scripts\") pod \"878b8e95-58ae-4e22-82c2-14e2d85a230c\" (UID: \"878b8e95-58ae-4e22-82c2-14e2d85a230c\") " Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.533925 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cr87l\" (UniqueName: \"kubernetes.io/projected/bd2884cc-3d1a-4865-a232-16c459d0372d-kube-api-access-cr87l\") pod \"bd2884cc-3d1a-4865-a232-16c459d0372d\" (UID: \"bd2884cc-3d1a-4865-a232-16c459d0372d\") " Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.533954 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b9516c75-f573-46d8-b9b1-036db5eee52f-operator-scripts\") pod \"b9516c75-f573-46d8-b9b1-036db5eee52f\" (UID: \"b9516c75-f573-46d8-b9b1-036db5eee52f\") " Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.534000 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tqfh6\" (UniqueName: \"kubernetes.io/projected/eb88ea31-d557-48b6-82b9-7e3843f9935c-kube-api-access-tqfh6\") pod \"eb88ea31-d557-48b6-82b9-7e3843f9935c\" (UID: \"eb88ea31-d557-48b6-82b9-7e3843f9935c\") " Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.534029 4933 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eb88ea31-d557-48b6-82b9-7e3843f9935c-operator-scripts\") pod \"eb88ea31-d557-48b6-82b9-7e3843f9935c\" (UID: \"eb88ea31-d557-48b6-82b9-7e3843f9935c\") " Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.534064 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kzpf2\" (UniqueName: \"kubernetes.io/projected/878b8e95-58ae-4e22-82c2-14e2d85a230c-kube-api-access-kzpf2\") pod \"878b8e95-58ae-4e22-82c2-14e2d85a230c\" (UID: \"878b8e95-58ae-4e22-82c2-14e2d85a230c\") " Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.534128 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qpnjv\" (UniqueName: \"kubernetes.io/projected/b9516c75-f573-46d8-b9b1-036db5eee52f-kube-api-access-qpnjv\") pod \"b9516c75-f573-46d8-b9b1-036db5eee52f\" (UID: \"b9516c75-f573-46d8-b9b1-036db5eee52f\") " Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.534200 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd2884cc-3d1a-4865-a232-16c459d0372d-operator-scripts\") pod \"bd2884cc-3d1a-4865-a232-16c459d0372d\" (UID: \"bd2884cc-3d1a-4865-a232-16c459d0372d\") " Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.534247 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6242cc75-a86f-488b-bf60-47cc855a330c-operator-scripts\") pod \"6242cc75-a86f-488b-bf60-47cc855a330c\" (UID: \"6242cc75-a86f-488b-bf60-47cc855a330c\") " Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.534964 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b9516c75-f573-46d8-b9b1-036db5eee52f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b9516c75-f573-46d8-b9b1-036db5eee52f" (UID: "b9516c75-f573-46d8-b9b1-036db5eee52f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.535151 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6242cc75-a86f-488b-bf60-47cc855a330c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6242cc75-a86f-488b-bf60-47cc855a330c" (UID: "6242cc75-a86f-488b-bf60-47cc855a330c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.535363 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eb88ea31-d557-48b6-82b9-7e3843f9935c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "eb88ea31-d557-48b6-82b9-7e3843f9935c" (UID: "eb88ea31-d557-48b6-82b9-7e3843f9935c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.535699 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd2884cc-3d1a-4865-a232-16c459d0372d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "bd2884cc-3d1a-4865-a232-16c459d0372d" (UID: "bd2884cc-3d1a-4865-a232-16c459d0372d"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.536966 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/878b8e95-58ae-4e22-82c2-14e2d85a230c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "878b8e95-58ae-4e22-82c2-14e2d85a230c" (UID: "878b8e95-58ae-4e22-82c2-14e2d85a230c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.538216 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/10081d7b-4a85-4d4a-8b06-d12d59eac231-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "10081d7b-4a85-4d4a-8b06-d12d59eac231" (UID: "10081d7b-4a85-4d4a-8b06-d12d59eac231"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.540208 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eb88ea31-d557-48b6-82b9-7e3843f9935c-kube-api-access-tqfh6" (OuterVolumeSpecName: "kube-api-access-tqfh6") pod "eb88ea31-d557-48b6-82b9-7e3843f9935c" (UID: "eb88ea31-d557-48b6-82b9-7e3843f9935c"). InnerVolumeSpecName "kube-api-access-tqfh6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.540574 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/878b8e95-58ae-4e22-82c2-14e2d85a230c-kube-api-access-kzpf2" (OuterVolumeSpecName: "kube-api-access-kzpf2") pod "878b8e95-58ae-4e22-82c2-14e2d85a230c" (UID: "878b8e95-58ae-4e22-82c2-14e2d85a230c"). InnerVolumeSpecName "kube-api-access-kzpf2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.540755 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd2884cc-3d1a-4865-a232-16c459d0372d-kube-api-access-cr87l" (OuterVolumeSpecName: "kube-api-access-cr87l") pod "bd2884cc-3d1a-4865-a232-16c459d0372d" (UID: "bd2884cc-3d1a-4865-a232-16c459d0372d"). InnerVolumeSpecName "kube-api-access-cr87l". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.541445 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6242cc75-a86f-488b-bf60-47cc855a330c-kube-api-access-hn2l6" (OuterVolumeSpecName: "kube-api-access-hn2l6") pod "6242cc75-a86f-488b-bf60-47cc855a330c" (UID: "6242cc75-a86f-488b-bf60-47cc855a330c"). InnerVolumeSpecName "kube-api-access-hn2l6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.541984 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10081d7b-4a85-4d4a-8b06-d12d59eac231-kube-api-access-hq7v4" (OuterVolumeSpecName: "kube-api-access-hq7v4") pod "10081d7b-4a85-4d4a-8b06-d12d59eac231" (UID: "10081d7b-4a85-4d4a-8b06-d12d59eac231"). InnerVolumeSpecName "kube-api-access-hq7v4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.545031 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9516c75-f573-46d8-b9b1-036db5eee52f-kube-api-access-qpnjv" (OuterVolumeSpecName: "kube-api-access-qpnjv") pod "b9516c75-f573-46d8-b9b1-036db5eee52f" (UID: "b9516c75-f573-46d8-b9b1-036db5eee52f"). InnerVolumeSpecName "kube-api-access-qpnjv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.636298 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e17f8d0-6f1d-4831-bb0e-7a8f673a080d-config\") pod \"7e17f8d0-6f1d-4831-bb0e-7a8f673a080d\" (UID: \"7e17f8d0-6f1d-4831-bb0e-7a8f673a080d\") " Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.640048 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7e17f8d0-6f1d-4831-bb0e-7a8f673a080d-dns-svc\") pod \"7e17f8d0-6f1d-4831-bb0e-7a8f673a080d\" (UID: \"7e17f8d0-6f1d-4831-bb0e-7a8f673a080d\") " Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.640372 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7e17f8d0-6f1d-4831-bb0e-7a8f673a080d-ovsdbserver-sb\") pod \"7e17f8d0-6f1d-4831-bb0e-7a8f673a080d\" (UID: \"7e17f8d0-6f1d-4831-bb0e-7a8f673a080d\") " Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.640498 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mhnth\" (UniqueName: \"kubernetes.io/projected/7e17f8d0-6f1d-4831-bb0e-7a8f673a080d-kube-api-access-mhnth\") pod \"7e17f8d0-6f1d-4831-bb0e-7a8f673a080d\" (UID: \"7e17f8d0-6f1d-4831-bb0e-7a8f673a080d\") " Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.640610 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7e17f8d0-6f1d-4831-bb0e-7a8f673a080d-ovsdbserver-nb\") pod \"7e17f8d0-6f1d-4831-bb0e-7a8f673a080d\" (UID: \"7e17f8d0-6f1d-4831-bb0e-7a8f673a080d\") " Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.641200 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/10081d7b-4a85-4d4a-8b06-d12d59eac231-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.641313 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/878b8e95-58ae-4e22-82c2-14e2d85a230c-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.641499 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cr87l\" (UniqueName: \"kubernetes.io/projected/bd2884cc-3d1a-4865-a232-16c459d0372d-kube-api-access-cr87l\") on node \"crc\" DevicePath \"\"" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.641641 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b9516c75-f573-46d8-b9b1-036db5eee52f-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.641756 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tqfh6\" (UniqueName: 
\"kubernetes.io/projected/eb88ea31-d557-48b6-82b9-7e3843f9935c-kube-api-access-tqfh6\") on node \"crc\" DevicePath \"\"" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.641955 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eb88ea31-d557-48b6-82b9-7e3843f9935c-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.642107 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kzpf2\" (UniqueName: \"kubernetes.io/projected/878b8e95-58ae-4e22-82c2-14e2d85a230c-kube-api-access-kzpf2\") on node \"crc\" DevicePath \"\"" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.642238 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qpnjv\" (UniqueName: \"kubernetes.io/projected/b9516c75-f573-46d8-b9b1-036db5eee52f-kube-api-access-qpnjv\") on node \"crc\" DevicePath \"\"" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.642482 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd2884cc-3d1a-4865-a232-16c459d0372d-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.642604 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6242cc75-a86f-488b-bf60-47cc855a330c-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.642670 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hn2l6\" (UniqueName: \"kubernetes.io/projected/6242cc75-a86f-488b-bf60-47cc855a330c-kube-api-access-hn2l6\") on node \"crc\" DevicePath \"\"" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.642739 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hq7v4\" (UniqueName: \"kubernetes.io/projected/10081d7b-4a85-4d4a-8b06-d12d59eac231-kube-api-access-hq7v4\") on node \"crc\" DevicePath \"\"" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.646203 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e17f8d0-6f1d-4831-bb0e-7a8f673a080d-kube-api-access-mhnth" (OuterVolumeSpecName: "kube-api-access-mhnth") pod "7e17f8d0-6f1d-4831-bb0e-7a8f673a080d" (UID: "7e17f8d0-6f1d-4831-bb0e-7a8f673a080d"). InnerVolumeSpecName "kube-api-access-mhnth". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.681746 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e17f8d0-6f1d-4831-bb0e-7a8f673a080d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "7e17f8d0-6f1d-4831-bb0e-7a8f673a080d" (UID: "7e17f8d0-6f1d-4831-bb0e-7a8f673a080d"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.682391 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e17f8d0-6f1d-4831-bb0e-7a8f673a080d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7e17f8d0-6f1d-4831-bb0e-7a8f673a080d" (UID: "7e17f8d0-6f1d-4831-bb0e-7a8f673a080d"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.690902 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e17f8d0-6f1d-4831-bb0e-7a8f673a080d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "7e17f8d0-6f1d-4831-bb0e-7a8f673a080d" (UID: "7e17f8d0-6f1d-4831-bb0e-7a8f673a080d"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.707821 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e17f8d0-6f1d-4831-bb0e-7a8f673a080d-config" (OuterVolumeSpecName: "config") pod "7e17f8d0-6f1d-4831-bb0e-7a8f673a080d" (UID: "7e17f8d0-6f1d-4831-bb0e-7a8f673a080d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.744549 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7e17f8d0-6f1d-4831-bb0e-7a8f673a080d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.744579 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e17f8d0-6f1d-4831-bb0e-7a8f673a080d-config\") on node \"crc\" DevicePath \"\"" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.744590 4933 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7e17f8d0-6f1d-4831-bb0e-7a8f673a080d-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.744601 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7e17f8d0-6f1d-4831-bb0e-7a8f673a080d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 06:04:22 crc kubenswrapper[4933]: I0122 06:04:22.744613 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mhnth\" (UniqueName: \"kubernetes.io/projected/7e17f8d0-6f1d-4831-bb0e-7a8f673a080d-kube-api-access-mhnth\") on node \"crc\" DevicePath \"\"" Jan 22 06:04:23 crc kubenswrapper[4933]: I0122 06:04:23.213638 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757dc6fff9-gl29p" event={"ID":"7e17f8d0-6f1d-4831-bb0e-7a8f673a080d","Type":"ContainerDied","Data":"91748a5090dace86365d53df724b19f26ca88465068608c09b068efb0b1af661"} Jan 22 06:04:23 crc kubenswrapper[4933]: I0122 06:04:23.213702 4933 scope.go:117] "RemoveContainer" containerID="2d7dcc55c52f0332979086d141c4ed233b35909cc999cb2c04861c8692cc503f" Jan 22 06:04:23 crc kubenswrapper[4933]: I0122 06:04:23.213854 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-757dc6fff9-gl29p" Jan 22 06:04:23 crc kubenswrapper[4933]: I0122 06:04:23.221061 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-7wf66" Jan 22 06:04:23 crc kubenswrapper[4933]: I0122 06:04:23.221183 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-r4zfq" event={"ID":"76f30c05-b9b8-4439-aab6-b2c7e948a75f","Type":"ContainerStarted","Data":"5c0bab95c149f1ef6f612891529993e753c76f7bc83546ad768f27bd646dcc8f"} Jan 22 06:04:23 crc kubenswrapper[4933]: I0122 06:04:23.221265 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-dc32-account-create-update-nhclt" Jan 22 06:04:23 crc kubenswrapper[4933]: I0122 06:04:23.221312 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-2tsdt" Jan 22 06:04:23 crc kubenswrapper[4933]: I0122 06:04:23.221788 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-f2b9-account-create-update-rhljf" Jan 22 06:04:23 crc kubenswrapper[4933]: I0122 06:04:23.221886 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-brkgn" Jan 22 06:04:23 crc kubenswrapper[4933]: I0122 06:04:23.221097 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-9002-account-create-update-l62qv" Jan 22 06:04:23 crc kubenswrapper[4933]: I0122 06:04:23.249704 4933 scope.go:117] "RemoveContainer" containerID="8d81fe725ecefe9c300c0a92464f3ebdf078b44ea154cfac18da9e240aaa37b0" Jan 22 06:04:23 crc kubenswrapper[4933]: I0122 06:04:23.251609 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-r4zfq" podStartSLOduration=4.411699644 podStartE2EDuration="9.251584176s" podCreationTimestamp="2026-01-22 06:04:14 +0000 UTC" firstStartedPulling="2026-01-22 06:04:17.374653978 +0000 UTC m=+1105.211779351" lastFinishedPulling="2026-01-22 06:04:22.21453853 +0000 UTC m=+1110.051663883" observedRunningTime="2026-01-22 06:04:23.244755295 +0000 UTC m=+1111.081880658" watchObservedRunningTime="2026-01-22 06:04:23.251584176 +0000 UTC m=+1111.088709529" Jan 22 06:04:23 crc kubenswrapper[4933]: I0122 06:04:23.322942 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-757dc6fff9-gl29p"] Jan 22 06:04:23 crc kubenswrapper[4933]: I0122 06:04:23.333843 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-757dc6fff9-gl29p"] Jan 22 06:04:24 crc kubenswrapper[4933]: I0122 06:04:24.500988 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7e17f8d0-6f1d-4831-bb0e-7a8f673a080d" path="/var/lib/kubelet/pods/7e17f8d0-6f1d-4831-bb0e-7a8f673a080d/volumes" Jan 22 06:04:26 crc kubenswrapper[4933]: I0122 06:04:26.191800 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-knb5h"] Jan 22 06:04:26 crc kubenswrapper[4933]: E0122 06:04:26.192497 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6242cc75-a86f-488b-bf60-47cc855a330c" containerName="mariadb-account-create-update" Jan 22 06:04:26 crc kubenswrapper[4933]: I0122 06:04:26.192515 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="6242cc75-a86f-488b-bf60-47cc855a330c" containerName="mariadb-account-create-update" Jan 22 06:04:26 crc kubenswrapper[4933]: E0122 06:04:26.192545 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9516c75-f573-46d8-b9b1-036db5eee52f" containerName="mariadb-account-create-update" Jan 22 06:04:26 crc kubenswrapper[4933]: I0122 06:04:26.192566 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9516c75-f573-46d8-b9b1-036db5eee52f" containerName="mariadb-account-create-update" Jan 22 06:04:26 crc kubenswrapper[4933]: E0122 06:04:26.192583 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e17f8d0-6f1d-4831-bb0e-7a8f673a080d" containerName="init" Jan 22 06:04:26 crc kubenswrapper[4933]: I0122 06:04:26.192592 4933 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="7e17f8d0-6f1d-4831-bb0e-7a8f673a080d" containerName="init" Jan 22 06:04:26 crc kubenswrapper[4933]: E0122 06:04:26.192602 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d9ccac2-9b9e-4db1-b062-0ae165c0a0af" containerName="mariadb-account-create-update" Jan 22 06:04:26 crc kubenswrapper[4933]: I0122 06:04:26.192609 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d9ccac2-9b9e-4db1-b062-0ae165c0a0af" containerName="mariadb-account-create-update" Jan 22 06:04:26 crc kubenswrapper[4933]: E0122 06:04:26.192620 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb88ea31-d557-48b6-82b9-7e3843f9935c" containerName="mariadb-account-create-update" Jan 22 06:04:26 crc kubenswrapper[4933]: I0122 06:04:26.192627 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb88ea31-d557-48b6-82b9-7e3843f9935c" containerName="mariadb-account-create-update" Jan 22 06:04:26 crc kubenswrapper[4933]: E0122 06:04:26.192644 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="878b8e95-58ae-4e22-82c2-14e2d85a230c" containerName="mariadb-database-create" Jan 22 06:04:26 crc kubenswrapper[4933]: I0122 06:04:26.192652 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="878b8e95-58ae-4e22-82c2-14e2d85a230c" containerName="mariadb-database-create" Jan 22 06:04:26 crc kubenswrapper[4933]: E0122 06:04:26.192665 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e17f8d0-6f1d-4831-bb0e-7a8f673a080d" containerName="dnsmasq-dns" Jan 22 06:04:26 crc kubenswrapper[4933]: I0122 06:04:26.192672 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e17f8d0-6f1d-4831-bb0e-7a8f673a080d" containerName="dnsmasq-dns" Jan 22 06:04:26 crc kubenswrapper[4933]: E0122 06:04:26.192687 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10081d7b-4a85-4d4a-8b06-d12d59eac231" containerName="mariadb-database-create" Jan 22 06:04:26 crc kubenswrapper[4933]: I0122 06:04:26.192694 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="10081d7b-4a85-4d4a-8b06-d12d59eac231" containerName="mariadb-database-create" Jan 22 06:04:26 crc kubenswrapper[4933]: E0122 06:04:26.192704 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd2884cc-3d1a-4865-a232-16c459d0372d" containerName="mariadb-database-create" Jan 22 06:04:26 crc kubenswrapper[4933]: I0122 06:04:26.192712 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd2884cc-3d1a-4865-a232-16c459d0372d" containerName="mariadb-database-create" Jan 22 06:04:26 crc kubenswrapper[4933]: I0122 06:04:26.192891 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e17f8d0-6f1d-4831-bb0e-7a8f673a080d" containerName="dnsmasq-dns" Jan 22 06:04:26 crc kubenswrapper[4933]: I0122 06:04:26.192907 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="10081d7b-4a85-4d4a-8b06-d12d59eac231" containerName="mariadb-database-create" Jan 22 06:04:26 crc kubenswrapper[4933]: I0122 06:04:26.192920 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d9ccac2-9b9e-4db1-b062-0ae165c0a0af" containerName="mariadb-account-create-update" Jan 22 06:04:26 crc kubenswrapper[4933]: I0122 06:04:26.192931 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="878b8e95-58ae-4e22-82c2-14e2d85a230c" containerName="mariadb-database-create" Jan 22 06:04:26 crc kubenswrapper[4933]: I0122 06:04:26.192946 4933 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="6242cc75-a86f-488b-bf60-47cc855a330c" containerName="mariadb-account-create-update" Jan 22 06:04:26 crc kubenswrapper[4933]: I0122 06:04:26.192959 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9516c75-f573-46d8-b9b1-036db5eee52f" containerName="mariadb-account-create-update" Jan 22 06:04:26 crc kubenswrapper[4933]: I0122 06:04:26.192973 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd2884cc-3d1a-4865-a232-16c459d0372d" containerName="mariadb-database-create" Jan 22 06:04:26 crc kubenswrapper[4933]: I0122 06:04:26.192986 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb88ea31-d557-48b6-82b9-7e3843f9935c" containerName="mariadb-account-create-update" Jan 22 06:04:26 crc kubenswrapper[4933]: I0122 06:04:26.193635 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-knb5h" Jan 22 06:04:26 crc kubenswrapper[4933]: I0122 06:04:26.196892 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-mariadb-root-db-secret" Jan 22 06:04:26 crc kubenswrapper[4933]: I0122 06:04:26.214050 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-knb5h"] Jan 22 06:04:26 crc kubenswrapper[4933]: I0122 06:04:26.223901 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9f84f03f-67e9-41d2-8e74-98fd0ce61cac-operator-scripts\") pod \"root-account-create-update-knb5h\" (UID: \"9f84f03f-67e9-41d2-8e74-98fd0ce61cac\") " pod="openstack/root-account-create-update-knb5h" Jan 22 06:04:26 crc kubenswrapper[4933]: I0122 06:04:26.224258 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z5kt9\" (UniqueName: \"kubernetes.io/projected/9f84f03f-67e9-41d2-8e74-98fd0ce61cac-kube-api-access-z5kt9\") pod \"root-account-create-update-knb5h\" (UID: \"9f84f03f-67e9-41d2-8e74-98fd0ce61cac\") " pod="openstack/root-account-create-update-knb5h" Jan 22 06:04:26 crc kubenswrapper[4933]: I0122 06:04:26.326248 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z5kt9\" (UniqueName: \"kubernetes.io/projected/9f84f03f-67e9-41d2-8e74-98fd0ce61cac-kube-api-access-z5kt9\") pod \"root-account-create-update-knb5h\" (UID: \"9f84f03f-67e9-41d2-8e74-98fd0ce61cac\") " pod="openstack/root-account-create-update-knb5h" Jan 22 06:04:26 crc kubenswrapper[4933]: I0122 06:04:26.326379 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9f84f03f-67e9-41d2-8e74-98fd0ce61cac-operator-scripts\") pod \"root-account-create-update-knb5h\" (UID: \"9f84f03f-67e9-41d2-8e74-98fd0ce61cac\") " pod="openstack/root-account-create-update-knb5h" Jan 22 06:04:26 crc kubenswrapper[4933]: I0122 06:04:26.327687 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9f84f03f-67e9-41d2-8e74-98fd0ce61cac-operator-scripts\") pod \"root-account-create-update-knb5h\" (UID: \"9f84f03f-67e9-41d2-8e74-98fd0ce61cac\") " pod="openstack/root-account-create-update-knb5h" Jan 22 06:04:26 crc kubenswrapper[4933]: I0122 06:04:26.353852 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z5kt9\" (UniqueName: 
\"kubernetes.io/projected/9f84f03f-67e9-41d2-8e74-98fd0ce61cac-kube-api-access-z5kt9\") pod \"root-account-create-update-knb5h\" (UID: \"9f84f03f-67e9-41d2-8e74-98fd0ce61cac\") " pod="openstack/root-account-create-update-knb5h" Jan 22 06:04:26 crc kubenswrapper[4933]: I0122 06:04:26.523535 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-knb5h" Jan 22 06:04:26 crc kubenswrapper[4933]: I0122 06:04:26.936283 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-etc-swift\") pod \"swift-storage-0\" (UID: \"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016\") " pod="openstack/swift-storage-0" Jan 22 06:04:26 crc kubenswrapper[4933]: E0122 06:04:26.936511 4933 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 22 06:04:26 crc kubenswrapper[4933]: E0122 06:04:26.936698 4933 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 22 06:04:26 crc kubenswrapper[4933]: E0122 06:04:26.936752 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-etc-swift podName:4d7c7a06-59b1-4cc5-88dd-87bc9bccd016 nodeName:}" failed. No retries permitted until 2026-01-22 06:04:42.936737756 +0000 UTC m=+1130.773863109 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-etc-swift") pod "swift-storage-0" (UID: "4d7c7a06-59b1-4cc5-88dd-87bc9bccd016") : configmap "swift-ring-files" not found Jan 22 06:04:26 crc kubenswrapper[4933]: I0122 06:04:26.958052 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-knb5h"] Jan 22 06:04:27 crc kubenswrapper[4933]: I0122 06:04:27.263875 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-knb5h" event={"ID":"9f84f03f-67e9-41d2-8e74-98fd0ce61cac","Type":"ContainerStarted","Data":"7cd00712865e1c6012d1cfde55317eec773f77c6b865e64da93f5386176e7ff3"} Jan 22 06:04:27 crc kubenswrapper[4933]: I0122 06:04:27.264255 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-knb5h" event={"ID":"9f84f03f-67e9-41d2-8e74-98fd0ce61cac","Type":"ContainerStarted","Data":"0c824e1c47bc10c94f9ef7b6310dd374d48fd23ece16518d08d44ff080516c92"} Jan 22 06:04:27 crc kubenswrapper[4933]: I0122 06:04:27.305859 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/root-account-create-update-knb5h" podStartSLOduration=1.305827664 podStartE2EDuration="1.305827664s" podCreationTimestamp="2026-01-22 06:04:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:04:27.284400108 +0000 UTC m=+1115.121525491" watchObservedRunningTime="2026-01-22 06:04:27.305827664 +0000 UTC m=+1115.142953027" Jan 22 06:04:28 crc kubenswrapper[4933]: I0122 06:04:28.219219 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-zhkks"] Jan 22 06:04:28 crc kubenswrapper[4933]: I0122 06:04:28.220408 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-zhkks" Jan 22 06:04:28 crc kubenswrapper[4933]: I0122 06:04:28.226101 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Jan 22 06:04:28 crc kubenswrapper[4933]: I0122 06:04:28.226702 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-flfkl" Jan 22 06:04:28 crc kubenswrapper[4933]: I0122 06:04:28.229592 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-zhkks"] Jan 22 06:04:28 crc kubenswrapper[4933]: I0122 06:04:28.263762 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16cc03f7-df79-437e-903f-c0c6e5ba1cf0-config-data\") pod \"glance-db-sync-zhkks\" (UID: \"16cc03f7-df79-437e-903f-c0c6e5ba1cf0\") " pod="openstack/glance-db-sync-zhkks" Jan 22 06:04:28 crc kubenswrapper[4933]: I0122 06:04:28.263828 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16cc03f7-df79-437e-903f-c0c6e5ba1cf0-combined-ca-bundle\") pod \"glance-db-sync-zhkks\" (UID: \"16cc03f7-df79-437e-903f-c0c6e5ba1cf0\") " pod="openstack/glance-db-sync-zhkks" Jan 22 06:04:28 crc kubenswrapper[4933]: I0122 06:04:28.263930 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/16cc03f7-df79-437e-903f-c0c6e5ba1cf0-db-sync-config-data\") pod \"glance-db-sync-zhkks\" (UID: \"16cc03f7-df79-437e-903f-c0c6e5ba1cf0\") " pod="openstack/glance-db-sync-zhkks" Jan 22 06:04:28 crc kubenswrapper[4933]: I0122 06:04:28.264199 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x24vj\" (UniqueName: \"kubernetes.io/projected/16cc03f7-df79-437e-903f-c0c6e5ba1cf0-kube-api-access-x24vj\") pod \"glance-db-sync-zhkks\" (UID: \"16cc03f7-df79-437e-903f-c0c6e5ba1cf0\") " pod="openstack/glance-db-sync-zhkks" Jan 22 06:04:28 crc kubenswrapper[4933]: I0122 06:04:28.272668 4933 generic.go:334] "Generic (PLEG): container finished" podID="9f84f03f-67e9-41d2-8e74-98fd0ce61cac" containerID="7cd00712865e1c6012d1cfde55317eec773f77c6b865e64da93f5386176e7ff3" exitCode=0 Jan 22 06:04:28 crc kubenswrapper[4933]: I0122 06:04:28.272729 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-knb5h" event={"ID":"9f84f03f-67e9-41d2-8e74-98fd0ce61cac","Type":"ContainerDied","Data":"7cd00712865e1c6012d1cfde55317eec773f77c6b865e64da93f5386176e7ff3"} Jan 22 06:04:28 crc kubenswrapper[4933]: I0122 06:04:28.366198 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16cc03f7-df79-437e-903f-c0c6e5ba1cf0-combined-ca-bundle\") pod \"glance-db-sync-zhkks\" (UID: \"16cc03f7-df79-437e-903f-c0c6e5ba1cf0\") " pod="openstack/glance-db-sync-zhkks" Jan 22 06:04:28 crc kubenswrapper[4933]: I0122 06:04:28.366314 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/16cc03f7-df79-437e-903f-c0c6e5ba1cf0-db-sync-config-data\") pod \"glance-db-sync-zhkks\" (UID: \"16cc03f7-df79-437e-903f-c0c6e5ba1cf0\") " pod="openstack/glance-db-sync-zhkks" Jan 22 06:04:28 crc kubenswrapper[4933]: I0122 06:04:28.366392 4933 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x24vj\" (UniqueName: \"kubernetes.io/projected/16cc03f7-df79-437e-903f-c0c6e5ba1cf0-kube-api-access-x24vj\") pod \"glance-db-sync-zhkks\" (UID: \"16cc03f7-df79-437e-903f-c0c6e5ba1cf0\") " pod="openstack/glance-db-sync-zhkks" Jan 22 06:04:28 crc kubenswrapper[4933]: I0122 06:04:28.366457 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16cc03f7-df79-437e-903f-c0c6e5ba1cf0-config-data\") pod \"glance-db-sync-zhkks\" (UID: \"16cc03f7-df79-437e-903f-c0c6e5ba1cf0\") " pod="openstack/glance-db-sync-zhkks" Jan 22 06:04:28 crc kubenswrapper[4933]: I0122 06:04:28.378943 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16cc03f7-df79-437e-903f-c0c6e5ba1cf0-combined-ca-bundle\") pod \"glance-db-sync-zhkks\" (UID: \"16cc03f7-df79-437e-903f-c0c6e5ba1cf0\") " pod="openstack/glance-db-sync-zhkks" Jan 22 06:04:28 crc kubenswrapper[4933]: I0122 06:04:28.378938 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/16cc03f7-df79-437e-903f-c0c6e5ba1cf0-db-sync-config-data\") pod \"glance-db-sync-zhkks\" (UID: \"16cc03f7-df79-437e-903f-c0c6e5ba1cf0\") " pod="openstack/glance-db-sync-zhkks" Jan 22 06:04:28 crc kubenswrapper[4933]: I0122 06:04:28.381604 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16cc03f7-df79-437e-903f-c0c6e5ba1cf0-config-data\") pod \"glance-db-sync-zhkks\" (UID: \"16cc03f7-df79-437e-903f-c0c6e5ba1cf0\") " pod="openstack/glance-db-sync-zhkks" Jan 22 06:04:28 crc kubenswrapper[4933]: I0122 06:04:28.388764 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x24vj\" (UniqueName: \"kubernetes.io/projected/16cc03f7-df79-437e-903f-c0c6e5ba1cf0-kube-api-access-x24vj\") pod \"glance-db-sync-zhkks\" (UID: \"16cc03f7-df79-437e-903f-c0c6e5ba1cf0\") " pod="openstack/glance-db-sync-zhkks" Jan 22 06:04:28 crc kubenswrapper[4933]: I0122 06:04:28.542113 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-zhkks" Jan 22 06:04:29 crc kubenswrapper[4933]: I0122 06:04:29.077192 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-zhkks"] Jan 22 06:04:29 crc kubenswrapper[4933]: W0122 06:04:29.079900 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod16cc03f7_df79_437e_903f_c0c6e5ba1cf0.slice/crio-e6b38dc745cdbd323c7f23ed36f5d87c8ea4944d0c964c369ea3a3046754d32d WatchSource:0}: Error finding container e6b38dc745cdbd323c7f23ed36f5d87c8ea4944d0c964c369ea3a3046754d32d: Status 404 returned error can't find the container with id e6b38dc745cdbd323c7f23ed36f5d87c8ea4944d0c964c369ea3a3046754d32d Jan 22 06:04:29 crc kubenswrapper[4933]: I0122 06:04:29.281323 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-zhkks" event={"ID":"16cc03f7-df79-437e-903f-c0c6e5ba1cf0","Type":"ContainerStarted","Data":"e6b38dc745cdbd323c7f23ed36f5d87c8ea4944d0c964c369ea3a3046754d32d"} Jan 22 06:04:29 crc kubenswrapper[4933]: I0122 06:04:29.582061 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-knb5h" Jan 22 06:04:29 crc kubenswrapper[4933]: I0122 06:04:29.692109 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z5kt9\" (UniqueName: \"kubernetes.io/projected/9f84f03f-67e9-41d2-8e74-98fd0ce61cac-kube-api-access-z5kt9\") pod \"9f84f03f-67e9-41d2-8e74-98fd0ce61cac\" (UID: \"9f84f03f-67e9-41d2-8e74-98fd0ce61cac\") " Jan 22 06:04:29 crc kubenswrapper[4933]: I0122 06:04:29.692204 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9f84f03f-67e9-41d2-8e74-98fd0ce61cac-operator-scripts\") pod \"9f84f03f-67e9-41d2-8e74-98fd0ce61cac\" (UID: \"9f84f03f-67e9-41d2-8e74-98fd0ce61cac\") " Jan 22 06:04:29 crc kubenswrapper[4933]: I0122 06:04:29.692968 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9f84f03f-67e9-41d2-8e74-98fd0ce61cac-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9f84f03f-67e9-41d2-8e74-98fd0ce61cac" (UID: "9f84f03f-67e9-41d2-8e74-98fd0ce61cac"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:04:29 crc kubenswrapper[4933]: I0122 06:04:29.697538 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f84f03f-67e9-41d2-8e74-98fd0ce61cac-kube-api-access-z5kt9" (OuterVolumeSpecName: "kube-api-access-z5kt9") pod "9f84f03f-67e9-41d2-8e74-98fd0ce61cac" (UID: "9f84f03f-67e9-41d2-8e74-98fd0ce61cac"). InnerVolumeSpecName "kube-api-access-z5kt9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:04:29 crc kubenswrapper[4933]: I0122 06:04:29.794511 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z5kt9\" (UniqueName: \"kubernetes.io/projected/9f84f03f-67e9-41d2-8e74-98fd0ce61cac-kube-api-access-z5kt9\") on node \"crc\" DevicePath \"\"" Jan 22 06:04:29 crc kubenswrapper[4933]: I0122 06:04:29.794544 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9f84f03f-67e9-41d2-8e74-98fd0ce61cac-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:04:30 crc kubenswrapper[4933]: I0122 06:04:30.316458 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-knb5h" Jan 22 06:04:30 crc kubenswrapper[4933]: I0122 06:04:30.316403 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-knb5h" event={"ID":"9f84f03f-67e9-41d2-8e74-98fd0ce61cac","Type":"ContainerDied","Data":"0c824e1c47bc10c94f9ef7b6310dd374d48fd23ece16518d08d44ff080516c92"} Jan 22 06:04:30 crc kubenswrapper[4933]: I0122 06:04:30.316674 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0c824e1c47bc10c94f9ef7b6310dd374d48fd23ece16518d08d44ff080516c92" Jan 22 06:04:32 crc kubenswrapper[4933]: I0122 06:04:32.341823 4933 generic.go:334] "Generic (PLEG): container finished" podID="76f30c05-b9b8-4439-aab6-b2c7e948a75f" containerID="5c0bab95c149f1ef6f612891529993e753c76f7bc83546ad768f27bd646dcc8f" exitCode=0 Jan 22 06:04:32 crc kubenswrapper[4933]: I0122 06:04:32.341955 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-r4zfq" event={"ID":"76f30c05-b9b8-4439-aab6-b2c7e948a75f","Type":"ContainerDied","Data":"5c0bab95c149f1ef6f612891529993e753c76f7bc83546ad768f27bd646dcc8f"} Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.043956 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-phtjz" podUID="2160e11a-468c-4bf7-9fdc-e579f3ecf896" containerName="ovn-controller" probeResult="failure" output=< Jan 22 06:04:33 crc kubenswrapper[4933]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Jan 22 06:04:33 crc kubenswrapper[4933]: > Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.077378 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-rwb6s" Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.079779 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-rwb6s" Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.297765 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-phtjz-config-fhx7f"] Jan 22 06:04:33 crc kubenswrapper[4933]: E0122 06:04:33.298160 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f84f03f-67e9-41d2-8e74-98fd0ce61cac" containerName="mariadb-account-create-update" Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.298172 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f84f03f-67e9-41d2-8e74-98fd0ce61cac" containerName="mariadb-account-create-update" Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.298338 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="9f84f03f-67e9-41d2-8e74-98fd0ce61cac" containerName="mariadb-account-create-update" Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.298833 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-phtjz-config-fhx7f" Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.300838 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.305941 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-phtjz-config-fhx7f"] Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.355158 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/af452d54-60a2-44d7-835b-09285b372886-additional-scripts\") pod \"ovn-controller-phtjz-config-fhx7f\" (UID: \"af452d54-60a2-44d7-835b-09285b372886\") " pod="openstack/ovn-controller-phtjz-config-fhx7f" Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.355231 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/af452d54-60a2-44d7-835b-09285b372886-var-run\") pod \"ovn-controller-phtjz-config-fhx7f\" (UID: \"af452d54-60a2-44d7-835b-09285b372886\") " pod="openstack/ovn-controller-phtjz-config-fhx7f" Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.355268 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/af452d54-60a2-44d7-835b-09285b372886-scripts\") pod \"ovn-controller-phtjz-config-fhx7f\" (UID: \"af452d54-60a2-44d7-835b-09285b372886\") " pod="openstack/ovn-controller-phtjz-config-fhx7f" Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.355294 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/af452d54-60a2-44d7-835b-09285b372886-var-run-ovn\") pod \"ovn-controller-phtjz-config-fhx7f\" (UID: \"af452d54-60a2-44d7-835b-09285b372886\") " pod="openstack/ovn-controller-phtjz-config-fhx7f" Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.355320 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/af452d54-60a2-44d7-835b-09285b372886-var-log-ovn\") pod \"ovn-controller-phtjz-config-fhx7f\" (UID: \"af452d54-60a2-44d7-835b-09285b372886\") " pod="openstack/ovn-controller-phtjz-config-fhx7f" Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.355348 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-92hpg\" (UniqueName: \"kubernetes.io/projected/af452d54-60a2-44d7-835b-09285b372886-kube-api-access-92hpg\") pod \"ovn-controller-phtjz-config-fhx7f\" (UID: \"af452d54-60a2-44d7-835b-09285b372886\") " pod="openstack/ovn-controller-phtjz-config-fhx7f" Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.457199 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/af452d54-60a2-44d7-835b-09285b372886-var-run-ovn\") pod \"ovn-controller-phtjz-config-fhx7f\" (UID: \"af452d54-60a2-44d7-835b-09285b372886\") " pod="openstack/ovn-controller-phtjz-config-fhx7f" Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.457269 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/af452d54-60a2-44d7-835b-09285b372886-var-log-ovn\") 
pod \"ovn-controller-phtjz-config-fhx7f\" (UID: \"af452d54-60a2-44d7-835b-09285b372886\") " pod="openstack/ovn-controller-phtjz-config-fhx7f" Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.457315 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-92hpg\" (UniqueName: \"kubernetes.io/projected/af452d54-60a2-44d7-835b-09285b372886-kube-api-access-92hpg\") pod \"ovn-controller-phtjz-config-fhx7f\" (UID: \"af452d54-60a2-44d7-835b-09285b372886\") " pod="openstack/ovn-controller-phtjz-config-fhx7f" Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.457583 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/af452d54-60a2-44d7-835b-09285b372886-additional-scripts\") pod \"ovn-controller-phtjz-config-fhx7f\" (UID: \"af452d54-60a2-44d7-835b-09285b372886\") " pod="openstack/ovn-controller-phtjz-config-fhx7f" Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.457634 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/af452d54-60a2-44d7-835b-09285b372886-var-run-ovn\") pod \"ovn-controller-phtjz-config-fhx7f\" (UID: \"af452d54-60a2-44d7-835b-09285b372886\") " pod="openstack/ovn-controller-phtjz-config-fhx7f" Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.457673 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/af452d54-60a2-44d7-835b-09285b372886-var-run\") pod \"ovn-controller-phtjz-config-fhx7f\" (UID: \"af452d54-60a2-44d7-835b-09285b372886\") " pod="openstack/ovn-controller-phtjz-config-fhx7f" Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.457677 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/af452d54-60a2-44d7-835b-09285b372886-var-log-ovn\") pod \"ovn-controller-phtjz-config-fhx7f\" (UID: \"af452d54-60a2-44d7-835b-09285b372886\") " pod="openstack/ovn-controller-phtjz-config-fhx7f" Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.457732 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/af452d54-60a2-44d7-835b-09285b372886-var-run\") pod \"ovn-controller-phtjz-config-fhx7f\" (UID: \"af452d54-60a2-44d7-835b-09285b372886\") " pod="openstack/ovn-controller-phtjz-config-fhx7f" Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.457752 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/af452d54-60a2-44d7-835b-09285b372886-scripts\") pod \"ovn-controller-phtjz-config-fhx7f\" (UID: \"af452d54-60a2-44d7-835b-09285b372886\") " pod="openstack/ovn-controller-phtjz-config-fhx7f" Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.458478 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/af452d54-60a2-44d7-835b-09285b372886-additional-scripts\") pod \"ovn-controller-phtjz-config-fhx7f\" (UID: \"af452d54-60a2-44d7-835b-09285b372886\") " pod="openstack/ovn-controller-phtjz-config-fhx7f" Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.460855 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/af452d54-60a2-44d7-835b-09285b372886-scripts\") pod \"ovn-controller-phtjz-config-fhx7f\" 
(UID: \"af452d54-60a2-44d7-835b-09285b372886\") " pod="openstack/ovn-controller-phtjz-config-fhx7f" Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.494858 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-92hpg\" (UniqueName: \"kubernetes.io/projected/af452d54-60a2-44d7-835b-09285b372886-kube-api-access-92hpg\") pod \"ovn-controller-phtjz-config-fhx7f\" (UID: \"af452d54-60a2-44d7-835b-09285b372886\") " pod="openstack/ovn-controller-phtjz-config-fhx7f" Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.652782 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-phtjz-config-fhx7f" Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.773654 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-r4zfq" Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.865359 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/76f30c05-b9b8-4439-aab6-b2c7e948a75f-swiftconf\") pod \"76f30c05-b9b8-4439-aab6-b2c7e948a75f\" (UID: \"76f30c05-b9b8-4439-aab6-b2c7e948a75f\") " Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.865400 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vzk5x\" (UniqueName: \"kubernetes.io/projected/76f30c05-b9b8-4439-aab6-b2c7e948a75f-kube-api-access-vzk5x\") pod \"76f30c05-b9b8-4439-aab6-b2c7e948a75f\" (UID: \"76f30c05-b9b8-4439-aab6-b2c7e948a75f\") " Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.865431 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76f30c05-b9b8-4439-aab6-b2c7e948a75f-combined-ca-bundle\") pod \"76f30c05-b9b8-4439-aab6-b2c7e948a75f\" (UID: \"76f30c05-b9b8-4439-aab6-b2c7e948a75f\") " Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.865448 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/76f30c05-b9b8-4439-aab6-b2c7e948a75f-ring-data-devices\") pod \"76f30c05-b9b8-4439-aab6-b2c7e948a75f\" (UID: \"76f30c05-b9b8-4439-aab6-b2c7e948a75f\") " Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.865486 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/76f30c05-b9b8-4439-aab6-b2c7e948a75f-scripts\") pod \"76f30c05-b9b8-4439-aab6-b2c7e948a75f\" (UID: \"76f30c05-b9b8-4439-aab6-b2c7e948a75f\") " Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.865532 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/76f30c05-b9b8-4439-aab6-b2c7e948a75f-dispersionconf\") pod \"76f30c05-b9b8-4439-aab6-b2c7e948a75f\" (UID: \"76f30c05-b9b8-4439-aab6-b2c7e948a75f\") " Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.865550 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/76f30c05-b9b8-4439-aab6-b2c7e948a75f-etc-swift\") pod \"76f30c05-b9b8-4439-aab6-b2c7e948a75f\" (UID: \"76f30c05-b9b8-4439-aab6-b2c7e948a75f\") " Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.867304 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/76f30c05-b9b8-4439-aab6-b2c7e948a75f-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "76f30c05-b9b8-4439-aab6-b2c7e948a75f" (UID: "76f30c05-b9b8-4439-aab6-b2c7e948a75f"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.869471 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/76f30c05-b9b8-4439-aab6-b2c7e948a75f-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "76f30c05-b9b8-4439-aab6-b2c7e948a75f" (UID: "76f30c05-b9b8-4439-aab6-b2c7e948a75f"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.871948 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/76f30c05-b9b8-4439-aab6-b2c7e948a75f-kube-api-access-vzk5x" (OuterVolumeSpecName: "kube-api-access-vzk5x") pod "76f30c05-b9b8-4439-aab6-b2c7e948a75f" (UID: "76f30c05-b9b8-4439-aab6-b2c7e948a75f"). InnerVolumeSpecName "kube-api-access-vzk5x". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.878929 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/76f30c05-b9b8-4439-aab6-b2c7e948a75f-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "76f30c05-b9b8-4439-aab6-b2c7e948a75f" (UID: "76f30c05-b9b8-4439-aab6-b2c7e948a75f"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.891593 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/76f30c05-b9b8-4439-aab6-b2c7e948a75f-scripts" (OuterVolumeSpecName: "scripts") pod "76f30c05-b9b8-4439-aab6-b2c7e948a75f" (UID: "76f30c05-b9b8-4439-aab6-b2c7e948a75f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.897187 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/76f30c05-b9b8-4439-aab6-b2c7e948a75f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "76f30c05-b9b8-4439-aab6-b2c7e948a75f" (UID: "76f30c05-b9b8-4439-aab6-b2c7e948a75f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.913715 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/76f30c05-b9b8-4439-aab6-b2c7e948a75f-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "76f30c05-b9b8-4439-aab6-b2c7e948a75f" (UID: "76f30c05-b9b8-4439-aab6-b2c7e948a75f"). InnerVolumeSpecName "swiftconf". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.968372 4933 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/76f30c05-b9b8-4439-aab6-b2c7e948a75f-swiftconf\") on node \"crc\" DevicePath \"\"" Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.968409 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vzk5x\" (UniqueName: \"kubernetes.io/projected/76f30c05-b9b8-4439-aab6-b2c7e948a75f-kube-api-access-vzk5x\") on node \"crc\" DevicePath \"\"" Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.968423 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76f30c05-b9b8-4439-aab6-b2c7e948a75f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.968434 4933 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/76f30c05-b9b8-4439-aab6-b2c7e948a75f-ring-data-devices\") on node \"crc\" DevicePath \"\"" Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.968445 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/76f30c05-b9b8-4439-aab6-b2c7e948a75f-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.968455 4933 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/76f30c05-b9b8-4439-aab6-b2c7e948a75f-dispersionconf\") on node \"crc\" DevicePath \"\"" Jan 22 06:04:33 crc kubenswrapper[4933]: I0122 06:04:33.968465 4933 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/76f30c05-b9b8-4439-aab6-b2c7e948a75f-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 22 06:04:34 crc kubenswrapper[4933]: I0122 06:04:34.093658 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-phtjz-config-fhx7f"] Jan 22 06:04:34 crc kubenswrapper[4933]: W0122 06:04:34.097862 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaf452d54_60a2_44d7_835b_09285b372886.slice/crio-6b04aef78946d36a1d8cafa0848fa56ba27fe5f5c5420529af6e9b8c120c91d6 WatchSource:0}: Error finding container 6b04aef78946d36a1d8cafa0848fa56ba27fe5f5c5420529af6e9b8c120c91d6: Status 404 returned error can't find the container with id 6b04aef78946d36a1d8cafa0848fa56ba27fe5f5c5420529af6e9b8c120c91d6 Jan 22 06:04:34 crc kubenswrapper[4933]: I0122 06:04:34.363754 4933 generic.go:334] "Generic (PLEG): container finished" podID="47299478-bcfd-4f21-a56c-efcf7b167999" containerID="83e14ee02b552e375ddb43f6d79d9fe6adc343bd3efcacc2ece24e2451dc5275" exitCode=0 Jan 22 06:04:34 crc kubenswrapper[4933]: I0122 06:04:34.363833 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"47299478-bcfd-4f21-a56c-efcf7b167999","Type":"ContainerDied","Data":"83e14ee02b552e375ddb43f6d79d9fe6adc343bd3efcacc2ece24e2451dc5275"} Jan 22 06:04:34 crc kubenswrapper[4933]: I0122 06:04:34.365421 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-r4zfq" event={"ID":"76f30c05-b9b8-4439-aab6-b2c7e948a75f","Type":"ContainerDied","Data":"072e256824ac03cfa7a9c1168eeda1a49c5b4cafdbcf2b56c80005d7c3b9e637"} Jan 22 06:04:34 crc kubenswrapper[4933]: I0122 06:04:34.365457 
4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="072e256824ac03cfa7a9c1168eeda1a49c5b4cafdbcf2b56c80005d7c3b9e637" Jan 22 06:04:34 crc kubenswrapper[4933]: I0122 06:04:34.365451 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-r4zfq" Jan 22 06:04:34 crc kubenswrapper[4933]: I0122 06:04:34.367165 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-phtjz-config-fhx7f" event={"ID":"af452d54-60a2-44d7-835b-09285b372886","Type":"ContainerStarted","Data":"3409b6d93b9700bfe32856ab9cee3f6d9bd53400322f262fa48eebadc6de3a23"} Jan 22 06:04:34 crc kubenswrapper[4933]: I0122 06:04:34.367190 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-phtjz-config-fhx7f" event={"ID":"af452d54-60a2-44d7-835b-09285b372886","Type":"ContainerStarted","Data":"6b04aef78946d36a1d8cafa0848fa56ba27fe5f5c5420529af6e9b8c120c91d6"} Jan 22 06:04:34 crc kubenswrapper[4933]: I0122 06:04:34.370165 4933 generic.go:334] "Generic (PLEG): container finished" podID="4d712958-1ece-47de-9798-6e852b03c565" containerID="452184a465d8d1be0e80a527dbc5f992b7ee47b495271d69908b57e67655f195" exitCode=0 Jan 22 06:04:34 crc kubenswrapper[4933]: I0122 06:04:34.370202 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"4d712958-1ece-47de-9798-6e852b03c565","Type":"ContainerDied","Data":"452184a465d8d1be0e80a527dbc5f992b7ee47b495271d69908b57e67655f195"} Jan 22 06:04:34 crc kubenswrapper[4933]: I0122 06:04:34.417380 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-phtjz-config-fhx7f" podStartSLOduration=1.417362105 podStartE2EDuration="1.417362105s" podCreationTimestamp="2026-01-22 06:04:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:04:34.402252527 +0000 UTC m=+1122.239377880" watchObservedRunningTime="2026-01-22 06:04:34.417362105 +0000 UTC m=+1122.254487458" Jan 22 06:04:35 crc kubenswrapper[4933]: I0122 06:04:35.394205 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"4d712958-1ece-47de-9798-6e852b03c565","Type":"ContainerStarted","Data":"c1710389b2dae67dfbd6fae597c0b78a024bc303c5cb265a90023ca99e2818b9"} Jan 22 06:04:35 crc kubenswrapper[4933]: I0122 06:04:35.394626 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Jan 22 06:04:35 crc kubenswrapper[4933]: I0122 06:04:35.396142 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"47299478-bcfd-4f21-a56c-efcf7b167999","Type":"ContainerStarted","Data":"9c99762ed66dc820d592fe5b2a44c175901d1c948185099ec445d18b9d3c9e4e"} Jan 22 06:04:35 crc kubenswrapper[4933]: I0122 06:04:35.396967 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Jan 22 06:04:35 crc kubenswrapper[4933]: I0122 06:04:35.398973 4933 generic.go:334] "Generic (PLEG): container finished" podID="af452d54-60a2-44d7-835b-09285b372886" containerID="3409b6d93b9700bfe32856ab9cee3f6d9bd53400322f262fa48eebadc6de3a23" exitCode=0 Jan 22 06:04:35 crc kubenswrapper[4933]: I0122 06:04:35.399005 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-phtjz-config-fhx7f" 
event={"ID":"af452d54-60a2-44d7-835b-09285b372886","Type":"ContainerDied","Data":"3409b6d93b9700bfe32856ab9cee3f6d9bd53400322f262fa48eebadc6de3a23"} Jan 22 06:04:35 crc kubenswrapper[4933]: I0122 06:04:35.424313 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=50.13983077 podStartE2EDuration="1m2.424290289s" podCreationTimestamp="2026-01-22 06:03:33 +0000 UTC" firstStartedPulling="2026-01-22 06:03:46.881332616 +0000 UTC m=+1074.718457969" lastFinishedPulling="2026-01-22 06:03:59.165792105 +0000 UTC m=+1087.002917488" observedRunningTime="2026-01-22 06:04:35.417198141 +0000 UTC m=+1123.254323514" watchObservedRunningTime="2026-01-22 06:04:35.424290289 +0000 UTC m=+1123.261415652" Jan 22 06:04:35 crc kubenswrapper[4933]: I0122 06:04:35.440890 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=50.57112746 podStartE2EDuration="1m2.440870074s" podCreationTimestamp="2026-01-22 06:03:33 +0000 UTC" firstStartedPulling="2026-01-22 06:03:46.860810156 +0000 UTC m=+1074.697935509" lastFinishedPulling="2026-01-22 06:03:58.73055277 +0000 UTC m=+1086.567678123" observedRunningTime="2026-01-22 06:04:35.436503305 +0000 UTC m=+1123.273628668" watchObservedRunningTime="2026-01-22 06:04:35.440870074 +0000 UTC m=+1123.277995427" Jan 22 06:04:36 crc kubenswrapper[4933]: I0122 06:04:36.712732 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-phtjz-config-fhx7f" Jan 22 06:04:36 crc kubenswrapper[4933]: I0122 06:04:36.812524 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/af452d54-60a2-44d7-835b-09285b372886-var-run-ovn\") pod \"af452d54-60a2-44d7-835b-09285b372886\" (UID: \"af452d54-60a2-44d7-835b-09285b372886\") " Jan 22 06:04:36 crc kubenswrapper[4933]: I0122 06:04:36.812630 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/af452d54-60a2-44d7-835b-09285b372886-scripts\") pod \"af452d54-60a2-44d7-835b-09285b372886\" (UID: \"af452d54-60a2-44d7-835b-09285b372886\") " Jan 22 06:04:36 crc kubenswrapper[4933]: I0122 06:04:36.812697 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/af452d54-60a2-44d7-835b-09285b372886-var-run\") pod \"af452d54-60a2-44d7-835b-09285b372886\" (UID: \"af452d54-60a2-44d7-835b-09285b372886\") " Jan 22 06:04:36 crc kubenswrapper[4933]: I0122 06:04:36.812730 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-92hpg\" (UniqueName: \"kubernetes.io/projected/af452d54-60a2-44d7-835b-09285b372886-kube-api-access-92hpg\") pod \"af452d54-60a2-44d7-835b-09285b372886\" (UID: \"af452d54-60a2-44d7-835b-09285b372886\") " Jan 22 06:04:36 crc kubenswrapper[4933]: I0122 06:04:36.812757 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/af452d54-60a2-44d7-835b-09285b372886-var-log-ovn\") pod \"af452d54-60a2-44d7-835b-09285b372886\" (UID: \"af452d54-60a2-44d7-835b-09285b372886\") " Jan 22 06:04:36 crc kubenswrapper[4933]: I0122 06:04:36.812744 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/af452d54-60a2-44d7-835b-09285b372886-var-run-ovn" 
(OuterVolumeSpecName: "var-run-ovn") pod "af452d54-60a2-44d7-835b-09285b372886" (UID: "af452d54-60a2-44d7-835b-09285b372886"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 06:04:36 crc kubenswrapper[4933]: I0122 06:04:36.812804 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/af452d54-60a2-44d7-835b-09285b372886-additional-scripts\") pod \"af452d54-60a2-44d7-835b-09285b372886\" (UID: \"af452d54-60a2-44d7-835b-09285b372886\") " Jan 22 06:04:36 crc kubenswrapper[4933]: I0122 06:04:36.813142 4933 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/af452d54-60a2-44d7-835b-09285b372886-var-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 22 06:04:36 crc kubenswrapper[4933]: I0122 06:04:36.813561 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/af452d54-60a2-44d7-835b-09285b372886-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "af452d54-60a2-44d7-835b-09285b372886" (UID: "af452d54-60a2-44d7-835b-09285b372886"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:04:36 crc kubenswrapper[4933]: I0122 06:04:36.813598 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/af452d54-60a2-44d7-835b-09285b372886-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "af452d54-60a2-44d7-835b-09285b372886" (UID: "af452d54-60a2-44d7-835b-09285b372886"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 06:04:36 crc kubenswrapper[4933]: I0122 06:04:36.813616 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/af452d54-60a2-44d7-835b-09285b372886-var-run" (OuterVolumeSpecName: "var-run") pod "af452d54-60a2-44d7-835b-09285b372886" (UID: "af452d54-60a2-44d7-835b-09285b372886"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 06:04:36 crc kubenswrapper[4933]: I0122 06:04:36.813765 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/af452d54-60a2-44d7-835b-09285b372886-scripts" (OuterVolumeSpecName: "scripts") pod "af452d54-60a2-44d7-835b-09285b372886" (UID: "af452d54-60a2-44d7-835b-09285b372886"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:04:36 crc kubenswrapper[4933]: I0122 06:04:36.826699 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af452d54-60a2-44d7-835b-09285b372886-kube-api-access-92hpg" (OuterVolumeSpecName: "kube-api-access-92hpg") pod "af452d54-60a2-44d7-835b-09285b372886" (UID: "af452d54-60a2-44d7-835b-09285b372886"). InnerVolumeSpecName "kube-api-access-92hpg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:04:36 crc kubenswrapper[4933]: I0122 06:04:36.914449 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/af452d54-60a2-44d7-835b-09285b372886-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:04:36 crc kubenswrapper[4933]: I0122 06:04:36.914485 4933 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/af452d54-60a2-44d7-835b-09285b372886-var-run\") on node \"crc\" DevicePath \"\"" Jan 22 06:04:36 crc kubenswrapper[4933]: I0122 06:04:36.914494 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-92hpg\" (UniqueName: \"kubernetes.io/projected/af452d54-60a2-44d7-835b-09285b372886-kube-api-access-92hpg\") on node \"crc\" DevicePath \"\"" Jan 22 06:04:36 crc kubenswrapper[4933]: I0122 06:04:36.914503 4933 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/af452d54-60a2-44d7-835b-09285b372886-var-log-ovn\") on node \"crc\" DevicePath \"\"" Jan 22 06:04:36 crc kubenswrapper[4933]: I0122 06:04:36.914512 4933 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/af452d54-60a2-44d7-835b-09285b372886-additional-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:04:37 crc kubenswrapper[4933]: I0122 06:04:37.415478 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-phtjz-config-fhx7f" event={"ID":"af452d54-60a2-44d7-835b-09285b372886","Type":"ContainerDied","Data":"6b04aef78946d36a1d8cafa0848fa56ba27fe5f5c5420529af6e9b8c120c91d6"} Jan 22 06:04:37 crc kubenswrapper[4933]: I0122 06:04:37.415517 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-phtjz-config-fhx7f" Jan 22 06:04:37 crc kubenswrapper[4933]: I0122 06:04:37.415531 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6b04aef78946d36a1d8cafa0848fa56ba27fe5f5c5420529af6e9b8c120c91d6" Jan 22 06:04:37 crc kubenswrapper[4933]: I0122 06:04:37.538238 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-phtjz-config-fhx7f"] Jan 22 06:04:37 crc kubenswrapper[4933]: I0122 06:04:37.545092 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-phtjz-config-fhx7f"] Jan 22 06:04:38 crc kubenswrapper[4933]: I0122 06:04:38.057446 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-phtjz" Jan 22 06:04:38 crc kubenswrapper[4933]: I0122 06:04:38.499241 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="af452d54-60a2-44d7-835b-09285b372886" path="/var/lib/kubelet/pods/af452d54-60a2-44d7-835b-09285b372886/volumes" Jan 22 06:04:43 crc kubenswrapper[4933]: I0122 06:04:43.024279 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-etc-swift\") pod \"swift-storage-0\" (UID: \"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016\") " pod="openstack/swift-storage-0" Jan 22 06:04:43 crc kubenswrapper[4933]: I0122 06:04:43.035863 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-etc-swift\") pod \"swift-storage-0\" (UID: \"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016\") " pod="openstack/swift-storage-0" Jan 22 06:04:43 crc kubenswrapper[4933]: I0122 06:04:43.132504 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Jan 22 06:04:45 crc kubenswrapper[4933]: I0122 06:04:45.065066 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="47299478-bcfd-4f21-a56c-efcf7b167999" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.102:5671: connect: connection refused" Jan 22 06:04:45 crc kubenswrapper[4933]: I0122 06:04:45.104800 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="4d712958-1ece-47de-9798-6e852b03c565" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.103:5671: connect: connection refused" Jan 22 06:04:47 crc kubenswrapper[4933]: I0122 06:04:47.209509 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Jan 22 06:04:47 crc kubenswrapper[4933]: I0122 06:04:47.495206 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016","Type":"ContainerStarted","Data":"9adc157c5fe2f46a84e84df9166b88a0ce7debcfa8230363732ceed13db40bd0"} Jan 22 06:04:47 crc kubenswrapper[4933]: I0122 06:04:47.497307 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-zhkks" event={"ID":"16cc03f7-df79-437e-903f-c0c6e5ba1cf0","Type":"ContainerStarted","Data":"beb6906e88fabf98fbefd441f25673bf4d870ba69a3d6a353004e55d74d7321f"} Jan 22 06:04:47 crc kubenswrapper[4933]: I0122 06:04:47.521632 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-zhkks" podStartSLOduration=1.87576342 podStartE2EDuration="19.521615934s" podCreationTimestamp="2026-01-22 06:04:28 +0000 UTC" firstStartedPulling="2026-01-22 06:04:29.082117875 +0000 UTC m=+1116.919243238" lastFinishedPulling="2026-01-22 06:04:46.727970379 +0000 UTC m=+1134.565095752" observedRunningTime="2026-01-22 06:04:47.518057916 +0000 UTC m=+1135.355183289" watchObservedRunningTime="2026-01-22 06:04:47.521615934 +0000 UTC m=+1135.358741287" Jan 22 06:04:49 crc kubenswrapper[4933]: I0122 06:04:49.520458 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016","Type":"ContainerStarted","Data":"b474cfa5d681b7ffb201c8ace8fb7d3efb77a53712a69fa5646c7089e6d05e5a"} Jan 22 06:04:49 crc kubenswrapper[4933]: I0122 06:04:49.521161 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016","Type":"ContainerStarted","Data":"89e9ea346551a5c5894ae7469a69c2ea0a9fc34a0adf372fc6b1fea201f66654"} Jan 22 06:04:49 crc kubenswrapper[4933]: I0122 06:04:49.521181 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016","Type":"ContainerStarted","Data":"07d6aad661ae1121fa77133d2f0b4b28385e2d29ec41899d592ae1ee48161fdd"} Jan 22 06:04:49 crc kubenswrapper[4933]: I0122 06:04:49.521194 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016","Type":"ContainerStarted","Data":"88bc3429a376b19172757bdf15fd8015c87d29a4672fc50f7cd63426a4a15deb"} Jan 22 06:04:51 crc kubenswrapper[4933]: I0122 06:04:51.542438 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016","Type":"ContainerStarted","Data":"02aabe8bc9d6a787100f261aab25ec19fece062ced3a51ee5af7db32e0476c01"} Jan 22 06:04:51 crc kubenswrapper[4933]: I0122 06:04:51.544307 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016","Type":"ContainerStarted","Data":"5c84341f9cb1713a1792b0f79a08b86f98220a86f1ae11140038b774810dc9bf"} Jan 22 06:04:51 crc kubenswrapper[4933]: I0122 06:04:51.544421 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016","Type":"ContainerStarted","Data":"ce89b1febf7814e26dbdcab688f4151d8251ed7ce3d27c8d2405f7735ce3e4ad"} Jan 22 06:04:51 crc kubenswrapper[4933]: I0122 06:04:51.544517 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016","Type":"ContainerStarted","Data":"263bbaf72a78f3a591d84bbd2a5fddf505db79d66e26fc745570da4a483e5714"} Jan 22 06:04:53 crc kubenswrapper[4933]: I0122 06:04:53.573375 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016","Type":"ContainerStarted","Data":"9a3a457b6ab2d11ee8b59c4ef3cb0fb0706bc20b89418ab13b9ceffb95fea763"} Jan 22 06:04:53 crc kubenswrapper[4933]: I0122 06:04:53.573990 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016","Type":"ContainerStarted","Data":"7cfffd64b9e03c3d5063b865c0c0af9e8e61d754936c8d6e9bc69e678886a8de"} Jan 22 06:04:53 crc kubenswrapper[4933]: I0122 06:04:53.574009 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016","Type":"ContainerStarted","Data":"0a2c7ba35b45c00194109715a53245977cc22628a5deb202f9c6835fd7a8b075"} Jan 22 06:04:53 crc kubenswrapper[4933]: I0122 06:04:53.574021 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016","Type":"ContainerStarted","Data":"8a27921119da49050071af7c42b3954b7dd3fbf2145808d90887c6de819bffec"} Jan 22 06:04:53 crc kubenswrapper[4933]: I0122 06:04:53.574035 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016","Type":"ContainerStarted","Data":"805b814e2cbc13d8230bd687a77c696f506fa359d8f4364fabf274beca8c9fbe"} Jan 22 06:04:54 crc kubenswrapper[4933]: I0122 06:04:54.595562 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016","Type":"ContainerStarted","Data":"3dbccd349100017de57314d2ef2e4235aa70b98e80d36f8e602e30cd6b29a896"} Jan 22 06:04:54 crc kubenswrapper[4933]: I0122 06:04:54.596148 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016","Type":"ContainerStarted","Data":"4bf3bc4884d64bf94b227e5a3f89d2cd681e2010861ba6ad807f97e6ed46fa36"} Jan 22 06:04:54 crc kubenswrapper[4933]: I0122 06:04:54.600623 4933 generic.go:334] "Generic (PLEG): container finished" podID="16cc03f7-df79-437e-903f-c0c6e5ba1cf0" containerID="beb6906e88fabf98fbefd441f25673bf4d870ba69a3d6a353004e55d74d7321f" exitCode=0 Jan 22 06:04:54 crc kubenswrapper[4933]: I0122 06:04:54.600699 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/glance-db-sync-zhkks" event={"ID":"16cc03f7-df79-437e-903f-c0c6e5ba1cf0","Type":"ContainerDied","Data":"beb6906e88fabf98fbefd441f25673bf4d870ba69a3d6a353004e55d74d7321f"} Jan 22 06:04:54 crc kubenswrapper[4933]: I0122 06:04:54.659441 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=40.175398138 podStartE2EDuration="45.659346301s" podCreationTimestamp="2026-01-22 06:04:09 +0000 UTC" firstStartedPulling="2026-01-22 06:04:47.217920603 +0000 UTC m=+1135.055045946" lastFinishedPulling="2026-01-22 06:04:52.701868736 +0000 UTC m=+1140.538994109" observedRunningTime="2026-01-22 06:04:54.655009233 +0000 UTC m=+1142.492134636" watchObservedRunningTime="2026-01-22 06:04:54.659346301 +0000 UTC m=+1142.496471684" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.036744 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8467b54bcc-qkldg"] Jan 22 06:04:55 crc kubenswrapper[4933]: E0122 06:04:55.037131 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76f30c05-b9b8-4439-aab6-b2c7e948a75f" containerName="swift-ring-rebalance" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.037146 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="76f30c05-b9b8-4439-aab6-b2c7e948a75f" containerName="swift-ring-rebalance" Jan 22 06:04:55 crc kubenswrapper[4933]: E0122 06:04:55.037181 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af452d54-60a2-44d7-835b-09285b372886" containerName="ovn-config" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.037189 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="af452d54-60a2-44d7-835b-09285b372886" containerName="ovn-config" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.037368 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="76f30c05-b9b8-4439-aab6-b2c7e948a75f" containerName="swift-ring-rebalance" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.037386 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="af452d54-60a2-44d7-835b-09285b372886" containerName="ovn-config" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.038320 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8467b54bcc-qkldg" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.042103 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.063616 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8467b54bcc-qkldg"] Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.065416 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.110349 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.165470 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/81937a04-32b8-4174-8e1c-bf16caabedd7-config\") pod \"dnsmasq-dns-8467b54bcc-qkldg\" (UID: \"81937a04-32b8-4174-8e1c-bf16caabedd7\") " pod="openstack/dnsmasq-dns-8467b54bcc-qkldg" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.165530 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lkgz5\" (UniqueName: \"kubernetes.io/projected/81937a04-32b8-4174-8e1c-bf16caabedd7-kube-api-access-lkgz5\") pod \"dnsmasq-dns-8467b54bcc-qkldg\" (UID: \"81937a04-32b8-4174-8e1c-bf16caabedd7\") " pod="openstack/dnsmasq-dns-8467b54bcc-qkldg" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.165615 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/81937a04-32b8-4174-8e1c-bf16caabedd7-ovsdbserver-nb\") pod \"dnsmasq-dns-8467b54bcc-qkldg\" (UID: \"81937a04-32b8-4174-8e1c-bf16caabedd7\") " pod="openstack/dnsmasq-dns-8467b54bcc-qkldg" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.165642 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/81937a04-32b8-4174-8e1c-bf16caabedd7-dns-svc\") pod \"dnsmasq-dns-8467b54bcc-qkldg\" (UID: \"81937a04-32b8-4174-8e1c-bf16caabedd7\") " pod="openstack/dnsmasq-dns-8467b54bcc-qkldg" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.165684 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/81937a04-32b8-4174-8e1c-bf16caabedd7-ovsdbserver-sb\") pod \"dnsmasq-dns-8467b54bcc-qkldg\" (UID: \"81937a04-32b8-4174-8e1c-bf16caabedd7\") " pod="openstack/dnsmasq-dns-8467b54bcc-qkldg" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.165820 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/81937a04-32b8-4174-8e1c-bf16caabedd7-dns-swift-storage-0\") pod \"dnsmasq-dns-8467b54bcc-qkldg\" (UID: \"81937a04-32b8-4174-8e1c-bf16caabedd7\") " pod="openstack/dnsmasq-dns-8467b54bcc-qkldg" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.267804 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/81937a04-32b8-4174-8e1c-bf16caabedd7-config\") pod \"dnsmasq-dns-8467b54bcc-qkldg\" (UID: \"81937a04-32b8-4174-8e1c-bf16caabedd7\") " 
pod="openstack/dnsmasq-dns-8467b54bcc-qkldg" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.267875 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lkgz5\" (UniqueName: \"kubernetes.io/projected/81937a04-32b8-4174-8e1c-bf16caabedd7-kube-api-access-lkgz5\") pod \"dnsmasq-dns-8467b54bcc-qkldg\" (UID: \"81937a04-32b8-4174-8e1c-bf16caabedd7\") " pod="openstack/dnsmasq-dns-8467b54bcc-qkldg" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.267937 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/81937a04-32b8-4174-8e1c-bf16caabedd7-ovsdbserver-nb\") pod \"dnsmasq-dns-8467b54bcc-qkldg\" (UID: \"81937a04-32b8-4174-8e1c-bf16caabedd7\") " pod="openstack/dnsmasq-dns-8467b54bcc-qkldg" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.267954 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/81937a04-32b8-4174-8e1c-bf16caabedd7-dns-svc\") pod \"dnsmasq-dns-8467b54bcc-qkldg\" (UID: \"81937a04-32b8-4174-8e1c-bf16caabedd7\") " pod="openstack/dnsmasq-dns-8467b54bcc-qkldg" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.267990 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/81937a04-32b8-4174-8e1c-bf16caabedd7-ovsdbserver-sb\") pod \"dnsmasq-dns-8467b54bcc-qkldg\" (UID: \"81937a04-32b8-4174-8e1c-bf16caabedd7\") " pod="openstack/dnsmasq-dns-8467b54bcc-qkldg" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.268024 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/81937a04-32b8-4174-8e1c-bf16caabedd7-dns-swift-storage-0\") pod \"dnsmasq-dns-8467b54bcc-qkldg\" (UID: \"81937a04-32b8-4174-8e1c-bf16caabedd7\") " pod="openstack/dnsmasq-dns-8467b54bcc-qkldg" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.268961 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/81937a04-32b8-4174-8e1c-bf16caabedd7-dns-swift-storage-0\") pod \"dnsmasq-dns-8467b54bcc-qkldg\" (UID: \"81937a04-32b8-4174-8e1c-bf16caabedd7\") " pod="openstack/dnsmasq-dns-8467b54bcc-qkldg" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.269556 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/81937a04-32b8-4174-8e1c-bf16caabedd7-config\") pod \"dnsmasq-dns-8467b54bcc-qkldg\" (UID: \"81937a04-32b8-4174-8e1c-bf16caabedd7\") " pod="openstack/dnsmasq-dns-8467b54bcc-qkldg" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.270454 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/81937a04-32b8-4174-8e1c-bf16caabedd7-ovsdbserver-nb\") pod \"dnsmasq-dns-8467b54bcc-qkldg\" (UID: \"81937a04-32b8-4174-8e1c-bf16caabedd7\") " pod="openstack/dnsmasq-dns-8467b54bcc-qkldg" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.271040 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/81937a04-32b8-4174-8e1c-bf16caabedd7-dns-svc\") pod \"dnsmasq-dns-8467b54bcc-qkldg\" (UID: \"81937a04-32b8-4174-8e1c-bf16caabedd7\") " pod="openstack/dnsmasq-dns-8467b54bcc-qkldg" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 
06:04:55.271570 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/81937a04-32b8-4174-8e1c-bf16caabedd7-ovsdbserver-sb\") pod \"dnsmasq-dns-8467b54bcc-qkldg\" (UID: \"81937a04-32b8-4174-8e1c-bf16caabedd7\") " pod="openstack/dnsmasq-dns-8467b54bcc-qkldg" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.304450 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lkgz5\" (UniqueName: \"kubernetes.io/projected/81937a04-32b8-4174-8e1c-bf16caabedd7-kube-api-access-lkgz5\") pod \"dnsmasq-dns-8467b54bcc-qkldg\" (UID: \"81937a04-32b8-4174-8e1c-bf16caabedd7\") " pod="openstack/dnsmasq-dns-8467b54bcc-qkldg" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.360146 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8467b54bcc-qkldg" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.394695 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-sr5dw"] Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.396875 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-sr5dw" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.411988 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-sr5dw"] Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.472083 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/49b3b5e7-c5e1-4a79-90a2-339b4f07f585-operator-scripts\") pod \"cinder-db-create-sr5dw\" (UID: \"49b3b5e7-c5e1-4a79-90a2-339b4f07f585\") " pod="openstack/cinder-db-create-sr5dw" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.472199 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6tpvt\" (UniqueName: \"kubernetes.io/projected/49b3b5e7-c5e1-4a79-90a2-339b4f07f585-kube-api-access-6tpvt\") pod \"cinder-db-create-sr5dw\" (UID: \"49b3b5e7-c5e1-4a79-90a2-339b4f07f585\") " pod="openstack/cinder-db-create-sr5dw" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.513402 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-2fmwb"] Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.514508 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-2fmwb" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.537922 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-2fmwb"] Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.548549 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-e75b-account-create-update-2nrxw"] Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.550583 4933 util.go:30] "No sandbox for pod can be found. 
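Volume setup above is the mirror image of the teardown seen earlier, again in a fixed order per volume: reconciler_common.go:245 ("VerifyControllerAttachedVolume started") for every volume, then reconciler_common.go:218 ("MountVolume started") and operation_generator.go:637 ("MountVolume.SetUp succeeded") per volume, and only then does util.go note that a sandbox needs to be started. A simplified Go sketch of that loop, using the dnsmasq pod's volume names from the log; this illustrates the logged sequence, not the kubelet reconciler:

    package main

    import "fmt"

    // mountAll models the ordering visible above: verify attachment for every
    // volume first, then mount each one, then the pod can get a sandbox.
    func mountAll(pod string, volumes []string) {
    	for _, v := range volumes {
    		fmt.Printf("operationExecutor.VerifyControllerAttachedVolume started for volume %q pod %q\n", v, pod)
    	}
    	for _, v := range volumes {
    		fmt.Printf("operationExecutor.MountVolume started for volume %q pod %q\n", v, pod)
    		fmt.Printf("MountVolume.SetUp succeeded for volume %q pod %q\n", v, pod)
    	}
    	fmt.Printf("No sandbox for pod can be found. Need to start a new one pod=%q\n", pod)
    }

    func main() {
    	mountAll("openstack/dnsmasq-dns-8467b54bcc-qkldg", []string{
    		"config", "kube-api-access-lkgz5", "ovsdbserver-nb",
    		"dns-svc", "ovsdbserver-sb", "dns-swift-storage-0",
    	})
    }

The same pattern repeats below for each of the db-create and db-sync pods that are admitted in this window.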
Need to start a new one" pod="openstack/barbican-e75b-account-create-update-2nrxw" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.552130 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.570001 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-e75b-account-create-update-2nrxw"] Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.591398 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6tpvt\" (UniqueName: \"kubernetes.io/projected/49b3b5e7-c5e1-4a79-90a2-339b4f07f585-kube-api-access-6tpvt\") pod \"cinder-db-create-sr5dw\" (UID: \"49b3b5e7-c5e1-4a79-90a2-339b4f07f585\") " pod="openstack/cinder-db-create-sr5dw" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.591552 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bqsdr\" (UniqueName: \"kubernetes.io/projected/0235a4fc-a22c-493c-95a8-7b90423eab40-kube-api-access-bqsdr\") pod \"barbican-db-create-2fmwb\" (UID: \"0235a4fc-a22c-493c-95a8-7b90423eab40\") " pod="openstack/barbican-db-create-2fmwb" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.591581 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0235a4fc-a22c-493c-95a8-7b90423eab40-operator-scripts\") pod \"barbican-db-create-2fmwb\" (UID: \"0235a4fc-a22c-493c-95a8-7b90423eab40\") " pod="openstack/barbican-db-create-2fmwb" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.591756 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/49b3b5e7-c5e1-4a79-90a2-339b4f07f585-operator-scripts\") pod \"cinder-db-create-sr5dw\" (UID: \"49b3b5e7-c5e1-4a79-90a2-339b4f07f585\") " pod="openstack/cinder-db-create-sr5dw" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.605036 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/49b3b5e7-c5e1-4a79-90a2-339b4f07f585-operator-scripts\") pod \"cinder-db-create-sr5dw\" (UID: \"49b3b5e7-c5e1-4a79-90a2-339b4f07f585\") " pod="openstack/cinder-db-create-sr5dw" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.650127 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6tpvt\" (UniqueName: \"kubernetes.io/projected/49b3b5e7-c5e1-4a79-90a2-339b4f07f585-kube-api-access-6tpvt\") pod \"cinder-db-create-sr5dw\" (UID: \"49b3b5e7-c5e1-4a79-90a2-339b4f07f585\") " pod="openstack/cinder-db-create-sr5dw" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.660857 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-633c-account-create-update-xl2w2"] Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.661901 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-633c-account-create-update-xl2w2" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.665086 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.674208 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-633c-account-create-update-xl2w2"] Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.693482 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8b8bd705-8ba6-471f-8ed4-dbbd18816c6e-operator-scripts\") pod \"barbican-e75b-account-create-update-2nrxw\" (UID: \"8b8bd705-8ba6-471f-8ed4-dbbd18816c6e\") " pod="openstack/barbican-e75b-account-create-update-2nrxw" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.693548 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2md2w\" (UniqueName: \"kubernetes.io/projected/8b8bd705-8ba6-471f-8ed4-dbbd18816c6e-kube-api-access-2md2w\") pod \"barbican-e75b-account-create-update-2nrxw\" (UID: \"8b8bd705-8ba6-471f-8ed4-dbbd18816c6e\") " pod="openstack/barbican-e75b-account-create-update-2nrxw" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.693581 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bqsdr\" (UniqueName: \"kubernetes.io/projected/0235a4fc-a22c-493c-95a8-7b90423eab40-kube-api-access-bqsdr\") pod \"barbican-db-create-2fmwb\" (UID: \"0235a4fc-a22c-493c-95a8-7b90423eab40\") " pod="openstack/barbican-db-create-2fmwb" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.693603 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0235a4fc-a22c-493c-95a8-7b90423eab40-operator-scripts\") pod \"barbican-db-create-2fmwb\" (UID: \"0235a4fc-a22c-493c-95a8-7b90423eab40\") " pod="openstack/barbican-db-create-2fmwb" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.694574 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0235a4fc-a22c-493c-95a8-7b90423eab40-operator-scripts\") pod \"barbican-db-create-2fmwb\" (UID: \"0235a4fc-a22c-493c-95a8-7b90423eab40\") " pod="openstack/barbican-db-create-2fmwb" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.731482 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bqsdr\" (UniqueName: \"kubernetes.io/projected/0235a4fc-a22c-493c-95a8-7b90423eab40-kube-api-access-bqsdr\") pod \"barbican-db-create-2fmwb\" (UID: \"0235a4fc-a22c-493c-95a8-7b90423eab40\") " pod="openstack/barbican-db-create-2fmwb" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.748384 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-sr5dw" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.783125 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-zstt6"] Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.792199 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-zstt6" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.796917 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6b3a9bf8-9baa-4b5a-a321-1bff747279fd-operator-scripts\") pod \"cinder-633c-account-create-update-xl2w2\" (UID: \"6b3a9bf8-9baa-4b5a-a321-1bff747279fd\") " pod="openstack/cinder-633c-account-create-update-xl2w2" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.796981 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2kkl9\" (UniqueName: \"kubernetes.io/projected/6b3a9bf8-9baa-4b5a-a321-1bff747279fd-kube-api-access-2kkl9\") pod \"cinder-633c-account-create-update-xl2w2\" (UID: \"6b3a9bf8-9baa-4b5a-a321-1bff747279fd\") " pod="openstack/cinder-633c-account-create-update-xl2w2" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.797032 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8b8bd705-8ba6-471f-8ed4-dbbd18816c6e-operator-scripts\") pod \"barbican-e75b-account-create-update-2nrxw\" (UID: \"8b8bd705-8ba6-471f-8ed4-dbbd18816c6e\") " pod="openstack/barbican-e75b-account-create-update-2nrxw" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.797082 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2md2w\" (UniqueName: \"kubernetes.io/projected/8b8bd705-8ba6-471f-8ed4-dbbd18816c6e-kube-api-access-2md2w\") pod \"barbican-e75b-account-create-update-2nrxw\" (UID: \"8b8bd705-8ba6-471f-8ed4-dbbd18816c6e\") " pod="openstack/barbican-e75b-account-create-update-2nrxw" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.799032 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8b8bd705-8ba6-471f-8ed4-dbbd18816c6e-operator-scripts\") pod \"barbican-e75b-account-create-update-2nrxw\" (UID: \"8b8bd705-8ba6-471f-8ed4-dbbd18816c6e\") " pod="openstack/barbican-e75b-account-create-update-2nrxw" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.799608 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.799764 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-vftwj" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.799804 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.799884 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.834803 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-9hsxt"] Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.835829 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-9hsxt" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.845134 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-zstt6"] Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.851510 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-2fmwb" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.863403 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-9hsxt"] Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.887425 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2md2w\" (UniqueName: \"kubernetes.io/projected/8b8bd705-8ba6-471f-8ed4-dbbd18816c6e-kube-api-access-2md2w\") pod \"barbican-e75b-account-create-update-2nrxw\" (UID: \"8b8bd705-8ba6-471f-8ed4-dbbd18816c6e\") " pod="openstack/barbican-e75b-account-create-update-2nrxw" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.902916 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mh7f2\" (UniqueName: \"kubernetes.io/projected/47f2883c-cf0b-4774-96ea-6a6e6ec8f335-kube-api-access-mh7f2\") pod \"neutron-db-create-9hsxt\" (UID: \"47f2883c-cf0b-4774-96ea-6a6e6ec8f335\") " pod="openstack/neutron-db-create-9hsxt" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.902972 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6b3a9bf8-9baa-4b5a-a321-1bff747279fd-operator-scripts\") pod \"cinder-633c-account-create-update-xl2w2\" (UID: \"6b3a9bf8-9baa-4b5a-a321-1bff747279fd\") " pod="openstack/cinder-633c-account-create-update-xl2w2" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.903015 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17180ad8-5646-46f7-ad5a-75608d596672-combined-ca-bundle\") pod \"keystone-db-sync-zstt6\" (UID: \"17180ad8-5646-46f7-ad5a-75608d596672\") " pod="openstack/keystone-db-sync-zstt6" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.903037 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2kkl9\" (UniqueName: \"kubernetes.io/projected/6b3a9bf8-9baa-4b5a-a321-1bff747279fd-kube-api-access-2kkl9\") pod \"cinder-633c-account-create-update-xl2w2\" (UID: \"6b3a9bf8-9baa-4b5a-a321-1bff747279fd\") " pod="openstack/cinder-633c-account-create-update-xl2w2" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.903094 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17180ad8-5646-46f7-ad5a-75608d596672-config-data\") pod \"keystone-db-sync-zstt6\" (UID: \"17180ad8-5646-46f7-ad5a-75608d596672\") " pod="openstack/keystone-db-sync-zstt6" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.903112 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2q9h5\" (UniqueName: \"kubernetes.io/projected/17180ad8-5646-46f7-ad5a-75608d596672-kube-api-access-2q9h5\") pod \"keystone-db-sync-zstt6\" (UID: \"17180ad8-5646-46f7-ad5a-75608d596672\") " pod="openstack/keystone-db-sync-zstt6" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.903149 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/47f2883c-cf0b-4774-96ea-6a6e6ec8f335-operator-scripts\") pod \"neutron-db-create-9hsxt\" (UID: \"47f2883c-cf0b-4774-96ea-6a6e6ec8f335\") " pod="openstack/neutron-db-create-9hsxt" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 
06:04:55.903843 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6b3a9bf8-9baa-4b5a-a321-1bff747279fd-operator-scripts\") pod \"cinder-633c-account-create-update-xl2w2\" (UID: \"6b3a9bf8-9baa-4b5a-a321-1bff747279fd\") " pod="openstack/cinder-633c-account-create-update-xl2w2" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.914641 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-e75b-account-create-update-2nrxw" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.948573 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2kkl9\" (UniqueName: \"kubernetes.io/projected/6b3a9bf8-9baa-4b5a-a321-1bff747279fd-kube-api-access-2kkl9\") pod \"cinder-633c-account-create-update-xl2w2\" (UID: \"6b3a9bf8-9baa-4b5a-a321-1bff747279fd\") " pod="openstack/cinder-633c-account-create-update-xl2w2" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.981135 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-98e3-account-create-update-7zvxp"] Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.982111 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-98e3-account-create-update-7zvxp" Jan 22 06:04:55 crc kubenswrapper[4933]: I0122 06:04:55.989695 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.005220 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mh7f2\" (UniqueName: \"kubernetes.io/projected/47f2883c-cf0b-4774-96ea-6a6e6ec8f335-kube-api-access-mh7f2\") pod \"neutron-db-create-9hsxt\" (UID: \"47f2883c-cf0b-4774-96ea-6a6e6ec8f335\") " pod="openstack/neutron-db-create-9hsxt" Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.006356 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17180ad8-5646-46f7-ad5a-75608d596672-combined-ca-bundle\") pod \"keystone-db-sync-zstt6\" (UID: \"17180ad8-5646-46f7-ad5a-75608d596672\") " pod="openstack/keystone-db-sync-zstt6" Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.007182 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17180ad8-5646-46f7-ad5a-75608d596672-config-data\") pod \"keystone-db-sync-zstt6\" (UID: \"17180ad8-5646-46f7-ad5a-75608d596672\") " pod="openstack/keystone-db-sync-zstt6" Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.007203 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2q9h5\" (UniqueName: \"kubernetes.io/projected/17180ad8-5646-46f7-ad5a-75608d596672-kube-api-access-2q9h5\") pod \"keystone-db-sync-zstt6\" (UID: \"17180ad8-5646-46f7-ad5a-75608d596672\") " pod="openstack/keystone-db-sync-zstt6" Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.007245 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/47f2883c-cf0b-4774-96ea-6a6e6ec8f335-operator-scripts\") pod \"neutron-db-create-9hsxt\" (UID: \"47f2883c-cf0b-4774-96ea-6a6e6ec8f335\") " pod="openstack/neutron-db-create-9hsxt" Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.007773 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/47f2883c-cf0b-4774-96ea-6a6e6ec8f335-operator-scripts\") pod \"neutron-db-create-9hsxt\" (UID: \"47f2883c-cf0b-4774-96ea-6a6e6ec8f335\") " pod="openstack/neutron-db-create-9hsxt" Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.014020 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-633c-account-create-update-xl2w2" Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.014900 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17180ad8-5646-46f7-ad5a-75608d596672-combined-ca-bundle\") pod \"keystone-db-sync-zstt6\" (UID: \"17180ad8-5646-46f7-ad5a-75608d596672\") " pod="openstack/keystone-db-sync-zstt6" Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.015209 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17180ad8-5646-46f7-ad5a-75608d596672-config-data\") pod \"keystone-db-sync-zstt6\" (UID: \"17180ad8-5646-46f7-ad5a-75608d596672\") " pod="openstack/keystone-db-sync-zstt6" Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.027605 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-98e3-account-create-update-7zvxp"] Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.055789 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2q9h5\" (UniqueName: \"kubernetes.io/projected/17180ad8-5646-46f7-ad5a-75608d596672-kube-api-access-2q9h5\") pod \"keystone-db-sync-zstt6\" (UID: \"17180ad8-5646-46f7-ad5a-75608d596672\") " pod="openstack/keystone-db-sync-zstt6" Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.077671 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mh7f2\" (UniqueName: \"kubernetes.io/projected/47f2883c-cf0b-4774-96ea-6a6e6ec8f335-kube-api-access-mh7f2\") pod \"neutron-db-create-9hsxt\" (UID: \"47f2883c-cf0b-4774-96ea-6a6e6ec8f335\") " pod="openstack/neutron-db-create-9hsxt" Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.092193 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8467b54bcc-qkldg"] Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.108356 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4587953-b9d4-470c-aaca-3cf9f80c8961-operator-scripts\") pod \"neutron-98e3-account-create-update-7zvxp\" (UID: \"a4587953-b9d4-470c-aaca-3cf9f80c8961\") " pod="openstack/neutron-98e3-account-create-update-7zvxp" Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.108441 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5hphk\" (UniqueName: \"kubernetes.io/projected/a4587953-b9d4-470c-aaca-3cf9f80c8961-kube-api-access-5hphk\") pod \"neutron-98e3-account-create-update-7zvxp\" (UID: \"a4587953-b9d4-470c-aaca-3cf9f80c8961\") " pod="openstack/neutron-98e3-account-create-update-7zvxp" Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.141040 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-zstt6" Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.203969 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-9hsxt"
Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.210053 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5hphk\" (UniqueName: \"kubernetes.io/projected/a4587953-b9d4-470c-aaca-3cf9f80c8961-kube-api-access-5hphk\") pod \"neutron-98e3-account-create-update-7zvxp\" (UID: \"a4587953-b9d4-470c-aaca-3cf9f80c8961\") " pod="openstack/neutron-98e3-account-create-update-7zvxp"
Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.210346 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4587953-b9d4-470c-aaca-3cf9f80c8961-operator-scripts\") pod \"neutron-98e3-account-create-update-7zvxp\" (UID: \"a4587953-b9d4-470c-aaca-3cf9f80c8961\") " pod="openstack/neutron-98e3-account-create-update-7zvxp"
Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.215034 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4587953-b9d4-470c-aaca-3cf9f80c8961-operator-scripts\") pod \"neutron-98e3-account-create-update-7zvxp\" (UID: \"a4587953-b9d4-470c-aaca-3cf9f80c8961\") " pod="openstack/neutron-98e3-account-create-update-7zvxp"
Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.235168 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5hphk\" (UniqueName: \"kubernetes.io/projected/a4587953-b9d4-470c-aaca-3cf9f80c8961-kube-api-access-5hphk\") pod \"neutron-98e3-account-create-update-7zvxp\" (UID: \"a4587953-b9d4-470c-aaca-3cf9f80c8961\") " pod="openstack/neutron-98e3-account-create-update-7zvxp"
Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.333339 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-98e3-account-create-update-7zvxp"
Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.483019 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-zhkks"
Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.538675 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/16cc03f7-df79-437e-903f-c0c6e5ba1cf0-db-sync-config-data\") pod \"16cc03f7-df79-437e-903f-c0c6e5ba1cf0\" (UID: \"16cc03f7-df79-437e-903f-c0c6e5ba1cf0\") "
Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.538771 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16cc03f7-df79-437e-903f-c0c6e5ba1cf0-config-data\") pod \"16cc03f7-df79-437e-903f-c0c6e5ba1cf0\" (UID: \"16cc03f7-df79-437e-903f-c0c6e5ba1cf0\") "
Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.538814 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16cc03f7-df79-437e-903f-c0c6e5ba1cf0-combined-ca-bundle\") pod \"16cc03f7-df79-437e-903f-c0c6e5ba1cf0\" (UID: \"16cc03f7-df79-437e-903f-c0c6e5ba1cf0\") "
Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.538840 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x24vj\" (UniqueName: \"kubernetes.io/projected/16cc03f7-df79-437e-903f-c0c6e5ba1cf0-kube-api-access-x24vj\") pod \"16cc03f7-df79-437e-903f-c0c6e5ba1cf0\" (UID: \"16cc03f7-df79-437e-903f-c0c6e5ba1cf0\") "
Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.550354 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16cc03f7-df79-437e-903f-c0c6e5ba1cf0-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "16cc03f7-df79-437e-903f-c0c6e5ba1cf0" (UID: "16cc03f7-df79-437e-903f-c0c6e5ba1cf0"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.554634 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/16cc03f7-df79-437e-903f-c0c6e5ba1cf0-kube-api-access-x24vj" (OuterVolumeSpecName: "kube-api-access-x24vj") pod "16cc03f7-df79-437e-903f-c0c6e5ba1cf0" (UID: "16cc03f7-df79-437e-903f-c0c6e5ba1cf0"). InnerVolumeSpecName "kube-api-access-x24vj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.573268 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16cc03f7-df79-437e-903f-c0c6e5ba1cf0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "16cc03f7-df79-437e-903f-c0c6e5ba1cf0" (UID: "16cc03f7-df79-437e-903f-c0c6e5ba1cf0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.600666 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16cc03f7-df79-437e-903f-c0c6e5ba1cf0-config-data" (OuterVolumeSpecName: "config-data") pod "16cc03f7-df79-437e-903f-c0c6e5ba1cf0" (UID: "16cc03f7-df79-437e-903f-c0c6e5ba1cf0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.642012 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x24vj\" (UniqueName: \"kubernetes.io/projected/16cc03f7-df79-437e-903f-c0c6e5ba1cf0-kube-api-access-x24vj\") on node \"crc\" DevicePath \"\""
Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.642039 4933 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/16cc03f7-df79-437e-903f-c0c6e5ba1cf0-db-sync-config-data\") on node \"crc\" DevicePath \"\""
Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.642049 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16cc03f7-df79-437e-903f-c0c6e5ba1cf0-config-data\") on node \"crc\" DevicePath \"\""
Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.642058 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16cc03f7-df79-437e-903f-c0c6e5ba1cf0-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.652853 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-zhkks" event={"ID":"16cc03f7-df79-437e-903f-c0c6e5ba1cf0","Type":"ContainerDied","Data":"e6b38dc745cdbd323c7f23ed36f5d87c8ea4944d0c964c369ea3a3046754d32d"}
Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.652897 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e6b38dc745cdbd323c7f23ed36f5d87c8ea4944d0c964c369ea3a3046754d32d"
Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.652956 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-zhkks"
Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.665652 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8467b54bcc-qkldg" event={"ID":"81937a04-32b8-4174-8e1c-bf16caabedd7","Type":"ContainerStarted","Data":"c431c57dea432274eb902c50547beb94279ceb213c0b6a5c6814aec2bae70844"}
Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.756541 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-633c-account-create-update-xl2w2"]
Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.762724 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-2fmwb"]
Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.770703 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-sr5dw"]
Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.825284 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-e75b-account-create-update-2nrxw"]
Jan 22 06:04:56 crc kubenswrapper[4933]: I0122 06:04:56.915629 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-zstt6"]
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.010637 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-9hsxt"]
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.095202 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-98e3-account-create-update-7zvxp"]
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.143490 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8467b54bcc-qkldg"]
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.162176 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-56c9bc6f5c-qmln7"]
Jan 22 06:04:57 crc kubenswrapper[4933]: E0122 06:04:57.162569 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16cc03f7-df79-437e-903f-c0c6e5ba1cf0" containerName="glance-db-sync"
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.162586 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="16cc03f7-df79-437e-903f-c0c6e5ba1cf0" containerName="glance-db-sync"
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.162770 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="16cc03f7-df79-437e-903f-c0c6e5ba1cf0" containerName="glance-db-sync"
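The entries above all share klog's single-line structure: a syslog prefix, then severity+date ("I0122"), timestamp, PID, source file:line, and a structured message with key=value pairs such as pod="...". A minimal sketch of tallying SyncLoop/PLEG traffic per pod from such lines is below; the regexes are illustrative assumptions about the format seen in this log, not kubelet's own parser.

```go
// Minimal sketch: count kubelet log entries per pod from klog-style lines
// read on stdin. The regexes below are assumptions based on the lines in
// this log (syslog prefix, severity+MMDD, time, PID, file:line, message).
package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

var (
	syslogPrefix = regexp.MustCompile(`kubenswrapper\[\d+\]: `)
	klogLine     = regexp.MustCompile(`^([IWE])(\d{4}) (\d{2}:\d{2}:\d{2}\.\d+)\s+(\d+) ([\w.]+:\d+)\] (.*)$`)
	podField     = regexp.MustCompile(`pod="([^"]+)"`)
)

func main() {
	counts := map[string]int{}
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 1<<20), 1<<20) // log lines can be very long
	for sc.Scan() {
		line := sc.Text()
		if loc := syslogPrefix.FindStringIndex(line); loc != nil {
			line = line[loc[1]:] // strip "Jan 22 06:04:56 crc kubenswrapper[4933]: "
		}
		m := klogLine.FindStringSubmatch(line)
		if m == nil {
			continue // continuation lines of multi-line errors, etc.
		}
		if p := podField.FindStringSubmatch(m[6]); p != nil {
			counts[p[1]]++
		}
	}
	for pod, n := range counts {
		fmt.Printf("%-60s %d\n", pod, n)
	}
}
```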
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.163620 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56c9bc6f5c-qmln7"
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.170258 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56c9bc6f5c-qmln7"]
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.275182 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6ead38e6-cb9b-4cdf-8b68-2971113c1af1-dns-swift-storage-0\") pod \"dnsmasq-dns-56c9bc6f5c-qmln7\" (UID: \"6ead38e6-cb9b-4cdf-8b68-2971113c1af1\") " pod="openstack/dnsmasq-dns-56c9bc6f5c-qmln7"
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.275242 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6ead38e6-cb9b-4cdf-8b68-2971113c1af1-dns-svc\") pod \"dnsmasq-dns-56c9bc6f5c-qmln7\" (UID: \"6ead38e6-cb9b-4cdf-8b68-2971113c1af1\") " pod="openstack/dnsmasq-dns-56c9bc6f5c-qmln7"
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.275301 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6ead38e6-cb9b-4cdf-8b68-2971113c1af1-ovsdbserver-sb\") pod \"dnsmasq-dns-56c9bc6f5c-qmln7\" (UID: \"6ead38e6-cb9b-4cdf-8b68-2971113c1af1\") " pod="openstack/dnsmasq-dns-56c9bc6f5c-qmln7"
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.275316 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hvqt8\" (UniqueName: \"kubernetes.io/projected/6ead38e6-cb9b-4cdf-8b68-2971113c1af1-kube-api-access-hvqt8\") pod \"dnsmasq-dns-56c9bc6f5c-qmln7\" (UID: \"6ead38e6-cb9b-4cdf-8b68-2971113c1af1\") " pod="openstack/dnsmasq-dns-56c9bc6f5c-qmln7"
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.275332 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6ead38e6-cb9b-4cdf-8b68-2971113c1af1-ovsdbserver-nb\") pod \"dnsmasq-dns-56c9bc6f5c-qmln7\" (UID: \"6ead38e6-cb9b-4cdf-8b68-2971113c1af1\") " pod="openstack/dnsmasq-dns-56c9bc6f5c-qmln7"
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.275365 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6ead38e6-cb9b-4cdf-8b68-2971113c1af1-config\") pod \"dnsmasq-dns-56c9bc6f5c-qmln7\" (UID: \"6ead38e6-cb9b-4cdf-8b68-2971113c1af1\") " pod="openstack/dnsmasq-dns-56c9bc6f5c-qmln7"
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.376537 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6ead38e6-cb9b-4cdf-8b68-2971113c1af1-ovsdbserver-sb\") pod \"dnsmasq-dns-56c9bc6f5c-qmln7\" (UID: \"6ead38e6-cb9b-4cdf-8b68-2971113c1af1\") " pod="openstack/dnsmasq-dns-56c9bc6f5c-qmln7"
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.376852 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hvqt8\" (UniqueName: \"kubernetes.io/projected/6ead38e6-cb9b-4cdf-8b68-2971113c1af1-kube-api-access-hvqt8\") pod \"dnsmasq-dns-56c9bc6f5c-qmln7\" (UID: \"6ead38e6-cb9b-4cdf-8b68-2971113c1af1\") " pod="openstack/dnsmasq-dns-56c9bc6f5c-qmln7"
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.376932 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6ead38e6-cb9b-4cdf-8b68-2971113c1af1-ovsdbserver-nb\") pod \"dnsmasq-dns-56c9bc6f5c-qmln7\" (UID: \"6ead38e6-cb9b-4cdf-8b68-2971113c1af1\") " pod="openstack/dnsmasq-dns-56c9bc6f5c-qmln7"
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.377031 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6ead38e6-cb9b-4cdf-8b68-2971113c1af1-config\") pod \"dnsmasq-dns-56c9bc6f5c-qmln7\" (UID: \"6ead38e6-cb9b-4cdf-8b68-2971113c1af1\") " pod="openstack/dnsmasq-dns-56c9bc6f5c-qmln7"
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.377196 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6ead38e6-cb9b-4cdf-8b68-2971113c1af1-dns-swift-storage-0\") pod \"dnsmasq-dns-56c9bc6f5c-qmln7\" (UID: \"6ead38e6-cb9b-4cdf-8b68-2971113c1af1\") " pod="openstack/dnsmasq-dns-56c9bc6f5c-qmln7"
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.377276 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6ead38e6-cb9b-4cdf-8b68-2971113c1af1-dns-svc\") pod \"dnsmasq-dns-56c9bc6f5c-qmln7\" (UID: \"6ead38e6-cb9b-4cdf-8b68-2971113c1af1\") " pod="openstack/dnsmasq-dns-56c9bc6f5c-qmln7"
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.377446 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6ead38e6-cb9b-4cdf-8b68-2971113c1af1-ovsdbserver-sb\") pod \"dnsmasq-dns-56c9bc6f5c-qmln7\" (UID: \"6ead38e6-cb9b-4cdf-8b68-2971113c1af1\") " pod="openstack/dnsmasq-dns-56c9bc6f5c-qmln7"
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.377848 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6ead38e6-cb9b-4cdf-8b68-2971113c1af1-ovsdbserver-nb\") pod \"dnsmasq-dns-56c9bc6f5c-qmln7\" (UID: \"6ead38e6-cb9b-4cdf-8b68-2971113c1af1\") " pod="openstack/dnsmasq-dns-56c9bc6f5c-qmln7"
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.377943 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6ead38e6-cb9b-4cdf-8b68-2971113c1af1-config\") pod \"dnsmasq-dns-56c9bc6f5c-qmln7\" (UID: \"6ead38e6-cb9b-4cdf-8b68-2971113c1af1\") " pod="openstack/dnsmasq-dns-56c9bc6f5c-qmln7"
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.378238 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6ead38e6-cb9b-4cdf-8b68-2971113c1af1-dns-svc\") pod \"dnsmasq-dns-56c9bc6f5c-qmln7\" (UID: \"6ead38e6-cb9b-4cdf-8b68-2971113c1af1\") " pod="openstack/dnsmasq-dns-56c9bc6f5c-qmln7"
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.378768 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6ead38e6-cb9b-4cdf-8b68-2971113c1af1-dns-swift-storage-0\") pod \"dnsmasq-dns-56c9bc6f5c-qmln7\" (UID: \"6ead38e6-cb9b-4cdf-8b68-2971113c1af1\") " pod="openstack/dnsmasq-dns-56c9bc6f5c-qmln7"
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.402600 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hvqt8\" (UniqueName: \"kubernetes.io/projected/6ead38e6-cb9b-4cdf-8b68-2971113c1af1-kube-api-access-hvqt8\") pod \"dnsmasq-dns-56c9bc6f5c-qmln7\" (UID: \"6ead38e6-cb9b-4cdf-8b68-2971113c1af1\") " pod="openstack/dnsmasq-dns-56c9bc6f5c-qmln7"
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.502642 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56c9bc6f5c-qmln7"
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.705333 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-e75b-account-create-update-2nrxw" event={"ID":"8b8bd705-8ba6-471f-8ed4-dbbd18816c6e","Type":"ContainerStarted","Data":"182b292aa4c399c51ea47eaf2870bdc3ee5bb3f9a6451c535d36593c849e03d6"}
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.709904 4933 generic.go:334] "Generic (PLEG): container finished" podID="81937a04-32b8-4174-8e1c-bf16caabedd7" containerID="8ddacb03af848b402150cc024506ccb63fde80e81c0dbfaa56949d2a8d093167" exitCode=0
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.712768 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-e75b-account-create-update-2nrxw" event={"ID":"8b8bd705-8ba6-471f-8ed4-dbbd18816c6e","Type":"ContainerStarted","Data":"d0e552db1a5f7ce474d2361098efe5651a7b6d1e8a942fa724ce20b96a0380a7"}
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.712930 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8467b54bcc-qkldg" event={"ID":"81937a04-32b8-4174-8e1c-bf16caabedd7","Type":"ContainerDied","Data":"8ddacb03af848b402150cc024506ccb63fde80e81c0dbfaa56949d2a8d093167"}
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.716048 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-zstt6" event={"ID":"17180ad8-5646-46f7-ad5a-75608d596672","Type":"ContainerStarted","Data":"e9634ecc7b8680b07959fd9a67015adcf05f0b437bdb3730ac1908e05c268a20"}
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.722024 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-9hsxt" event={"ID":"47f2883c-cf0b-4774-96ea-6a6e6ec8f335","Type":"ContainerStarted","Data":"6fd114928698fbfda1eb0719a5519bfbb9c115b02e0b1ebbaeb3ba4ac6e8e3b4"}
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.722064 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-9hsxt" event={"ID":"47f2883c-cf0b-4774-96ea-6a6e6ec8f335","Type":"ContainerStarted","Data":"d78c3ac14445a4662892ca5f4f855209277fb8984eba8f8ed9920f5f0235406d"}
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.737000 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-98e3-account-create-update-7zvxp" event={"ID":"a4587953-b9d4-470c-aaca-3cf9f80c8961","Type":"ContainerStarted","Data":"8330b5239f77c8b2736000ec9bf3d8a736e59fc675f587da19ce26937bdd8640"}
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.737046 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-98e3-account-create-update-7zvxp" event={"ID":"a4587953-b9d4-470c-aaca-3cf9f80c8961","Type":"ContainerStarted","Data":"96ddad987f1eb06c9b9f2a3e9d93d1a8981cee027bbb51352b0dd14033cea86a"}
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.744621 4933 generic.go:334] "Generic (PLEG): container finished" podID="6b3a9bf8-9baa-4b5a-a321-1bff747279fd" containerID="6183e68a58c9738473d34db92429a47aa5b4507c279bf75ece9abeceb453ca17" exitCode=0
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.744719 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-633c-account-create-update-xl2w2" event={"ID":"6b3a9bf8-9baa-4b5a-a321-1bff747279fd","Type":"ContainerDied","Data":"6183e68a58c9738473d34db92429a47aa5b4507c279bf75ece9abeceb453ca17"}
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.744745 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-633c-account-create-update-xl2w2" event={"ID":"6b3a9bf8-9baa-4b5a-a321-1bff747279fd","Type":"ContainerStarted","Data":"3e9587cf1963cc27bca37cd396199d0881a5d088e51aec2583a47051133f4461"}
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.774122 4933 generic.go:334] "Generic (PLEG): container finished" podID="49b3b5e7-c5e1-4a79-90a2-339b4f07f585" containerID="a1097fa194d6c885505160faaaebf96f845cac25e2ae2b3290a4730eb217e9df" exitCode=0
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.774181 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-sr5dw" event={"ID":"49b3b5e7-c5e1-4a79-90a2-339b4f07f585","Type":"ContainerDied","Data":"a1097fa194d6c885505160faaaebf96f845cac25e2ae2b3290a4730eb217e9df"}
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.774206 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-sr5dw" event={"ID":"49b3b5e7-c5e1-4a79-90a2-339b4f07f585","Type":"ContainerStarted","Data":"8bbf00b3dc601026a866828536f1692805a49f37a7823d3d8be2aaa844b8e7a4"}
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.782008 4933 generic.go:334] "Generic (PLEG): container finished" podID="0235a4fc-a22c-493c-95a8-7b90423eab40" containerID="91d245d4ed97af7cc2c3d8b609c00d70bb738ba8e18e4de79db1d408fd33d5f3" exitCode=0
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.782048 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-2fmwb" event={"ID":"0235a4fc-a22c-493c-95a8-7b90423eab40","Type":"ContainerDied","Data":"91d245d4ed97af7cc2c3d8b609c00d70bb738ba8e18e4de79db1d408fd33d5f3"}
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.782156 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-2fmwb" event={"ID":"0235a4fc-a22c-493c-95a8-7b90423eab40","Type":"ContainerStarted","Data":"032177ec25f6337844bb1b47f1f8227d9da9f8e01c7fd1c523d6db67572a492a"}
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.833007 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-create-9hsxt" podStartSLOduration=2.832989678 podStartE2EDuration="2.832989678s" podCreationTimestamp="2026-01-22 06:04:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:04:57.818139246 +0000 UTC m=+1145.655264599" watchObservedRunningTime="2026-01-22 06:04:57.832989678 +0000 UTC m=+1145.670115031"
Jan 22 06:04:57 crc kubenswrapper[4933]: I0122 06:04:57.847043 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-98e3-account-create-update-7zvxp" podStartSLOduration=2.847024409 podStartE2EDuration="2.847024409s" podCreationTimestamp="2026-01-22 06:04:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:04:57.838354972 +0000 UTC m=+1145.675480335" watchObservedRunningTime="2026-01-22 06:04:57.847024409 +0000 UTC m=+1145.684149762"
Jan 22 06:04:58 crc kubenswrapper[4933]: E0122 06:04:58.049088 4933 log.go:32] "CreateContainer in sandbox from runtime service failed" err=<
Jan 22 06:04:58 crc kubenswrapper[4933]: rpc error: code = Unknown desc = container create failed: mount `/var/lib/kubelet/pods/81937a04-32b8-4174-8e1c-bf16caabedd7/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory
Jan 22 06:04:58 crc kubenswrapper[4933]: > podSandboxID="c431c57dea432274eb902c50547beb94279ceb213c0b6a5c6814aec2bae70844"
Jan 22 06:04:58 crc kubenswrapper[4933]: E0122 06:04:58.049506 4933 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Jan 22 06:04:58 crc kubenswrapper[4933]: container &Container{Name:dnsmasq-dns,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n97h57bh654h659h5b6hbfhc4h689h565h578h56ch8dh8bh67fhf7h5f8hc7h5d4h5d5h5f7h687h5cbh5c5h5d8h68fh669h588h59bh5c6h674h5c8h5d7q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-swift-storage-0,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-swift-storage-0,SubPath:dns-swift-storage-0,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-nb,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/ovsdbserver-nb,SubPath:ovsdbserver-nb,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-sb,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/ovsdbserver-sb,SubPath:ovsdbserver-sb,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lkgz5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-8467b54bcc-qkldg_openstack(81937a04-32b8-4174-8e1c-bf16caabedd7): CreateContainerError: container create failed: mount `/var/lib/kubelet/pods/81937a04-32b8-4174-8e1c-bf16caabedd7/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory
Jan 22 06:04:58 crc kubenswrapper[4933]: > logger="UnhandledError"
Jan 22 06:04:58 crc kubenswrapper[4933]: E0122 06:04:58.051233 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dnsmasq-dns\" with CreateContainerError: \"container create failed: mount `/var/lib/kubelet/pods/81937a04-32b8-4174-8e1c-bf16caabedd7/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory\\n\"" pod="openstack/dnsmasq-dns-8467b54bcc-qkldg" podUID="81937a04-32b8-4174-8e1c-bf16caabedd7"
Jan 22 06:04:58 crc kubenswrapper[4933]: I0122 06:04:58.091575 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56c9bc6f5c-qmln7"]
Jan 22 06:04:58 crc kubenswrapper[4933]: I0122 06:04:58.791976 4933 generic.go:334] "Generic (PLEG): container finished" podID="8b8bd705-8ba6-471f-8ed4-dbbd18816c6e" containerID="182b292aa4c399c51ea47eaf2870bdc3ee5bb3f9a6451c535d36593c849e03d6" exitCode=0
Jan 22 06:04:58 crc kubenswrapper[4933]: I0122 06:04:58.792035 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-e75b-account-create-update-2nrxw" event={"ID":"8b8bd705-8ba6-471f-8ed4-dbbd18816c6e","Type":"ContainerDied","Data":"182b292aa4c399c51ea47eaf2870bdc3ee5bb3f9a6451c535d36593c849e03d6"}
Jan 22 06:04:58 crc kubenswrapper[4933]: I0122 06:04:58.795700 4933 generic.go:334] "Generic (PLEG): container finished" podID="47f2883c-cf0b-4774-96ea-6a6e6ec8f335" containerID="6fd114928698fbfda1eb0719a5519bfbb9c115b02e0b1ebbaeb3ba4ac6e8e3b4" exitCode=0
Jan 22 06:04:58 crc kubenswrapper[4933]: I0122 06:04:58.795768 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-9hsxt" event={"ID":"47f2883c-cf0b-4774-96ea-6a6e6ec8f335","Type":"ContainerDied","Data":"6fd114928698fbfda1eb0719a5519bfbb9c115b02e0b1ebbaeb3ba4ac6e8e3b4"}
Jan 22 06:04:58 crc kubenswrapper[4933]: I0122 06:04:58.797265 4933 generic.go:334] "Generic (PLEG): container finished" podID="a4587953-b9d4-470c-aaca-3cf9f80c8961" containerID="8330b5239f77c8b2736000ec9bf3d8a736e59fc675f587da19ce26937bdd8640" exitCode=0
Jan 22 06:04:58 crc kubenswrapper[4933]: I0122 06:04:58.797309 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-98e3-account-create-update-7zvxp" event={"ID":"a4587953-b9d4-470c-aaca-3cf9f80c8961","Type":"ContainerDied","Data":"8330b5239f77c8b2736000ec9bf3d8a736e59fc675f587da19ce26937bdd8640"}
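The CreateContainerError above fails while bind-mounting a subPath: judging from the error text, the kubelet stages a per-container subPath source under /var/lib/kubelet/pods/<podUID>/volume-subpaths/<volume>/<container>/<mountIndex>, and the runtime then mounts it at the container-relative target (etc/dnsmasq.d/hosts/dns-svc). The SyncLoop DELETE for dnsmasq-dns-8467b54bcc-qkldg at 06:04:57.143490 plausibly raced container creation, so the staged source was already gone. A hedged diagnostic sketch of checking that source path is below; the path layout is inferred from the error message and the helper is hypothetical, not kubelet code.

```go
// Hypothetical pre-flight check for the subPath source directory that the
// runtime failed to mount above. The layout is inferred from the error
// ("/var/lib/kubelet/pods/<podUID>/volume-subpaths/<volume>/<container>/<index>");
// this is a diagnostic sketch, not kubelet's implementation.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func subPathSource(podUID, volume, container string, index int) string {
	return filepath.Join("/var/lib/kubelet/pods", podUID,
		"volume-subpaths", volume, container, fmt.Sprint(index))
}

func main() {
	src := subPathSource("81937a04-32b8-4174-8e1c-bf16caabedd7", "dns-svc", "dnsmasq-dns", 1)
	if _, err := os.Stat(src); err != nil {
		// Matches the failure mode logged above: the source vanished while
		// the pod was being torn down, so the runtime's mount gets ENOENT.
		fmt.Printf("subPath source missing: %v\n", err)
		return
	}
	fmt.Println("subPath source present:", src)
}
```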
Jan 22 06:04:58 crc kubenswrapper[4933]: I0122 06:04:58.798896 4933 generic.go:334] "Generic (PLEG): container finished" podID="6ead38e6-cb9b-4cdf-8b68-2971113c1af1" containerID="8dcfdaade288e4a4e857f537eb4fcbd1fc6851641e77ba9d59db534de87735eb" exitCode=0
Jan 22 06:04:58 crc kubenswrapper[4933]: I0122 06:04:58.798988 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56c9bc6f5c-qmln7" event={"ID":"6ead38e6-cb9b-4cdf-8b68-2971113c1af1","Type":"ContainerDied","Data":"8dcfdaade288e4a4e857f537eb4fcbd1fc6851641e77ba9d59db534de87735eb"}
Jan 22 06:04:58 crc kubenswrapper[4933]: I0122 06:04:58.799034 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56c9bc6f5c-qmln7" event={"ID":"6ead38e6-cb9b-4cdf-8b68-2971113c1af1","Type":"ContainerStarted","Data":"770d54959538a5094af045c88c13c9801a6b3d1884d0788f3482f469b0d27eca"}
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.189972 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-e75b-account-create-update-2nrxw"
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.312343 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8b8bd705-8ba6-471f-8ed4-dbbd18816c6e-operator-scripts\") pod \"8b8bd705-8ba6-471f-8ed4-dbbd18816c6e\" (UID: \"8b8bd705-8ba6-471f-8ed4-dbbd18816c6e\") "
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.312644 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2md2w\" (UniqueName: \"kubernetes.io/projected/8b8bd705-8ba6-471f-8ed4-dbbd18816c6e-kube-api-access-2md2w\") pod \"8b8bd705-8ba6-471f-8ed4-dbbd18816c6e\" (UID: \"8b8bd705-8ba6-471f-8ed4-dbbd18816c6e\") "
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.313460 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8b8bd705-8ba6-471f-8ed4-dbbd18816c6e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8b8bd705-8ba6-471f-8ed4-dbbd18816c6e" (UID: "8b8bd705-8ba6-471f-8ed4-dbbd18816c6e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.331694 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8b8bd705-8ba6-471f-8ed4-dbbd18816c6e-kube-api-access-2md2w" (OuterVolumeSpecName: "kube-api-access-2md2w") pod "8b8bd705-8ba6-471f-8ed4-dbbd18816c6e" (UID: "8b8bd705-8ba6-471f-8ed4-dbbd18816c6e"). InnerVolumeSpecName "kube-api-access-2md2w". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.416984 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8b8bd705-8ba6-471f-8ed4-dbbd18816c6e-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.417018 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2md2w\" (UniqueName: \"kubernetes.io/projected/8b8bd705-8ba6-471f-8ed4-dbbd18816c6e-kube-api-access-2md2w\") on node \"crc\" DevicePath \"\""
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.502239 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8467b54bcc-qkldg"
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.514449 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-633c-account-create-update-xl2w2"
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.517614 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-2fmwb"
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.528740 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-sr5dw"
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.619807 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/81937a04-32b8-4174-8e1c-bf16caabedd7-dns-svc\") pod \"81937a04-32b8-4174-8e1c-bf16caabedd7\" (UID: \"81937a04-32b8-4174-8e1c-bf16caabedd7\") "
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.619864 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/81937a04-32b8-4174-8e1c-bf16caabedd7-dns-swift-storage-0\") pod \"81937a04-32b8-4174-8e1c-bf16caabedd7\" (UID: \"81937a04-32b8-4174-8e1c-bf16caabedd7\") "
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.619906 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0235a4fc-a22c-493c-95a8-7b90423eab40-operator-scripts\") pod \"0235a4fc-a22c-493c-95a8-7b90423eab40\" (UID: \"0235a4fc-a22c-493c-95a8-7b90423eab40\") "
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.619988 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/81937a04-32b8-4174-8e1c-bf16caabedd7-ovsdbserver-nb\") pod \"81937a04-32b8-4174-8e1c-bf16caabedd7\" (UID: \"81937a04-32b8-4174-8e1c-bf16caabedd7\") "
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.620048 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6b3a9bf8-9baa-4b5a-a321-1bff747279fd-operator-scripts\") pod \"6b3a9bf8-9baa-4b5a-a321-1bff747279fd\" (UID: \"6b3a9bf8-9baa-4b5a-a321-1bff747279fd\") "
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.620160 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/49b3b5e7-c5e1-4a79-90a2-339b4f07f585-operator-scripts\") pod \"49b3b5e7-c5e1-4a79-90a2-339b4f07f585\" (UID: \"49b3b5e7-c5e1-4a79-90a2-339b4f07f585\") "
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.620207 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6tpvt\" (UniqueName: \"kubernetes.io/projected/49b3b5e7-c5e1-4a79-90a2-339b4f07f585-kube-api-access-6tpvt\") pod \"49b3b5e7-c5e1-4a79-90a2-339b4f07f585\" (UID: \"49b3b5e7-c5e1-4a79-90a2-339b4f07f585\") "
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.620239 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/81937a04-32b8-4174-8e1c-bf16caabedd7-ovsdbserver-sb\") pod \"81937a04-32b8-4174-8e1c-bf16caabedd7\" (UID: \"81937a04-32b8-4174-8e1c-bf16caabedd7\") "
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.620260 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lkgz5\" (UniqueName: \"kubernetes.io/projected/81937a04-32b8-4174-8e1c-bf16caabedd7-kube-api-access-lkgz5\") pod \"81937a04-32b8-4174-8e1c-bf16caabedd7\" (UID: \"81937a04-32b8-4174-8e1c-bf16caabedd7\") "
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.620283 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2kkl9\" (UniqueName: \"kubernetes.io/projected/6b3a9bf8-9baa-4b5a-a321-1bff747279fd-kube-api-access-2kkl9\") pod \"6b3a9bf8-9baa-4b5a-a321-1bff747279fd\" (UID: \"6b3a9bf8-9baa-4b5a-a321-1bff747279fd\") "
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.620313 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/81937a04-32b8-4174-8e1c-bf16caabedd7-config\") pod \"81937a04-32b8-4174-8e1c-bf16caabedd7\" (UID: \"81937a04-32b8-4174-8e1c-bf16caabedd7\") "
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.620348 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bqsdr\" (UniqueName: \"kubernetes.io/projected/0235a4fc-a22c-493c-95a8-7b90423eab40-kube-api-access-bqsdr\") pod \"0235a4fc-a22c-493c-95a8-7b90423eab40\" (UID: \"0235a4fc-a22c-493c-95a8-7b90423eab40\") "
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.624890 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6b3a9bf8-9baa-4b5a-a321-1bff747279fd-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6b3a9bf8-9baa-4b5a-a321-1bff747279fd" (UID: "6b3a9bf8-9baa-4b5a-a321-1bff747279fd"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.624985 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49b3b5e7-c5e1-4a79-90a2-339b4f07f585-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "49b3b5e7-c5e1-4a79-90a2-339b4f07f585" (UID: "49b3b5e7-c5e1-4a79-90a2-339b4f07f585"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.625067 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81937a04-32b8-4174-8e1c-bf16caabedd7-kube-api-access-lkgz5" (OuterVolumeSpecName: "kube-api-access-lkgz5") pod "81937a04-32b8-4174-8e1c-bf16caabedd7" (UID: "81937a04-32b8-4174-8e1c-bf16caabedd7"). InnerVolumeSpecName "kube-api-access-lkgz5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.625232 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0235a4fc-a22c-493c-95a8-7b90423eab40-kube-api-access-bqsdr" (OuterVolumeSpecName: "kube-api-access-bqsdr") pod "0235a4fc-a22c-493c-95a8-7b90423eab40" (UID: "0235a4fc-a22c-493c-95a8-7b90423eab40"). InnerVolumeSpecName "kube-api-access-bqsdr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.625521 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0235a4fc-a22c-493c-95a8-7b90423eab40-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0235a4fc-a22c-493c-95a8-7b90423eab40" (UID: "0235a4fc-a22c-493c-95a8-7b90423eab40"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.636721 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49b3b5e7-c5e1-4a79-90a2-339b4f07f585-kube-api-access-6tpvt" (OuterVolumeSpecName: "kube-api-access-6tpvt") pod "49b3b5e7-c5e1-4a79-90a2-339b4f07f585" (UID: "49b3b5e7-c5e1-4a79-90a2-339b4f07f585"). InnerVolumeSpecName "kube-api-access-6tpvt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.640539 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b3a9bf8-9baa-4b5a-a321-1bff747279fd-kube-api-access-2kkl9" (OuterVolumeSpecName: "kube-api-access-2kkl9") pod "6b3a9bf8-9baa-4b5a-a321-1bff747279fd" (UID: "6b3a9bf8-9baa-4b5a-a321-1bff747279fd"). InnerVolumeSpecName "kube-api-access-2kkl9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.666145 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/81937a04-32b8-4174-8e1c-bf16caabedd7-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "81937a04-32b8-4174-8e1c-bf16caabedd7" (UID: "81937a04-32b8-4174-8e1c-bf16caabedd7"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.670884 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/81937a04-32b8-4174-8e1c-bf16caabedd7-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "81937a04-32b8-4174-8e1c-bf16caabedd7" (UID: "81937a04-32b8-4174-8e1c-bf16caabedd7"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.673658 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/81937a04-32b8-4174-8e1c-bf16caabedd7-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "81937a04-32b8-4174-8e1c-bf16caabedd7" (UID: "81937a04-32b8-4174-8e1c-bf16caabedd7"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.675378 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/81937a04-32b8-4174-8e1c-bf16caabedd7-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "81937a04-32b8-4174-8e1c-bf16caabedd7" (UID: "81937a04-32b8-4174-8e1c-bf16caabedd7"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.687428 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/81937a04-32b8-4174-8e1c-bf16caabedd7-config" (OuterVolumeSpecName: "config") pod "81937a04-32b8-4174-8e1c-bf16caabedd7" (UID: "81937a04-32b8-4174-8e1c-bf16caabedd7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.722697 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0235a4fc-a22c-493c-95a8-7b90423eab40-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.722943 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/81937a04-32b8-4174-8e1c-bf16caabedd7-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.723064 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6b3a9bf8-9baa-4b5a-a321-1bff747279fd-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.723239 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/49b3b5e7-c5e1-4a79-90a2-339b4f07f585-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.723326 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6tpvt\" (UniqueName: \"kubernetes.io/projected/49b3b5e7-c5e1-4a79-90a2-339b4f07f585-kube-api-access-6tpvt\") on node \"crc\" DevicePath \"\""
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.723386 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/81937a04-32b8-4174-8e1c-bf16caabedd7-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.723439 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lkgz5\" (UniqueName: \"kubernetes.io/projected/81937a04-32b8-4174-8e1c-bf16caabedd7-kube-api-access-lkgz5\") on node \"crc\" DevicePath \"\""
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.723500 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2kkl9\" (UniqueName: \"kubernetes.io/projected/6b3a9bf8-9baa-4b5a-a321-1bff747279fd-kube-api-access-2kkl9\") on node \"crc\" DevicePath \"\""
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.723554 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/81937a04-32b8-4174-8e1c-bf16caabedd7-config\") on node \"crc\" DevicePath \"\""
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.723610 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bqsdr\" (UniqueName: \"kubernetes.io/projected/0235a4fc-a22c-493c-95a8-7b90423eab40-kube-api-access-bqsdr\") on node \"crc\" DevicePath \"\""
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.723668 4933 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/81937a04-32b8-4174-8e1c-bf16caabedd7-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.723720 4933 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/81937a04-32b8-4174-8e1c-bf16caabedd7-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
event={"ID":"49b3b5e7-c5e1-4a79-90a2-339b4f07f585","Type":"ContainerDied","Data":"8bbf00b3dc601026a866828536f1692805a49f37a7823d3d8be2aaa844b8e7a4"} Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.808971 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8bbf00b3dc601026a866828536f1692805a49f37a7823d3d8be2aaa844b8e7a4" Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.808713 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-sr5dw" Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.809908 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-e75b-account-create-update-2nrxw" event={"ID":"8b8bd705-8ba6-471f-8ed4-dbbd18816c6e","Type":"ContainerDied","Data":"d0e552db1a5f7ce474d2361098efe5651a7b6d1e8a942fa724ce20b96a0380a7"} Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.809937 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d0e552db1a5f7ce474d2361098efe5651a7b6d1e8a942fa724ce20b96a0380a7" Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.809987 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-e75b-account-create-update-2nrxw" Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.815657 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8467b54bcc-qkldg" event={"ID":"81937a04-32b8-4174-8e1c-bf16caabedd7","Type":"ContainerDied","Data":"c431c57dea432274eb902c50547beb94279ceb213c0b6a5c6814aec2bae70844"} Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.815718 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8467b54bcc-qkldg" Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.815733 4933 scope.go:117] "RemoveContainer" containerID="8ddacb03af848b402150cc024506ccb63fde80e81c0dbfaa56949d2a8d093167" Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.820532 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-633c-account-create-update-xl2w2" event={"ID":"6b3a9bf8-9baa-4b5a-a321-1bff747279fd","Type":"ContainerDied","Data":"3e9587cf1963cc27bca37cd396199d0881a5d088e51aec2583a47051133f4461"} Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.820591 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3e9587cf1963cc27bca37cd396199d0881a5d088e51aec2583a47051133f4461" Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.821324 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-633c-account-create-update-xl2w2" Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.827508 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-2fmwb" event={"ID":"0235a4fc-a22c-493c-95a8-7b90423eab40","Type":"ContainerDied","Data":"032177ec25f6337844bb1b47f1f8227d9da9f8e01c7fd1c523d6db67572a492a"} Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.827557 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="032177ec25f6337844bb1b47f1f8227d9da9f8e01c7fd1c523d6db67572a492a" Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.827590 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-2fmwb" Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.835610 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56c9bc6f5c-qmln7" event={"ID":"6ead38e6-cb9b-4cdf-8b68-2971113c1af1","Type":"ContainerStarted","Data":"f6c39fe8250cc962720265b6440d4dfb51250143572651845f0b1b49380f15da"} Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.835680 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-56c9bc6f5c-qmln7" Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.854973 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-56c9bc6f5c-qmln7" podStartSLOduration=2.854949378 podStartE2EDuration="2.854949378s" podCreationTimestamp="2026-01-22 06:04:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:04:59.850752812 +0000 UTC m=+1147.687878165" watchObservedRunningTime="2026-01-22 06:04:59.854949378 +0000 UTC m=+1147.692074741" Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.898325 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8467b54bcc-qkldg"] Jan 22 06:04:59 crc kubenswrapper[4933]: I0122 06:04:59.904130 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8467b54bcc-qkldg"] Jan 22 06:05:00 crc kubenswrapper[4933]: I0122 06:05:00.501286 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="81937a04-32b8-4174-8e1c-bf16caabedd7" path="/var/lib/kubelet/pods/81937a04-32b8-4174-8e1c-bf16caabedd7/volumes" Jan 22 06:05:02 crc kubenswrapper[4933]: I0122 06:05:02.579359 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-98e3-account-create-update-7zvxp" Jan 22 06:05:02 crc kubenswrapper[4933]: I0122 06:05:02.597277 4933 util.go:48] "No ready sandbox for pod can be found. 
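The "SyncLoop (probe)" entry above records the readiness transition for the new dnsmasq pod: status is still empty at 06:04:59, and it flips to status="ready" at 06:05:07 further below. Per the container spec dumped in the earlier CreateContainer error, readiness for this pod is a TCPSocket probe against port 5353 with a 5s timeout and 5s period. A standalone sketch of an equivalent TCP check follows; the address is a placeholder (the real prober targets status.podIP) and the helper is illustrative, not kubelet's prober.

```go
// Illustrative equivalent of the pod's readiness probe: a TCP dial against
// port 5353 with the 5s timeout from the container spec dumped earlier in
// this log (TCPSocketAction). Not kubelet's prober implementation.
package main

import (
	"fmt"
	"net"
	"time"
)

func tcpReady(addr string, timeout time.Duration) bool {
	conn, err := net.DialTimeout("tcp", addr, timeout)
	if err != nil {
		return false // probe failure -> pod stays NotReady
	}
	conn.Close()
	return true
}

func main() {
	// Placeholder pod IP for illustration; kubelet substitutes status.podIP.
	fmt.Println("ready:", tcpReady("10.217.0.99:5353", 5*time.Second))
}
```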
Jan 22 06:05:02 crc kubenswrapper[4933]: I0122 06:05:02.597277 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-9hsxt"
Jan 22 06:05:02 crc kubenswrapper[4933]: I0122 06:05:02.682701 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5hphk\" (UniqueName: \"kubernetes.io/projected/a4587953-b9d4-470c-aaca-3cf9f80c8961-kube-api-access-5hphk\") pod \"a4587953-b9d4-470c-aaca-3cf9f80c8961\" (UID: \"a4587953-b9d4-470c-aaca-3cf9f80c8961\") "
Jan 22 06:05:02 crc kubenswrapper[4933]: I0122 06:05:02.682784 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4587953-b9d4-470c-aaca-3cf9f80c8961-operator-scripts\") pod \"a4587953-b9d4-470c-aaca-3cf9f80c8961\" (UID: \"a4587953-b9d4-470c-aaca-3cf9f80c8961\") "
Jan 22 06:05:02 crc kubenswrapper[4933]: I0122 06:05:02.682926 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mh7f2\" (UniqueName: \"kubernetes.io/projected/47f2883c-cf0b-4774-96ea-6a6e6ec8f335-kube-api-access-mh7f2\") pod \"47f2883c-cf0b-4774-96ea-6a6e6ec8f335\" (UID: \"47f2883c-cf0b-4774-96ea-6a6e6ec8f335\") "
Jan 22 06:05:02 crc kubenswrapper[4933]: I0122 06:05:02.683279 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/47f2883c-cf0b-4774-96ea-6a6e6ec8f335-operator-scripts\") pod \"47f2883c-cf0b-4774-96ea-6a6e6ec8f335\" (UID: \"47f2883c-cf0b-4774-96ea-6a6e6ec8f335\") "
Jan 22 06:05:02 crc kubenswrapper[4933]: I0122 06:05:02.683527 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a4587953-b9d4-470c-aaca-3cf9f80c8961-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a4587953-b9d4-470c-aaca-3cf9f80c8961" (UID: "a4587953-b9d4-470c-aaca-3cf9f80c8961"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 06:05:02 crc kubenswrapper[4933]: I0122 06:05:02.684022 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4587953-b9d4-470c-aaca-3cf9f80c8961-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 22 06:05:02 crc kubenswrapper[4933]: I0122 06:05:02.685031 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/47f2883c-cf0b-4774-96ea-6a6e6ec8f335-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "47f2883c-cf0b-4774-96ea-6a6e6ec8f335" (UID: "47f2883c-cf0b-4774-96ea-6a6e6ec8f335"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 06:05:02 crc kubenswrapper[4933]: I0122 06:05:02.689082 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/47f2883c-cf0b-4774-96ea-6a6e6ec8f335-kube-api-access-mh7f2" (OuterVolumeSpecName: "kube-api-access-mh7f2") pod "47f2883c-cf0b-4774-96ea-6a6e6ec8f335" (UID: "47f2883c-cf0b-4774-96ea-6a6e6ec8f335"). InnerVolumeSpecName "kube-api-access-mh7f2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 06:05:02 crc kubenswrapper[4933]: I0122 06:05:02.689146 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4587953-b9d4-470c-aaca-3cf9f80c8961-kube-api-access-5hphk" (OuterVolumeSpecName: "kube-api-access-5hphk") pod "a4587953-b9d4-470c-aaca-3cf9f80c8961" (UID: "a4587953-b9d4-470c-aaca-3cf9f80c8961"). InnerVolumeSpecName "kube-api-access-5hphk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 06:05:02 crc kubenswrapper[4933]: I0122 06:05:02.785636 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/47f2883c-cf0b-4774-96ea-6a6e6ec8f335-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 22 06:05:02 crc kubenswrapper[4933]: I0122 06:05:02.785664 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5hphk\" (UniqueName: \"kubernetes.io/projected/a4587953-b9d4-470c-aaca-3cf9f80c8961-kube-api-access-5hphk\") on node \"crc\" DevicePath \"\""
Jan 22 06:05:02 crc kubenswrapper[4933]: I0122 06:05:02.785677 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mh7f2\" (UniqueName: \"kubernetes.io/projected/47f2883c-cf0b-4774-96ea-6a6e6ec8f335-kube-api-access-mh7f2\") on node \"crc\" DevicePath \"\""
Jan 22 06:05:02 crc kubenswrapper[4933]: I0122 06:05:02.864727 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-zstt6" event={"ID":"17180ad8-5646-46f7-ad5a-75608d596672","Type":"ContainerStarted","Data":"3917ee58ab8a64812cc4ae1ea20673598a33f8056af415a03f81cd87c862ee77"}
Jan 22 06:05:02 crc kubenswrapper[4933]: I0122 06:05:02.868597 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-9hsxt" event={"ID":"47f2883c-cf0b-4774-96ea-6a6e6ec8f335","Type":"ContainerDied","Data":"d78c3ac14445a4662892ca5f4f855209277fb8984eba8f8ed9920f5f0235406d"}
Jan 22 06:05:02 crc kubenswrapper[4933]: I0122 06:05:02.868657 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d78c3ac14445a4662892ca5f4f855209277fb8984eba8f8ed9920f5f0235406d"
Jan 22 06:05:02 crc kubenswrapper[4933]: I0122 06:05:02.868738 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-9hsxt"
Jan 22 06:05:02 crc kubenswrapper[4933]: I0122 06:05:02.877112 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-98e3-account-create-update-7zvxp" event={"ID":"a4587953-b9d4-470c-aaca-3cf9f80c8961","Type":"ContainerDied","Data":"96ddad987f1eb06c9b9f2a3e9d93d1a8981cee027bbb51352b0dd14033cea86a"}
Jan 22 06:05:02 crc kubenswrapper[4933]: I0122 06:05:02.877163 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-98e3-account-create-update-7zvxp"
Jan 22 06:05:02 crc kubenswrapper[4933]: I0122 06:05:02.877177 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="96ddad987f1eb06c9b9f2a3e9d93d1a8981cee027bbb51352b0dd14033cea86a"
Jan 22 06:05:02 crc kubenswrapper[4933]: I0122 06:05:02.898172 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-zstt6" podStartSLOduration=2.476905225 podStartE2EDuration="7.898154348s" podCreationTimestamp="2026-01-22 06:04:55 +0000 UTC" firstStartedPulling="2026-01-22 06:04:56.978221303 +0000 UTC m=+1144.815346656" lastFinishedPulling="2026-01-22 06:05:02.399470426 +0000 UTC m=+1150.236595779" observedRunningTime="2026-01-22 06:05:02.895869622 +0000 UTC m=+1150.732994985" watchObservedRunningTime="2026-01-22 06:05:02.898154348 +0000 UTC m=+1150.735279701"
Jan 22 06:05:05 crc kubenswrapper[4933]: I0122 06:05:05.911524 4933 generic.go:334] "Generic (PLEG): container finished" podID="17180ad8-5646-46f7-ad5a-75608d596672" containerID="3917ee58ab8a64812cc4ae1ea20673598a33f8056af415a03f81cd87c862ee77" exitCode=0
Jan 22 06:05:05 crc kubenswrapper[4933]: I0122 06:05:05.911599 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-zstt6" event={"ID":"17180ad8-5646-46f7-ad5a-75608d596672","Type":"ContainerDied","Data":"3917ee58ab8a64812cc4ae1ea20673598a33f8056af415a03f81cd87c862ee77"}
Jan 22 06:05:07 crc kubenswrapper[4933]: I0122 06:05:07.318731 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-zstt6"
Jan 22 06:05:07 crc kubenswrapper[4933]: I0122 06:05:07.373276 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17180ad8-5646-46f7-ad5a-75608d596672-combined-ca-bundle\") pod \"17180ad8-5646-46f7-ad5a-75608d596672\" (UID: \"17180ad8-5646-46f7-ad5a-75608d596672\") "
Jan 22 06:05:07 crc kubenswrapper[4933]: I0122 06:05:07.373365 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2q9h5\" (UniqueName: \"kubernetes.io/projected/17180ad8-5646-46f7-ad5a-75608d596672-kube-api-access-2q9h5\") pod \"17180ad8-5646-46f7-ad5a-75608d596672\" (UID: \"17180ad8-5646-46f7-ad5a-75608d596672\") "
Jan 22 06:05:07 crc kubenswrapper[4933]: I0122 06:05:07.373447 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17180ad8-5646-46f7-ad5a-75608d596672-config-data\") pod \"17180ad8-5646-46f7-ad5a-75608d596672\" (UID: \"17180ad8-5646-46f7-ad5a-75608d596672\") "
Jan 22 06:05:07 crc kubenswrapper[4933]: I0122 06:05:07.383254 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/17180ad8-5646-46f7-ad5a-75608d596672-kube-api-access-2q9h5" (OuterVolumeSpecName: "kube-api-access-2q9h5") pod "17180ad8-5646-46f7-ad5a-75608d596672" (UID: "17180ad8-5646-46f7-ad5a-75608d596672"). InnerVolumeSpecName "kube-api-access-2q9h5". PluginName "kubernetes.io/projected", VolumeGidValue ""
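The pod_startup_latency_tracker entry above for keystone-db-sync-zstt6 makes the relationship between its two durations checkable from the logged timestamps: podStartE2EDuration is observedRunningTime minus podCreationTimestamp (7.898154348s), and podStartSLOduration appears to subtract the image-pull window (lastFinishedPulling minus firstStartedPulling, about 5.421s) from that, giving 2.476905225. In the earlier tracker entries where the pull timestamps are the zero time, the two values coincide. A short sketch re-deriving the numbers; the subtraction rule is inferred from these values rather than quoted from kubelet source.

```go
// Re-derive the startup durations logged by pod_startup_latency_tracker for
// keystone-db-sync-zstt6. The "SLO = E2E - image-pull window" rule below is
// inferred from the logged values, not quoted from kubelet source.
package main

import (
	"fmt"
	"time"
)

func mustParse(s string) time.Time {
	t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
	if err != nil {
		panic(err)
	}
	return t
}

func main() {
	created := mustParse("2026-01-22 06:04:55 +0000 UTC")
	firstPull := mustParse("2026-01-22 06:04:56.978221303 +0000 UTC")
	lastPull := mustParse("2026-01-22 06:05:02.399470426 +0000 UTC")
	running := mustParse("2026-01-22 06:05:02.898154348 +0000 UTC")

	e2e := running.Sub(created)          // 7.898154348s  == podStartE2EDuration
	slo := e2e - lastPull.Sub(firstPull) // 2.476905225s  == podStartSLOduration
	fmt.Println(e2e, slo)
}
```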
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:05:07 crc kubenswrapper[4933]: I0122 06:05:07.452448 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17180ad8-5646-46f7-ad5a-75608d596672-config-data" (OuterVolumeSpecName: "config-data") pod "17180ad8-5646-46f7-ad5a-75608d596672" (UID: "17180ad8-5646-46f7-ad5a-75608d596672"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:05:07 crc kubenswrapper[4933]: I0122 06:05:07.475551 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17180ad8-5646-46f7-ad5a-75608d596672-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:07 crc kubenswrapper[4933]: I0122 06:05:07.475587 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2q9h5\" (UniqueName: \"kubernetes.io/projected/17180ad8-5646-46f7-ad5a-75608d596672-kube-api-access-2q9h5\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:07 crc kubenswrapper[4933]: I0122 06:05:07.475599 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17180ad8-5646-46f7-ad5a-75608d596672-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:07 crc kubenswrapper[4933]: I0122 06:05:07.505138 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-56c9bc6f5c-qmln7" Jan 22 06:05:07 crc kubenswrapper[4933]: I0122 06:05:07.573355 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6cb545bd4c-r4w7m"] Jan 22 06:05:07 crc kubenswrapper[4933]: I0122 06:05:07.573681 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6cb545bd4c-r4w7m" podUID="a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852" containerName="dnsmasq-dns" containerID="cri-o://7392029f2f82ccea4bcf90f56fd558047283f0e7a8c30fc09dbe263b2c626149" gracePeriod=10 Jan 22 06:05:07 crc kubenswrapper[4933]: I0122 06:05:07.934842 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-zstt6" event={"ID":"17180ad8-5646-46f7-ad5a-75608d596672","Type":"ContainerDied","Data":"e9634ecc7b8680b07959fd9a67015adcf05f0b437bdb3730ac1908e05c268a20"} Jan 22 06:05:07 crc kubenswrapper[4933]: I0122 06:05:07.935171 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e9634ecc7b8680b07959fd9a67015adcf05f0b437bdb3730ac1908e05c268a20" Jan 22 06:05:07 crc kubenswrapper[4933]: I0122 06:05:07.935228 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-zstt6" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.124309 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-54b4bb76d5-lrr92"] Jan 22 06:05:08 crc kubenswrapper[4933]: E0122 06:05:08.126637 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17180ad8-5646-46f7-ad5a-75608d596672" containerName="keystone-db-sync" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.126668 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="17180ad8-5646-46f7-ad5a-75608d596672" containerName="keystone-db-sync" Jan 22 06:05:08 crc kubenswrapper[4933]: E0122 06:05:08.126684 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49b3b5e7-c5e1-4a79-90a2-339b4f07f585" containerName="mariadb-database-create" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.126690 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="49b3b5e7-c5e1-4a79-90a2-339b4f07f585" containerName="mariadb-database-create" Jan 22 06:05:08 crc kubenswrapper[4933]: E0122 06:05:08.126705 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4587953-b9d4-470c-aaca-3cf9f80c8961" containerName="mariadb-account-create-update" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.126711 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4587953-b9d4-470c-aaca-3cf9f80c8961" containerName="mariadb-account-create-update" Jan 22 06:05:08 crc kubenswrapper[4933]: E0122 06:05:08.126733 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47f2883c-cf0b-4774-96ea-6a6e6ec8f335" containerName="mariadb-database-create" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.126739 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="47f2883c-cf0b-4774-96ea-6a6e6ec8f335" containerName="mariadb-database-create" Jan 22 06:05:08 crc kubenswrapper[4933]: E0122 06:05:08.126757 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b8bd705-8ba6-471f-8ed4-dbbd18816c6e" containerName="mariadb-account-create-update" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.126772 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b8bd705-8ba6-471f-8ed4-dbbd18816c6e" containerName="mariadb-account-create-update" Jan 22 06:05:08 crc kubenswrapper[4933]: E0122 06:05:08.126784 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0235a4fc-a22c-493c-95a8-7b90423eab40" containerName="mariadb-database-create" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.126791 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="0235a4fc-a22c-493c-95a8-7b90423eab40" containerName="mariadb-database-create" Jan 22 06:05:08 crc kubenswrapper[4933]: E0122 06:05:08.126801 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b3a9bf8-9baa-4b5a-a321-1bff747279fd" containerName="mariadb-account-create-update" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.126807 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b3a9bf8-9baa-4b5a-a321-1bff747279fd" containerName="mariadb-account-create-update" Jan 22 06:05:08 crc kubenswrapper[4933]: E0122 06:05:08.126815 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81937a04-32b8-4174-8e1c-bf16caabedd7" containerName="init" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.126821 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="81937a04-32b8-4174-8e1c-bf16caabedd7" containerName="init" Jan 22 06:05:08 crc kubenswrapper[4933]: 
I0122 06:05:08.126986 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b8bd705-8ba6-471f-8ed4-dbbd18816c6e" containerName="mariadb-account-create-update" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.127004 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="49b3b5e7-c5e1-4a79-90a2-339b4f07f585" containerName="mariadb-database-create" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.127015 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b3a9bf8-9baa-4b5a-a321-1bff747279fd" containerName="mariadb-account-create-update" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.127030 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4587953-b9d4-470c-aaca-3cf9f80c8961" containerName="mariadb-account-create-update" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.127037 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="47f2883c-cf0b-4774-96ea-6a6e6ec8f335" containerName="mariadb-database-create" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.127048 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="0235a4fc-a22c-493c-95a8-7b90423eab40" containerName="mariadb-database-create" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.127063 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="81937a04-32b8-4174-8e1c-bf16caabedd7" containerName="init" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.127085 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="17180ad8-5646-46f7-ad5a-75608d596672" containerName="keystone-db-sync" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.128041 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54b4bb76d5-lrr92" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.144827 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-54b4bb76d5-lrr92"] Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.187882 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-pb2k9"] Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.194540 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9455ccf-cb44-4637-90e2-c48092ac7e20-config\") pod \"dnsmasq-dns-54b4bb76d5-lrr92\" (UID: \"e9455ccf-cb44-4637-90e2-c48092ac7e20\") " pod="openstack/dnsmasq-dns-54b4bb76d5-lrr92" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.194733 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e9455ccf-cb44-4637-90e2-c48092ac7e20-dns-svc\") pod \"dnsmasq-dns-54b4bb76d5-lrr92\" (UID: \"e9455ccf-cb44-4637-90e2-c48092ac7e20\") " pod="openstack/dnsmasq-dns-54b4bb76d5-lrr92" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.194865 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e9455ccf-cb44-4637-90e2-c48092ac7e20-ovsdbserver-nb\") pod \"dnsmasq-dns-54b4bb76d5-lrr92\" (UID: \"e9455ccf-cb44-4637-90e2-c48092ac7e20\") " pod="openstack/dnsmasq-dns-54b4bb76d5-lrr92" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.194976 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pdfsg\" (UniqueName: 
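The paired cpu_manager/state_mem and memory_manager entries above are the kubelet's resource managers sweeping per-container state left behind by pods that no longer exist (the finished db-create and account-create jobs, keystone-db-sync) as the new dnsmasq pod is admitted. The bookkeeping pattern is a map sweep against the set of active pod UIDs; a toy Go version, with illustrative type and value names rather than kubelet's actual API:

package main

import "fmt"

// key mirrors how the log identifies state: pod UID plus container name.
type key struct{ podUID, container string }

// removeStaleState drops assignments whose pod is no longer active,
// echoing the "RemoveStaleState: removing container" lines above.
func removeStaleState(assignments map[key]string, active map[string]bool) {
	for k := range assignments { // deleting during range is safe in Go
		if !active[k.podUID] {
			fmt.Printf("RemoveStaleState: removing container podUID=%q containerName=%q\n", k.podUID, k.container)
			delete(assignments, k)
		}
	}
}

func main() {
	// Hypothetical leftover assignments; the values are made up.
	assignments := map[key]string{
		{"17180ad8-5646-46f7-ad5a-75608d596672", "keystone-db-sync"}: "CPUSet 0-1",
		{"81937a04-32b8-4174-8e1c-bf16caabedd7", "init"}:             "CPUSet 2",
	}
	// Neither pod UID is active any longer, so both entries are removed.
	removeStaleState(assignments, map[string]bool{})
}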
\"kubernetes.io/projected/e9455ccf-cb44-4637-90e2-c48092ac7e20-kube-api-access-pdfsg\") pod \"dnsmasq-dns-54b4bb76d5-lrr92\" (UID: \"e9455ccf-cb44-4637-90e2-c48092ac7e20\") " pod="openstack/dnsmasq-dns-54b4bb76d5-lrr92" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.197441 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e9455ccf-cb44-4637-90e2-c48092ac7e20-ovsdbserver-sb\") pod \"dnsmasq-dns-54b4bb76d5-lrr92\" (UID: \"e9455ccf-cb44-4637-90e2-c48092ac7e20\") " pod="openstack/dnsmasq-dns-54b4bb76d5-lrr92" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.197624 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e9455ccf-cb44-4637-90e2-c48092ac7e20-dns-swift-storage-0\") pod \"dnsmasq-dns-54b4bb76d5-lrr92\" (UID: \"e9455ccf-cb44-4637-90e2-c48092ac7e20\") " pod="openstack/dnsmasq-dns-54b4bb76d5-lrr92" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.217111 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-pb2k9" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.220946 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.221305 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.221388 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.221690 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.221889 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-vftwj" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.258297 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-pb2k9"] Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.298800 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f20351db-9d8c-4b0e-9b28-b7628902dd80-config-data\") pod \"keystone-bootstrap-pb2k9\" (UID: \"f20351db-9d8c-4b0e-9b28-b7628902dd80\") " pod="openstack/keystone-bootstrap-pb2k9" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.298832 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f20351db-9d8c-4b0e-9b28-b7628902dd80-combined-ca-bundle\") pod \"keystone-bootstrap-pb2k9\" (UID: \"f20351db-9d8c-4b0e-9b28-b7628902dd80\") " pod="openstack/keystone-bootstrap-pb2k9" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.298891 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e9455ccf-cb44-4637-90e2-c48092ac7e20-ovsdbserver-sb\") pod \"dnsmasq-dns-54b4bb76d5-lrr92\" (UID: \"e9455ccf-cb44-4637-90e2-c48092ac7e20\") " pod="openstack/dnsmasq-dns-54b4bb76d5-lrr92" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.298922 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"credential-keys\" (UniqueName: \"kubernetes.io/secret/f20351db-9d8c-4b0e-9b28-b7628902dd80-credential-keys\") pod \"keystone-bootstrap-pb2k9\" (UID: \"f20351db-9d8c-4b0e-9b28-b7628902dd80\") " pod="openstack/keystone-bootstrap-pb2k9" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.298948 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x5nd4\" (UniqueName: \"kubernetes.io/projected/f20351db-9d8c-4b0e-9b28-b7628902dd80-kube-api-access-x5nd4\") pod \"keystone-bootstrap-pb2k9\" (UID: \"f20351db-9d8c-4b0e-9b28-b7628902dd80\") " pod="openstack/keystone-bootstrap-pb2k9" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.298976 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f20351db-9d8c-4b0e-9b28-b7628902dd80-scripts\") pod \"keystone-bootstrap-pb2k9\" (UID: \"f20351db-9d8c-4b0e-9b28-b7628902dd80\") " pod="openstack/keystone-bootstrap-pb2k9" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.298995 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e9455ccf-cb44-4637-90e2-c48092ac7e20-dns-swift-storage-0\") pod \"dnsmasq-dns-54b4bb76d5-lrr92\" (UID: \"e9455ccf-cb44-4637-90e2-c48092ac7e20\") " pod="openstack/dnsmasq-dns-54b4bb76d5-lrr92" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.299029 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9455ccf-cb44-4637-90e2-c48092ac7e20-config\") pod \"dnsmasq-dns-54b4bb76d5-lrr92\" (UID: \"e9455ccf-cb44-4637-90e2-c48092ac7e20\") " pod="openstack/dnsmasq-dns-54b4bb76d5-lrr92" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.299054 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e9455ccf-cb44-4637-90e2-c48092ac7e20-dns-svc\") pod \"dnsmasq-dns-54b4bb76d5-lrr92\" (UID: \"e9455ccf-cb44-4637-90e2-c48092ac7e20\") " pod="openstack/dnsmasq-dns-54b4bb76d5-lrr92" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.299067 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e9455ccf-cb44-4637-90e2-c48092ac7e20-ovsdbserver-nb\") pod \"dnsmasq-dns-54b4bb76d5-lrr92\" (UID: \"e9455ccf-cb44-4637-90e2-c48092ac7e20\") " pod="openstack/dnsmasq-dns-54b4bb76d5-lrr92" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.299104 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pdfsg\" (UniqueName: \"kubernetes.io/projected/e9455ccf-cb44-4637-90e2-c48092ac7e20-kube-api-access-pdfsg\") pod \"dnsmasq-dns-54b4bb76d5-lrr92\" (UID: \"e9455ccf-cb44-4637-90e2-c48092ac7e20\") " pod="openstack/dnsmasq-dns-54b4bb76d5-lrr92" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.299137 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f20351db-9d8c-4b0e-9b28-b7628902dd80-fernet-keys\") pod \"keystone-bootstrap-pb2k9\" (UID: \"f20351db-9d8c-4b0e-9b28-b7628902dd80\") " pod="openstack/keystone-bootstrap-pb2k9" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.300003 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/e9455ccf-cb44-4637-90e2-c48092ac7e20-ovsdbserver-sb\") pod \"dnsmasq-dns-54b4bb76d5-lrr92\" (UID: \"e9455ccf-cb44-4637-90e2-c48092ac7e20\") " pod="openstack/dnsmasq-dns-54b4bb76d5-lrr92" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.300548 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e9455ccf-cb44-4637-90e2-c48092ac7e20-dns-swift-storage-0\") pod \"dnsmasq-dns-54b4bb76d5-lrr92\" (UID: \"e9455ccf-cb44-4637-90e2-c48092ac7e20\") " pod="openstack/dnsmasq-dns-54b4bb76d5-lrr92" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.303221 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9455ccf-cb44-4637-90e2-c48092ac7e20-config\") pod \"dnsmasq-dns-54b4bb76d5-lrr92\" (UID: \"e9455ccf-cb44-4637-90e2-c48092ac7e20\") " pod="openstack/dnsmasq-dns-54b4bb76d5-lrr92" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.303949 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e9455ccf-cb44-4637-90e2-c48092ac7e20-ovsdbserver-nb\") pod \"dnsmasq-dns-54b4bb76d5-lrr92\" (UID: \"e9455ccf-cb44-4637-90e2-c48092ac7e20\") " pod="openstack/dnsmasq-dns-54b4bb76d5-lrr92" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.306221 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e9455ccf-cb44-4637-90e2-c48092ac7e20-dns-svc\") pod \"dnsmasq-dns-54b4bb76d5-lrr92\" (UID: \"e9455ccf-cb44-4637-90e2-c48092ac7e20\") " pod="openstack/dnsmasq-dns-54b4bb76d5-lrr92" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.352936 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pdfsg\" (UniqueName: \"kubernetes.io/projected/e9455ccf-cb44-4637-90e2-c48092ac7e20-kube-api-access-pdfsg\") pod \"dnsmasq-dns-54b4bb76d5-lrr92\" (UID: \"e9455ccf-cb44-4637-90e2-c48092ac7e20\") " pod="openstack/dnsmasq-dns-54b4bb76d5-lrr92" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.400334 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/f20351db-9d8c-4b0e-9b28-b7628902dd80-credential-keys\") pod \"keystone-bootstrap-pb2k9\" (UID: \"f20351db-9d8c-4b0e-9b28-b7628902dd80\") " pod="openstack/keystone-bootstrap-pb2k9" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.400401 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x5nd4\" (UniqueName: \"kubernetes.io/projected/f20351db-9d8c-4b0e-9b28-b7628902dd80-kube-api-access-x5nd4\") pod \"keystone-bootstrap-pb2k9\" (UID: \"f20351db-9d8c-4b0e-9b28-b7628902dd80\") " pod="openstack/keystone-bootstrap-pb2k9" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.400441 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f20351db-9d8c-4b0e-9b28-b7628902dd80-scripts\") pod \"keystone-bootstrap-pb2k9\" (UID: \"f20351db-9d8c-4b0e-9b28-b7628902dd80\") " pod="openstack/keystone-bootstrap-pb2k9" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.400515 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f20351db-9d8c-4b0e-9b28-b7628902dd80-fernet-keys\") pod \"keystone-bootstrap-pb2k9\" (UID: 
\"f20351db-9d8c-4b0e-9b28-b7628902dd80\") " pod="openstack/keystone-bootstrap-pb2k9" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.400549 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f20351db-9d8c-4b0e-9b28-b7628902dd80-config-data\") pod \"keystone-bootstrap-pb2k9\" (UID: \"f20351db-9d8c-4b0e-9b28-b7628902dd80\") " pod="openstack/keystone-bootstrap-pb2k9" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.400573 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f20351db-9d8c-4b0e-9b28-b7628902dd80-combined-ca-bundle\") pod \"keystone-bootstrap-pb2k9\" (UID: \"f20351db-9d8c-4b0e-9b28-b7628902dd80\") " pod="openstack/keystone-bootstrap-pb2k9" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.417769 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f20351db-9d8c-4b0e-9b28-b7628902dd80-scripts\") pod \"keystone-bootstrap-pb2k9\" (UID: \"f20351db-9d8c-4b0e-9b28-b7628902dd80\") " pod="openstack/keystone-bootstrap-pb2k9" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.418236 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/f20351db-9d8c-4b0e-9b28-b7628902dd80-credential-keys\") pod \"keystone-bootstrap-pb2k9\" (UID: \"f20351db-9d8c-4b0e-9b28-b7628902dd80\") " pod="openstack/keystone-bootstrap-pb2k9" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.418784 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f20351db-9d8c-4b0e-9b28-b7628902dd80-config-data\") pod \"keystone-bootstrap-pb2k9\" (UID: \"f20351db-9d8c-4b0e-9b28-b7628902dd80\") " pod="openstack/keystone-bootstrap-pb2k9" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.423888 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f20351db-9d8c-4b0e-9b28-b7628902dd80-combined-ca-bundle\") pod \"keystone-bootstrap-pb2k9\" (UID: \"f20351db-9d8c-4b0e-9b28-b7628902dd80\") " pod="openstack/keystone-bootstrap-pb2k9" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.439376 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f20351db-9d8c-4b0e-9b28-b7628902dd80-fernet-keys\") pod \"keystone-bootstrap-pb2k9\" (UID: \"f20351db-9d8c-4b0e-9b28-b7628902dd80\") " pod="openstack/keystone-bootstrap-pb2k9" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.450314 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-zczgn"] Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.451559 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-zczgn" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.457598 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-gztxr" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.457821 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.458000 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.462929 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-92ghq"] Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.464174 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-92ghq" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.473387 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-zczgn"] Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.477018 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-m2xwl" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.477173 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.477377 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.477874 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x5nd4\" (UniqueName: \"kubernetes.io/projected/f20351db-9d8c-4b0e-9b28-b7628902dd80-kube-api-access-x5nd4\") pod \"keystone-bootstrap-pb2k9\" (UID: \"f20351db-9d8c-4b0e-9b28-b7628902dd80\") " pod="openstack/keystone-bootstrap-pb2k9" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.511500 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1086bd39-4637-4123-a7b2-d85d3a603dd5-combined-ca-bundle\") pod \"cinder-db-sync-92ghq\" (UID: \"1086bd39-4637-4123-a7b2-d85d3a603dd5\") " pod="openstack/cinder-db-sync-92ghq" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.511542 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fhqfd\" (UniqueName: \"kubernetes.io/projected/112a4ef5-b86f-4258-84db-bef5e66f9674-kube-api-access-fhqfd\") pod \"neutron-db-sync-zczgn\" (UID: \"112a4ef5-b86f-4258-84db-bef5e66f9674\") " pod="openstack/neutron-db-sync-zczgn" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.511578 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1086bd39-4637-4123-a7b2-d85d3a603dd5-db-sync-config-data\") pod \"cinder-db-sync-92ghq\" (UID: \"1086bd39-4637-4123-a7b2-d85d3a603dd5\") " pod="openstack/cinder-db-sync-92ghq" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.511608 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1086bd39-4637-4123-a7b2-d85d3a603dd5-scripts\") pod \"cinder-db-sync-92ghq\" (UID: \"1086bd39-4637-4123-a7b2-d85d3a603dd5\") " pod="openstack/cinder-db-sync-92ghq" Jan 22 06:05:08 crc kubenswrapper[4933]: 
I0122 06:05:08.511643 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1086bd39-4637-4123-a7b2-d85d3a603dd5-config-data\") pod \"cinder-db-sync-92ghq\" (UID: \"1086bd39-4637-4123-a7b2-d85d3a603dd5\") " pod="openstack/cinder-db-sync-92ghq" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.511664 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/112a4ef5-b86f-4258-84db-bef5e66f9674-combined-ca-bundle\") pod \"neutron-db-sync-zczgn\" (UID: \"112a4ef5-b86f-4258-84db-bef5e66f9674\") " pod="openstack/neutron-db-sync-zczgn" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.511686 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b88r9\" (UniqueName: \"kubernetes.io/projected/1086bd39-4637-4123-a7b2-d85d3a603dd5-kube-api-access-b88r9\") pod \"cinder-db-sync-92ghq\" (UID: \"1086bd39-4637-4123-a7b2-d85d3a603dd5\") " pod="openstack/cinder-db-sync-92ghq" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.511707 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1086bd39-4637-4123-a7b2-d85d3a603dd5-etc-machine-id\") pod \"cinder-db-sync-92ghq\" (UID: \"1086bd39-4637-4123-a7b2-d85d3a603dd5\") " pod="openstack/cinder-db-sync-92ghq" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.511759 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/112a4ef5-b86f-4258-84db-bef5e66f9674-config\") pod \"neutron-db-sync-zczgn\" (UID: \"112a4ef5-b86f-4258-84db-bef5e66f9674\") " pod="openstack/neutron-db-sync-zczgn" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.514061 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-92ghq"] Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.514168 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.515988 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.511614 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54b4bb76d5-lrr92" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.533982 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.534023 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.537060 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-nkj4c"] Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.543799 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-nkj4c" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.552962 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.553209 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.553415 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-xrjz2" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.566148 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.604966 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-nkj4c"] Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.613696 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/913fa4df-79e3-40d8-8218-a869383e2a25-combined-ca-bundle\") pod \"placement-db-sync-nkj4c\" (UID: \"913fa4df-79e3-40d8-8218-a869383e2a25\") " pod="openstack/placement-db-sync-nkj4c" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.614683 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1086bd39-4637-4123-a7b2-d85d3a603dd5-combined-ca-bundle\") pod \"cinder-db-sync-92ghq\" (UID: \"1086bd39-4637-4123-a7b2-d85d3a603dd5\") " pod="openstack/cinder-db-sync-92ghq" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.614793 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fhqfd\" (UniqueName: \"kubernetes.io/projected/112a4ef5-b86f-4258-84db-bef5e66f9674-kube-api-access-fhqfd\") pod \"neutron-db-sync-zczgn\" (UID: \"112a4ef5-b86f-4258-84db-bef5e66f9674\") " pod="openstack/neutron-db-sync-zczgn" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.614880 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/913fa4df-79e3-40d8-8218-a869383e2a25-logs\") pod \"placement-db-sync-nkj4c\" (UID: \"913fa4df-79e3-40d8-8218-a869383e2a25\") " pod="openstack/placement-db-sync-nkj4c" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.614954 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7d72c18-1516-44a5-ad92-f367c93280b1-scripts\") pod \"ceilometer-0\" (UID: \"a7d72c18-1516-44a5-ad92-f367c93280b1\") " pod="openstack/ceilometer-0" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.615022 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1086bd39-4637-4123-a7b2-d85d3a603dd5-db-sync-config-data\") pod \"cinder-db-sync-92ghq\" (UID: \"1086bd39-4637-4123-a7b2-d85d3a603dd5\") " pod="openstack/cinder-db-sync-92ghq" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.615135 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/913fa4df-79e3-40d8-8218-a869383e2a25-scripts\") pod \"placement-db-sync-nkj4c\" (UID: \"913fa4df-79e3-40d8-8218-a869383e2a25\") " pod="openstack/placement-db-sync-nkj4c" Jan 22 
06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.615214 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1086bd39-4637-4123-a7b2-d85d3a603dd5-scripts\") pod \"cinder-db-sync-92ghq\" (UID: \"1086bd39-4637-4123-a7b2-d85d3a603dd5\") " pod="openstack/cinder-db-sync-92ghq" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.615313 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1086bd39-4637-4123-a7b2-d85d3a603dd5-config-data\") pod \"cinder-db-sync-92ghq\" (UID: \"1086bd39-4637-4123-a7b2-d85d3a603dd5\") " pod="openstack/cinder-db-sync-92ghq" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.615386 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a7d72c18-1516-44a5-ad92-f367c93280b1-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a7d72c18-1516-44a5-ad92-f367c93280b1\") " pod="openstack/ceilometer-0" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.615455 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hwtpf\" (UniqueName: \"kubernetes.io/projected/a7d72c18-1516-44a5-ad92-f367c93280b1-kube-api-access-hwtpf\") pod \"ceilometer-0\" (UID: \"a7d72c18-1516-44a5-ad92-f367c93280b1\") " pod="openstack/ceilometer-0" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.615525 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/112a4ef5-b86f-4258-84db-bef5e66f9674-combined-ca-bundle\") pod \"neutron-db-sync-zczgn\" (UID: \"112a4ef5-b86f-4258-84db-bef5e66f9674\") " pod="openstack/neutron-db-sync-zczgn" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.615593 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/913fa4df-79e3-40d8-8218-a869383e2a25-config-data\") pod \"placement-db-sync-nkj4c\" (UID: \"913fa4df-79e3-40d8-8218-a869383e2a25\") " pod="openstack/placement-db-sync-nkj4c" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.615658 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7d72c18-1516-44a5-ad92-f367c93280b1-config-data\") pod \"ceilometer-0\" (UID: \"a7d72c18-1516-44a5-ad92-f367c93280b1\") " pod="openstack/ceilometer-0" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.615733 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b88r9\" (UniqueName: \"kubernetes.io/projected/1086bd39-4637-4123-a7b2-d85d3a603dd5-kube-api-access-b88r9\") pod \"cinder-db-sync-92ghq\" (UID: \"1086bd39-4637-4123-a7b2-d85d3a603dd5\") " pod="openstack/cinder-db-sync-92ghq" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.615810 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a7d72c18-1516-44a5-ad92-f367c93280b1-log-httpd\") pod \"ceilometer-0\" (UID: \"a7d72c18-1516-44a5-ad92-f367c93280b1\") " pod="openstack/ceilometer-0" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.615883 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/1086bd39-4637-4123-a7b2-d85d3a603dd5-etc-machine-id\") pod \"cinder-db-sync-92ghq\" (UID: \"1086bd39-4637-4123-a7b2-d85d3a603dd5\") " pod="openstack/cinder-db-sync-92ghq" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.615980 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7d72c18-1516-44a5-ad92-f367c93280b1-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a7d72c18-1516-44a5-ad92-f367c93280b1\") " pod="openstack/ceilometer-0" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.616054 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a7d72c18-1516-44a5-ad92-f367c93280b1-run-httpd\") pod \"ceilometer-0\" (UID: \"a7d72c18-1516-44a5-ad92-f367c93280b1\") " pod="openstack/ceilometer-0" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.616149 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w2kv7\" (UniqueName: \"kubernetes.io/projected/913fa4df-79e3-40d8-8218-a869383e2a25-kube-api-access-w2kv7\") pod \"placement-db-sync-nkj4c\" (UID: \"913fa4df-79e3-40d8-8218-a869383e2a25\") " pod="openstack/placement-db-sync-nkj4c" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.616244 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/112a4ef5-b86f-4258-84db-bef5e66f9674-config\") pod \"neutron-db-sync-zczgn\" (UID: \"112a4ef5-b86f-4258-84db-bef5e66f9674\") " pod="openstack/neutron-db-sync-zczgn" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.617524 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-pb2k9" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.621877 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/112a4ef5-b86f-4258-84db-bef5e66f9674-combined-ca-bundle\") pod \"neutron-db-sync-zczgn\" (UID: \"112a4ef5-b86f-4258-84db-bef5e66f9674\") " pod="openstack/neutron-db-sync-zczgn" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.624604 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1086bd39-4637-4123-a7b2-d85d3a603dd5-etc-machine-id\") pod \"cinder-db-sync-92ghq\" (UID: \"1086bd39-4637-4123-a7b2-d85d3a603dd5\") " pod="openstack/cinder-db-sync-92ghq" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.628245 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/112a4ef5-b86f-4258-84db-bef5e66f9674-config\") pod \"neutron-db-sync-zczgn\" (UID: \"112a4ef5-b86f-4258-84db-bef5e66f9674\") " pod="openstack/neutron-db-sync-zczgn" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.631162 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-gwqkb"] Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.632602 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-gwqkb" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.645768 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-54b4bb76d5-lrr92"] Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.652221 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-gwqkb"] Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.661148 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.661400 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-5lkk6" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.668714 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1086bd39-4637-4123-a7b2-d85d3a603dd5-combined-ca-bundle\") pod \"cinder-db-sync-92ghq\" (UID: \"1086bd39-4637-4123-a7b2-d85d3a603dd5\") " pod="openstack/cinder-db-sync-92ghq" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.670495 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1086bd39-4637-4123-a7b2-d85d3a603dd5-db-sync-config-data\") pod \"cinder-db-sync-92ghq\" (UID: \"1086bd39-4637-4123-a7b2-d85d3a603dd5\") " pod="openstack/cinder-db-sync-92ghq" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.676323 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1086bd39-4637-4123-a7b2-d85d3a603dd5-config-data\") pod \"cinder-db-sync-92ghq\" (UID: \"1086bd39-4637-4123-a7b2-d85d3a603dd5\") " pod="openstack/cinder-db-sync-92ghq" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.679639 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1086bd39-4637-4123-a7b2-d85d3a603dd5-scripts\") pod \"cinder-db-sync-92ghq\" (UID: \"1086bd39-4637-4123-a7b2-d85d3a603dd5\") " pod="openstack/cinder-db-sync-92ghq" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.684652 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fhqfd\" (UniqueName: \"kubernetes.io/projected/112a4ef5-b86f-4258-84db-bef5e66f9674-kube-api-access-fhqfd\") pod \"neutron-db-sync-zczgn\" (UID: \"112a4ef5-b86f-4258-84db-bef5e66f9674\") " pod="openstack/neutron-db-sync-zczgn" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.696840 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b88r9\" (UniqueName: \"kubernetes.io/projected/1086bd39-4637-4123-a7b2-d85d3a603dd5-kube-api-access-b88r9\") pod \"cinder-db-sync-92ghq\" (UID: \"1086bd39-4637-4123-a7b2-d85d3a603dd5\") " pod="openstack/cinder-db-sync-92ghq" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.712164 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5dc4fcdbc-8rcfp"] Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.713614 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5dc4fcdbc-8rcfp" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.717368 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7d72c18-1516-44a5-ad92-f367c93280b1-config-data\") pod \"ceilometer-0\" (UID: \"a7d72c18-1516-44a5-ad92-f367c93280b1\") " pod="openstack/ceilometer-0" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.717413 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a7d72c18-1516-44a5-ad92-f367c93280b1-log-httpd\") pod \"ceilometer-0\" (UID: \"a7d72c18-1516-44a5-ad92-f367c93280b1\") " pod="openstack/ceilometer-0" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.717445 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6ae84cf-ec9a-42e4-9c55-035d9accb4b2-combined-ca-bundle\") pod \"barbican-db-sync-gwqkb\" (UID: \"c6ae84cf-ec9a-42e4-9c55-035d9accb4b2\") " pod="openstack/barbican-db-sync-gwqkb" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.717469 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7d72c18-1516-44a5-ad92-f367c93280b1-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a7d72c18-1516-44a5-ad92-f367c93280b1\") " pod="openstack/ceilometer-0" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.717487 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a7d72c18-1516-44a5-ad92-f367c93280b1-run-httpd\") pod \"ceilometer-0\" (UID: \"a7d72c18-1516-44a5-ad92-f367c93280b1\") " pod="openstack/ceilometer-0" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.717508 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c6ae84cf-ec9a-42e4-9c55-035d9accb4b2-db-sync-config-data\") pod \"barbican-db-sync-gwqkb\" (UID: \"c6ae84cf-ec9a-42e4-9c55-035d9accb4b2\") " pod="openstack/barbican-db-sync-gwqkb" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.717525 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w2kv7\" (UniqueName: \"kubernetes.io/projected/913fa4df-79e3-40d8-8218-a869383e2a25-kube-api-access-w2kv7\") pod \"placement-db-sync-nkj4c\" (UID: \"913fa4df-79e3-40d8-8218-a869383e2a25\") " pod="openstack/placement-db-sync-nkj4c" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.717569 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/913fa4df-79e3-40d8-8218-a869383e2a25-combined-ca-bundle\") pod \"placement-db-sync-nkj4c\" (UID: \"913fa4df-79e3-40d8-8218-a869383e2a25\") " pod="openstack/placement-db-sync-nkj4c" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.717611 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/913fa4df-79e3-40d8-8218-a869383e2a25-logs\") pod \"placement-db-sync-nkj4c\" (UID: \"913fa4df-79e3-40d8-8218-a869383e2a25\") " pod="openstack/placement-db-sync-nkj4c" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.717629 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7d72c18-1516-44a5-ad92-f367c93280b1-scripts\") pod \"ceilometer-0\" (UID: \"a7d72c18-1516-44a5-ad92-f367c93280b1\") " pod="openstack/ceilometer-0" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.717654 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/913fa4df-79e3-40d8-8218-a869383e2a25-scripts\") pod \"placement-db-sync-nkj4c\" (UID: \"913fa4df-79e3-40d8-8218-a869383e2a25\") " pod="openstack/placement-db-sync-nkj4c" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.717672 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9n4lp\" (UniqueName: \"kubernetes.io/projected/c6ae84cf-ec9a-42e4-9c55-035d9accb4b2-kube-api-access-9n4lp\") pod \"barbican-db-sync-gwqkb\" (UID: \"c6ae84cf-ec9a-42e4-9c55-035d9accb4b2\") " pod="openstack/barbican-db-sync-gwqkb" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.717713 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a7d72c18-1516-44a5-ad92-f367c93280b1-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a7d72c18-1516-44a5-ad92-f367c93280b1\") " pod="openstack/ceilometer-0" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.717729 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hwtpf\" (UniqueName: \"kubernetes.io/projected/a7d72c18-1516-44a5-ad92-f367c93280b1-kube-api-access-hwtpf\") pod \"ceilometer-0\" (UID: \"a7d72c18-1516-44a5-ad92-f367c93280b1\") " pod="openstack/ceilometer-0" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.717763 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/913fa4df-79e3-40d8-8218-a869383e2a25-config-data\") pod \"placement-db-sync-nkj4c\" (UID: \"913fa4df-79e3-40d8-8218-a869383e2a25\") " pod="openstack/placement-db-sync-nkj4c" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.718212 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/913fa4df-79e3-40d8-8218-a869383e2a25-logs\") pod \"placement-db-sync-nkj4c\" (UID: \"913fa4df-79e3-40d8-8218-a869383e2a25\") " pod="openstack/placement-db-sync-nkj4c" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.719244 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a7d72c18-1516-44a5-ad92-f367c93280b1-log-httpd\") pod \"ceilometer-0\" (UID: \"a7d72c18-1516-44a5-ad92-f367c93280b1\") " pod="openstack/ceilometer-0" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.720965 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a7d72c18-1516-44a5-ad92-f367c93280b1-run-httpd\") pod \"ceilometer-0\" (UID: \"a7d72c18-1516-44a5-ad92-f367c93280b1\") " pod="openstack/ceilometer-0" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.722276 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7d72c18-1516-44a5-ad92-f367c93280b1-scripts\") pod \"ceilometer-0\" (UID: \"a7d72c18-1516-44a5-ad92-f367c93280b1\") " pod="openstack/ceilometer-0" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.722513 4933 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7d72c18-1516-44a5-ad92-f367c93280b1-config-data\") pod \"ceilometer-0\" (UID: \"a7d72c18-1516-44a5-ad92-f367c93280b1\") " pod="openstack/ceilometer-0" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.723443 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/913fa4df-79e3-40d8-8218-a869383e2a25-config-data\") pod \"placement-db-sync-nkj4c\" (UID: \"913fa4df-79e3-40d8-8218-a869383e2a25\") " pod="openstack/placement-db-sync-nkj4c" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.725666 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7d72c18-1516-44a5-ad92-f367c93280b1-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a7d72c18-1516-44a5-ad92-f367c93280b1\") " pod="openstack/ceilometer-0" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.729547 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/913fa4df-79e3-40d8-8218-a869383e2a25-combined-ca-bundle\") pod \"placement-db-sync-nkj4c\" (UID: \"913fa4df-79e3-40d8-8218-a869383e2a25\") " pod="openstack/placement-db-sync-nkj4c" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.731574 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/913fa4df-79e3-40d8-8218-a869383e2a25-scripts\") pod \"placement-db-sync-nkj4c\" (UID: \"913fa4df-79e3-40d8-8218-a869383e2a25\") " pod="openstack/placement-db-sync-nkj4c" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.744711 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a7d72c18-1516-44a5-ad92-f367c93280b1-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a7d72c18-1516-44a5-ad92-f367c93280b1\") " pod="openstack/ceilometer-0" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.756490 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hwtpf\" (UniqueName: \"kubernetes.io/projected/a7d72c18-1516-44a5-ad92-f367c93280b1-kube-api-access-hwtpf\") pod \"ceilometer-0\" (UID: \"a7d72c18-1516-44a5-ad92-f367c93280b1\") " pod="openstack/ceilometer-0" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.763448 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5dc4fcdbc-8rcfp"] Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.775673 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w2kv7\" (UniqueName: \"kubernetes.io/projected/913fa4df-79e3-40d8-8218-a869383e2a25-kube-api-access-w2kv7\") pod \"placement-db-sync-nkj4c\" (UID: \"913fa4df-79e3-40d8-8218-a869383e2a25\") " pod="openstack/placement-db-sync-nkj4c" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.819176 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c6ae84cf-ec9a-42e4-9c55-035d9accb4b2-db-sync-config-data\") pod \"barbican-db-sync-gwqkb\" (UID: \"c6ae84cf-ec9a-42e4-9c55-035d9accb4b2\") " pod="openstack/barbican-db-sync-gwqkb" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.819231 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/db9370fd-7e89-47ab-8238-7d24abeb981f-ovsdbserver-nb\") pod \"dnsmasq-dns-5dc4fcdbc-8rcfp\" (UID: \"db9370fd-7e89-47ab-8238-7d24abeb981f\") " pod="openstack/dnsmasq-dns-5dc4fcdbc-8rcfp" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.819301 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/db9370fd-7e89-47ab-8238-7d24abeb981f-dns-swift-storage-0\") pod \"dnsmasq-dns-5dc4fcdbc-8rcfp\" (UID: \"db9370fd-7e89-47ab-8238-7d24abeb981f\") " pod="openstack/dnsmasq-dns-5dc4fcdbc-8rcfp" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.819323 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/db9370fd-7e89-47ab-8238-7d24abeb981f-dns-svc\") pod \"dnsmasq-dns-5dc4fcdbc-8rcfp\" (UID: \"db9370fd-7e89-47ab-8238-7d24abeb981f\") " pod="openstack/dnsmasq-dns-5dc4fcdbc-8rcfp" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.819451 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbgh2\" (UniqueName: \"kubernetes.io/projected/db9370fd-7e89-47ab-8238-7d24abeb981f-kube-api-access-bbgh2\") pod \"dnsmasq-dns-5dc4fcdbc-8rcfp\" (UID: \"db9370fd-7e89-47ab-8238-7d24abeb981f\") " pod="openstack/dnsmasq-dns-5dc4fcdbc-8rcfp" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.819475 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db9370fd-7e89-47ab-8238-7d24abeb981f-config\") pod \"dnsmasq-dns-5dc4fcdbc-8rcfp\" (UID: \"db9370fd-7e89-47ab-8238-7d24abeb981f\") " pod="openstack/dnsmasq-dns-5dc4fcdbc-8rcfp" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.819503 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9n4lp\" (UniqueName: \"kubernetes.io/projected/c6ae84cf-ec9a-42e4-9c55-035d9accb4b2-kube-api-access-9n4lp\") pod \"barbican-db-sync-gwqkb\" (UID: \"c6ae84cf-ec9a-42e4-9c55-035d9accb4b2\") " pod="openstack/barbican-db-sync-gwqkb" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.819531 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/db9370fd-7e89-47ab-8238-7d24abeb981f-ovsdbserver-sb\") pod \"dnsmasq-dns-5dc4fcdbc-8rcfp\" (UID: \"db9370fd-7e89-47ab-8238-7d24abeb981f\") " pod="openstack/dnsmasq-dns-5dc4fcdbc-8rcfp" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.819582 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6ae84cf-ec9a-42e4-9c55-035d9accb4b2-combined-ca-bundle\") pod \"barbican-db-sync-gwqkb\" (UID: \"c6ae84cf-ec9a-42e4-9c55-035d9accb4b2\") " pod="openstack/barbican-db-sync-gwqkb" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.824662 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c6ae84cf-ec9a-42e4-9c55-035d9accb4b2-db-sync-config-data\") pod \"barbican-db-sync-gwqkb\" (UID: \"c6ae84cf-ec9a-42e4-9c55-035d9accb4b2\") " pod="openstack/barbican-db-sync-gwqkb" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.827635 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/c6ae84cf-ec9a-42e4-9c55-035d9accb4b2-combined-ca-bundle\") pod \"barbican-db-sync-gwqkb\" (UID: \"c6ae84cf-ec9a-42e4-9c55-035d9accb4b2\") " pod="openstack/barbican-db-sync-gwqkb" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.863903 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9n4lp\" (UniqueName: \"kubernetes.io/projected/c6ae84cf-ec9a-42e4-9c55-035d9accb4b2-kube-api-access-9n4lp\") pod \"barbican-db-sync-gwqkb\" (UID: \"c6ae84cf-ec9a-42e4-9c55-035d9accb4b2\") " pod="openstack/barbican-db-sync-gwqkb" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.905367 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-zczgn" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.921897 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/db9370fd-7e89-47ab-8238-7d24abeb981f-ovsdbserver-nb\") pod \"dnsmasq-dns-5dc4fcdbc-8rcfp\" (UID: \"db9370fd-7e89-47ab-8238-7d24abeb981f\") " pod="openstack/dnsmasq-dns-5dc4fcdbc-8rcfp" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.921963 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/db9370fd-7e89-47ab-8238-7d24abeb981f-dns-swift-storage-0\") pod \"dnsmasq-dns-5dc4fcdbc-8rcfp\" (UID: \"db9370fd-7e89-47ab-8238-7d24abeb981f\") " pod="openstack/dnsmasq-dns-5dc4fcdbc-8rcfp" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.921988 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/db9370fd-7e89-47ab-8238-7d24abeb981f-dns-svc\") pod \"dnsmasq-dns-5dc4fcdbc-8rcfp\" (UID: \"db9370fd-7e89-47ab-8238-7d24abeb981f\") " pod="openstack/dnsmasq-dns-5dc4fcdbc-8rcfp" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.922020 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bbgh2\" (UniqueName: \"kubernetes.io/projected/db9370fd-7e89-47ab-8238-7d24abeb981f-kube-api-access-bbgh2\") pod \"dnsmasq-dns-5dc4fcdbc-8rcfp\" (UID: \"db9370fd-7e89-47ab-8238-7d24abeb981f\") " pod="openstack/dnsmasq-dns-5dc4fcdbc-8rcfp" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.922040 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db9370fd-7e89-47ab-8238-7d24abeb981f-config\") pod \"dnsmasq-dns-5dc4fcdbc-8rcfp\" (UID: \"db9370fd-7e89-47ab-8238-7d24abeb981f\") " pod="openstack/dnsmasq-dns-5dc4fcdbc-8rcfp" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.922069 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/db9370fd-7e89-47ab-8238-7d24abeb981f-ovsdbserver-sb\") pod \"dnsmasq-dns-5dc4fcdbc-8rcfp\" (UID: \"db9370fd-7e89-47ab-8238-7d24abeb981f\") " pod="openstack/dnsmasq-dns-5dc4fcdbc-8rcfp" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.922900 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/db9370fd-7e89-47ab-8238-7d24abeb981f-ovsdbserver-sb\") pod \"dnsmasq-dns-5dc4fcdbc-8rcfp\" (UID: \"db9370fd-7e89-47ab-8238-7d24abeb981f\") " pod="openstack/dnsmasq-dns-5dc4fcdbc-8rcfp" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.923458 4933 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/db9370fd-7e89-47ab-8238-7d24abeb981f-ovsdbserver-nb\") pod \"dnsmasq-dns-5dc4fcdbc-8rcfp\" (UID: \"db9370fd-7e89-47ab-8238-7d24abeb981f\") " pod="openstack/dnsmasq-dns-5dc4fcdbc-8rcfp" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.923968 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/db9370fd-7e89-47ab-8238-7d24abeb981f-dns-svc\") pod \"dnsmasq-dns-5dc4fcdbc-8rcfp\" (UID: \"db9370fd-7e89-47ab-8238-7d24abeb981f\") " pod="openstack/dnsmasq-dns-5dc4fcdbc-8rcfp" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.924505 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/db9370fd-7e89-47ab-8238-7d24abeb981f-dns-swift-storage-0\") pod \"dnsmasq-dns-5dc4fcdbc-8rcfp\" (UID: \"db9370fd-7e89-47ab-8238-7d24abeb981f\") " pod="openstack/dnsmasq-dns-5dc4fcdbc-8rcfp" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.925056 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db9370fd-7e89-47ab-8238-7d24abeb981f-config\") pod \"dnsmasq-dns-5dc4fcdbc-8rcfp\" (UID: \"db9370fd-7e89-47ab-8238-7d24abeb981f\") " pod="openstack/dnsmasq-dns-5dc4fcdbc-8rcfp" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.941926 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bbgh2\" (UniqueName: \"kubernetes.io/projected/db9370fd-7e89-47ab-8238-7d24abeb981f-kube-api-access-bbgh2\") pod \"dnsmasq-dns-5dc4fcdbc-8rcfp\" (UID: \"db9370fd-7e89-47ab-8238-7d24abeb981f\") " pod="openstack/dnsmasq-dns-5dc4fcdbc-8rcfp" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.957970 4933 generic.go:334] "Generic (PLEG): container finished" podID="a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852" containerID="7392029f2f82ccea4bcf90f56fd558047283f0e7a8c30fc09dbe263b2c626149" exitCode=0 Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.958033 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cb545bd4c-r4w7m" event={"ID":"a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852","Type":"ContainerDied","Data":"7392029f2f82ccea4bcf90f56fd558047283f0e7a8c30fc09dbe263b2c626149"} Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.962832 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-92ghq" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.965686 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.977786 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-nkj4c" Jan 22 06:05:08 crc kubenswrapper[4933]: I0122 06:05:08.990317 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-gwqkb" Jan 22 06:05:09 crc kubenswrapper[4933]: I0122 06:05:09.036638 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5dc4fcdbc-8rcfp" Jan 22 06:05:09 crc kubenswrapper[4933]: I0122 06:05:09.205789 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-54b4bb76d5-lrr92"] Jan 22 06:05:09 crc kubenswrapper[4933]: W0122 06:05:09.246514 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode9455ccf_cb44_4637_90e2_c48092ac7e20.slice/crio-291eb253eb9f89eea21fcdd2951ed90f7b96eab0e18ef14bc6d186b5c4aceda9 WatchSource:0}: Error finding container 291eb253eb9f89eea21fcdd2951ed90f7b96eab0e18ef14bc6d186b5c4aceda9: Status 404 returned error can't find the container with id 291eb253eb9f89eea21fcdd2951ed90f7b96eab0e18ef14bc6d186b5c4aceda9 Jan 22 06:05:09 crc kubenswrapper[4933]: I0122 06:05:09.332585 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-pb2k9"] Jan 22 06:05:09 crc kubenswrapper[4933]: I0122 06:05:09.353896 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 06:05:09 crc kubenswrapper[4933]: I0122 06:05:09.355803 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 22 06:05:09 crc kubenswrapper[4933]: I0122 06:05:09.360422 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Jan 22 06:05:09 crc kubenswrapper[4933]: I0122 06:05:09.361292 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 22 06:05:09 crc kubenswrapper[4933]: I0122 06:05:09.361651 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-flfkl" Jan 22 06:05:09 crc kubenswrapper[4933]: I0122 06:05:09.364236 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 06:05:09 crc kubenswrapper[4933]: I0122 06:05:09.377153 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Jan 22 06:05:09 crc kubenswrapper[4933]: I0122 06:05:09.435465 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 06:05:09 crc kubenswrapper[4933]: I0122 06:05:09.437008 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 22 06:05:09 crc kubenswrapper[4933]: I0122 06:05:09.441057 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 22 06:05:09 crc kubenswrapper[4933]: I0122 06:05:09.445000 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52975e6a-56b7-4d58-952c-feb45cd89939-scripts\") pod \"glance-default-internal-api-0\" (UID: \"52975e6a-56b7-4d58-952c-feb45cd89939\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:05:09 crc kubenswrapper[4933]: I0122 06:05:09.445043 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x9r92\" (UniqueName: \"kubernetes.io/projected/52975e6a-56b7-4d58-952c-feb45cd89939-kube-api-access-x9r92\") pod \"glance-default-internal-api-0\" (UID: \"52975e6a-56b7-4d58-952c-feb45cd89939\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:05:09 crc kubenswrapper[4933]: I0122 06:05:09.445083 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/52975e6a-56b7-4d58-952c-feb45cd89939-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"52975e6a-56b7-4d58-952c-feb45cd89939\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:05:09 crc kubenswrapper[4933]: I0122 06:05:09.445108 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/52975e6a-56b7-4d58-952c-feb45cd89939-logs\") pod \"glance-default-internal-api-0\" (UID: \"52975e6a-56b7-4d58-952c-feb45cd89939\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:05:09 crc kubenswrapper[4933]: I0122 06:05:09.445122 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52975e6a-56b7-4d58-952c-feb45cd89939-config-data\") pod \"glance-default-internal-api-0\" (UID: \"52975e6a-56b7-4d58-952c-feb45cd89939\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:05:09 crc kubenswrapper[4933]: I0122 06:05:09.445152 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/52975e6a-56b7-4d58-952c-feb45cd89939-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"52975e6a-56b7-4d58-952c-feb45cd89939\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:05:09 crc kubenswrapper[4933]: I0122 06:05:09.445170 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52975e6a-56b7-4d58-952c-feb45cd89939-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"52975e6a-56b7-4d58-952c-feb45cd89939\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:05:09 crc kubenswrapper[4933]: I0122 06:05:09.445521 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"52975e6a-56b7-4d58-952c-feb45cd89939\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:05:09 crc kubenswrapper[4933]: I0122 06:05:09.446413 4933 
reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 22 06:05:09 crc kubenswrapper[4933]: I0122 06:05:09.460525 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.546913 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b708132-acea-4f3a-913b-fc2a351d82df-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"5b708132-acea-4f3a-913b-fc2a351d82df\") " pod="openstack/glance-default-external-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.547166 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b708132-acea-4f3a-913b-fc2a351d82df-scripts\") pod \"glance-default-external-api-0\" (UID: \"5b708132-acea-4f3a-913b-fc2a351d82df\") " pod="openstack/glance-default-external-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.547258 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52975e6a-56b7-4d58-952c-feb45cd89939-scripts\") pod \"glance-default-internal-api-0\" (UID: \"52975e6a-56b7-4d58-952c-feb45cd89939\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.547333 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x9r92\" (UniqueName: \"kubernetes.io/projected/52975e6a-56b7-4d58-952c-feb45cd89939-kube-api-access-x9r92\") pod \"glance-default-internal-api-0\" (UID: \"52975e6a-56b7-4d58-952c-feb45cd89939\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.547409 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/52975e6a-56b7-4d58-952c-feb45cd89939-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"52975e6a-56b7-4d58-952c-feb45cd89939\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.547450 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/52975e6a-56b7-4d58-952c-feb45cd89939-logs\") pod \"glance-default-internal-api-0\" (UID: \"52975e6a-56b7-4d58-952c-feb45cd89939\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.547475 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52975e6a-56b7-4d58-952c-feb45cd89939-config-data\") pod \"glance-default-internal-api-0\" (UID: \"52975e6a-56b7-4d58-952c-feb45cd89939\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.547539 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5b708132-acea-4f3a-913b-fc2a351d82df-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"5b708132-acea-4f3a-913b-fc2a351d82df\") " pod="openstack/glance-default-external-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.547578 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/52975e6a-56b7-4d58-952c-feb45cd89939-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"52975e6a-56b7-4d58-952c-feb45cd89939\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.547600 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5b708132-acea-4f3a-913b-fc2a351d82df-logs\") pod \"glance-default-external-api-0\" (UID: \"5b708132-acea-4f3a-913b-fc2a351d82df\") " pod="openstack/glance-default-external-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.547633 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52975e6a-56b7-4d58-952c-feb45cd89939-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"52975e6a-56b7-4d58-952c-feb45cd89939\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.547697 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"52975e6a-56b7-4d58-952c-feb45cd89939\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.547777 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q5tnw\" (UniqueName: \"kubernetes.io/projected/5b708132-acea-4f3a-913b-fc2a351d82df-kube-api-access-q5tnw\") pod \"glance-default-external-api-0\" (UID: \"5b708132-acea-4f3a-913b-fc2a351d82df\") " pod="openstack/glance-default-external-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.547875 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b708132-acea-4f3a-913b-fc2a351d82df-config-data\") pod \"glance-default-external-api-0\" (UID: \"5b708132-acea-4f3a-913b-fc2a351d82df\") " pod="openstack/glance-default-external-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.547909 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b708132-acea-4f3a-913b-fc2a351d82df-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"5b708132-acea-4f3a-913b-fc2a351d82df\") " pod="openstack/glance-default-external-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.547983 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"5b708132-acea-4f3a-913b-fc2a351d82df\") " pod="openstack/glance-default-external-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.550989 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/52975e6a-56b7-4d58-952c-feb45cd89939-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"52975e6a-56b7-4d58-952c-feb45cd89939\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.551933 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/neutron-db-sync-zczgn"] Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.552238 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/52975e6a-56b7-4d58-952c-feb45cd89939-logs\") pod \"glance-default-internal-api-0\" (UID: \"52975e6a-56b7-4d58-952c-feb45cd89939\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.555304 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52975e6a-56b7-4d58-952c-feb45cd89939-config-data\") pod \"glance-default-internal-api-0\" (UID: \"52975e6a-56b7-4d58-952c-feb45cd89939\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.555715 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52975e6a-56b7-4d58-952c-feb45cd89939-scripts\") pod \"glance-default-internal-api-0\" (UID: \"52975e6a-56b7-4d58-952c-feb45cd89939\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.558620 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/52975e6a-56b7-4d58-952c-feb45cd89939-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"52975e6a-56b7-4d58-952c-feb45cd89939\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.558879 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52975e6a-56b7-4d58-952c-feb45cd89939-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"52975e6a-56b7-4d58-952c-feb45cd89939\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.572664 4933 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"52975e6a-56b7-4d58-952c-feb45cd89939\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-internal-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.572866 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x9r92\" (UniqueName: \"kubernetes.io/projected/52975e6a-56b7-4d58-952c-feb45cd89939-kube-api-access-x9r92\") pod \"glance-default-internal-api-0\" (UID: \"52975e6a-56b7-4d58-952c-feb45cd89939\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.621556 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"52975e6a-56b7-4d58-952c-feb45cd89939\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.648728 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5b708132-acea-4f3a-913b-fc2a351d82df-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"5b708132-acea-4f3a-913b-fc2a351d82df\") " pod="openstack/glance-default-external-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.649065 4933 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5b708132-acea-4f3a-913b-fc2a351d82df-logs\") pod \"glance-default-external-api-0\" (UID: \"5b708132-acea-4f3a-913b-fc2a351d82df\") " pod="openstack/glance-default-external-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.649168 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q5tnw\" (UniqueName: \"kubernetes.io/projected/5b708132-acea-4f3a-913b-fc2a351d82df-kube-api-access-q5tnw\") pod \"glance-default-external-api-0\" (UID: \"5b708132-acea-4f3a-913b-fc2a351d82df\") " pod="openstack/glance-default-external-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.649195 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b708132-acea-4f3a-913b-fc2a351d82df-config-data\") pod \"glance-default-external-api-0\" (UID: \"5b708132-acea-4f3a-913b-fc2a351d82df\") " pod="openstack/glance-default-external-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.649213 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b708132-acea-4f3a-913b-fc2a351d82df-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"5b708132-acea-4f3a-913b-fc2a351d82df\") " pod="openstack/glance-default-external-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.649242 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"5b708132-acea-4f3a-913b-fc2a351d82df\") " pod="openstack/glance-default-external-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.649269 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b708132-acea-4f3a-913b-fc2a351d82df-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"5b708132-acea-4f3a-913b-fc2a351d82df\") " pod="openstack/glance-default-external-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.649305 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b708132-acea-4f3a-913b-fc2a351d82df-scripts\") pod \"glance-default-external-api-0\" (UID: \"5b708132-acea-4f3a-913b-fc2a351d82df\") " pod="openstack/glance-default-external-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.649333 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5b708132-acea-4f3a-913b-fc2a351d82df-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"5b708132-acea-4f3a-913b-fc2a351d82df\") " pod="openstack/glance-default-external-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.649513 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5b708132-acea-4f3a-913b-fc2a351d82df-logs\") pod \"glance-default-external-api-0\" (UID: \"5b708132-acea-4f3a-913b-fc2a351d82df\") " pod="openstack/glance-default-external-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.649808 4933 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"5b708132-acea-4f3a-913b-fc2a351d82df\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/glance-default-external-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.653002 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b708132-acea-4f3a-913b-fc2a351d82df-scripts\") pod \"glance-default-external-api-0\" (UID: \"5b708132-acea-4f3a-913b-fc2a351d82df\") " pod="openstack/glance-default-external-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.658444 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b708132-acea-4f3a-913b-fc2a351d82df-config-data\") pod \"glance-default-external-api-0\" (UID: \"5b708132-acea-4f3a-913b-fc2a351d82df\") " pod="openstack/glance-default-external-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.658584 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b708132-acea-4f3a-913b-fc2a351d82df-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"5b708132-acea-4f3a-913b-fc2a351d82df\") " pod="openstack/glance-default-external-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.658751 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b708132-acea-4f3a-913b-fc2a351d82df-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"5b708132-acea-4f3a-913b-fc2a351d82df\") " pod="openstack/glance-default-external-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.668729 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q5tnw\" (UniqueName: \"kubernetes.io/projected/5b708132-acea-4f3a-913b-fc2a351d82df-kube-api-access-q5tnw\") pod \"glance-default-external-api-0\" (UID: \"5b708132-acea-4f3a-913b-fc2a351d82df\") " pod="openstack/glance-default-external-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.683770 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"5b708132-acea-4f3a-913b-fc2a351d82df\") " pod="openstack/glance-default-external-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.791858 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.817848 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.975185 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-zczgn" event={"ID":"112a4ef5-b86f-4258-84db-bef5e66f9674","Type":"ContainerStarted","Data":"2925016a4526e711c7a13ca9b27ae95ab14072a5756f21964d4b6cb4c8838060"} Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.975569 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-zczgn" event={"ID":"112a4ef5-b86f-4258-84db-bef5e66f9674","Type":"ContainerStarted","Data":"f8e055833cd2123eb22082e60be88e963d3bad64f12dab520fd937d46612b8ca"} Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.983475 4933 generic.go:334] "Generic (PLEG): container finished" podID="e9455ccf-cb44-4637-90e2-c48092ac7e20" containerID="1ee0666f6fcf42337f0317ead1f7eae37dfd7479040c9eaa55cc4e3f5ac7bb4f" exitCode=0 Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.983553 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54b4bb76d5-lrr92" event={"ID":"e9455ccf-cb44-4637-90e2-c48092ac7e20","Type":"ContainerDied","Data":"1ee0666f6fcf42337f0317ead1f7eae37dfd7479040c9eaa55cc4e3f5ac7bb4f"} Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.983583 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54b4bb76d5-lrr92" event={"ID":"e9455ccf-cb44-4637-90e2-c48092ac7e20","Type":"ContainerStarted","Data":"291eb253eb9f89eea21fcdd2951ed90f7b96eab0e18ef14bc6d186b5c4aceda9"} Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:09.994867 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-zczgn" podStartSLOduration=1.994851089 podStartE2EDuration="1.994851089s" podCreationTimestamp="2026-01-22 06:05:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:05:09.991280889 +0000 UTC m=+1157.828406242" watchObservedRunningTime="2026-01-22 06:05:09.994851089 +0000 UTC m=+1157.831976442" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:10.004748 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-pb2k9" event={"ID":"f20351db-9d8c-4b0e-9b28-b7628902dd80","Type":"ContainerStarted","Data":"6407112093f3e5136e3386e9fa21f2e430a1ab957e3150228ee38dbdebba2e14"} Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:10.004784 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-pb2k9" event={"ID":"f20351db-9d8c-4b0e-9b28-b7628902dd80","Type":"ContainerStarted","Data":"823a8a532f5738dd04885d2a930e51cd50496e8a60ddf70f13562ca23c38e076"} Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:10.048351 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-pb2k9" podStartSLOduration=2.048329227 podStartE2EDuration="2.048329227s" podCreationTimestamp="2026-01-22 06:05:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:05:10.042091751 +0000 UTC m=+1157.879217124" watchObservedRunningTime="2026-01-22 06:05:10.048329227 +0000 UTC m=+1157.885454580" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:10.308424 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-nkj4c"] Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:10.314238 4933 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-92ghq"] Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:10.321204 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:10.725072 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5dc4fcdbc-8rcfp"] Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:10.743501 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6cb545bd4c-r4w7m" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:10.795676 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54b4bb76d5-lrr92" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:10.833108 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852-ovsdbserver-sb\") pod \"a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852\" (UID: \"a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852\") " Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:10.833260 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e9455ccf-cb44-4637-90e2-c48092ac7e20-dns-swift-storage-0\") pod \"e9455ccf-cb44-4637-90e2-c48092ac7e20\" (UID: \"e9455ccf-cb44-4637-90e2-c48092ac7e20\") " Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:10.833311 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e9455ccf-cb44-4637-90e2-c48092ac7e20-ovsdbserver-nb\") pod \"e9455ccf-cb44-4637-90e2-c48092ac7e20\" (UID: \"e9455ccf-cb44-4637-90e2-c48092ac7e20\") " Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:10.833333 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pdfsg\" (UniqueName: \"kubernetes.io/projected/e9455ccf-cb44-4637-90e2-c48092ac7e20-kube-api-access-pdfsg\") pod \"e9455ccf-cb44-4637-90e2-c48092ac7e20\" (UID: \"e9455ccf-cb44-4637-90e2-c48092ac7e20\") " Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:10.833378 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-thbl5\" (UniqueName: \"kubernetes.io/projected/a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852-kube-api-access-thbl5\") pod \"a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852\" (UID: \"a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852\") " Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:10.833414 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9455ccf-cb44-4637-90e2-c48092ac7e20-config\") pod \"e9455ccf-cb44-4637-90e2-c48092ac7e20\" (UID: \"e9455ccf-cb44-4637-90e2-c48092ac7e20\") " Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:10.833453 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e9455ccf-cb44-4637-90e2-c48092ac7e20-dns-svc\") pod \"e9455ccf-cb44-4637-90e2-c48092ac7e20\" (UID: \"e9455ccf-cb44-4637-90e2-c48092ac7e20\") " Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:10.833486 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852-ovsdbserver-nb\") pod 
\"a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852\" (UID: \"a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852\") " Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:10.833501 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852-dns-svc\") pod \"a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852\" (UID: \"a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852\") " Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:10.833538 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e9455ccf-cb44-4637-90e2-c48092ac7e20-ovsdbserver-sb\") pod \"e9455ccf-cb44-4637-90e2-c48092ac7e20\" (UID: \"e9455ccf-cb44-4637-90e2-c48092ac7e20\") " Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:10.833574 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852-config\") pod \"a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852\" (UID: \"a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852\") " Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:10.865280 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e9455ccf-cb44-4637-90e2-c48092ac7e20-kube-api-access-pdfsg" (OuterVolumeSpecName: "kube-api-access-pdfsg") pod "e9455ccf-cb44-4637-90e2-c48092ac7e20" (UID: "e9455ccf-cb44-4637-90e2-c48092ac7e20"). InnerVolumeSpecName "kube-api-access-pdfsg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:10.879482 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852-kube-api-access-thbl5" (OuterVolumeSpecName: "kube-api-access-thbl5") pod "a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852" (UID: "a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852"). InnerVolumeSpecName "kube-api-access-thbl5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:10.894239 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-gwqkb"] Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:10.939161 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pdfsg\" (UniqueName: \"kubernetes.io/projected/e9455ccf-cb44-4637-90e2-c48092ac7e20-kube-api-access-pdfsg\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:10.939189 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-thbl5\" (UniqueName: \"kubernetes.io/projected/a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852-kube-api-access-thbl5\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:10.965689 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e9455ccf-cb44-4637-90e2-c48092ac7e20-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e9455ccf-cb44-4637-90e2-c48092ac7e20" (UID: "e9455ccf-cb44-4637-90e2-c48092ac7e20"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:05:10 crc kubenswrapper[4933]: I0122 06:05:10.976966 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e9455ccf-cb44-4637-90e2-c48092ac7e20-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e9455ccf-cb44-4637-90e2-c48092ac7e20" (UID: "e9455ccf-cb44-4637-90e2-c48092ac7e20"). 
InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:05:11 crc kubenswrapper[4933]: I0122 06:05:11.002620 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 06:05:11 crc kubenswrapper[4933]: I0122 06:05:11.014508 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e9455ccf-cb44-4637-90e2-c48092ac7e20-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e9455ccf-cb44-4637-90e2-c48092ac7e20" (UID: "e9455ccf-cb44-4637-90e2-c48092ac7e20"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:05:11 crc kubenswrapper[4933]: I0122 06:05:11.023628 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e9455ccf-cb44-4637-90e2-c48092ac7e20-config" (OuterVolumeSpecName: "config") pod "e9455ccf-cb44-4637-90e2-c48092ac7e20" (UID: "e9455ccf-cb44-4637-90e2-c48092ac7e20"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:05:11 crc kubenswrapper[4933]: I0122 06:05:11.044620 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e9455ccf-cb44-4637-90e2-c48092ac7e20-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:11 crc kubenswrapper[4933]: I0122 06:05:11.044658 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e9455ccf-cb44-4637-90e2-c48092ac7e20-config\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:11 crc kubenswrapper[4933]: I0122 06:05:11.044669 4933 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e9455ccf-cb44-4637-90e2-c48092ac7e20-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:11 crc kubenswrapper[4933]: I0122 06:05:11.044681 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e9455ccf-cb44-4637-90e2-c48092ac7e20-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:11 crc kubenswrapper[4933]: I0122 06:05:11.056133 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 06:05:11 crc kubenswrapper[4933]: I0122 06:05:11.059026 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e9455ccf-cb44-4637-90e2-c48092ac7e20-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "e9455ccf-cb44-4637-90e2-c48092ac7e20" (UID: "e9455ccf-cb44-4637-90e2-c48092ac7e20"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:05:11 crc kubenswrapper[4933]: I0122 06:05:11.065638 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cb545bd4c-r4w7m" event={"ID":"a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852","Type":"ContainerDied","Data":"95de7478a6c34d7821ff8677a21d8dd635741436e44f393713453c39a5dd8a48"} Jan 22 06:05:11 crc kubenswrapper[4933]: I0122 06:05:11.065688 4933 scope.go:117] "RemoveContainer" containerID="7392029f2f82ccea4bcf90f56fd558047283f0e7a8c30fc09dbe263b2c626149" Jan 22 06:05:11 crc kubenswrapper[4933]: I0122 06:05:11.065813 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6cb545bd4c-r4w7m" Jan 22 06:05:11 crc kubenswrapper[4933]: I0122 06:05:11.068718 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852-config" (OuterVolumeSpecName: "config") pod "a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852" (UID: "a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:05:11 crc kubenswrapper[4933]: I0122 06:05:11.071636 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852" (UID: "a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:05:11 crc kubenswrapper[4933]: I0122 06:05:11.079225 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-nkj4c" event={"ID":"913fa4df-79e3-40d8-8218-a869383e2a25","Type":"ContainerStarted","Data":"9b8265d4ed45940a59c0ebc8bb62946288b89fa70176dbdb1b80138ab5dd537b"} Jan 22 06:05:11 crc kubenswrapper[4933]: I0122 06:05:11.094323 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852" (UID: "a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:05:11 crc kubenswrapper[4933]: I0122 06:05:11.101956 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54b4bb76d5-lrr92" event={"ID":"e9455ccf-cb44-4637-90e2-c48092ac7e20","Type":"ContainerDied","Data":"291eb253eb9f89eea21fcdd2951ed90f7b96eab0e18ef14bc6d186b5c4aceda9"} Jan 22 06:05:11 crc kubenswrapper[4933]: I0122 06:05:11.102058 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54b4bb76d5-lrr92" Jan 22 06:05:11 crc kubenswrapper[4933]: I0122 06:05:11.102687 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852" (UID: "a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:05:11 crc kubenswrapper[4933]: I0122 06:05:11.110307 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a7d72c18-1516-44a5-ad92-f367c93280b1","Type":"ContainerStarted","Data":"ff3e6ec91ffea38b7c3966a2d7f8f278107f99bdd327ca1480363d6b370a90d1"} Jan 22 06:05:11 crc kubenswrapper[4933]: I0122 06:05:11.112684 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:05:11 crc kubenswrapper[4933]: I0122 06:05:11.118506 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-gwqkb" event={"ID":"c6ae84cf-ec9a-42e4-9c55-035d9accb4b2","Type":"ContainerStarted","Data":"fb37ed81c499d94bfb8a688b10004edae6292c43b1faa1238ca7be6a0916daf0"} Jan 22 06:05:11 crc kubenswrapper[4933]: I0122 06:05:11.119493 4933 scope.go:117] "RemoveContainer" containerID="dd212c2f54dda27e5adf5806d0bbfdd671706e39d841f72d2ebc94dbe62fba15" Jan 22 06:05:11 crc kubenswrapper[4933]: I0122 06:05:11.130710 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-92ghq" event={"ID":"1086bd39-4637-4123-a7b2-d85d3a603dd5","Type":"ContainerStarted","Data":"d142b23612449fcd3d950de4f910d8fe2e0b01d2d660279f5adfea23cdafcc7c"} Jan 22 06:05:11 crc kubenswrapper[4933]: I0122 06:05:11.134976 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 06:05:11 crc kubenswrapper[4933]: I0122 06:05:11.148164 4933 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e9455ccf-cb44-4637-90e2-c48092ac7e20-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:11 crc kubenswrapper[4933]: I0122 06:05:11.148190 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:11 crc kubenswrapper[4933]: I0122 06:05:11.148200 4933 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:11 crc kubenswrapper[4933]: I0122 06:05:11.148208 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852-config\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:11 crc kubenswrapper[4933]: I0122 06:05:11.148244 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:11 crc kubenswrapper[4933]: I0122 06:05:11.155176 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5dc4fcdbc-8rcfp" event={"ID":"db9370fd-7e89-47ab-8238-7d24abeb981f","Type":"ContainerStarted","Data":"8c94eb1e4f9b229976e510ec63a177c75444d8b05c76ca65d8c57488369f3093"} Jan 22 06:05:11 crc kubenswrapper[4933]: I0122 06:05:11.189229 4933 scope.go:117] "RemoveContainer" containerID="1ee0666f6fcf42337f0317ead1f7eae37dfd7479040c9eaa55cc4e3f5ac7bb4f" Jan 22 06:05:11 crc kubenswrapper[4933]: I0122 06:05:11.192128 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-54b4bb76d5-lrr92"] Jan 22 06:05:11 crc kubenswrapper[4933]: I0122 06:05:11.200390 4933 kubelet.go:2431] "SyncLoop REMOVE" 
source="api" pods=["openstack/dnsmasq-dns-54b4bb76d5-lrr92"] Jan 22 06:05:11 crc kubenswrapper[4933]: I0122 06:05:11.408150 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6cb545bd4c-r4w7m"] Jan 22 06:05:11 crc kubenswrapper[4933]: I0122 06:05:11.415955 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6cb545bd4c-r4w7m"] Jan 22 06:05:12 crc kubenswrapper[4933]: I0122 06:05:12.016629 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 06:05:12 crc kubenswrapper[4933]: W0122 06:05:12.076651 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5b708132_acea_4f3a_913b_fc2a351d82df.slice/crio-d5aba8e6f82d48be1b9d29ce364ba9e1f2037bdb9ccfae23c64b0dc84905560f WatchSource:0}: Error finding container d5aba8e6f82d48be1b9d29ce364ba9e1f2037bdb9ccfae23c64b0dc84905560f: Status 404 returned error can't find the container with id d5aba8e6f82d48be1b9d29ce364ba9e1f2037bdb9ccfae23c64b0dc84905560f Jan 22 06:05:12 crc kubenswrapper[4933]: I0122 06:05:12.178531 4933 generic.go:334] "Generic (PLEG): container finished" podID="db9370fd-7e89-47ab-8238-7d24abeb981f" containerID="a4f3ce4e811b73a4d05b28e37fa88419dff7ac4f35d8a912a0bf8ee52f0f3503" exitCode=0 Jan 22 06:05:12 crc kubenswrapper[4933]: I0122 06:05:12.178663 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5dc4fcdbc-8rcfp" event={"ID":"db9370fd-7e89-47ab-8238-7d24abeb981f","Type":"ContainerDied","Data":"a4f3ce4e811b73a4d05b28e37fa88419dff7ac4f35d8a912a0bf8ee52f0f3503"} Jan 22 06:05:12 crc kubenswrapper[4933]: I0122 06:05:12.189519 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"52975e6a-56b7-4d58-952c-feb45cd89939","Type":"ContainerStarted","Data":"a3460dc884d70f6bf73627a5f4d172d4b3ca649e2192b95b6b435724720b8a15"} Jan 22 06:05:12 crc kubenswrapper[4933]: I0122 06:05:12.189554 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"52975e6a-56b7-4d58-952c-feb45cd89939","Type":"ContainerStarted","Data":"5f821a7e3e50411fcde57ef38b02fb5d307651d639632ec4ebbd981b7e7da84b"} Jan 22 06:05:12 crc kubenswrapper[4933]: I0122 06:05:12.191636 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5b708132-acea-4f3a-913b-fc2a351d82df","Type":"ContainerStarted","Data":"d5aba8e6f82d48be1b9d29ce364ba9e1f2037bdb9ccfae23c64b0dc84905560f"} Jan 22 06:05:12 crc kubenswrapper[4933]: I0122 06:05:12.506226 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852" path="/var/lib/kubelet/pods/a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852/volumes" Jan 22 06:05:12 crc kubenswrapper[4933]: I0122 06:05:12.506812 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e9455ccf-cb44-4637-90e2-c48092ac7e20" path="/var/lib/kubelet/pods/e9455ccf-cb44-4637-90e2-c48092ac7e20/volumes" Jan 22 06:05:13 crc kubenswrapper[4933]: I0122 06:05:13.202143 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5b708132-acea-4f3a-913b-fc2a351d82df","Type":"ContainerStarted","Data":"7562093ead744535cf2797de58f129900d9f3bf9f9519fce745f0fd014e4865f"} Jan 22 06:05:13 crc kubenswrapper[4933]: I0122 06:05:13.204579 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/dnsmasq-dns-5dc4fcdbc-8rcfp" event={"ID":"db9370fd-7e89-47ab-8238-7d24abeb981f","Type":"ContainerStarted","Data":"8fd13567d632d5099d30b19499d649a020aef880efa93a39cddc58f7ca281d15"} Jan 22 06:05:13 crc kubenswrapper[4933]: I0122 06:05:13.205623 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5dc4fcdbc-8rcfp" Jan 22 06:05:13 crc kubenswrapper[4933]: I0122 06:05:13.223163 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5dc4fcdbc-8rcfp" podStartSLOduration=5.223148523 podStartE2EDuration="5.223148523s" podCreationTimestamp="2026-01-22 06:05:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:05:13.221683706 +0000 UTC m=+1161.058809059" watchObservedRunningTime="2026-01-22 06:05:13.223148523 +0000 UTC m=+1161.060273876" Jan 22 06:05:14 crc kubenswrapper[4933]: I0122 06:05:14.218932 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5b708132-acea-4f3a-913b-fc2a351d82df","Type":"ContainerStarted","Data":"80df65b4457a5bafa7f4af6c75524ee0a004eaf90f3e4697d105eb9b7e8936f4"} Jan 22 06:05:14 crc kubenswrapper[4933]: I0122 06:05:14.219026 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="5b708132-acea-4f3a-913b-fc2a351d82df" containerName="glance-log" containerID="cri-o://7562093ead744535cf2797de58f129900d9f3bf9f9519fce745f0fd014e4865f" gracePeriod=30 Jan 22 06:05:14 crc kubenswrapper[4933]: I0122 06:05:14.219046 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="5b708132-acea-4f3a-913b-fc2a351d82df" containerName="glance-httpd" containerID="cri-o://80df65b4457a5bafa7f4af6c75524ee0a004eaf90f3e4697d105eb9b7e8936f4" gracePeriod=30 Jan 22 06:05:14 crc kubenswrapper[4933]: I0122 06:05:14.226658 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="52975e6a-56b7-4d58-952c-feb45cd89939" containerName="glance-log" containerID="cri-o://a3460dc884d70f6bf73627a5f4d172d4b3ca649e2192b95b6b435724720b8a15" gracePeriod=30 Jan 22 06:05:14 crc kubenswrapper[4933]: I0122 06:05:14.226822 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="52975e6a-56b7-4d58-952c-feb45cd89939" containerName="glance-httpd" containerID="cri-o://9407b4f1b2e23d0f3791765c2db0211a95e2ee94beab9f227cc4f14e730d7d4b" gracePeriod=30 Jan 22 06:05:14 crc kubenswrapper[4933]: I0122 06:05:14.226894 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"52975e6a-56b7-4d58-952c-feb45cd89939","Type":"ContainerStarted","Data":"9407b4f1b2e23d0f3791765c2db0211a95e2ee94beab9f227cc4f14e730d7d4b"} Jan 22 06:05:14 crc kubenswrapper[4933]: I0122 06:05:14.249573 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=6.249557014 podStartE2EDuration="6.249557014s" podCreationTimestamp="2026-01-22 06:05:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:05:14.248694722 +0000 UTC m=+1162.085820075" watchObservedRunningTime="2026-01-22 06:05:14.249557014 
+0000 UTC m=+1162.086682367" Jan 22 06:05:14 crc kubenswrapper[4933]: I0122 06:05:14.272398 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=6.272380385 podStartE2EDuration="6.272380385s" podCreationTimestamp="2026-01-22 06:05:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:05:14.268062317 +0000 UTC m=+1162.105187690" watchObservedRunningTime="2026-01-22 06:05:14.272380385 +0000 UTC m=+1162.109505738" Jan 22 06:05:15 crc kubenswrapper[4933]: I0122 06:05:15.141765 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6cb545bd4c-r4w7m" podUID="a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.116:5353: i/o timeout" Jan 22 06:05:15 crc kubenswrapper[4933]: I0122 06:05:15.240837 4933 generic.go:334] "Generic (PLEG): container finished" podID="f20351db-9d8c-4b0e-9b28-b7628902dd80" containerID="6407112093f3e5136e3386e9fa21f2e430a1ab957e3150228ee38dbdebba2e14" exitCode=0 Jan 22 06:05:15 crc kubenswrapper[4933]: I0122 06:05:15.240914 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-pb2k9" event={"ID":"f20351db-9d8c-4b0e-9b28-b7628902dd80","Type":"ContainerDied","Data":"6407112093f3e5136e3386e9fa21f2e430a1ab957e3150228ee38dbdebba2e14"} Jan 22 06:05:15 crc kubenswrapper[4933]: I0122 06:05:15.245212 4933 generic.go:334] "Generic (PLEG): container finished" podID="52975e6a-56b7-4d58-952c-feb45cd89939" containerID="9407b4f1b2e23d0f3791765c2db0211a95e2ee94beab9f227cc4f14e730d7d4b" exitCode=0 Jan 22 06:05:15 crc kubenswrapper[4933]: I0122 06:05:15.245265 4933 generic.go:334] "Generic (PLEG): container finished" podID="52975e6a-56b7-4d58-952c-feb45cd89939" containerID="a3460dc884d70f6bf73627a5f4d172d4b3ca649e2192b95b6b435724720b8a15" exitCode=143 Jan 22 06:05:15 crc kubenswrapper[4933]: I0122 06:05:15.245283 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"52975e6a-56b7-4d58-952c-feb45cd89939","Type":"ContainerDied","Data":"9407b4f1b2e23d0f3791765c2db0211a95e2ee94beab9f227cc4f14e730d7d4b"} Jan 22 06:05:15 crc kubenswrapper[4933]: I0122 06:05:15.245309 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"52975e6a-56b7-4d58-952c-feb45cd89939","Type":"ContainerDied","Data":"a3460dc884d70f6bf73627a5f4d172d4b3ca649e2192b95b6b435724720b8a15"} Jan 22 06:05:15 crc kubenswrapper[4933]: I0122 06:05:15.247926 4933 generic.go:334] "Generic (PLEG): container finished" podID="5b708132-acea-4f3a-913b-fc2a351d82df" containerID="80df65b4457a5bafa7f4af6c75524ee0a004eaf90f3e4697d105eb9b7e8936f4" exitCode=0 Jan 22 06:05:15 crc kubenswrapper[4933]: I0122 06:05:15.247941 4933 generic.go:334] "Generic (PLEG): container finished" podID="5b708132-acea-4f3a-913b-fc2a351d82df" containerID="7562093ead744535cf2797de58f129900d9f3bf9f9519fce745f0fd014e4865f" exitCode=143 Jan 22 06:05:15 crc kubenswrapper[4933]: I0122 06:05:15.247983 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5b708132-acea-4f3a-913b-fc2a351d82df","Type":"ContainerDied","Data":"80df65b4457a5bafa7f4af6c75524ee0a004eaf90f3e4697d105eb9b7e8936f4"} Jan 22 06:05:15 crc kubenswrapper[4933]: I0122 06:05:15.248034 4933 kubelet.go:2453] "SyncLoop (PLEG): event for 
Jan 22 06:05:15 crc kubenswrapper[4933]: I0122 06:05:15.248034 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5b708132-acea-4f3a-913b-fc2a351d82df","Type":"ContainerDied","Data":"7562093ead744535cf2797de58f129900d9f3bf9f9519fce745f0fd014e4865f"}
Jan 22 06:05:19 crc kubenswrapper[4933]: I0122 06:05:19.040197 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5dc4fcdbc-8rcfp"
Jan 22 06:05:19 crc kubenswrapper[4933]: I0122 06:05:19.147561 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56c9bc6f5c-qmln7"]
Jan 22 06:05:19 crc kubenswrapper[4933]: I0122 06:05:19.147776 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-56c9bc6f5c-qmln7" podUID="6ead38e6-cb9b-4cdf-8b68-2971113c1af1" containerName="dnsmasq-dns" containerID="cri-o://f6c39fe8250cc962720265b6440d4dfb51250143572651845f0b1b49380f15da" gracePeriod=10
Jan 22 06:05:21 crc kubenswrapper[4933]: I0122 06:05:21.428147 4933 generic.go:334] "Generic (PLEG): container finished" podID="6ead38e6-cb9b-4cdf-8b68-2971113c1af1" containerID="f6c39fe8250cc962720265b6440d4dfb51250143572651845f0b1b49380f15da" exitCode=0
Jan 22 06:05:21 crc kubenswrapper[4933]: I0122 06:05:21.428247 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56c9bc6f5c-qmln7" event={"ID":"6ead38e6-cb9b-4cdf-8b68-2971113c1af1","Type":"ContainerDied","Data":"f6c39fe8250cc962720265b6440d4dfb51250143572651845f0b1b49380f15da"}
Jan 22 06:05:22 crc kubenswrapper[4933]: I0122 06:05:22.503652 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-56c9bc6f5c-qmln7" podUID="6ead38e6-cb9b-4cdf-8b68-2971113c1af1" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.137:5353: connect: connection refused"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.151545 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.211994 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5b708132-acea-4f3a-913b-fc2a351d82df-logs\") pod \"5b708132-acea-4f3a-913b-fc2a351d82df\" (UID: \"5b708132-acea-4f3a-913b-fc2a351d82df\") "
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.212173 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b708132-acea-4f3a-913b-fc2a351d82df-combined-ca-bundle\") pod \"5b708132-acea-4f3a-913b-fc2a351d82df\" (UID: \"5b708132-acea-4f3a-913b-fc2a351d82df\") "
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.212210 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b708132-acea-4f3a-913b-fc2a351d82df-config-data\") pod \"5b708132-acea-4f3a-913b-fc2a351d82df\" (UID: \"5b708132-acea-4f3a-913b-fc2a351d82df\") "
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.212331 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b708132-acea-4f3a-913b-fc2a351d82df-public-tls-certs\") pod \"5b708132-acea-4f3a-913b-fc2a351d82df\" (UID: \"5b708132-acea-4f3a-913b-fc2a351d82df\") "
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.212373 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q5tnw\" (UniqueName: \"kubernetes.io/projected/5b708132-acea-4f3a-913b-fc2a351d82df-kube-api-access-q5tnw\") pod \"5b708132-acea-4f3a-913b-fc2a351d82df\" (UID: \"5b708132-acea-4f3a-913b-fc2a351d82df\") "
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.212416 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"5b708132-acea-4f3a-913b-fc2a351d82df\" (UID: \"5b708132-acea-4f3a-913b-fc2a351d82df\") "
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.212472 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b708132-acea-4f3a-913b-fc2a351d82df-scripts\") pod \"5b708132-acea-4f3a-913b-fc2a351d82df\" (UID: \"5b708132-acea-4f3a-913b-fc2a351d82df\") "
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.212514 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5b708132-acea-4f3a-913b-fc2a351d82df-httpd-run\") pod \"5b708132-acea-4f3a-913b-fc2a351d82df\" (UID: \"5b708132-acea-4f3a-913b-fc2a351d82df\") "
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.212526 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b708132-acea-4f3a-913b-fc2a351d82df-logs" (OuterVolumeSpecName: "logs") pod "5b708132-acea-4f3a-913b-fc2a351d82df" (UID: "5b708132-acea-4f3a-913b-fc2a351d82df"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.212929 4933 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5b708132-acea-4f3a-913b-fc2a351d82df-logs\") on node \"crc\" DevicePath \"\""
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.213192 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b708132-acea-4f3a-913b-fc2a351d82df-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "5b708132-acea-4f3a-913b-fc2a351d82df" (UID: "5b708132-acea-4f3a-913b-fc2a351d82df"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.218242 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b708132-acea-4f3a-913b-fc2a351d82df-kube-api-access-q5tnw" (OuterVolumeSpecName: "kube-api-access-q5tnw") pod "5b708132-acea-4f3a-913b-fc2a351d82df" (UID: "5b708132-acea-4f3a-913b-fc2a351d82df"). InnerVolumeSpecName "kube-api-access-q5tnw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.219516 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b708132-acea-4f3a-913b-fc2a351d82df-scripts" (OuterVolumeSpecName: "scripts") pod "5b708132-acea-4f3a-913b-fc2a351d82df" (UID: "5b708132-acea-4f3a-913b-fc2a351d82df"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.226349 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "glance") pod "5b708132-acea-4f3a-913b-fc2a351d82df" (UID: "5b708132-acea-4f3a-913b-fc2a351d82df"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.237412 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b708132-acea-4f3a-913b-fc2a351d82df-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5b708132-acea-4f3a-913b-fc2a351d82df" (UID: "5b708132-acea-4f3a-913b-fc2a351d82df"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.259003 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b708132-acea-4f3a-913b-fc2a351d82df-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "5b708132-acea-4f3a-913b-fc2a351d82df" (UID: "5b708132-acea-4f3a-913b-fc2a351d82df"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
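
Each volume in the teardown above goes through the same two-step sequence: an "UnmountVolume started" line from the reconciler, then an "UnmountVolume.TearDown succeeded" line naming the same OuterVolumeSpecName. A sketch that pairs the two, useful for spotting a teardown that never completes; the patterns follow the escaped and unescaped quoting styles visible in these entries (per-pod filtering is left out for brevity), and the script is illustrative only:

```python
#!/usr/bin/env python3
"""Pair UnmountVolume started/succeeded entries from a kubelet log."""
import re
import sys

# "started" lines quote the volume name with escaped quotes (\"logs\");
# "succeeded" lines repeat it unescaped as OuterVolumeSpecName: "logs".
STARTED = re.compile(r'UnmountVolume started for volume \\"([^\\]+)\\"')
DONE = re.compile(
    r'UnmountVolume\.TearDown succeeded .*? \(OuterVolumeSpecName: "([^"]+)"\)'
)

pending = set()
with open(sys.argv[1]) as log:
    for line in log:
        if m := STARTED.search(line):
            pending.add(m.group(1))
        elif m := DONE.search(line):
            pending.discard(m.group(1))
print("volumes still pending teardown:", sorted(pending) or "none")
```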
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.261144 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b708132-acea-4f3a-913b-fc2a351d82df-config-data" (OuterVolumeSpecName: "config-data") pod "5b708132-acea-4f3a-913b-fc2a351d82df" (UID: "5b708132-acea-4f3a-913b-fc2a351d82df"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.314680 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b708132-acea-4f3a-913b-fc2a351d82df-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.315047 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b708132-acea-4f3a-913b-fc2a351d82df-config-data\") on node \"crc\" DevicePath \"\""
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.315056 4933 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b708132-acea-4f3a-913b-fc2a351d82df-public-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.315067 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q5tnw\" (UniqueName: \"kubernetes.io/projected/5b708132-acea-4f3a-913b-fc2a351d82df-kube-api-access-q5tnw\") on node \"crc\" DevicePath \"\""
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.315109 4933 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" "
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.315118 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b708132-acea-4f3a-913b-fc2a351d82df-scripts\") on node \"crc\" DevicePath \"\""
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.315127 4933 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5b708132-acea-4f3a-913b-fc2a351d82df-httpd-run\") on node \"crc\" DevicePath \"\""
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.333374 4933 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.416730 4933 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\""
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.449373 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5b708132-acea-4f3a-913b-fc2a351d82df","Type":"ContainerDied","Data":"d5aba8e6f82d48be1b9d29ce364ba9e1f2037bdb9ccfae23c64b0dc84905560f"}
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.449428 4933 scope.go:117] "RemoveContainer" containerID="80df65b4457a5bafa7f4af6c75524ee0a004eaf90f3e4697d105eb9b7e8936f4"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.449434 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.488008 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.506975 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.517014 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 22 06:05:23 crc kubenswrapper[4933]: E0122 06:05:23.517575 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b708132-acea-4f3a-913b-fc2a351d82df" containerName="glance-log"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.517603 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b708132-acea-4f3a-913b-fc2a351d82df" containerName="glance-log"
Jan 22 06:05:23 crc kubenswrapper[4933]: E0122 06:05:23.517628 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852" containerName="init"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.517637 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852" containerName="init"
Jan 22 06:05:23 crc kubenswrapper[4933]: E0122 06:05:23.517655 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9455ccf-cb44-4637-90e2-c48092ac7e20" containerName="init"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.517663 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9455ccf-cb44-4637-90e2-c48092ac7e20" containerName="init"
Jan 22 06:05:23 crc kubenswrapper[4933]: E0122 06:05:23.517677 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b708132-acea-4f3a-913b-fc2a351d82df" containerName="glance-httpd"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.517687 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b708132-acea-4f3a-913b-fc2a351d82df" containerName="glance-httpd"
Jan 22 06:05:23 crc kubenswrapper[4933]: E0122 06:05:23.517698 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852" containerName="dnsmasq-dns"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.517706 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852" containerName="dnsmasq-dns"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.517913 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9455ccf-cb44-4637-90e2-c48092ac7e20" containerName="init"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.517941 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b708132-acea-4f3a-913b-fc2a351d82df" containerName="glance-httpd"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.517961 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b708132-acea-4f3a-913b-fc2a351d82df" containerName="glance-log"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.517971 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="a411bd7e-0d4a-4a5a-8a99-3acdcb3a5852" containerName="dnsmasq-dns"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.518984 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.523887 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.524176 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.525881 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.620282 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a93245a9-0cb1-4b24-810e-ad085418a134-logs\") pod \"glance-default-external-api-0\" (UID: \"a93245a9-0cb1-4b24-810e-ad085418a134\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.620353 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a93245a9-0cb1-4b24-810e-ad085418a134-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a93245a9-0cb1-4b24-810e-ad085418a134\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.620415 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a93245a9-0cb1-4b24-810e-ad085418a134-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"a93245a9-0cb1-4b24-810e-ad085418a134\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.620442 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a93245a9-0cb1-4b24-810e-ad085418a134-scripts\") pod \"glance-default-external-api-0\" (UID: \"a93245a9-0cb1-4b24-810e-ad085418a134\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.620543 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a93245a9-0cb1-4b24-810e-ad085418a134-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a93245a9-0cb1-4b24-810e-ad085418a134\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.620629 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"a93245a9-0cb1-4b24-810e-ad085418a134\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.620951 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q8dbd\" (UniqueName: \"kubernetes.io/projected/a93245a9-0cb1-4b24-810e-ad085418a134-kube-api-access-q8dbd\") pod \"glance-default-external-api-0\" (UID: \"a93245a9-0cb1-4b24-810e-ad085418a134\") " pod="openstack/glance-default-external-api-0"
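
The DELETE/REMOVE/ADD triple and the RemoveStaleState lines above are the visible half of a pod replacement: glance-default-external-api-0 keeps its name but comes back under a new UID (a93245a9-... in place of 5b708132-...). A sketch that follows name-to-UID transitions through the reconciler lines, where both appear together; it only surfaces a transition when a full log (covering both pod generations) is given, and is illustrative only:

```python
#!/usr/bin/env python3
"""Track pod-name -> pod-UID transitions across delete/recreate cycles."""
import re
import sys

# Reconciler lines carry: pod \"<name>\" (UID: \"<uid>\")
PAT = re.compile(r'pod \\"([^\\]+)\\" \(UID: \\"([0-9a-f-]+)\\"\)')

history: dict[str, list[str]] = {}
with open(sys.argv[1]) as log:
    for line in log:
        for name, uid in PAT.findall(line):
            uids = history.setdefault(name, [])
            if uid not in uids:
                uids.append(uid)
for name, uids in history.items():
    if len(uids) > 1:                  # the pod was torn down and rebuilt
        print(name, "->", " then ".join(uids))
```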
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.620997 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a93245a9-0cb1-4b24-810e-ad085418a134-config-data\") pod \"glance-default-external-api-0\" (UID: \"a93245a9-0cb1-4b24-810e-ad085418a134\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.642408 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.722248 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52975e6a-56b7-4d58-952c-feb45cd89939-scripts\") pod \"52975e6a-56b7-4d58-952c-feb45cd89939\" (UID: \"52975e6a-56b7-4d58-952c-feb45cd89939\") "
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.722321 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/52975e6a-56b7-4d58-952c-feb45cd89939-internal-tls-certs\") pod \"52975e6a-56b7-4d58-952c-feb45cd89939\" (UID: \"52975e6a-56b7-4d58-952c-feb45cd89939\") "
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.722343 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x9r92\" (UniqueName: \"kubernetes.io/projected/52975e6a-56b7-4d58-952c-feb45cd89939-kube-api-access-x9r92\") pod \"52975e6a-56b7-4d58-952c-feb45cd89939\" (UID: \"52975e6a-56b7-4d58-952c-feb45cd89939\") "
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.722476 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/52975e6a-56b7-4d58-952c-feb45cd89939-logs\") pod \"52975e6a-56b7-4d58-952c-feb45cd89939\" (UID: \"52975e6a-56b7-4d58-952c-feb45cd89939\") "
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.722494 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"52975e6a-56b7-4d58-952c-feb45cd89939\" (UID: \"52975e6a-56b7-4d58-952c-feb45cd89939\") "
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.722525 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/52975e6a-56b7-4d58-952c-feb45cd89939-httpd-run\") pod \"52975e6a-56b7-4d58-952c-feb45cd89939\" (UID: \"52975e6a-56b7-4d58-952c-feb45cd89939\") "
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.722591 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52975e6a-56b7-4d58-952c-feb45cd89939-combined-ca-bundle\") pod \"52975e6a-56b7-4d58-952c-feb45cd89939\" (UID: \"52975e6a-56b7-4d58-952c-feb45cd89939\") "
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.722610 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52975e6a-56b7-4d58-952c-feb45cd89939-config-data\") pod \"52975e6a-56b7-4d58-952c-feb45cd89939\" (UID: \"52975e6a-56b7-4d58-952c-feb45cd89939\") "
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.722807 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a93245a9-0cb1-4b24-810e-ad085418a134-logs\") pod \"glance-default-external-api-0\" (UID: \"a93245a9-0cb1-4b24-810e-ad085418a134\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.722828 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a93245a9-0cb1-4b24-810e-ad085418a134-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a93245a9-0cb1-4b24-810e-ad085418a134\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.722861 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a93245a9-0cb1-4b24-810e-ad085418a134-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"a93245a9-0cb1-4b24-810e-ad085418a134\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.722876 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a93245a9-0cb1-4b24-810e-ad085418a134-scripts\") pod \"glance-default-external-api-0\" (UID: \"a93245a9-0cb1-4b24-810e-ad085418a134\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.722922 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a93245a9-0cb1-4b24-810e-ad085418a134-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a93245a9-0cb1-4b24-810e-ad085418a134\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.722956 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"a93245a9-0cb1-4b24-810e-ad085418a134\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.722991 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q8dbd\" (UniqueName: \"kubernetes.io/projected/a93245a9-0cb1-4b24-810e-ad085418a134-kube-api-access-q8dbd\") pod \"glance-default-external-api-0\" (UID: \"a93245a9-0cb1-4b24-810e-ad085418a134\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.723007 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a93245a9-0cb1-4b24-810e-ad085418a134-config-data\") pod \"glance-default-external-api-0\" (UID: \"a93245a9-0cb1-4b24-810e-ad085418a134\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.723645 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a93245a9-0cb1-4b24-810e-ad085418a134-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a93245a9-0cb1-4b24-810e-ad085418a134\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.724749 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/52975e6a-56b7-4d58-952c-feb45cd89939-logs" (OuterVolumeSpecName: "logs") pod "52975e6a-56b7-4d58-952c-feb45cd89939" (UID: "52975e6a-56b7-4d58-952c-feb45cd89939"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.727254 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "glance") pod "52975e6a-56b7-4d58-952c-feb45cd89939" (UID: "52975e6a-56b7-4d58-952c-feb45cd89939"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.727731 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a93245a9-0cb1-4b24-810e-ad085418a134-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"a93245a9-0cb1-4b24-810e-ad085418a134\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.727833 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52975e6a-56b7-4d58-952c-feb45cd89939-scripts" (OuterVolumeSpecName: "scripts") pod "52975e6a-56b7-4d58-952c-feb45cd89939" (UID: "52975e6a-56b7-4d58-952c-feb45cd89939"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.732492 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52975e6a-56b7-4d58-952c-feb45cd89939-kube-api-access-x9r92" (OuterVolumeSpecName: "kube-api-access-x9r92") pod "52975e6a-56b7-4d58-952c-feb45cd89939" (UID: "52975e6a-56b7-4d58-952c-feb45cd89939"). InnerVolumeSpecName "kube-api-access-x9r92". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.733250 4933 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"a93245a9-0cb1-4b24-810e-ad085418a134\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/glance-default-external-api-0"
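
While the old pod's volumes are still being torn down, the reconciler is already mounting the same volume set for the replacement UID; every "MountVolume started" should eventually be answered by a "MountVolume.SetUp succeeded" for the same volume name. A sketch that checks this for one pod UID (the UID below is the new external-api pod from this log; the script is illustrative):

```python
#!/usr/bin/env python3
"""Check MountVolume started vs. SetUp succeeded for one pod UID."""
import re
import sys

UID = "a93245a9-0cb1-4b24-810e-ad085418a134"   # new glance-default-external-api-0
NAME = re.compile(r'for volume \\"([^\\]+)\\"')

started, done = set(), set()
with open(sys.argv[1]) as log:
    for line in log:
        if UID not in line:
            continue
        if "MountVolume started" in line and (m := NAME.search(line)):
            started.add(m.group(1))
        elif "MountVolume.SetUp succeeded" in line and (m := NAME.search(line)):
            done.add(m.group(1))
print("mounted OK:", sorted(done))
print("still missing:", sorted(started - done) or "none")
```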
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.740599 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/52975e6a-56b7-4d58-952c-feb45cd89939-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "52975e6a-56b7-4d58-952c-feb45cd89939" (UID: "52975e6a-56b7-4d58-952c-feb45cd89939"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.742652 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a93245a9-0cb1-4b24-810e-ad085418a134-logs\") pod \"glance-default-external-api-0\" (UID: \"a93245a9-0cb1-4b24-810e-ad085418a134\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.751136 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a93245a9-0cb1-4b24-810e-ad085418a134-scripts\") pod \"glance-default-external-api-0\" (UID: \"a93245a9-0cb1-4b24-810e-ad085418a134\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.751851 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a93245a9-0cb1-4b24-810e-ad085418a134-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a93245a9-0cb1-4b24-810e-ad085418a134\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.759284 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a93245a9-0cb1-4b24-810e-ad085418a134-config-data\") pod \"glance-default-external-api-0\" (UID: \"a93245a9-0cb1-4b24-810e-ad085418a134\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.764270 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q8dbd\" (UniqueName: \"kubernetes.io/projected/a93245a9-0cb1-4b24-810e-ad085418a134-kube-api-access-q8dbd\") pod \"glance-default-external-api-0\" (UID: \"a93245a9-0cb1-4b24-810e-ad085418a134\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.773813 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"a93245a9-0cb1-4b24-810e-ad085418a134\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.782500 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52975e6a-56b7-4d58-952c-feb45cd89939-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "52975e6a-56b7-4d58-952c-feb45cd89939" (UID: "52975e6a-56b7-4d58-952c-feb45cd89939"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.808730 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52975e6a-56b7-4d58-952c-feb45cd89939-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "52975e6a-56b7-4d58-952c-feb45cd89939" (UID: "52975e6a-56b7-4d58-952c-feb45cd89939"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.809300 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52975e6a-56b7-4d58-952c-feb45cd89939-config-data" (OuterVolumeSpecName: "config-data") pod "52975e6a-56b7-4d58-952c-feb45cd89939" (UID: "52975e6a-56b7-4d58-952c-feb45cd89939"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.824938 4933 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/52975e6a-56b7-4d58-952c-feb45cd89939-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.825026 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x9r92\" (UniqueName: \"kubernetes.io/projected/52975e6a-56b7-4d58-952c-feb45cd89939-kube-api-access-x9r92\") on node \"crc\" DevicePath \"\""
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.825040 4933 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/52975e6a-56b7-4d58-952c-feb45cd89939-logs\") on node \"crc\" DevicePath \"\""
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.825084 4933 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" "
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.825093 4933 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/52975e6a-56b7-4d58-952c-feb45cd89939-httpd-run\") on node \"crc\" DevicePath \"\""
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.825101 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52975e6a-56b7-4d58-952c-feb45cd89939-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.825109 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52975e6a-56b7-4d58-952c-feb45cd89939-config-data\") on node \"crc\" DevicePath \"\""
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.825116 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52975e6a-56b7-4d58-952c-feb45cd89939-scripts\") on node \"crc\" DevicePath \"\""
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.844445 4933 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.847115 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Jan 22 06:05:23 crc kubenswrapper[4933]: I0122 06:05:23.926642 4933 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\""
Jan 22 06:05:24 crc kubenswrapper[4933]: I0122 06:05:24.466118 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"52975e6a-56b7-4d58-952c-feb45cd89939","Type":"ContainerDied","Data":"5f821a7e3e50411fcde57ef38b02fb5d307651d639632ec4ebbd981b7e7da84b"}
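
For the local-volume PVs backing the Glance data (local-storage06-crc and local-storage08-crc) there is a third stage after the per-volume teardown: "UnmountDevice started", "UnmountDevice succeeded", and a final "Volume detached" record, all visible above. A sketch that confirms each PV reaches all three states; the string matches follow the entries above and the script is illustrative:

```python
#!/usr/bin/env python3
"""Verify local-storage PVs complete UnmountDevice and detach."""
import re
import sys

STAGES = ("UnmountDevice started", "UnmountDevice succeeded", "Volume detached")
PV = re.compile(r'local-storage\d+-crc')

seen: dict[str, set[str]] = {}
with open(sys.argv[1]) as log:
    for line in log:
        pv = PV.search(line)
        if not pv:
            continue
        for stage in STAGES:
            if stage in line:
                seen.setdefault(pv.group(), set()).add(stage)
for pv, stages in sorted(seen.items()):
    ok = len(stages) == len(STAGES)
    print(pv, "complete" if ok else f"partial: {sorted(stages)}")
```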
Jan 22 06:05:24 crc kubenswrapper[4933]: I0122 06:05:24.466219 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 22 06:05:24 crc kubenswrapper[4933]: I0122 06:05:24.531676 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b708132-acea-4f3a-913b-fc2a351d82df" path="/var/lib/kubelet/pods/5b708132-acea-4f3a-913b-fc2a351d82df/volumes"
Jan 22 06:05:24 crc kubenswrapper[4933]: I0122 06:05:24.532837 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 22 06:05:24 crc kubenswrapper[4933]: I0122 06:05:24.532862 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 22 06:05:24 crc kubenswrapper[4933]: I0122 06:05:24.539182 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 22 06:05:24 crc kubenswrapper[4933]: E0122 06:05:24.539549 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52975e6a-56b7-4d58-952c-feb45cd89939" containerName="glance-log"
Jan 22 06:05:24 crc kubenswrapper[4933]: I0122 06:05:24.539569 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="52975e6a-56b7-4d58-952c-feb45cd89939" containerName="glance-log"
Jan 22 06:05:24 crc kubenswrapper[4933]: E0122 06:05:24.539588 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52975e6a-56b7-4d58-952c-feb45cd89939" containerName="glance-httpd"
Jan 22 06:05:24 crc kubenswrapper[4933]: I0122 06:05:24.539595 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="52975e6a-56b7-4d58-952c-feb45cd89939" containerName="glance-httpd"
Jan 22 06:05:24 crc kubenswrapper[4933]: I0122 06:05:24.539763 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="52975e6a-56b7-4d58-952c-feb45cd89939" containerName="glance-httpd"
Jan 22 06:05:24 crc kubenswrapper[4933]: I0122 06:05:24.539783 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="52975e6a-56b7-4d58-952c-feb45cd89939" containerName="glance-log"
Jan 22 06:05:24 crc kubenswrapper[4933]: I0122 06:05:24.540685 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 22 06:05:24 crc kubenswrapper[4933]: I0122 06:05:24.546613 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Jan 22 06:05:24 crc kubenswrapper[4933]: I0122 06:05:24.547070 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc"
Jan 22 06:05:24 crc kubenswrapper[4933]: I0122 06:05:24.554367 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 22 06:05:24 crc kubenswrapper[4933]: I0122 06:05:24.639263 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f332d598-c2fa-4ed1-9b08-9245f85185af-config-data\") pod \"glance-default-internal-api-0\" (UID: \"f332d598-c2fa-4ed1-9b08-9245f85185af\") " pod="openstack/glance-default-internal-api-0"
Jan 22 06:05:24 crc kubenswrapper[4933]: I0122 06:05:24.639329 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z2x7z\" (UniqueName: \"kubernetes.io/projected/f332d598-c2fa-4ed1-9b08-9245f85185af-kube-api-access-z2x7z\") pod \"glance-default-internal-api-0\" (UID: \"f332d598-c2fa-4ed1-9b08-9245f85185af\") " pod="openstack/glance-default-internal-api-0"
Jan 22 06:05:24 crc kubenswrapper[4933]: I0122 06:05:24.639378 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f332d598-c2fa-4ed1-9b08-9245f85185af-logs\") pod \"glance-default-internal-api-0\" (UID: \"f332d598-c2fa-4ed1-9b08-9245f85185af\") " pod="openstack/glance-default-internal-api-0"
Jan 22 06:05:24 crc kubenswrapper[4933]: I0122 06:05:24.639434 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f332d598-c2fa-4ed1-9b08-9245f85185af-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"f332d598-c2fa-4ed1-9b08-9245f85185af\") " pod="openstack/glance-default-internal-api-0"
Jan 22 06:05:24 crc kubenswrapper[4933]: I0122 06:05:24.639458 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f332d598-c2fa-4ed1-9b08-9245f85185af-scripts\") pod \"glance-default-internal-api-0\" (UID: \"f332d598-c2fa-4ed1-9b08-9245f85185af\") " pod="openstack/glance-default-internal-api-0"
Jan 22 06:05:24 crc kubenswrapper[4933]: I0122 06:05:24.639482 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f332d598-c2fa-4ed1-9b08-9245f85185af-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"f332d598-c2fa-4ed1-9b08-9245f85185af\") " pod="openstack/glance-default-internal-api-0"
Jan 22 06:05:24 crc kubenswrapper[4933]: I0122 06:05:24.639509 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f332d598-c2fa-4ed1-9b08-9245f85185af-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"f332d598-c2fa-4ed1-9b08-9245f85185af\") " pod="openstack/glance-default-internal-api-0"
Jan 22 06:05:24 crc kubenswrapper[4933]: I0122 06:05:24.639565 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"f332d598-c2fa-4ed1-9b08-9245f85185af\") " pod="openstack/glance-default-internal-api-0"
Jan 22 06:05:24 crc kubenswrapper[4933]: I0122 06:05:24.740980 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f332d598-c2fa-4ed1-9b08-9245f85185af-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"f332d598-c2fa-4ed1-9b08-9245f85185af\") " pod="openstack/glance-default-internal-api-0"
Jan 22 06:05:24 crc kubenswrapper[4933]: I0122 06:05:24.741303 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"f332d598-c2fa-4ed1-9b08-9245f85185af\") " pod="openstack/glance-default-internal-api-0"
Jan 22 06:05:24 crc kubenswrapper[4933]: I0122 06:05:24.741347 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f332d598-c2fa-4ed1-9b08-9245f85185af-config-data\") pod \"glance-default-internal-api-0\" (UID: \"f332d598-c2fa-4ed1-9b08-9245f85185af\") " pod="openstack/glance-default-internal-api-0"
Jan 22 06:05:24 crc kubenswrapper[4933]: I0122 06:05:24.741379 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z2x7z\" (UniqueName: \"kubernetes.io/projected/f332d598-c2fa-4ed1-9b08-9245f85185af-kube-api-access-z2x7z\") pod \"glance-default-internal-api-0\" (UID: \"f332d598-c2fa-4ed1-9b08-9245f85185af\") " pod="openstack/glance-default-internal-api-0"
Jan 22 06:05:24 crc kubenswrapper[4933]: I0122 06:05:24.741411 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f332d598-c2fa-4ed1-9b08-9245f85185af-logs\") pod \"glance-default-internal-api-0\" (UID: \"f332d598-c2fa-4ed1-9b08-9245f85185af\") " pod="openstack/glance-default-internal-api-0"
Jan 22 06:05:24 crc kubenswrapper[4933]: I0122 06:05:24.741455 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f332d598-c2fa-4ed1-9b08-9245f85185af-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"f332d598-c2fa-4ed1-9b08-9245f85185af\") " pod="openstack/glance-default-internal-api-0"
Jan 22 06:05:24 crc kubenswrapper[4933]: I0122 06:05:24.741472 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f332d598-c2fa-4ed1-9b08-9245f85185af-scripts\") pod \"glance-default-internal-api-0\" (UID: \"f332d598-c2fa-4ed1-9b08-9245f85185af\") " pod="openstack/glance-default-internal-api-0"
Jan 22 06:05:24 crc kubenswrapper[4933]: I0122 06:05:24.741489 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f332d598-c2fa-4ed1-9b08-9245f85185af-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"f332d598-c2fa-4ed1-9b08-9245f85185af\") " pod="openstack/glance-default-internal-api-0"
Jan 22 06:05:24 crc kubenswrapper[4933]: I0122 06:05:24.742059 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f332d598-c2fa-4ed1-9b08-9245f85185af-logs\") pod \"glance-default-internal-api-0\" (UID: \"f332d598-c2fa-4ed1-9b08-9245f85185af\") " pod="openstack/glance-default-internal-api-0"
Jan 22 06:05:24 crc kubenswrapper[4933]: I0122 06:05:24.742109 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f332d598-c2fa-4ed1-9b08-9245f85185af-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"f332d598-c2fa-4ed1-9b08-9245f85185af\") " pod="openstack/glance-default-internal-api-0"
Jan 22 06:05:24 crc kubenswrapper[4933]: I0122 06:05:24.742112 4933 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"f332d598-c2fa-4ed1-9b08-9245f85185af\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-internal-api-0"
Jan 22 06:05:24 crc kubenswrapper[4933]: I0122 06:05:24.748004 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f332d598-c2fa-4ed1-9b08-9245f85185af-scripts\") pod \"glance-default-internal-api-0\" (UID: \"f332d598-c2fa-4ed1-9b08-9245f85185af\") " pod="openstack/glance-default-internal-api-0"
Jan 22 06:05:24 crc kubenswrapper[4933]: I0122 06:05:24.748116 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f332d598-c2fa-4ed1-9b08-9245f85185af-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"f332d598-c2fa-4ed1-9b08-9245f85185af\") " pod="openstack/glance-default-internal-api-0"
Jan 22 06:05:24 crc kubenswrapper[4933]: I0122 06:05:24.748292 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f332d598-c2fa-4ed1-9b08-9245f85185af-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"f332d598-c2fa-4ed1-9b08-9245f85185af\") " pod="openstack/glance-default-internal-api-0"
Jan 22 06:05:24 crc kubenswrapper[4933]: I0122 06:05:24.748601 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f332d598-c2fa-4ed1-9b08-9245f85185af-config-data\") pod \"glance-default-internal-api-0\" (UID: \"f332d598-c2fa-4ed1-9b08-9245f85185af\") " pod="openstack/glance-default-internal-api-0"
Jan 22 06:05:24 crc kubenswrapper[4933]: I0122 06:05:24.764550 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z2x7z\" (UniqueName: \"kubernetes.io/projected/f332d598-c2fa-4ed1-9b08-9245f85185af-kube-api-access-z2x7z\") pod \"glance-default-internal-api-0\" (UID: \"f332d598-c2fa-4ed1-9b08-9245f85185af\") " pod="openstack/glance-default-internal-api-0"
Jan 22 06:05:24 crc kubenswrapper[4933]: I0122 06:05:24.767933 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"f332d598-c2fa-4ed1-9b08-9245f85185af\") " pod="openstack/glance-default-internal-api-0"
Jan 22 06:05:24 crc kubenswrapper[4933]: I0122 06:05:24.866584 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 22 06:05:26 crc kubenswrapper[4933]: I0122 06:05:26.503403 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="52975e6a-56b7-4d58-952c-feb45cd89939" path="/var/lib/kubelet/pods/52975e6a-56b7-4d58-952c-feb45cd89939/volumes"
Jan 22 06:05:27 crc kubenswrapper[4933]: I0122 06:05:27.503448 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-56c9bc6f5c-qmln7" podUID="6ead38e6-cb9b-4cdf-8b68-2971113c1af1" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.137:5353: connect: connection refused"
Jan 22 06:05:31 crc kubenswrapper[4933]: I0122 06:05:31.156594 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-pb2k9"
Jan 22 06:05:31 crc kubenswrapper[4933]: I0122 06:05:31.262301 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x5nd4\" (UniqueName: \"kubernetes.io/projected/f20351db-9d8c-4b0e-9b28-b7628902dd80-kube-api-access-x5nd4\") pod \"f20351db-9d8c-4b0e-9b28-b7628902dd80\" (UID: \"f20351db-9d8c-4b0e-9b28-b7628902dd80\") "
Jan 22 06:05:31 crc kubenswrapper[4933]: I0122 06:05:31.262442 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f20351db-9d8c-4b0e-9b28-b7628902dd80-scripts\") pod \"f20351db-9d8c-4b0e-9b28-b7628902dd80\" (UID: \"f20351db-9d8c-4b0e-9b28-b7628902dd80\") "
Jan 22 06:05:31 crc kubenswrapper[4933]: I0122 06:05:31.262561 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f20351db-9d8c-4b0e-9b28-b7628902dd80-config-data\") pod \"f20351db-9d8c-4b0e-9b28-b7628902dd80\" (UID: \"f20351db-9d8c-4b0e-9b28-b7628902dd80\") "
Jan 22 06:05:31 crc kubenswrapper[4933]: I0122 06:05:31.262602 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f20351db-9d8c-4b0e-9b28-b7628902dd80-combined-ca-bundle\") pod \"f20351db-9d8c-4b0e-9b28-b7628902dd80\" (UID: \"f20351db-9d8c-4b0e-9b28-b7628902dd80\") "
Jan 22 06:05:31 crc kubenswrapper[4933]: I0122 06:05:31.262688 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f20351db-9d8c-4b0e-9b28-b7628902dd80-fernet-keys\") pod \"f20351db-9d8c-4b0e-9b28-b7628902dd80\" (UID: \"f20351db-9d8c-4b0e-9b28-b7628902dd80\") "
Jan 22 06:05:31 crc kubenswrapper[4933]: I0122 06:05:31.262753 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/f20351db-9d8c-4b0e-9b28-b7628902dd80-credential-keys\") pod \"f20351db-9d8c-4b0e-9b28-b7628902dd80\" (UID: \"f20351db-9d8c-4b0e-9b28-b7628902dd80\") "
Jan 22 06:05:31 crc kubenswrapper[4933]: I0122 06:05:31.269267 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f20351db-9d8c-4b0e-9b28-b7628902dd80-scripts" (OuterVolumeSpecName: "scripts") pod "f20351db-9d8c-4b0e-9b28-b7628902dd80" (UID: "f20351db-9d8c-4b0e-9b28-b7628902dd80"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:05:31 crc kubenswrapper[4933]: I0122 06:05:31.269527 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f20351db-9d8c-4b0e-9b28-b7628902dd80-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "f20351db-9d8c-4b0e-9b28-b7628902dd80" (UID: "f20351db-9d8c-4b0e-9b28-b7628902dd80"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:05:31 crc kubenswrapper[4933]: I0122 06:05:31.270190 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f20351db-9d8c-4b0e-9b28-b7628902dd80-kube-api-access-x5nd4" (OuterVolumeSpecName: "kube-api-access-x5nd4") pod "f20351db-9d8c-4b0e-9b28-b7628902dd80" (UID: "f20351db-9d8c-4b0e-9b28-b7628902dd80"). InnerVolumeSpecName "kube-api-access-x5nd4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 06:05:31 crc kubenswrapper[4933]: I0122 06:05:31.270596 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f20351db-9d8c-4b0e-9b28-b7628902dd80-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "f20351db-9d8c-4b0e-9b28-b7628902dd80" (UID: "f20351db-9d8c-4b0e-9b28-b7628902dd80"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:05:31 crc kubenswrapper[4933]: I0122 06:05:31.293016 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f20351db-9d8c-4b0e-9b28-b7628902dd80-config-data" (OuterVolumeSpecName: "config-data") pod "f20351db-9d8c-4b0e-9b28-b7628902dd80" (UID: "f20351db-9d8c-4b0e-9b28-b7628902dd80"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:05:31 crc kubenswrapper[4933]: I0122 06:05:31.293989 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f20351db-9d8c-4b0e-9b28-b7628902dd80-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f20351db-9d8c-4b0e-9b28-b7628902dd80" (UID: "f20351db-9d8c-4b0e-9b28-b7628902dd80"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:05:31 crc kubenswrapper[4933]: I0122 06:05:31.364453 4933 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/f20351db-9d8c-4b0e-9b28-b7628902dd80-credential-keys\") on node \"crc\" DevicePath \"\""
Jan 22 06:05:31 crc kubenswrapper[4933]: I0122 06:05:31.364499 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x5nd4\" (UniqueName: \"kubernetes.io/projected/f20351db-9d8c-4b0e-9b28-b7628902dd80-kube-api-access-x5nd4\") on node \"crc\" DevicePath \"\""
Jan 22 06:05:31 crc kubenswrapper[4933]: I0122 06:05:31.364512 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f20351db-9d8c-4b0e-9b28-b7628902dd80-scripts\") on node \"crc\" DevicePath \"\""
Jan 22 06:05:31 crc kubenswrapper[4933]: I0122 06:05:31.364523 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f20351db-9d8c-4b0e-9b28-b7628902dd80-config-data\") on node \"crc\" DevicePath \"\""
Jan 22 06:05:31 crc kubenswrapper[4933]: I0122 06:05:31.364534 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f20351db-9d8c-4b0e-9b28-b7628902dd80-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 22 06:05:31 crc kubenswrapper[4933]: I0122 06:05:31.364545 4933 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f20351db-9d8c-4b0e-9b28-b7628902dd80-fernet-keys\") on node \"crc\" DevicePath \"\""
Jan 22 06:05:31 crc kubenswrapper[4933]: I0122 06:05:31.524924 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-pb2k9" event={"ID":"f20351db-9d8c-4b0e-9b28-b7628902dd80","Type":"ContainerDied","Data":"823a8a532f5738dd04885d2a930e51cd50496e8a60ddf70f13562ca23c38e076"}
Jan 22 06:05:31 crc kubenswrapper[4933]: I0122 06:05:31.524961 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="823a8a532f5738dd04885d2a930e51cd50496e8a60ddf70f13562ca23c38e076"
Jan 22 06:05:31 crc kubenswrapper[4933]: I0122 06:05:31.525012 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-pb2k9"
Jan 22 06:05:31 crc kubenswrapper[4933]: E0122 06:05:31.588975 4933 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api@sha256:fe32d3ea620f0c7ecfdde9bbf28417fde03bc18c6f60b1408fa8da24d8188f16"
Jan 22 06:05:31 crc kubenswrapper[4933]: E0122 06:05:31.589188 4933 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api@sha256:fe32d3ea620f0c7ecfdde9bbf28417fde03bc18c6f60b1408fa8da24d8188f16,Command:[/bin/bash],Args:[-c barbican-manage db upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9n4lp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-gwqkb_openstack(c6ae84cf-ec9a-42e4-9c55-035d9accb4b2): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 22 06:05:31 crc kubenswrapper[4933]: E0122 06:05:31.590427 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-gwqkb" podUID="c6ae84cf-ec9a-42e4-9c55-035d9accb4b2"
Jan 22 06:05:32 crc kubenswrapper[4933]: I0122 06:05:32.246155 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-pb2k9"]
Jan 22 06:05:32 crc kubenswrapper[4933]: I0122 06:05:32.253240 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-pb2k9"]
Jan 22 06:05:32 crc kubenswrapper[4933]: I0122 06:05:32.343921 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-bvb58"]
Jan 22 06:05:32 crc kubenswrapper[4933]: E0122 06:05:32.344338 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f20351db-9d8c-4b0e-9b28-b7628902dd80" containerName="keystone-bootstrap"
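
The three E-level entries above are one failure seen from three layers: the CRI pull is cancelled (log.go:32), kuberuntime_manager dumps the whole container spec as an "Unhandled Error", and pod_workers records the ErrImagePull that shortly afterwards becomes ImagePullBackOff. A sketch for extracting which image references are failing and how often; the registry prefix matches the entries above and the script is illustrative:

```python
#!/usr/bin/env python3
"""Count image-pull failures per image reference in a kubelet log."""
import re
import sys
from collections import Counter

IMG = re.compile(r'quay\.io/[^\s"\\]+')   # image refs as they appear above

fails = Counter()
with open(sys.argv[1]) as log:
    for line in log:
        if "ErrImagePull" in line or "ImagePullBackOff" in line:
            fails.update(IMG.findall(line))
for image, count in fails.most_common():
    print(count, image)
```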
assignment" podUID="f20351db-9d8c-4b0e-9b28-b7628902dd80" containerName="keystone-bootstrap" Jan 22 06:05:32 crc kubenswrapper[4933]: I0122 06:05:32.344507 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="f20351db-9d8c-4b0e-9b28-b7628902dd80" containerName="keystone-bootstrap" Jan 22 06:05:32 crc kubenswrapper[4933]: I0122 06:05:32.345025 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-bvb58" Jan 22 06:05:32 crc kubenswrapper[4933]: I0122 06:05:32.351341 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-bvb58"] Jan 22 06:05:32 crc kubenswrapper[4933]: I0122 06:05:32.377435 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-vftwj" Jan 22 06:05:32 crc kubenswrapper[4933]: I0122 06:05:32.378229 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 22 06:05:32 crc kubenswrapper[4933]: I0122 06:05:32.378393 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 22 06:05:32 crc kubenswrapper[4933]: I0122 06:05:32.378629 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 22 06:05:32 crc kubenswrapper[4933]: I0122 06:05:32.378655 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 22 06:05:32 crc kubenswrapper[4933]: I0122 06:05:32.499332 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1b555a2a-0c59-4b4c-901e-6b160e5d57a1-scripts\") pod \"keystone-bootstrap-bvb58\" (UID: \"1b555a2a-0c59-4b4c-901e-6b160e5d57a1\") " pod="openstack/keystone-bootstrap-bvb58" Jan 22 06:05:32 crc kubenswrapper[4933]: I0122 06:05:32.499417 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/1b555a2a-0c59-4b4c-901e-6b160e5d57a1-credential-keys\") pod \"keystone-bootstrap-bvb58\" (UID: \"1b555a2a-0c59-4b4c-901e-6b160e5d57a1\") " pod="openstack/keystone-bootstrap-bvb58" Jan 22 06:05:32 crc kubenswrapper[4933]: I0122 06:05:32.499480 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ttbj\" (UniqueName: \"kubernetes.io/projected/1b555a2a-0c59-4b4c-901e-6b160e5d57a1-kube-api-access-5ttbj\") pod \"keystone-bootstrap-bvb58\" (UID: \"1b555a2a-0c59-4b4c-901e-6b160e5d57a1\") " pod="openstack/keystone-bootstrap-bvb58" Jan 22 06:05:32 crc kubenswrapper[4933]: I0122 06:05:32.499509 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b555a2a-0c59-4b4c-901e-6b160e5d57a1-config-data\") pod \"keystone-bootstrap-bvb58\" (UID: \"1b555a2a-0c59-4b4c-901e-6b160e5d57a1\") " pod="openstack/keystone-bootstrap-bvb58" Jan 22 06:05:32 crc kubenswrapper[4933]: I0122 06:05:32.499545 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b555a2a-0c59-4b4c-901e-6b160e5d57a1-combined-ca-bundle\") pod \"keystone-bootstrap-bvb58\" (UID: \"1b555a2a-0c59-4b4c-901e-6b160e5d57a1\") " pod="openstack/keystone-bootstrap-bvb58" Jan 22 06:05:32 crc kubenswrapper[4933]: I0122 06:05:32.499577 4933 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1b555a2a-0c59-4b4c-901e-6b160e5d57a1-fernet-keys\") pod \"keystone-bootstrap-bvb58\" (UID: \"1b555a2a-0c59-4b4c-901e-6b160e5d57a1\") " pod="openstack/keystone-bootstrap-bvb58" Jan 22 06:05:32 crc kubenswrapper[4933]: I0122 06:05:32.520953 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f20351db-9d8c-4b0e-9b28-b7628902dd80" path="/var/lib/kubelet/pods/f20351db-9d8c-4b0e-9b28-b7628902dd80/volumes" Jan 22 06:05:32 crc kubenswrapper[4933]: I0122 06:05:32.533749 4933 generic.go:334] "Generic (PLEG): container finished" podID="112a4ef5-b86f-4258-84db-bef5e66f9674" containerID="2925016a4526e711c7a13ca9b27ae95ab14072a5756f21964d4b6cb4c8838060" exitCode=0 Jan 22 06:05:32 crc kubenswrapper[4933]: I0122 06:05:32.534423 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-zczgn" event={"ID":"112a4ef5-b86f-4258-84db-bef5e66f9674","Type":"ContainerDied","Data":"2925016a4526e711c7a13ca9b27ae95ab14072a5756f21964d4b6cb4c8838060"} Jan 22 06:05:32 crc kubenswrapper[4933]: E0122 06:05:32.535284 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api@sha256:fe32d3ea620f0c7ecfdde9bbf28417fde03bc18c6f60b1408fa8da24d8188f16\\\"\"" pod="openstack/barbican-db-sync-gwqkb" podUID="c6ae84cf-ec9a-42e4-9c55-035d9accb4b2" Jan 22 06:05:32 crc kubenswrapper[4933]: I0122 06:05:32.601057 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1b555a2a-0c59-4b4c-901e-6b160e5d57a1-scripts\") pod \"keystone-bootstrap-bvb58\" (UID: \"1b555a2a-0c59-4b4c-901e-6b160e5d57a1\") " pod="openstack/keystone-bootstrap-bvb58" Jan 22 06:05:32 crc kubenswrapper[4933]: I0122 06:05:32.601170 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/1b555a2a-0c59-4b4c-901e-6b160e5d57a1-credential-keys\") pod \"keystone-bootstrap-bvb58\" (UID: \"1b555a2a-0c59-4b4c-901e-6b160e5d57a1\") " pod="openstack/keystone-bootstrap-bvb58" Jan 22 06:05:32 crc kubenswrapper[4933]: I0122 06:05:32.601275 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5ttbj\" (UniqueName: \"kubernetes.io/projected/1b555a2a-0c59-4b4c-901e-6b160e5d57a1-kube-api-access-5ttbj\") pod \"keystone-bootstrap-bvb58\" (UID: \"1b555a2a-0c59-4b4c-901e-6b160e5d57a1\") " pod="openstack/keystone-bootstrap-bvb58" Jan 22 06:05:32 crc kubenswrapper[4933]: I0122 06:05:32.601302 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b555a2a-0c59-4b4c-901e-6b160e5d57a1-config-data\") pod \"keystone-bootstrap-bvb58\" (UID: \"1b555a2a-0c59-4b4c-901e-6b160e5d57a1\") " pod="openstack/keystone-bootstrap-bvb58" Jan 22 06:05:32 crc kubenswrapper[4933]: I0122 06:05:32.601347 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b555a2a-0c59-4b4c-901e-6b160e5d57a1-combined-ca-bundle\") pod \"keystone-bootstrap-bvb58\" (UID: \"1b555a2a-0c59-4b4c-901e-6b160e5d57a1\") " pod="openstack/keystone-bootstrap-bvb58" Jan 22 06:05:32 crc kubenswrapper[4933]: I0122 06:05:32.601439 4933 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1b555a2a-0c59-4b4c-901e-6b160e5d57a1-fernet-keys\") pod \"keystone-bootstrap-bvb58\" (UID: \"1b555a2a-0c59-4b4c-901e-6b160e5d57a1\") " pod="openstack/keystone-bootstrap-bvb58" Jan 22 06:05:32 crc kubenswrapper[4933]: I0122 06:05:32.605931 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b555a2a-0c59-4b4c-901e-6b160e5d57a1-combined-ca-bundle\") pod \"keystone-bootstrap-bvb58\" (UID: \"1b555a2a-0c59-4b4c-901e-6b160e5d57a1\") " pod="openstack/keystone-bootstrap-bvb58" Jan 22 06:05:32 crc kubenswrapper[4933]: I0122 06:05:32.606382 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b555a2a-0c59-4b4c-901e-6b160e5d57a1-config-data\") pod \"keystone-bootstrap-bvb58\" (UID: \"1b555a2a-0c59-4b4c-901e-6b160e5d57a1\") " pod="openstack/keystone-bootstrap-bvb58" Jan 22 06:05:32 crc kubenswrapper[4933]: I0122 06:05:32.607794 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/1b555a2a-0c59-4b4c-901e-6b160e5d57a1-credential-keys\") pod \"keystone-bootstrap-bvb58\" (UID: \"1b555a2a-0c59-4b4c-901e-6b160e5d57a1\") " pod="openstack/keystone-bootstrap-bvb58" Jan 22 06:05:32 crc kubenswrapper[4933]: I0122 06:05:32.607990 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1b555a2a-0c59-4b4c-901e-6b160e5d57a1-fernet-keys\") pod \"keystone-bootstrap-bvb58\" (UID: \"1b555a2a-0c59-4b4c-901e-6b160e5d57a1\") " pod="openstack/keystone-bootstrap-bvb58" Jan 22 06:05:32 crc kubenswrapper[4933]: I0122 06:05:32.613377 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1b555a2a-0c59-4b4c-901e-6b160e5d57a1-scripts\") pod \"keystone-bootstrap-bvb58\" (UID: \"1b555a2a-0c59-4b4c-901e-6b160e5d57a1\") " pod="openstack/keystone-bootstrap-bvb58" Jan 22 06:05:32 crc kubenswrapper[4933]: I0122 06:05:32.615850 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ttbj\" (UniqueName: \"kubernetes.io/projected/1b555a2a-0c59-4b4c-901e-6b160e5d57a1-kube-api-access-5ttbj\") pod \"keystone-bootstrap-bvb58\" (UID: \"1b555a2a-0c59-4b4c-901e-6b160e5d57a1\") " pod="openstack/keystone-bootstrap-bvb58" Jan 22 06:05:32 crc kubenswrapper[4933]: I0122 06:05:32.721410 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-bvb58" Jan 22 06:05:32 crc kubenswrapper[4933]: I0122 06:05:32.748759 4933 scope.go:117] "RemoveContainer" containerID="7562093ead744535cf2797de58f129900d9f3bf9f9519fce745f0fd014e4865f" Jan 22 06:05:32 crc kubenswrapper[4933]: E0122 06:05:32.781925 4933 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:b59b7445e581cc720038107e421371c86c5765b2967e77d884ef29b1d9fd0f49" Jan 22 06:05:32 crc kubenswrapper[4933]: E0122 06:05:32.782104 4933 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:b59b7445e581cc720038107e421371c86c5765b2967e77d884ef29b1d9fd0f49,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-b88r9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-92ghq_openstack(1086bd39-4637-4123-a7b2-d85d3a603dd5): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 06:05:32 crc kubenswrapper[4933]: E0122 06:05:32.783830 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = 
copying config: context canceled\"" pod="openstack/cinder-db-sync-92ghq" podUID="1086bd39-4637-4123-a7b2-d85d3a603dd5" Jan 22 06:05:32 crc kubenswrapper[4933]: I0122 06:05:32.812289 4933 scope.go:117] "RemoveContainer" containerID="9407b4f1b2e23d0f3791765c2db0211a95e2ee94beab9f227cc4f14e730d7d4b" Jan 22 06:05:32 crc kubenswrapper[4933]: I0122 06:05:32.876290 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56c9bc6f5c-qmln7" Jan 22 06:05:32 crc kubenswrapper[4933]: I0122 06:05:32.933966 4933 scope.go:117] "RemoveContainer" containerID="a3460dc884d70f6bf73627a5f4d172d4b3ca649e2192b95b6b435724720b8a15" Jan 22 06:05:33 crc kubenswrapper[4933]: I0122 06:05:33.007226 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6ead38e6-cb9b-4cdf-8b68-2971113c1af1-config\") pod \"6ead38e6-cb9b-4cdf-8b68-2971113c1af1\" (UID: \"6ead38e6-cb9b-4cdf-8b68-2971113c1af1\") " Jan 22 06:05:33 crc kubenswrapper[4933]: I0122 06:05:33.007370 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6ead38e6-cb9b-4cdf-8b68-2971113c1af1-ovsdbserver-nb\") pod \"6ead38e6-cb9b-4cdf-8b68-2971113c1af1\" (UID: \"6ead38e6-cb9b-4cdf-8b68-2971113c1af1\") " Jan 22 06:05:33 crc kubenswrapper[4933]: I0122 06:05:33.007436 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hvqt8\" (UniqueName: \"kubernetes.io/projected/6ead38e6-cb9b-4cdf-8b68-2971113c1af1-kube-api-access-hvqt8\") pod \"6ead38e6-cb9b-4cdf-8b68-2971113c1af1\" (UID: \"6ead38e6-cb9b-4cdf-8b68-2971113c1af1\") " Jan 22 06:05:33 crc kubenswrapper[4933]: I0122 06:05:33.007512 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6ead38e6-cb9b-4cdf-8b68-2971113c1af1-dns-svc\") pod \"6ead38e6-cb9b-4cdf-8b68-2971113c1af1\" (UID: \"6ead38e6-cb9b-4cdf-8b68-2971113c1af1\") " Jan 22 06:05:33 crc kubenswrapper[4933]: I0122 06:05:33.007575 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6ead38e6-cb9b-4cdf-8b68-2971113c1af1-ovsdbserver-sb\") pod \"6ead38e6-cb9b-4cdf-8b68-2971113c1af1\" (UID: \"6ead38e6-cb9b-4cdf-8b68-2971113c1af1\") " Jan 22 06:05:33 crc kubenswrapper[4933]: I0122 06:05:33.007616 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6ead38e6-cb9b-4cdf-8b68-2971113c1af1-dns-swift-storage-0\") pod \"6ead38e6-cb9b-4cdf-8b68-2971113c1af1\" (UID: \"6ead38e6-cb9b-4cdf-8b68-2971113c1af1\") " Jan 22 06:05:33 crc kubenswrapper[4933]: I0122 06:05:33.032046 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ead38e6-cb9b-4cdf-8b68-2971113c1af1-kube-api-access-hvqt8" (OuterVolumeSpecName: "kube-api-access-hvqt8") pod "6ead38e6-cb9b-4cdf-8b68-2971113c1af1" (UID: "6ead38e6-cb9b-4cdf-8b68-2971113c1af1"). InnerVolumeSpecName "kube-api-access-hvqt8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:05:33 crc kubenswrapper[4933]: I0122 06:05:33.110568 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hvqt8\" (UniqueName: \"kubernetes.io/projected/6ead38e6-cb9b-4cdf-8b68-2971113c1af1-kube-api-access-hvqt8\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:33 crc kubenswrapper[4933]: I0122 06:05:33.120825 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ead38e6-cb9b-4cdf-8b68-2971113c1af1-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "6ead38e6-cb9b-4cdf-8b68-2971113c1af1" (UID: "6ead38e6-cb9b-4cdf-8b68-2971113c1af1"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:05:33 crc kubenswrapper[4933]: I0122 06:05:33.127458 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ead38e6-cb9b-4cdf-8b68-2971113c1af1-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "6ead38e6-cb9b-4cdf-8b68-2971113c1af1" (UID: "6ead38e6-cb9b-4cdf-8b68-2971113c1af1"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:05:33 crc kubenswrapper[4933]: I0122 06:05:33.131669 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ead38e6-cb9b-4cdf-8b68-2971113c1af1-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6ead38e6-cb9b-4cdf-8b68-2971113c1af1" (UID: "6ead38e6-cb9b-4cdf-8b68-2971113c1af1"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:05:33 crc kubenswrapper[4933]: I0122 06:05:33.139692 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ead38e6-cb9b-4cdf-8b68-2971113c1af1-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "6ead38e6-cb9b-4cdf-8b68-2971113c1af1" (UID: "6ead38e6-cb9b-4cdf-8b68-2971113c1af1"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:05:33 crc kubenswrapper[4933]: I0122 06:05:33.159618 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ead38e6-cb9b-4cdf-8b68-2971113c1af1-config" (OuterVolumeSpecName: "config") pod "6ead38e6-cb9b-4cdf-8b68-2971113c1af1" (UID: "6ead38e6-cb9b-4cdf-8b68-2971113c1af1"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:05:33 crc kubenswrapper[4933]: I0122 06:05:33.213684 4933 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6ead38e6-cb9b-4cdf-8b68-2971113c1af1-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:33 crc kubenswrapper[4933]: I0122 06:05:33.213724 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6ead38e6-cb9b-4cdf-8b68-2971113c1af1-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:33 crc kubenswrapper[4933]: I0122 06:05:33.213739 4933 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6ead38e6-cb9b-4cdf-8b68-2971113c1af1-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:33 crc kubenswrapper[4933]: I0122 06:05:33.213751 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6ead38e6-cb9b-4cdf-8b68-2971113c1af1-config\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:33 crc kubenswrapper[4933]: I0122 06:05:33.213760 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6ead38e6-cb9b-4cdf-8b68-2971113c1af1-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:33 crc kubenswrapper[4933]: I0122 06:05:33.353770 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 06:05:33 crc kubenswrapper[4933]: W0122 06:05:33.358721 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda93245a9_0cb1_4b24_810e_ad085418a134.slice/crio-75900f9e7c11ae16a5f72cd6dd5795ce7a97d5dbc045fafdda9c320a69d2551a WatchSource:0}: Error finding container 75900f9e7c11ae16a5f72cd6dd5795ce7a97d5dbc045fafdda9c320a69d2551a: Status 404 returned error can't find the container with id 75900f9e7c11ae16a5f72cd6dd5795ce7a97d5dbc045fafdda9c320a69d2551a Jan 22 06:05:33 crc kubenswrapper[4933]: I0122 06:05:33.484675 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 06:05:33 crc kubenswrapper[4933]: W0122 06:05:33.491468 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf332d598_c2fa_4ed1_9b08_9245f85185af.slice/crio-7b11e623a7157361cfaeb2a2b55d3c72f3c573ad4d40fcc2992cb088ba1d8711 WatchSource:0}: Error finding container 7b11e623a7157361cfaeb2a2b55d3c72f3c573ad4d40fcc2992cb088ba1d8711: Status 404 returned error can't find the container with id 7b11e623a7157361cfaeb2a2b55d3c72f3c573ad4d40fcc2992cb088ba1d8711 Jan 22 06:05:33 crc kubenswrapper[4933]: W0122 06:05:33.491902 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1b555a2a_0c59_4b4c_901e_6b160e5d57a1.slice/crio-5ed27c1901802da6df63e4a53c32da58d4f7510ad015b8758249439f0dd65d5f WatchSource:0}: Error finding container 5ed27c1901802da6df63e4a53c32da58d4f7510ad015b8758249439f0dd65d5f: Status 404 returned error can't find the container with id 5ed27c1901802da6df63e4a53c32da58d4f7510ad015b8758249439f0dd65d5f Jan 22 06:05:33 crc kubenswrapper[4933]: I0122 06:05:33.494684 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-bvb58"] Jan 22 06:05:33 crc kubenswrapper[4933]: I0122 06:05:33.552701 
4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a7d72c18-1516-44a5-ad92-f367c93280b1","Type":"ContainerStarted","Data":"b29d41c6c3906eb46f5e625db215c2c233737c530c75840950c73cff63017a43"} Jan 22 06:05:33 crc kubenswrapper[4933]: I0122 06:05:33.565266 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56c9bc6f5c-qmln7" event={"ID":"6ead38e6-cb9b-4cdf-8b68-2971113c1af1","Type":"ContainerDied","Data":"770d54959538a5094af045c88c13c9801a6b3d1884d0788f3482f469b0d27eca"} Jan 22 06:05:33 crc kubenswrapper[4933]: I0122 06:05:33.565332 4933 scope.go:117] "RemoveContainer" containerID="f6c39fe8250cc962720265b6440d4dfb51250143572651845f0b1b49380f15da" Jan 22 06:05:33 crc kubenswrapper[4933]: I0122 06:05:33.565284 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56c9bc6f5c-qmln7" Jan 22 06:05:33 crc kubenswrapper[4933]: I0122 06:05:33.566545 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-bvb58" event={"ID":"1b555a2a-0c59-4b4c-901e-6b160e5d57a1","Type":"ContainerStarted","Data":"5ed27c1901802da6df63e4a53c32da58d4f7510ad015b8758249439f0dd65d5f"} Jan 22 06:05:33 crc kubenswrapper[4933]: I0122 06:05:33.569199 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f332d598-c2fa-4ed1-9b08-9245f85185af","Type":"ContainerStarted","Data":"7b11e623a7157361cfaeb2a2b55d3c72f3c573ad4d40fcc2992cb088ba1d8711"} Jan 22 06:05:33 crc kubenswrapper[4933]: I0122 06:05:33.573136 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-nkj4c" event={"ID":"913fa4df-79e3-40d8-8218-a869383e2a25","Type":"ContainerStarted","Data":"d8f447e83a7dace27b0cfb5535961cb963b243a669c28f5456502fec405aa42c"} Jan 22 06:05:33 crc kubenswrapper[4933]: I0122 06:05:33.577175 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a93245a9-0cb1-4b24-810e-ad085418a134","Type":"ContainerStarted","Data":"75900f9e7c11ae16a5f72cd6dd5795ce7a97d5dbc045fafdda9c320a69d2551a"} Jan 22 06:05:33 crc kubenswrapper[4933]: I0122 06:05:33.597157 4933 scope.go:117] "RemoveContainer" containerID="8dcfdaade288e4a4e857f537eb4fcbd1fc6851641e77ba9d59db534de87735eb" Jan 22 06:05:33 crc kubenswrapper[4933]: E0122 06:05:33.597168 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:b59b7445e581cc720038107e421371c86c5765b2967e77d884ef29b1d9fd0f49\\\"\"" pod="openstack/cinder-db-sync-92ghq" podUID="1086bd39-4637-4123-a7b2-d85d3a603dd5" Jan 22 06:05:33 crc kubenswrapper[4933]: I0122 06:05:33.598018 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-nkj4c" podStartSLOduration=4.379380334 podStartE2EDuration="25.598005075s" podCreationTimestamp="2026-01-22 06:05:08 +0000 UTC" firstStartedPulling="2026-01-22 06:05:10.344571702 +0000 UTC m=+1158.181697055" lastFinishedPulling="2026-01-22 06:05:31.563196453 +0000 UTC m=+1179.400321796" observedRunningTime="2026-01-22 06:05:33.591472141 +0000 UTC m=+1181.428597504" watchObservedRunningTime="2026-01-22 06:05:33.598005075 +0000 UTC m=+1181.435130428" Jan 22 06:05:33 crc kubenswrapper[4933]: I0122 06:05:33.634821 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/dnsmasq-dns-56c9bc6f5c-qmln7"] Jan 22 06:05:33 crc kubenswrapper[4933]: I0122 06:05:33.673387 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-56c9bc6f5c-qmln7"] Jan 22 06:05:33 crc kubenswrapper[4933]: I0122 06:05:33.924301 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-zczgn" Jan 22 06:05:34 crc kubenswrapper[4933]: I0122 06:05:34.026279 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/112a4ef5-b86f-4258-84db-bef5e66f9674-combined-ca-bundle\") pod \"112a4ef5-b86f-4258-84db-bef5e66f9674\" (UID: \"112a4ef5-b86f-4258-84db-bef5e66f9674\") " Jan 22 06:05:34 crc kubenswrapper[4933]: I0122 06:05:34.026340 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fhqfd\" (UniqueName: \"kubernetes.io/projected/112a4ef5-b86f-4258-84db-bef5e66f9674-kube-api-access-fhqfd\") pod \"112a4ef5-b86f-4258-84db-bef5e66f9674\" (UID: \"112a4ef5-b86f-4258-84db-bef5e66f9674\") " Jan 22 06:05:34 crc kubenswrapper[4933]: I0122 06:05:34.026520 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/112a4ef5-b86f-4258-84db-bef5e66f9674-config\") pod \"112a4ef5-b86f-4258-84db-bef5e66f9674\" (UID: \"112a4ef5-b86f-4258-84db-bef5e66f9674\") " Jan 22 06:05:34 crc kubenswrapper[4933]: I0122 06:05:34.033266 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/112a4ef5-b86f-4258-84db-bef5e66f9674-kube-api-access-fhqfd" (OuterVolumeSpecName: "kube-api-access-fhqfd") pod "112a4ef5-b86f-4258-84db-bef5e66f9674" (UID: "112a4ef5-b86f-4258-84db-bef5e66f9674"). InnerVolumeSpecName "kube-api-access-fhqfd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:05:34 crc kubenswrapper[4933]: I0122 06:05:34.096508 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/112a4ef5-b86f-4258-84db-bef5e66f9674-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "112a4ef5-b86f-4258-84db-bef5e66f9674" (UID: "112a4ef5-b86f-4258-84db-bef5e66f9674"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:05:34 crc kubenswrapper[4933]: I0122 06:05:34.110276 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/112a4ef5-b86f-4258-84db-bef5e66f9674-config" (OuterVolumeSpecName: "config") pod "112a4ef5-b86f-4258-84db-bef5e66f9674" (UID: "112a4ef5-b86f-4258-84db-bef5e66f9674"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:05:34 crc kubenswrapper[4933]: I0122 06:05:34.128401 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/112a4ef5-b86f-4258-84db-bef5e66f9674-config\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:34 crc kubenswrapper[4933]: I0122 06:05:34.128552 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/112a4ef5-b86f-4258-84db-bef5e66f9674-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:34 crc kubenswrapper[4933]: I0122 06:05:34.128617 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fhqfd\" (UniqueName: \"kubernetes.io/projected/112a4ef5-b86f-4258-84db-bef5e66f9674-kube-api-access-fhqfd\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:34 crc kubenswrapper[4933]: I0122 06:05:34.505482 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ead38e6-cb9b-4cdf-8b68-2971113c1af1" path="/var/lib/kubelet/pods/6ead38e6-cb9b-4cdf-8b68-2971113c1af1/volumes" Jan 22 06:05:34 crc kubenswrapper[4933]: I0122 06:05:34.594662 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-bvb58" event={"ID":"1b555a2a-0c59-4b4c-901e-6b160e5d57a1","Type":"ContainerStarted","Data":"25256a7adb222867f28f0101189b07c7deb83968a46968e6cf7492f82b35018b"} Jan 22 06:05:34 crc kubenswrapper[4933]: I0122 06:05:34.604048 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f332d598-c2fa-4ed1-9b08-9245f85185af","Type":"ContainerStarted","Data":"460c308d9a9fca5408912df645075971fee07fee0e34840d93858c38da9642d9"} Jan 22 06:05:34 crc kubenswrapper[4933]: I0122 06:05:34.607663 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-zczgn" event={"ID":"112a4ef5-b86f-4258-84db-bef5e66f9674","Type":"ContainerDied","Data":"f8e055833cd2123eb22082e60be88e963d3bad64f12dab520fd937d46612b8ca"} Jan 22 06:05:34 crc kubenswrapper[4933]: I0122 06:05:34.607686 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-zczgn" Jan 22 06:05:34 crc kubenswrapper[4933]: I0122 06:05:34.607701 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f8e055833cd2123eb22082e60be88e963d3bad64f12dab520fd937d46612b8ca" Jan 22 06:05:34 crc kubenswrapper[4933]: I0122 06:05:34.614366 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a93245a9-0cb1-4b24-810e-ad085418a134","Type":"ContainerStarted","Data":"0e3dae9acf13e4c101ed9038cebd3f5dfa8ae112611c63de76c052a41cacaa61"} Jan 22 06:05:34 crc kubenswrapper[4933]: I0122 06:05:34.614409 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a93245a9-0cb1-4b24-810e-ad085418a134","Type":"ContainerStarted","Data":"b9b670df062fab12248a75e5b85ce25fafa409f7873fbafd6c269b8edfd15364"} Jan 22 06:05:34 crc kubenswrapper[4933]: I0122 06:05:34.641320 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=11.641303367999999 podStartE2EDuration="11.641303368s" podCreationTimestamp="2026-01-22 06:05:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:05:34.633911554 +0000 UTC m=+1182.471036907" watchObservedRunningTime="2026-01-22 06:05:34.641303368 +0000 UTC m=+1182.478428721" Jan 22 06:05:34 crc kubenswrapper[4933]: I0122 06:05:34.641426 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-bvb58" podStartSLOduration=2.641421231 podStartE2EDuration="2.641421231s" podCreationTimestamp="2026-01-22 06:05:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:05:34.612725553 +0000 UTC m=+1182.449850896" watchObservedRunningTime="2026-01-22 06:05:34.641421231 +0000 UTC m=+1182.478546574" Jan 22 06:05:34 crc kubenswrapper[4933]: I0122 06:05:34.801996 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6b9c8b59c-cbvvm"] Jan 22 06:05:34 crc kubenswrapper[4933]: E0122 06:05:34.802813 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ead38e6-cb9b-4cdf-8b68-2971113c1af1" containerName="dnsmasq-dns" Jan 22 06:05:34 crc kubenswrapper[4933]: I0122 06:05:34.802837 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ead38e6-cb9b-4cdf-8b68-2971113c1af1" containerName="dnsmasq-dns" Jan 22 06:05:34 crc kubenswrapper[4933]: E0122 06:05:34.802852 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="112a4ef5-b86f-4258-84db-bef5e66f9674" containerName="neutron-db-sync" Jan 22 06:05:34 crc kubenswrapper[4933]: I0122 06:05:34.802862 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="112a4ef5-b86f-4258-84db-bef5e66f9674" containerName="neutron-db-sync" Jan 22 06:05:34 crc kubenswrapper[4933]: E0122 06:05:34.802894 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ead38e6-cb9b-4cdf-8b68-2971113c1af1" containerName="init" Jan 22 06:05:34 crc kubenswrapper[4933]: I0122 06:05:34.802902 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ead38e6-cb9b-4cdf-8b68-2971113c1af1" containerName="init" Jan 22 06:05:34 crc kubenswrapper[4933]: I0122 06:05:34.803326 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ead38e6-cb9b-4cdf-8b68-2971113c1af1" 
containerName="dnsmasq-dns" Jan 22 06:05:34 crc kubenswrapper[4933]: I0122 06:05:34.803346 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="112a4ef5-b86f-4258-84db-bef5e66f9674" containerName="neutron-db-sync" Jan 22 06:05:34 crc kubenswrapper[4933]: I0122 06:05:34.805618 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b9c8b59c-cbvvm" Jan 22 06:05:34 crc kubenswrapper[4933]: I0122 06:05:34.838970 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b9c8b59c-cbvvm"] Jan 22 06:05:34 crc kubenswrapper[4933]: I0122 06:05:34.886893 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-5fc9d4fd76-k4qh6"] Jan 22 06:05:34 crc kubenswrapper[4933]: I0122 06:05:34.888643 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5fc9d4fd76-k4qh6" Jan 22 06:05:34 crc kubenswrapper[4933]: I0122 06:05:34.899223 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 22 06:05:34 crc kubenswrapper[4933]: I0122 06:05:34.899568 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 22 06:05:34 crc kubenswrapper[4933]: I0122 06:05:34.899726 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-gztxr" Jan 22 06:05:34 crc kubenswrapper[4933]: I0122 06:05:34.899767 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Jan 22 06:05:34 crc kubenswrapper[4933]: I0122 06:05:34.932151 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5fc9d4fd76-k4qh6"] Jan 22 06:05:34 crc kubenswrapper[4933]: I0122 06:05:34.981741 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e98f5ce4-ac57-4a9e-9ffa-8d48a238e064-ovsdbserver-sb\") pod \"dnsmasq-dns-6b9c8b59c-cbvvm\" (UID: \"e98f5ce4-ac57-4a9e-9ffa-8d48a238e064\") " pod="openstack/dnsmasq-dns-6b9c8b59c-cbvvm" Jan 22 06:05:34 crc kubenswrapper[4933]: I0122 06:05:34.981820 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e98f5ce4-ac57-4a9e-9ffa-8d48a238e064-config\") pod \"dnsmasq-dns-6b9c8b59c-cbvvm\" (UID: \"e98f5ce4-ac57-4a9e-9ffa-8d48a238e064\") " pod="openstack/dnsmasq-dns-6b9c8b59c-cbvvm" Jan 22 06:05:34 crc kubenswrapper[4933]: I0122 06:05:34.981848 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e98f5ce4-ac57-4a9e-9ffa-8d48a238e064-dns-swift-storage-0\") pod \"dnsmasq-dns-6b9c8b59c-cbvvm\" (UID: \"e98f5ce4-ac57-4a9e-9ffa-8d48a238e064\") " pod="openstack/dnsmasq-dns-6b9c8b59c-cbvvm" Jan 22 06:05:34 crc kubenswrapper[4933]: I0122 06:05:34.981871 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w4ktv\" (UniqueName: \"kubernetes.io/projected/e98f5ce4-ac57-4a9e-9ffa-8d48a238e064-kube-api-access-w4ktv\") pod \"dnsmasq-dns-6b9c8b59c-cbvvm\" (UID: \"e98f5ce4-ac57-4a9e-9ffa-8d48a238e064\") " pod="openstack/dnsmasq-dns-6b9c8b59c-cbvvm" Jan 22 06:05:34 crc kubenswrapper[4933]: I0122 06:05:34.981891 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" 
(UniqueName: \"kubernetes.io/configmap/e98f5ce4-ac57-4a9e-9ffa-8d48a238e064-ovsdbserver-nb\") pod \"dnsmasq-dns-6b9c8b59c-cbvvm\" (UID: \"e98f5ce4-ac57-4a9e-9ffa-8d48a238e064\") " pod="openstack/dnsmasq-dns-6b9c8b59c-cbvvm" Jan 22 06:05:34 crc kubenswrapper[4933]: I0122 06:05:34.981927 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e98f5ce4-ac57-4a9e-9ffa-8d48a238e064-dns-svc\") pod \"dnsmasq-dns-6b9c8b59c-cbvvm\" (UID: \"e98f5ce4-ac57-4a9e-9ffa-8d48a238e064\") " pod="openstack/dnsmasq-dns-6b9c8b59c-cbvvm" Jan 22 06:05:35 crc kubenswrapper[4933]: I0122 06:05:35.083596 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3945a3af-6972-419a-a60a-9f7b4b329fb1-ovndb-tls-certs\") pod \"neutron-5fc9d4fd76-k4qh6\" (UID: \"3945a3af-6972-419a-a60a-9f7b4b329fb1\") " pod="openstack/neutron-5fc9d4fd76-k4qh6" Jan 22 06:05:35 crc kubenswrapper[4933]: I0122 06:05:35.084807 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w4ktv\" (UniqueName: \"kubernetes.io/projected/e98f5ce4-ac57-4a9e-9ffa-8d48a238e064-kube-api-access-w4ktv\") pod \"dnsmasq-dns-6b9c8b59c-cbvvm\" (UID: \"e98f5ce4-ac57-4a9e-9ffa-8d48a238e064\") " pod="openstack/dnsmasq-dns-6b9c8b59c-cbvvm" Jan 22 06:05:35 crc kubenswrapper[4933]: I0122 06:05:35.086841 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e98f5ce4-ac57-4a9e-9ffa-8d48a238e064-ovsdbserver-nb\") pod \"dnsmasq-dns-6b9c8b59c-cbvvm\" (UID: \"e98f5ce4-ac57-4a9e-9ffa-8d48a238e064\") " pod="openstack/dnsmasq-dns-6b9c8b59c-cbvvm" Jan 22 06:05:35 crc kubenswrapper[4933]: I0122 06:05:35.085415 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e98f5ce4-ac57-4a9e-9ffa-8d48a238e064-ovsdbserver-nb\") pod \"dnsmasq-dns-6b9c8b59c-cbvvm\" (UID: \"e98f5ce4-ac57-4a9e-9ffa-8d48a238e064\") " pod="openstack/dnsmasq-dns-6b9c8b59c-cbvvm" Jan 22 06:05:35 crc kubenswrapper[4933]: I0122 06:05:35.086952 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nv4ln\" (UniqueName: \"kubernetes.io/projected/3945a3af-6972-419a-a60a-9f7b4b329fb1-kube-api-access-nv4ln\") pod \"neutron-5fc9d4fd76-k4qh6\" (UID: \"3945a3af-6972-419a-a60a-9f7b4b329fb1\") " pod="openstack/neutron-5fc9d4fd76-k4qh6" Jan 22 06:05:35 crc kubenswrapper[4933]: I0122 06:05:35.087174 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e98f5ce4-ac57-4a9e-9ffa-8d48a238e064-dns-svc\") pod \"dnsmasq-dns-6b9c8b59c-cbvvm\" (UID: \"e98f5ce4-ac57-4a9e-9ffa-8d48a238e064\") " pod="openstack/dnsmasq-dns-6b9c8b59c-cbvvm" Jan 22 06:05:35 crc kubenswrapper[4933]: I0122 06:05:35.087289 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/3945a3af-6972-419a-a60a-9f7b4b329fb1-httpd-config\") pod \"neutron-5fc9d4fd76-k4qh6\" (UID: \"3945a3af-6972-419a-a60a-9f7b4b329fb1\") " pod="openstack/neutron-5fc9d4fd76-k4qh6" Jan 22 06:05:35 crc kubenswrapper[4933]: I0122 06:05:35.087527 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/e98f5ce4-ac57-4a9e-9ffa-8d48a238e064-ovsdbserver-sb\") pod \"dnsmasq-dns-6b9c8b59c-cbvvm\" (UID: \"e98f5ce4-ac57-4a9e-9ffa-8d48a238e064\") " pod="openstack/dnsmasq-dns-6b9c8b59c-cbvvm" Jan 22 06:05:35 crc kubenswrapper[4933]: I0122 06:05:35.087633 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3945a3af-6972-419a-a60a-9f7b4b329fb1-combined-ca-bundle\") pod \"neutron-5fc9d4fd76-k4qh6\" (UID: \"3945a3af-6972-419a-a60a-9f7b4b329fb1\") " pod="openstack/neutron-5fc9d4fd76-k4qh6" Jan 22 06:05:35 crc kubenswrapper[4933]: I0122 06:05:35.087678 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/3945a3af-6972-419a-a60a-9f7b4b329fb1-config\") pod \"neutron-5fc9d4fd76-k4qh6\" (UID: \"3945a3af-6972-419a-a60a-9f7b4b329fb1\") " pod="openstack/neutron-5fc9d4fd76-k4qh6" Jan 22 06:05:35 crc kubenswrapper[4933]: I0122 06:05:35.087851 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e98f5ce4-ac57-4a9e-9ffa-8d48a238e064-config\") pod \"dnsmasq-dns-6b9c8b59c-cbvvm\" (UID: \"e98f5ce4-ac57-4a9e-9ffa-8d48a238e064\") " pod="openstack/dnsmasq-dns-6b9c8b59c-cbvvm" Jan 22 06:05:35 crc kubenswrapper[4933]: I0122 06:05:35.087938 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e98f5ce4-ac57-4a9e-9ffa-8d48a238e064-dns-swift-storage-0\") pod \"dnsmasq-dns-6b9c8b59c-cbvvm\" (UID: \"e98f5ce4-ac57-4a9e-9ffa-8d48a238e064\") " pod="openstack/dnsmasq-dns-6b9c8b59c-cbvvm" Jan 22 06:05:35 crc kubenswrapper[4933]: I0122 06:05:35.088018 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e98f5ce4-ac57-4a9e-9ffa-8d48a238e064-dns-svc\") pod \"dnsmasq-dns-6b9c8b59c-cbvvm\" (UID: \"e98f5ce4-ac57-4a9e-9ffa-8d48a238e064\") " pod="openstack/dnsmasq-dns-6b9c8b59c-cbvvm" Jan 22 06:05:35 crc kubenswrapper[4933]: I0122 06:05:35.088435 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e98f5ce4-ac57-4a9e-9ffa-8d48a238e064-ovsdbserver-sb\") pod \"dnsmasq-dns-6b9c8b59c-cbvvm\" (UID: \"e98f5ce4-ac57-4a9e-9ffa-8d48a238e064\") " pod="openstack/dnsmasq-dns-6b9c8b59c-cbvvm" Jan 22 06:05:35 crc kubenswrapper[4933]: I0122 06:05:35.088793 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e98f5ce4-ac57-4a9e-9ffa-8d48a238e064-dns-swift-storage-0\") pod \"dnsmasq-dns-6b9c8b59c-cbvvm\" (UID: \"e98f5ce4-ac57-4a9e-9ffa-8d48a238e064\") " pod="openstack/dnsmasq-dns-6b9c8b59c-cbvvm" Jan 22 06:05:35 crc kubenswrapper[4933]: I0122 06:05:35.088942 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e98f5ce4-ac57-4a9e-9ffa-8d48a238e064-config\") pod \"dnsmasq-dns-6b9c8b59c-cbvvm\" (UID: \"e98f5ce4-ac57-4a9e-9ffa-8d48a238e064\") " pod="openstack/dnsmasq-dns-6b9c8b59c-cbvvm" Jan 22 06:05:35 crc kubenswrapper[4933]: I0122 06:05:35.108790 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w4ktv\" (UniqueName: \"kubernetes.io/projected/e98f5ce4-ac57-4a9e-9ffa-8d48a238e064-kube-api-access-w4ktv\") pod 
\"dnsmasq-dns-6b9c8b59c-cbvvm\" (UID: \"e98f5ce4-ac57-4a9e-9ffa-8d48a238e064\") " pod="openstack/dnsmasq-dns-6b9c8b59c-cbvvm" Jan 22 06:05:35 crc kubenswrapper[4933]: I0122 06:05:35.189600 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3945a3af-6972-419a-a60a-9f7b4b329fb1-combined-ca-bundle\") pod \"neutron-5fc9d4fd76-k4qh6\" (UID: \"3945a3af-6972-419a-a60a-9f7b4b329fb1\") " pod="openstack/neutron-5fc9d4fd76-k4qh6" Jan 22 06:05:35 crc kubenswrapper[4933]: I0122 06:05:35.190135 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/3945a3af-6972-419a-a60a-9f7b4b329fb1-config\") pod \"neutron-5fc9d4fd76-k4qh6\" (UID: \"3945a3af-6972-419a-a60a-9f7b4b329fb1\") " pod="openstack/neutron-5fc9d4fd76-k4qh6" Jan 22 06:05:35 crc kubenswrapper[4933]: I0122 06:05:35.190218 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3945a3af-6972-419a-a60a-9f7b4b329fb1-ovndb-tls-certs\") pod \"neutron-5fc9d4fd76-k4qh6\" (UID: \"3945a3af-6972-419a-a60a-9f7b4b329fb1\") " pod="openstack/neutron-5fc9d4fd76-k4qh6" Jan 22 06:05:35 crc kubenswrapper[4933]: I0122 06:05:35.190263 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nv4ln\" (UniqueName: \"kubernetes.io/projected/3945a3af-6972-419a-a60a-9f7b4b329fb1-kube-api-access-nv4ln\") pod \"neutron-5fc9d4fd76-k4qh6\" (UID: \"3945a3af-6972-419a-a60a-9f7b4b329fb1\") " pod="openstack/neutron-5fc9d4fd76-k4qh6" Jan 22 06:05:35 crc kubenswrapper[4933]: I0122 06:05:35.190402 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/3945a3af-6972-419a-a60a-9f7b4b329fb1-httpd-config\") pod \"neutron-5fc9d4fd76-k4qh6\" (UID: \"3945a3af-6972-419a-a60a-9f7b4b329fb1\") " pod="openstack/neutron-5fc9d4fd76-k4qh6" Jan 22 06:05:35 crc kubenswrapper[4933]: I0122 06:05:35.194473 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3945a3af-6972-419a-a60a-9f7b4b329fb1-ovndb-tls-certs\") pod \"neutron-5fc9d4fd76-k4qh6\" (UID: \"3945a3af-6972-419a-a60a-9f7b4b329fb1\") " pod="openstack/neutron-5fc9d4fd76-k4qh6" Jan 22 06:05:35 crc kubenswrapper[4933]: I0122 06:05:35.194527 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3945a3af-6972-419a-a60a-9f7b4b329fb1-combined-ca-bundle\") pod \"neutron-5fc9d4fd76-k4qh6\" (UID: \"3945a3af-6972-419a-a60a-9f7b4b329fb1\") " pod="openstack/neutron-5fc9d4fd76-k4qh6" Jan 22 06:05:35 crc kubenswrapper[4933]: I0122 06:05:35.195101 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/3945a3af-6972-419a-a60a-9f7b4b329fb1-config\") pod \"neutron-5fc9d4fd76-k4qh6\" (UID: \"3945a3af-6972-419a-a60a-9f7b4b329fb1\") " pod="openstack/neutron-5fc9d4fd76-k4qh6" Jan 22 06:05:35 crc kubenswrapper[4933]: I0122 06:05:35.195845 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/3945a3af-6972-419a-a60a-9f7b4b329fb1-httpd-config\") pod \"neutron-5fc9d4fd76-k4qh6\" (UID: \"3945a3af-6972-419a-a60a-9f7b4b329fb1\") " pod="openstack/neutron-5fc9d4fd76-k4qh6" Jan 22 06:05:35 crc kubenswrapper[4933]: I0122 
06:05:35.208820 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nv4ln\" (UniqueName: \"kubernetes.io/projected/3945a3af-6972-419a-a60a-9f7b4b329fb1-kube-api-access-nv4ln\") pod \"neutron-5fc9d4fd76-k4qh6\" (UID: \"3945a3af-6972-419a-a60a-9f7b4b329fb1\") " pod="openstack/neutron-5fc9d4fd76-k4qh6" Jan 22 06:05:35 crc kubenswrapper[4933]: I0122 06:05:35.222317 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b9c8b59c-cbvvm" Jan 22 06:05:35 crc kubenswrapper[4933]: I0122 06:05:35.244136 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5fc9d4fd76-k4qh6" Jan 22 06:05:35 crc kubenswrapper[4933]: I0122 06:05:35.633747 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f332d598-c2fa-4ed1-9b08-9245f85185af","Type":"ContainerStarted","Data":"97322d5aa3a394dfde9104353fcf318859f9b7d5d72a2357a8c11f88a0684f57"} Jan 22 06:05:35 crc kubenswrapper[4933]: I0122 06:05:35.636343 4933 generic.go:334] "Generic (PLEG): container finished" podID="913fa4df-79e3-40d8-8218-a869383e2a25" containerID="d8f447e83a7dace27b0cfb5535961cb963b243a669c28f5456502fec405aa42c" exitCode=0 Jan 22 06:05:35 crc kubenswrapper[4933]: I0122 06:05:35.636406 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-nkj4c" event={"ID":"913fa4df-79e3-40d8-8218-a869383e2a25","Type":"ContainerDied","Data":"d8f447e83a7dace27b0cfb5535961cb963b243a669c28f5456502fec405aa42c"} Jan 22 06:05:35 crc kubenswrapper[4933]: I0122 06:05:35.638218 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a7d72c18-1516-44a5-ad92-f367c93280b1","Type":"ContainerStarted","Data":"396cdd5329e8a64271bc121815e8795ec8443f663a5b2fa0cebe713443617e70"} Jan 22 06:05:35 crc kubenswrapper[4933]: I0122 06:05:35.674264 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=11.674244263 podStartE2EDuration="11.674244263s" podCreationTimestamp="2026-01-22 06:05:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:05:35.664197891 +0000 UTC m=+1183.501323264" watchObservedRunningTime="2026-01-22 06:05:35.674244263 +0000 UTC m=+1183.511369626" Jan 22 06:05:35 crc kubenswrapper[4933]: I0122 06:05:35.761838 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b9c8b59c-cbvvm"] Jan 22 06:05:35 crc kubenswrapper[4933]: W0122 06:05:35.766841 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode98f5ce4_ac57_4a9e_9ffa_8d48a238e064.slice/crio-88293620d576b74ef169edbb8151035e945734fede38301bbc82ce5fab6843f3 WatchSource:0}: Error finding container 88293620d576b74ef169edbb8151035e945734fede38301bbc82ce5fab6843f3: Status 404 returned error can't find the container with id 88293620d576b74ef169edbb8151035e945734fede38301bbc82ce5fab6843f3 Jan 22 06:05:36 crc kubenswrapper[4933]: I0122 06:05:36.027791 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5fc9d4fd76-k4qh6"] Jan 22 06:05:36 crc kubenswrapper[4933]: I0122 06:05:36.651340 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5fc9d4fd76-k4qh6" 
event={"ID":"3945a3af-6972-419a-a60a-9f7b4b329fb1","Type":"ContainerStarted","Data":"be9888b51e1450feb8c2ff1783a022dd5a5c8ddb8ef679f21b8c63b678dafbea"} Jan 22 06:05:36 crc kubenswrapper[4933]: I0122 06:05:36.651589 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5fc9d4fd76-k4qh6" event={"ID":"3945a3af-6972-419a-a60a-9f7b4b329fb1","Type":"ContainerStarted","Data":"cfbad903f2c92b82331e9650e7037487d2a1ca46f3cf0f1c99591c5b68a30640"} Jan 22 06:05:36 crc kubenswrapper[4933]: I0122 06:05:36.651601 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5fc9d4fd76-k4qh6" event={"ID":"3945a3af-6972-419a-a60a-9f7b4b329fb1","Type":"ContainerStarted","Data":"86ac22ff6bcdaec6471813af0798064720fa8800080b411bbc4377f4965630e8"} Jan 22 06:05:36 crc kubenswrapper[4933]: I0122 06:05:36.652568 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-5fc9d4fd76-k4qh6" Jan 22 06:05:36 crc kubenswrapper[4933]: I0122 06:05:36.654495 4933 generic.go:334] "Generic (PLEG): container finished" podID="e98f5ce4-ac57-4a9e-9ffa-8d48a238e064" containerID="e8ff61a17541a5e8a5ec72dd298103ce7744ae1ca696ca271d5cc3c0051d62f3" exitCode=0 Jan 22 06:05:36 crc kubenswrapper[4933]: I0122 06:05:36.655499 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b9c8b59c-cbvvm" event={"ID":"e98f5ce4-ac57-4a9e-9ffa-8d48a238e064","Type":"ContainerDied","Data":"e8ff61a17541a5e8a5ec72dd298103ce7744ae1ca696ca271d5cc3c0051d62f3"} Jan 22 06:05:36 crc kubenswrapper[4933]: I0122 06:05:36.655525 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b9c8b59c-cbvvm" event={"ID":"e98f5ce4-ac57-4a9e-9ffa-8d48a238e064","Type":"ContainerStarted","Data":"88293620d576b74ef169edbb8151035e945734fede38301bbc82ce5fab6843f3"} Jan 22 06:05:36 crc kubenswrapper[4933]: I0122 06:05:36.678969 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-5fc9d4fd76-k4qh6" podStartSLOduration=2.67895316 podStartE2EDuration="2.67895316s" podCreationTimestamp="2026-01-22 06:05:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:05:36.673499374 +0000 UTC m=+1184.510624727" watchObservedRunningTime="2026-01-22 06:05:36.67895316 +0000 UTC m=+1184.516078513" Jan 22 06:05:36 crc kubenswrapper[4933]: I0122 06:05:36.963107 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-nkj4c" Jan 22 06:05:37 crc kubenswrapper[4933]: I0122 06:05:37.044025 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/913fa4df-79e3-40d8-8218-a869383e2a25-config-data\") pod \"913fa4df-79e3-40d8-8218-a869383e2a25\" (UID: \"913fa4df-79e3-40d8-8218-a869383e2a25\") " Jan 22 06:05:37 crc kubenswrapper[4933]: I0122 06:05:37.044146 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/913fa4df-79e3-40d8-8218-a869383e2a25-logs\") pod \"913fa4df-79e3-40d8-8218-a869383e2a25\" (UID: \"913fa4df-79e3-40d8-8218-a869383e2a25\") " Jan 22 06:05:37 crc kubenswrapper[4933]: I0122 06:05:37.044262 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/913fa4df-79e3-40d8-8218-a869383e2a25-scripts\") pod \"913fa4df-79e3-40d8-8218-a869383e2a25\" (UID: \"913fa4df-79e3-40d8-8218-a869383e2a25\") " Jan 22 06:05:37 crc kubenswrapper[4933]: I0122 06:05:37.044289 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/913fa4df-79e3-40d8-8218-a869383e2a25-combined-ca-bundle\") pod \"913fa4df-79e3-40d8-8218-a869383e2a25\" (UID: \"913fa4df-79e3-40d8-8218-a869383e2a25\") " Jan 22 06:05:37 crc kubenswrapper[4933]: I0122 06:05:37.044381 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w2kv7\" (UniqueName: \"kubernetes.io/projected/913fa4df-79e3-40d8-8218-a869383e2a25-kube-api-access-w2kv7\") pod \"913fa4df-79e3-40d8-8218-a869383e2a25\" (UID: \"913fa4df-79e3-40d8-8218-a869383e2a25\") " Jan 22 06:05:37 crc kubenswrapper[4933]: I0122 06:05:37.045069 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/913fa4df-79e3-40d8-8218-a869383e2a25-logs" (OuterVolumeSpecName: "logs") pod "913fa4df-79e3-40d8-8218-a869383e2a25" (UID: "913fa4df-79e3-40d8-8218-a869383e2a25"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:05:37 crc kubenswrapper[4933]: I0122 06:05:37.049437 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/913fa4df-79e3-40d8-8218-a869383e2a25-kube-api-access-w2kv7" (OuterVolumeSpecName: "kube-api-access-w2kv7") pod "913fa4df-79e3-40d8-8218-a869383e2a25" (UID: "913fa4df-79e3-40d8-8218-a869383e2a25"). InnerVolumeSpecName "kube-api-access-w2kv7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:05:37 crc kubenswrapper[4933]: I0122 06:05:37.052416 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/913fa4df-79e3-40d8-8218-a869383e2a25-scripts" (OuterVolumeSpecName: "scripts") pod "913fa4df-79e3-40d8-8218-a869383e2a25" (UID: "913fa4df-79e3-40d8-8218-a869383e2a25"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:05:37 crc kubenswrapper[4933]: I0122 06:05:37.069940 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/913fa4df-79e3-40d8-8218-a869383e2a25-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "913fa4df-79e3-40d8-8218-a869383e2a25" (UID: "913fa4df-79e3-40d8-8218-a869383e2a25"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:05:37 crc kubenswrapper[4933]: I0122 06:05:37.077183 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/913fa4df-79e3-40d8-8218-a869383e2a25-config-data" (OuterVolumeSpecName: "config-data") pod "913fa4df-79e3-40d8-8218-a869383e2a25" (UID: "913fa4df-79e3-40d8-8218-a869383e2a25"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:05:37 crc kubenswrapper[4933]: I0122 06:05:37.146477 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/913fa4df-79e3-40d8-8218-a869383e2a25-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:37 crc kubenswrapper[4933]: I0122 06:05:37.146509 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/913fa4df-79e3-40d8-8218-a869383e2a25-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:37 crc kubenswrapper[4933]: I0122 06:05:37.146521 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w2kv7\" (UniqueName: \"kubernetes.io/projected/913fa4df-79e3-40d8-8218-a869383e2a25-kube-api-access-w2kv7\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:37 crc kubenswrapper[4933]: I0122 06:05:37.146530 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/913fa4df-79e3-40d8-8218-a869383e2a25-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:37 crc kubenswrapper[4933]: I0122 06:05:37.146539 4933 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/913fa4df-79e3-40d8-8218-a869383e2a25-logs\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:37 crc kubenswrapper[4933]: I0122 06:05:37.503783 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-56c9bc6f5c-qmln7" podUID="6ead38e6-cb9b-4cdf-8b68-2971113c1af1" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.137:5353: i/o timeout" Jan 22 06:05:37 crc kubenswrapper[4933]: I0122 06:05:37.679315 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b9c8b59c-cbvvm" event={"ID":"e98f5ce4-ac57-4a9e-9ffa-8d48a238e064","Type":"ContainerStarted","Data":"4d6483276b97f7c59ec0cf5441b440eca417184a7ba22e5b4af01d24fc16568e"} Jan 22 06:05:37 crc kubenswrapper[4933]: I0122 06:05:37.680107 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6b9c8b59c-cbvvm" Jan 22 06:05:37 crc kubenswrapper[4933]: I0122 06:05:37.683626 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-nkj4c" Jan 22 06:05:37 crc kubenswrapper[4933]: I0122 06:05:37.685209 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-nkj4c" event={"ID":"913fa4df-79e3-40d8-8218-a869383e2a25","Type":"ContainerDied","Data":"9b8265d4ed45940a59c0ebc8bb62946288b89fa70176dbdb1b80138ab5dd537b"} Jan 22 06:05:37 crc kubenswrapper[4933]: I0122 06:05:37.685259 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9b8265d4ed45940a59c0ebc8bb62946288b89fa70176dbdb1b80138ab5dd537b" Jan 22 06:05:37 crc kubenswrapper[4933]: I0122 06:05:37.706869 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6b9c8b59c-cbvvm" podStartSLOduration=3.706850348 podStartE2EDuration="3.706850348s" podCreationTimestamp="2026-01-22 06:05:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:05:37.697414813 +0000 UTC m=+1185.534540186" watchObservedRunningTime="2026-01-22 06:05:37.706850348 +0000 UTC m=+1185.543975691" Jan 22 06:05:37 crc kubenswrapper[4933]: I0122 06:05:37.820442 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-566788757d-gkrdt"] Jan 22 06:05:37 crc kubenswrapper[4933]: E0122 06:05:37.820804 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="913fa4df-79e3-40d8-8218-a869383e2a25" containerName="placement-db-sync" Jan 22 06:05:37 crc kubenswrapper[4933]: I0122 06:05:37.820821 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="913fa4df-79e3-40d8-8218-a869383e2a25" containerName="placement-db-sync" Jan 22 06:05:37 crc kubenswrapper[4933]: I0122 06:05:37.820998 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="913fa4df-79e3-40d8-8218-a869383e2a25" containerName="placement-db-sync" Jan 22 06:05:37 crc kubenswrapper[4933]: I0122 06:05:37.821969 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-566788757d-gkrdt" Jan 22 06:05:37 crc kubenswrapper[4933]: I0122 06:05:37.826398 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Jan 22 06:05:37 crc kubenswrapper[4933]: I0122 06:05:37.826678 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Jan 22 06:05:37 crc kubenswrapper[4933]: I0122 06:05:37.827033 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-xrjz2" Jan 22 06:05:37 crc kubenswrapper[4933]: I0122 06:05:37.827254 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Jan 22 06:05:37 crc kubenswrapper[4933]: I0122 06:05:37.830019 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Jan 22 06:05:37 crc kubenswrapper[4933]: I0122 06:05:37.893109 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-566788757d-gkrdt"] Jan 22 06:05:37 crc kubenswrapper[4933]: I0122 06:05:37.983446 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-internal-tls-certs\") pod \"placement-566788757d-gkrdt\" (UID: \"d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194\") " pod="openstack/placement-566788757d-gkrdt" Jan 22 06:05:37 crc kubenswrapper[4933]: I0122 06:05:37.983529 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-public-tls-certs\") pod \"placement-566788757d-gkrdt\" (UID: \"d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194\") " pod="openstack/placement-566788757d-gkrdt" Jan 22 06:05:37 crc kubenswrapper[4933]: I0122 06:05:37.983785 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-logs\") pod \"placement-566788757d-gkrdt\" (UID: \"d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194\") " pod="openstack/placement-566788757d-gkrdt" Jan 22 06:05:37 crc kubenswrapper[4933]: I0122 06:05:37.983840 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tmp5k\" (UniqueName: \"kubernetes.io/projected/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-kube-api-access-tmp5k\") pod \"placement-566788757d-gkrdt\" (UID: \"d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194\") " pod="openstack/placement-566788757d-gkrdt" Jan 22 06:05:37 crc kubenswrapper[4933]: I0122 06:05:37.983916 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-scripts\") pod \"placement-566788757d-gkrdt\" (UID: \"d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194\") " pod="openstack/placement-566788757d-gkrdt" Jan 22 06:05:37 crc kubenswrapper[4933]: I0122 06:05:37.984163 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-config-data\") pod \"placement-566788757d-gkrdt\" (UID: \"d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194\") " pod="openstack/placement-566788757d-gkrdt" Jan 22 06:05:37 crc kubenswrapper[4933]: I0122 06:05:37.984276 4933 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-combined-ca-bundle\") pod \"placement-566788757d-gkrdt\" (UID: \"d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194\") " pod="openstack/placement-566788757d-gkrdt" Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.086775 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-logs\") pod \"placement-566788757d-gkrdt\" (UID: \"d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194\") " pod="openstack/placement-566788757d-gkrdt" Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.087323 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tmp5k\" (UniqueName: \"kubernetes.io/projected/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-kube-api-access-tmp5k\") pod \"placement-566788757d-gkrdt\" (UID: \"d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194\") " pod="openstack/placement-566788757d-gkrdt" Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.087365 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-scripts\") pod \"placement-566788757d-gkrdt\" (UID: \"d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194\") " pod="openstack/placement-566788757d-gkrdt" Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.087414 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-config-data\") pod \"placement-566788757d-gkrdt\" (UID: \"d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194\") " pod="openstack/placement-566788757d-gkrdt" Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.087430 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-logs\") pod \"placement-566788757d-gkrdt\" (UID: \"d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194\") " pod="openstack/placement-566788757d-gkrdt" Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.087453 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-combined-ca-bundle\") pod \"placement-566788757d-gkrdt\" (UID: \"d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194\") " pod="openstack/placement-566788757d-gkrdt" Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.087537 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-internal-tls-certs\") pod \"placement-566788757d-gkrdt\" (UID: \"d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194\") " pod="openstack/placement-566788757d-gkrdt" Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.087637 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-public-tls-certs\") pod \"placement-566788757d-gkrdt\" (UID: \"d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194\") " pod="openstack/placement-566788757d-gkrdt" Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.100276 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-combined-ca-bundle\") pod \"placement-566788757d-gkrdt\" (UID: \"d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194\") " pod="openstack/placement-566788757d-gkrdt" Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.106998 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-config-data\") pod \"placement-566788757d-gkrdt\" (UID: \"d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194\") " pod="openstack/placement-566788757d-gkrdt" Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.111760 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-public-tls-certs\") pod \"placement-566788757d-gkrdt\" (UID: \"d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194\") " pod="openstack/placement-566788757d-gkrdt" Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.113584 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-internal-tls-certs\") pod \"placement-566788757d-gkrdt\" (UID: \"d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194\") " pod="openstack/placement-566788757d-gkrdt" Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.113610 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-scripts\") pod \"placement-566788757d-gkrdt\" (UID: \"d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194\") " pod="openstack/placement-566788757d-gkrdt" Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.114499 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tmp5k\" (UniqueName: \"kubernetes.io/projected/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-kube-api-access-tmp5k\") pod \"placement-566788757d-gkrdt\" (UID: \"d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194\") " pod="openstack/placement-566788757d-gkrdt" Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.115617 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-856bccf57c-l2f82"] Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.126315 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-856bccf57c-l2f82"] Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.126430 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-856bccf57c-l2f82" Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.132323 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.132418 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.267245 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-566788757d-gkrdt" Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.290648 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6541a01b-555e-4734-8eb8-bc63625dd293-httpd-config\") pod \"neutron-856bccf57c-l2f82\" (UID: \"6541a01b-555e-4734-8eb8-bc63625dd293\") " pod="openstack/neutron-856bccf57c-l2f82" Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.290736 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/6541a01b-555e-4734-8eb8-bc63625dd293-config\") pod \"neutron-856bccf57c-l2f82\" (UID: \"6541a01b-555e-4734-8eb8-bc63625dd293\") " pod="openstack/neutron-856bccf57c-l2f82" Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.290788 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sdzjg\" (UniqueName: \"kubernetes.io/projected/6541a01b-555e-4734-8eb8-bc63625dd293-kube-api-access-sdzjg\") pod \"neutron-856bccf57c-l2f82\" (UID: \"6541a01b-555e-4734-8eb8-bc63625dd293\") " pod="openstack/neutron-856bccf57c-l2f82" Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.291162 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6541a01b-555e-4734-8eb8-bc63625dd293-combined-ca-bundle\") pod \"neutron-856bccf57c-l2f82\" (UID: \"6541a01b-555e-4734-8eb8-bc63625dd293\") " pod="openstack/neutron-856bccf57c-l2f82" Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.291249 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6541a01b-555e-4734-8eb8-bc63625dd293-internal-tls-certs\") pod \"neutron-856bccf57c-l2f82\" (UID: \"6541a01b-555e-4734-8eb8-bc63625dd293\") " pod="openstack/neutron-856bccf57c-l2f82" Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.291282 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6541a01b-555e-4734-8eb8-bc63625dd293-ovndb-tls-certs\") pod \"neutron-856bccf57c-l2f82\" (UID: \"6541a01b-555e-4734-8eb8-bc63625dd293\") " pod="openstack/neutron-856bccf57c-l2f82" Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.291402 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6541a01b-555e-4734-8eb8-bc63625dd293-public-tls-certs\") pod \"neutron-856bccf57c-l2f82\" (UID: \"6541a01b-555e-4734-8eb8-bc63625dd293\") " pod="openstack/neutron-856bccf57c-l2f82" Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.392890 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6541a01b-555e-4734-8eb8-bc63625dd293-public-tls-certs\") pod \"neutron-856bccf57c-l2f82\" (UID: \"6541a01b-555e-4734-8eb8-bc63625dd293\") " pod="openstack/neutron-856bccf57c-l2f82" Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.392952 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6541a01b-555e-4734-8eb8-bc63625dd293-httpd-config\") pod \"neutron-856bccf57c-l2f82\" (UID: 
\"6541a01b-555e-4734-8eb8-bc63625dd293\") " pod="openstack/neutron-856bccf57c-l2f82" Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.392977 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/6541a01b-555e-4734-8eb8-bc63625dd293-config\") pod \"neutron-856bccf57c-l2f82\" (UID: \"6541a01b-555e-4734-8eb8-bc63625dd293\") " pod="openstack/neutron-856bccf57c-l2f82" Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.393009 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sdzjg\" (UniqueName: \"kubernetes.io/projected/6541a01b-555e-4734-8eb8-bc63625dd293-kube-api-access-sdzjg\") pod \"neutron-856bccf57c-l2f82\" (UID: \"6541a01b-555e-4734-8eb8-bc63625dd293\") " pod="openstack/neutron-856bccf57c-l2f82" Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.393137 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6541a01b-555e-4734-8eb8-bc63625dd293-combined-ca-bundle\") pod \"neutron-856bccf57c-l2f82\" (UID: \"6541a01b-555e-4734-8eb8-bc63625dd293\") " pod="openstack/neutron-856bccf57c-l2f82" Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.393171 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6541a01b-555e-4734-8eb8-bc63625dd293-internal-tls-certs\") pod \"neutron-856bccf57c-l2f82\" (UID: \"6541a01b-555e-4734-8eb8-bc63625dd293\") " pod="openstack/neutron-856bccf57c-l2f82" Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.393195 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6541a01b-555e-4734-8eb8-bc63625dd293-ovndb-tls-certs\") pod \"neutron-856bccf57c-l2f82\" (UID: \"6541a01b-555e-4734-8eb8-bc63625dd293\") " pod="openstack/neutron-856bccf57c-l2f82" Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.398824 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6541a01b-555e-4734-8eb8-bc63625dd293-httpd-config\") pod \"neutron-856bccf57c-l2f82\" (UID: \"6541a01b-555e-4734-8eb8-bc63625dd293\") " pod="openstack/neutron-856bccf57c-l2f82" Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.398866 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6541a01b-555e-4734-8eb8-bc63625dd293-public-tls-certs\") pod \"neutron-856bccf57c-l2f82\" (UID: \"6541a01b-555e-4734-8eb8-bc63625dd293\") " pod="openstack/neutron-856bccf57c-l2f82" Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.400174 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6541a01b-555e-4734-8eb8-bc63625dd293-ovndb-tls-certs\") pod \"neutron-856bccf57c-l2f82\" (UID: \"6541a01b-555e-4734-8eb8-bc63625dd293\") " pod="openstack/neutron-856bccf57c-l2f82" Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.400746 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6541a01b-555e-4734-8eb8-bc63625dd293-combined-ca-bundle\") pod \"neutron-856bccf57c-l2f82\" (UID: \"6541a01b-555e-4734-8eb8-bc63625dd293\") " pod="openstack/neutron-856bccf57c-l2f82" Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.400884 4933 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6541a01b-555e-4734-8eb8-bc63625dd293-internal-tls-certs\") pod \"neutron-856bccf57c-l2f82\" (UID: \"6541a01b-555e-4734-8eb8-bc63625dd293\") " pod="openstack/neutron-856bccf57c-l2f82" Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.407230 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/6541a01b-555e-4734-8eb8-bc63625dd293-config\") pod \"neutron-856bccf57c-l2f82\" (UID: \"6541a01b-555e-4734-8eb8-bc63625dd293\") " pod="openstack/neutron-856bccf57c-l2f82" Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.410661 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sdzjg\" (UniqueName: \"kubernetes.io/projected/6541a01b-555e-4734-8eb8-bc63625dd293-kube-api-access-sdzjg\") pod \"neutron-856bccf57c-l2f82\" (UID: \"6541a01b-555e-4734-8eb8-bc63625dd293\") " pod="openstack/neutron-856bccf57c-l2f82" Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.520958 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-856bccf57c-l2f82" Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.696622 4933 generic.go:334] "Generic (PLEG): container finished" podID="1b555a2a-0c59-4b4c-901e-6b160e5d57a1" containerID="25256a7adb222867f28f0101189b07c7deb83968a46968e6cf7492f82b35018b" exitCode=0 Jan 22 06:05:38 crc kubenswrapper[4933]: I0122 06:05:38.696697 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-bvb58" event={"ID":"1b555a2a-0c59-4b4c-901e-6b160e5d57a1","Type":"ContainerDied","Data":"25256a7adb222867f28f0101189b07c7deb83968a46968e6cf7492f82b35018b"} Jan 22 06:05:40 crc kubenswrapper[4933]: I0122 06:05:40.950912 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-bvb58" Jan 22 06:05:41 crc kubenswrapper[4933]: I0122 06:05:41.139998 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1b555a2a-0c59-4b4c-901e-6b160e5d57a1-scripts\") pod \"1b555a2a-0c59-4b4c-901e-6b160e5d57a1\" (UID: \"1b555a2a-0c59-4b4c-901e-6b160e5d57a1\") " Jan 22 06:05:41 crc kubenswrapper[4933]: I0122 06:05:41.140046 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/1b555a2a-0c59-4b4c-901e-6b160e5d57a1-credential-keys\") pod \"1b555a2a-0c59-4b4c-901e-6b160e5d57a1\" (UID: \"1b555a2a-0c59-4b4c-901e-6b160e5d57a1\") " Jan 22 06:05:41 crc kubenswrapper[4933]: I0122 06:05:41.140123 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b555a2a-0c59-4b4c-901e-6b160e5d57a1-config-data\") pod \"1b555a2a-0c59-4b4c-901e-6b160e5d57a1\" (UID: \"1b555a2a-0c59-4b4c-901e-6b160e5d57a1\") " Jan 22 06:05:41 crc kubenswrapper[4933]: I0122 06:05:41.140177 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1b555a2a-0c59-4b4c-901e-6b160e5d57a1-fernet-keys\") pod \"1b555a2a-0c59-4b4c-901e-6b160e5d57a1\" (UID: \"1b555a2a-0c59-4b4c-901e-6b160e5d57a1\") " Jan 22 06:05:41 crc kubenswrapper[4933]: I0122 06:05:41.140228 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5ttbj\" (UniqueName: \"kubernetes.io/projected/1b555a2a-0c59-4b4c-901e-6b160e5d57a1-kube-api-access-5ttbj\") pod \"1b555a2a-0c59-4b4c-901e-6b160e5d57a1\" (UID: \"1b555a2a-0c59-4b4c-901e-6b160e5d57a1\") " Jan 22 06:05:41 crc kubenswrapper[4933]: I0122 06:05:41.140386 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b555a2a-0c59-4b4c-901e-6b160e5d57a1-combined-ca-bundle\") pod \"1b555a2a-0c59-4b4c-901e-6b160e5d57a1\" (UID: \"1b555a2a-0c59-4b4c-901e-6b160e5d57a1\") " Jan 22 06:05:41 crc kubenswrapper[4933]: I0122 06:05:41.145419 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b555a2a-0c59-4b4c-901e-6b160e5d57a1-scripts" (OuterVolumeSpecName: "scripts") pod "1b555a2a-0c59-4b4c-901e-6b160e5d57a1" (UID: "1b555a2a-0c59-4b4c-901e-6b160e5d57a1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:05:41 crc kubenswrapper[4933]: I0122 06:05:41.146030 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b555a2a-0c59-4b4c-901e-6b160e5d57a1-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "1b555a2a-0c59-4b4c-901e-6b160e5d57a1" (UID: "1b555a2a-0c59-4b4c-901e-6b160e5d57a1"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:05:41 crc kubenswrapper[4933]: I0122 06:05:41.153344 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1b555a2a-0c59-4b4c-901e-6b160e5d57a1-kube-api-access-5ttbj" (OuterVolumeSpecName: "kube-api-access-5ttbj") pod "1b555a2a-0c59-4b4c-901e-6b160e5d57a1" (UID: "1b555a2a-0c59-4b4c-901e-6b160e5d57a1"). InnerVolumeSpecName "kube-api-access-5ttbj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:05:41 crc kubenswrapper[4933]: I0122 06:05:41.159185 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b555a2a-0c59-4b4c-901e-6b160e5d57a1-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "1b555a2a-0c59-4b4c-901e-6b160e5d57a1" (UID: "1b555a2a-0c59-4b4c-901e-6b160e5d57a1"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:05:41 crc kubenswrapper[4933]: I0122 06:05:41.174120 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b555a2a-0c59-4b4c-901e-6b160e5d57a1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1b555a2a-0c59-4b4c-901e-6b160e5d57a1" (UID: "1b555a2a-0c59-4b4c-901e-6b160e5d57a1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:05:41 crc kubenswrapper[4933]: I0122 06:05:41.176630 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b555a2a-0c59-4b4c-901e-6b160e5d57a1-config-data" (OuterVolumeSpecName: "config-data") pod "1b555a2a-0c59-4b4c-901e-6b160e5d57a1" (UID: "1b555a2a-0c59-4b4c-901e-6b160e5d57a1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:05:41 crc kubenswrapper[4933]: I0122 06:05:41.242714 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b555a2a-0c59-4b4c-901e-6b160e5d57a1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:41 crc kubenswrapper[4933]: I0122 06:05:41.242955 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1b555a2a-0c59-4b4c-901e-6b160e5d57a1-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:41 crc kubenswrapper[4933]: I0122 06:05:41.242966 4933 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/1b555a2a-0c59-4b4c-901e-6b160e5d57a1-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:41 crc kubenswrapper[4933]: I0122 06:05:41.242976 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b555a2a-0c59-4b4c-901e-6b160e5d57a1-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:41 crc kubenswrapper[4933]: I0122 06:05:41.242984 4933 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1b555a2a-0c59-4b4c-901e-6b160e5d57a1-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:41 crc kubenswrapper[4933]: I0122 06:05:41.242991 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5ttbj\" (UniqueName: \"kubernetes.io/projected/1b555a2a-0c59-4b4c-901e-6b160e5d57a1-kube-api-access-5ttbj\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:41 crc kubenswrapper[4933]: I0122 06:05:41.393046 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-856bccf57c-l2f82"] Jan 22 06:05:41 crc kubenswrapper[4933]: W0122 06:05:41.395884 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6541a01b_555e_4734_8eb8_bc63625dd293.slice/crio-2fa8955d0a6794d8893c22c2a462aa4524ef9380b44afa5d456b1c2baf2e7fe6 WatchSource:0}: Error finding container 2fa8955d0a6794d8893c22c2a462aa4524ef9380b44afa5d456b1c2baf2e7fe6: Status 404 
returned error can't find the container with id 2fa8955d0a6794d8893c22c2a462aa4524ef9380b44afa5d456b1c2baf2e7fe6 Jan 22 06:05:41 crc kubenswrapper[4933]: I0122 06:05:41.412738 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-566788757d-gkrdt"] Jan 22 06:05:41 crc kubenswrapper[4933]: W0122 06:05:41.414508 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd1bcdbaa_0f6d_4bc8_95ec_892db9c8d194.slice/crio-0a10a03f273eed558bb474577d5acd3969def978c71514dbe156b0931a26f53b WatchSource:0}: Error finding container 0a10a03f273eed558bb474577d5acd3969def978c71514dbe156b0931a26f53b: Status 404 returned error can't find the container with id 0a10a03f273eed558bb474577d5acd3969def978c71514dbe156b0931a26f53b Jan 22 06:05:41 crc kubenswrapper[4933]: I0122 06:05:41.733030 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-566788757d-gkrdt" event={"ID":"d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194","Type":"ContainerStarted","Data":"9ef7106268a25e14e2c1b4b8ae2a3405385542e15442cd78aec71a27dca326ba"} Jan 22 06:05:41 crc kubenswrapper[4933]: I0122 06:05:41.733408 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-566788757d-gkrdt" event={"ID":"d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194","Type":"ContainerStarted","Data":"0a10a03f273eed558bb474577d5acd3969def978c71514dbe156b0931a26f53b"} Jan 22 06:05:41 crc kubenswrapper[4933]: I0122 06:05:41.734879 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-bvb58" event={"ID":"1b555a2a-0c59-4b4c-901e-6b160e5d57a1","Type":"ContainerDied","Data":"5ed27c1901802da6df63e4a53c32da58d4f7510ad015b8758249439f0dd65d5f"} Jan 22 06:05:41 crc kubenswrapper[4933]: I0122 06:05:41.734904 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-bvb58" Jan 22 06:05:41 crc kubenswrapper[4933]: I0122 06:05:41.734918 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5ed27c1901802da6df63e4a53c32da58d4f7510ad015b8758249439f0dd65d5f" Jan 22 06:05:41 crc kubenswrapper[4933]: I0122 06:05:41.736654 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-856bccf57c-l2f82" event={"ID":"6541a01b-555e-4734-8eb8-bc63625dd293","Type":"ContainerStarted","Data":"f4e82b1bcf25275851a5aef8022cd4bdd910f6f9ab17a41f7a84149b88a2abdc"} Jan 22 06:05:41 crc kubenswrapper[4933]: I0122 06:05:41.736693 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-856bccf57c-l2f82" event={"ID":"6541a01b-555e-4734-8eb8-bc63625dd293","Type":"ContainerStarted","Data":"2fa8955d0a6794d8893c22c2a462aa4524ef9380b44afa5d456b1c2baf2e7fe6"} Jan 22 06:05:41 crc kubenswrapper[4933]: I0122 06:05:41.741936 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a7d72c18-1516-44a5-ad92-f367c93280b1","Type":"ContainerStarted","Data":"89bcc286fb4735a8fa8fb0151a0f607159a60ad7fa26a1b4794fb7ae2e866707"} Jan 22 06:05:42 crc kubenswrapper[4933]: I0122 06:05:42.058019 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-7d459c58f9-bc2hf"] Jan 22 06:05:42 crc kubenswrapper[4933]: E0122 06:05:42.058480 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b555a2a-0c59-4b4c-901e-6b160e5d57a1" containerName="keystone-bootstrap" Jan 22 06:05:42 crc kubenswrapper[4933]: I0122 06:05:42.058499 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b555a2a-0c59-4b4c-901e-6b160e5d57a1" containerName="keystone-bootstrap" Jan 22 06:05:42 crc kubenswrapper[4933]: I0122 06:05:42.058752 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b555a2a-0c59-4b4c-901e-6b160e5d57a1" containerName="keystone-bootstrap" Jan 22 06:05:42 crc kubenswrapper[4933]: I0122 06:05:42.059446 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-7d459c58f9-bc2hf" Jan 22 06:05:42 crc kubenswrapper[4933]: I0122 06:05:42.065712 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 22 06:05:42 crc kubenswrapper[4933]: I0122 06:05:42.065742 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 22 06:05:42 crc kubenswrapper[4933]: I0122 06:05:42.065773 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 22 06:05:42 crc kubenswrapper[4933]: I0122 06:05:42.065713 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Jan 22 06:05:42 crc kubenswrapper[4933]: I0122 06:05:42.070097 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-vftwj" Jan 22 06:05:42 crc kubenswrapper[4933]: I0122 06:05:42.070261 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Jan 22 06:05:42 crc kubenswrapper[4933]: I0122 06:05:42.084004 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-7d459c58f9-bc2hf"] Jan 22 06:05:42 crc kubenswrapper[4933]: I0122 06:05:42.158997 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lb62m\" (UniqueName: \"kubernetes.io/projected/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-kube-api-access-lb62m\") pod \"keystone-7d459c58f9-bc2hf\" (UID: \"c16f2ab8-68b3-43fa-a862-c182aaa3dc23\") " pod="openstack/keystone-7d459c58f9-bc2hf" Jan 22 06:05:42 crc kubenswrapper[4933]: I0122 06:05:42.159130 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-internal-tls-certs\") pod \"keystone-7d459c58f9-bc2hf\" (UID: \"c16f2ab8-68b3-43fa-a862-c182aaa3dc23\") " pod="openstack/keystone-7d459c58f9-bc2hf" Jan 22 06:05:42 crc kubenswrapper[4933]: I0122 06:05:42.159165 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-public-tls-certs\") pod \"keystone-7d459c58f9-bc2hf\" (UID: \"c16f2ab8-68b3-43fa-a862-c182aaa3dc23\") " pod="openstack/keystone-7d459c58f9-bc2hf" Jan 22 06:05:42 crc kubenswrapper[4933]: I0122 06:05:42.159189 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-combined-ca-bundle\") pod \"keystone-7d459c58f9-bc2hf\" (UID: \"c16f2ab8-68b3-43fa-a862-c182aaa3dc23\") " pod="openstack/keystone-7d459c58f9-bc2hf" Jan 22 06:05:42 crc kubenswrapper[4933]: I0122 06:05:42.159210 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-scripts\") pod \"keystone-7d459c58f9-bc2hf\" (UID: \"c16f2ab8-68b3-43fa-a862-c182aaa3dc23\") " pod="openstack/keystone-7d459c58f9-bc2hf" Jan 22 06:05:42 crc kubenswrapper[4933]: I0122 06:05:42.159412 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-credential-keys\") pod \"keystone-7d459c58f9-bc2hf\" 
(UID: \"c16f2ab8-68b3-43fa-a862-c182aaa3dc23\") " pod="openstack/keystone-7d459c58f9-bc2hf" Jan 22 06:05:42 crc kubenswrapper[4933]: I0122 06:05:42.159629 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-fernet-keys\") pod \"keystone-7d459c58f9-bc2hf\" (UID: \"c16f2ab8-68b3-43fa-a862-c182aaa3dc23\") " pod="openstack/keystone-7d459c58f9-bc2hf" Jan 22 06:05:42 crc kubenswrapper[4933]: I0122 06:05:42.159681 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-config-data\") pod \"keystone-7d459c58f9-bc2hf\" (UID: \"c16f2ab8-68b3-43fa-a862-c182aaa3dc23\") " pod="openstack/keystone-7d459c58f9-bc2hf" Jan 22 06:05:42 crc kubenswrapper[4933]: I0122 06:05:42.262855 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-credential-keys\") pod \"keystone-7d459c58f9-bc2hf\" (UID: \"c16f2ab8-68b3-43fa-a862-c182aaa3dc23\") " pod="openstack/keystone-7d459c58f9-bc2hf" Jan 22 06:05:42 crc kubenswrapper[4933]: I0122 06:05:42.263303 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-fernet-keys\") pod \"keystone-7d459c58f9-bc2hf\" (UID: \"c16f2ab8-68b3-43fa-a862-c182aaa3dc23\") " pod="openstack/keystone-7d459c58f9-bc2hf" Jan 22 06:05:42 crc kubenswrapper[4933]: I0122 06:05:42.263916 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-config-data\") pod \"keystone-7d459c58f9-bc2hf\" (UID: \"c16f2ab8-68b3-43fa-a862-c182aaa3dc23\") " pod="openstack/keystone-7d459c58f9-bc2hf" Jan 22 06:05:42 crc kubenswrapper[4933]: I0122 06:05:42.264100 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lb62m\" (UniqueName: \"kubernetes.io/projected/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-kube-api-access-lb62m\") pod \"keystone-7d459c58f9-bc2hf\" (UID: \"c16f2ab8-68b3-43fa-a862-c182aaa3dc23\") " pod="openstack/keystone-7d459c58f9-bc2hf" Jan 22 06:05:42 crc kubenswrapper[4933]: I0122 06:05:42.265697 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-internal-tls-certs\") pod \"keystone-7d459c58f9-bc2hf\" (UID: \"c16f2ab8-68b3-43fa-a862-c182aaa3dc23\") " pod="openstack/keystone-7d459c58f9-bc2hf" Jan 22 06:05:42 crc kubenswrapper[4933]: I0122 06:05:42.265803 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-public-tls-certs\") pod \"keystone-7d459c58f9-bc2hf\" (UID: \"c16f2ab8-68b3-43fa-a862-c182aaa3dc23\") " pod="openstack/keystone-7d459c58f9-bc2hf" Jan 22 06:05:42 crc kubenswrapper[4933]: I0122 06:05:42.265877 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-combined-ca-bundle\") pod \"keystone-7d459c58f9-bc2hf\" (UID: \"c16f2ab8-68b3-43fa-a862-c182aaa3dc23\") " 
pod="openstack/keystone-7d459c58f9-bc2hf" Jan 22 06:05:42 crc kubenswrapper[4933]: I0122 06:05:42.266594 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-scripts\") pod \"keystone-7d459c58f9-bc2hf\" (UID: \"c16f2ab8-68b3-43fa-a862-c182aaa3dc23\") " pod="openstack/keystone-7d459c58f9-bc2hf" Jan 22 06:05:42 crc kubenswrapper[4933]: I0122 06:05:42.269892 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-fernet-keys\") pod \"keystone-7d459c58f9-bc2hf\" (UID: \"c16f2ab8-68b3-43fa-a862-c182aaa3dc23\") " pod="openstack/keystone-7d459c58f9-bc2hf" Jan 22 06:05:42 crc kubenswrapper[4933]: I0122 06:05:42.271333 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-internal-tls-certs\") pod \"keystone-7d459c58f9-bc2hf\" (UID: \"c16f2ab8-68b3-43fa-a862-c182aaa3dc23\") " pod="openstack/keystone-7d459c58f9-bc2hf" Jan 22 06:05:42 crc kubenswrapper[4933]: I0122 06:05:42.272473 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-config-data\") pod \"keystone-7d459c58f9-bc2hf\" (UID: \"c16f2ab8-68b3-43fa-a862-c182aaa3dc23\") " pod="openstack/keystone-7d459c58f9-bc2hf" Jan 22 06:05:42 crc kubenswrapper[4933]: I0122 06:05:42.273569 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-credential-keys\") pod \"keystone-7d459c58f9-bc2hf\" (UID: \"c16f2ab8-68b3-43fa-a862-c182aaa3dc23\") " pod="openstack/keystone-7d459c58f9-bc2hf" Jan 22 06:05:42 crc kubenswrapper[4933]: I0122 06:05:42.273894 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-scripts\") pod \"keystone-7d459c58f9-bc2hf\" (UID: \"c16f2ab8-68b3-43fa-a862-c182aaa3dc23\") " pod="openstack/keystone-7d459c58f9-bc2hf" Jan 22 06:05:42 crc kubenswrapper[4933]: I0122 06:05:42.274721 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-combined-ca-bundle\") pod \"keystone-7d459c58f9-bc2hf\" (UID: \"c16f2ab8-68b3-43fa-a862-c182aaa3dc23\") " pod="openstack/keystone-7d459c58f9-bc2hf" Jan 22 06:05:42 crc kubenswrapper[4933]: I0122 06:05:42.290629 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lb62m\" (UniqueName: \"kubernetes.io/projected/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-kube-api-access-lb62m\") pod \"keystone-7d459c58f9-bc2hf\" (UID: \"c16f2ab8-68b3-43fa-a862-c182aaa3dc23\") " pod="openstack/keystone-7d459c58f9-bc2hf" Jan 22 06:05:42 crc kubenswrapper[4933]: I0122 06:05:42.293257 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-public-tls-certs\") pod \"keystone-7d459c58f9-bc2hf\" (UID: \"c16f2ab8-68b3-43fa-a862-c182aaa3dc23\") " pod="openstack/keystone-7d459c58f9-bc2hf" Jan 22 06:05:42 crc kubenswrapper[4933]: I0122 06:05:42.417110 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-7d459c58f9-bc2hf" Jan 22 06:05:42 crc kubenswrapper[4933]: I0122 06:05:42.913044 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-7d459c58f9-bc2hf"] Jan 22 06:05:42 crc kubenswrapper[4933]: W0122 06:05:42.916712 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc16f2ab8_68b3_43fa_a862_c182aaa3dc23.slice/crio-ac8e3f1db194ec4cc7df2929d79be99c4c7f4bcbf1e2b486e2ea5a0322300503 WatchSource:0}: Error finding container ac8e3f1db194ec4cc7df2929d79be99c4c7f4bcbf1e2b486e2ea5a0322300503: Status 404 returned error can't find the container with id ac8e3f1db194ec4cc7df2929d79be99c4c7f4bcbf1e2b486e2ea5a0322300503 Jan 22 06:05:43 crc kubenswrapper[4933]: I0122 06:05:43.761889 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7d459c58f9-bc2hf" event={"ID":"c16f2ab8-68b3-43fa-a862-c182aaa3dc23","Type":"ContainerStarted","Data":"ac8e3f1db194ec4cc7df2929d79be99c4c7f4bcbf1e2b486e2ea5a0322300503"} Jan 22 06:05:43 crc kubenswrapper[4933]: I0122 06:05:43.848547 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 22 06:05:43 crc kubenswrapper[4933]: I0122 06:05:43.848649 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 22 06:05:43 crc kubenswrapper[4933]: I0122 06:05:43.884378 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 22 06:05:43 crc kubenswrapper[4933]: I0122 06:05:43.898444 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 22 06:05:44 crc kubenswrapper[4933]: I0122 06:05:44.773155 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 22 06:05:44 crc kubenswrapper[4933]: I0122 06:05:44.773243 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 22 06:05:44 crc kubenswrapper[4933]: I0122 06:05:44.867346 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 22 06:05:44 crc kubenswrapper[4933]: I0122 06:05:44.867423 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 22 06:05:44 crc kubenswrapper[4933]: I0122 06:05:44.923838 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 22 06:05:44 crc kubenswrapper[4933]: I0122 06:05:44.971493 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 22 06:05:45 crc kubenswrapper[4933]: I0122 06:05:45.224245 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6b9c8b59c-cbvvm" Jan 22 06:05:45 crc kubenswrapper[4933]: I0122 06:05:45.300495 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5dc4fcdbc-8rcfp"] Jan 22 06:05:45 crc kubenswrapper[4933]: I0122 06:05:45.300726 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5dc4fcdbc-8rcfp" podUID="db9370fd-7e89-47ab-8238-7d24abeb981f" containerName="dnsmasq-dns" 
containerID="cri-o://8fd13567d632d5099d30b19499d649a020aef880efa93a39cddc58f7ca281d15" gracePeriod=10 Jan 22 06:05:45 crc kubenswrapper[4933]: I0122 06:05:45.781770 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-856bccf57c-l2f82" event={"ID":"6541a01b-555e-4734-8eb8-bc63625dd293","Type":"ContainerStarted","Data":"57d4c641b72ffcca676596e412bcc902119cbf32a7b4fb39d8930b32680d0018"} Jan 22 06:05:45 crc kubenswrapper[4933]: I0122 06:05:45.781850 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-856bccf57c-l2f82" Jan 22 06:05:45 crc kubenswrapper[4933]: I0122 06:05:45.784436 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-566788757d-gkrdt" event={"ID":"d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194","Type":"ContainerStarted","Data":"9656bc111246901626a3ae6303bb9f3055280024dabd06af3b590f5dad87fafe"} Jan 22 06:05:45 crc kubenswrapper[4933]: I0122 06:05:45.784826 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-566788757d-gkrdt" Jan 22 06:05:45 crc kubenswrapper[4933]: I0122 06:05:45.784914 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-566788757d-gkrdt" Jan 22 06:05:45 crc kubenswrapper[4933]: I0122 06:05:45.786646 4933 generic.go:334] "Generic (PLEG): container finished" podID="db9370fd-7e89-47ab-8238-7d24abeb981f" containerID="8fd13567d632d5099d30b19499d649a020aef880efa93a39cddc58f7ca281d15" exitCode=0 Jan 22 06:05:45 crc kubenswrapper[4933]: I0122 06:05:45.786696 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5dc4fcdbc-8rcfp" event={"ID":"db9370fd-7e89-47ab-8238-7d24abeb981f","Type":"ContainerDied","Data":"8fd13567d632d5099d30b19499d649a020aef880efa93a39cddc58f7ca281d15"} Jan 22 06:05:45 crc kubenswrapper[4933]: I0122 06:05:45.787363 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 22 06:05:45 crc kubenswrapper[4933]: I0122 06:05:45.787420 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 22 06:05:45 crc kubenswrapper[4933]: I0122 06:05:45.816958 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-856bccf57c-l2f82" podStartSLOduration=7.816938034 podStartE2EDuration="7.816938034s" podCreationTimestamp="2026-01-22 06:05:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:05:45.808179014 +0000 UTC m=+1193.645304377" watchObservedRunningTime="2026-01-22 06:05:45.816938034 +0000 UTC m=+1193.654063377" Jan 22 06:05:45 crc kubenswrapper[4933]: I0122 06:05:45.842734 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-566788757d-gkrdt" podStartSLOduration=8.842708429 podStartE2EDuration="8.842708429s" podCreationTimestamp="2026-01-22 06:05:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:05:45.827722173 +0000 UTC m=+1193.664847526" watchObservedRunningTime="2026-01-22 06:05:45.842708429 +0000 UTC m=+1193.679833812" Jan 22 06:05:46 crc kubenswrapper[4933]: I0122 06:05:46.797803 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5dc4fcdbc-8rcfp" 
event={"ID":"db9370fd-7e89-47ab-8238-7d24abeb981f","Type":"ContainerDied","Data":"8c94eb1e4f9b229976e510ec63a177c75444d8b05c76ca65d8c57488369f3093"} Jan 22 06:05:46 crc kubenswrapper[4933]: I0122 06:05:46.798154 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8c94eb1e4f9b229976e510ec63a177c75444d8b05c76ca65d8c57488369f3093" Jan 22 06:05:46 crc kubenswrapper[4933]: I0122 06:05:46.855486 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5dc4fcdbc-8rcfp" Jan 22 06:05:46 crc kubenswrapper[4933]: I0122 06:05:46.966338 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/db9370fd-7e89-47ab-8238-7d24abeb981f-dns-swift-storage-0\") pod \"db9370fd-7e89-47ab-8238-7d24abeb981f\" (UID: \"db9370fd-7e89-47ab-8238-7d24abeb981f\") " Jan 22 06:05:46 crc kubenswrapper[4933]: I0122 06:05:46.966412 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bbgh2\" (UniqueName: \"kubernetes.io/projected/db9370fd-7e89-47ab-8238-7d24abeb981f-kube-api-access-bbgh2\") pod \"db9370fd-7e89-47ab-8238-7d24abeb981f\" (UID: \"db9370fd-7e89-47ab-8238-7d24abeb981f\") " Jan 22 06:05:46 crc kubenswrapper[4933]: I0122 06:05:46.966487 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/db9370fd-7e89-47ab-8238-7d24abeb981f-ovsdbserver-sb\") pod \"db9370fd-7e89-47ab-8238-7d24abeb981f\" (UID: \"db9370fd-7e89-47ab-8238-7d24abeb981f\") " Jan 22 06:05:46 crc kubenswrapper[4933]: I0122 06:05:46.966579 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/db9370fd-7e89-47ab-8238-7d24abeb981f-ovsdbserver-nb\") pod \"db9370fd-7e89-47ab-8238-7d24abeb981f\" (UID: \"db9370fd-7e89-47ab-8238-7d24abeb981f\") " Jan 22 06:05:46 crc kubenswrapper[4933]: I0122 06:05:46.966665 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db9370fd-7e89-47ab-8238-7d24abeb981f-config\") pod \"db9370fd-7e89-47ab-8238-7d24abeb981f\" (UID: \"db9370fd-7e89-47ab-8238-7d24abeb981f\") " Jan 22 06:05:46 crc kubenswrapper[4933]: I0122 06:05:46.966703 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/db9370fd-7e89-47ab-8238-7d24abeb981f-dns-svc\") pod \"db9370fd-7e89-47ab-8238-7d24abeb981f\" (UID: \"db9370fd-7e89-47ab-8238-7d24abeb981f\") " Jan 22 06:05:46 crc kubenswrapper[4933]: I0122 06:05:46.979193 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db9370fd-7e89-47ab-8238-7d24abeb981f-kube-api-access-bbgh2" (OuterVolumeSpecName: "kube-api-access-bbgh2") pod "db9370fd-7e89-47ab-8238-7d24abeb981f" (UID: "db9370fd-7e89-47ab-8238-7d24abeb981f"). InnerVolumeSpecName "kube-api-access-bbgh2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:05:47 crc kubenswrapper[4933]: I0122 06:05:47.068306 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bbgh2\" (UniqueName: \"kubernetes.io/projected/db9370fd-7e89-47ab-8238-7d24abeb981f-kube-api-access-bbgh2\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:47 crc kubenswrapper[4933]: I0122 06:05:47.310900 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db9370fd-7e89-47ab-8238-7d24abeb981f-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "db9370fd-7e89-47ab-8238-7d24abeb981f" (UID: "db9370fd-7e89-47ab-8238-7d24abeb981f"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:05:47 crc kubenswrapper[4933]: I0122 06:05:47.311112 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db9370fd-7e89-47ab-8238-7d24abeb981f-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "db9370fd-7e89-47ab-8238-7d24abeb981f" (UID: "db9370fd-7e89-47ab-8238-7d24abeb981f"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:05:47 crc kubenswrapper[4933]: I0122 06:05:47.311123 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db9370fd-7e89-47ab-8238-7d24abeb981f-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "db9370fd-7e89-47ab-8238-7d24abeb981f" (UID: "db9370fd-7e89-47ab-8238-7d24abeb981f"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:05:47 crc kubenswrapper[4933]: I0122 06:05:47.311295 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db9370fd-7e89-47ab-8238-7d24abeb981f-config" (OuterVolumeSpecName: "config") pod "db9370fd-7e89-47ab-8238-7d24abeb981f" (UID: "db9370fd-7e89-47ab-8238-7d24abeb981f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:05:47 crc kubenswrapper[4933]: I0122 06:05:47.311498 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db9370fd-7e89-47ab-8238-7d24abeb981f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "db9370fd-7e89-47ab-8238-7d24abeb981f" (UID: "db9370fd-7e89-47ab-8238-7d24abeb981f"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:05:47 crc kubenswrapper[4933]: I0122 06:05:47.345863 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 22 06:05:47 crc kubenswrapper[4933]: I0122 06:05:47.345964 4933 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 22 06:05:47 crc kubenswrapper[4933]: I0122 06:05:47.373561 4933 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/db9370fd-7e89-47ab-8238-7d24abeb981f-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:47 crc kubenswrapper[4933]: I0122 06:05:47.373610 4933 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/db9370fd-7e89-47ab-8238-7d24abeb981f-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:47 crc kubenswrapper[4933]: I0122 06:05:47.373621 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/db9370fd-7e89-47ab-8238-7d24abeb981f-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:47 crc kubenswrapper[4933]: I0122 06:05:47.373630 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/db9370fd-7e89-47ab-8238-7d24abeb981f-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:47 crc kubenswrapper[4933]: I0122 06:05:47.373640 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db9370fd-7e89-47ab-8238-7d24abeb981f-config\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:47 crc kubenswrapper[4933]: I0122 06:05:47.419834 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 22 06:05:47 crc kubenswrapper[4933]: I0122 06:05:47.805067 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5dc4fcdbc-8rcfp" Jan 22 06:05:47 crc kubenswrapper[4933]: I0122 06:05:47.855936 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5dc4fcdbc-8rcfp"] Jan 22 06:05:47 crc kubenswrapper[4933]: I0122 06:05:47.862210 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5dc4fcdbc-8rcfp"] Jan 22 06:05:47 crc kubenswrapper[4933]: I0122 06:05:47.957113 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 22 06:05:47 crc kubenswrapper[4933]: I0122 06:05:47.957221 4933 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 22 06:05:47 crc kubenswrapper[4933]: I0122 06:05:47.958264 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 22 06:05:48 crc kubenswrapper[4933]: I0122 06:05:48.503483 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="db9370fd-7e89-47ab-8238-7d24abeb981f" path="/var/lib/kubelet/pods/db9370fd-7e89-47ab-8238-7d24abeb981f/volumes" Jan 22 06:05:49 crc kubenswrapper[4933]: I0122 06:05:49.822174 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7d459c58f9-bc2hf" event={"ID":"c16f2ab8-68b3-43fa-a862-c182aaa3dc23","Type":"ContainerStarted","Data":"abbc72b7cf0aa36b4904038dc83404e06c706971f42e299cdcaa546f6ea3f0e7"} Jan 22 06:05:49 crc kubenswrapper[4933]: I0122 06:05:49.956172 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-566788757d-gkrdt" Jan 22 06:05:50 crc kubenswrapper[4933]: I0122 06:05:50.829738 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-7d459c58f9-bc2hf" Jan 22 06:05:50 crc kubenswrapper[4933]: I0122 06:05:50.858734 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-7d459c58f9-bc2hf" podStartSLOduration=8.85870761 podStartE2EDuration="8.85870761s" podCreationTimestamp="2026-01-22 06:05:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:05:50.848223017 +0000 UTC m=+1198.685348390" watchObservedRunningTime="2026-01-22 06:05:50.85870761 +0000 UTC m=+1198.695832973" Jan 22 06:05:54 crc kubenswrapper[4933]: I0122 06:05:54.873846 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a7d72c18-1516-44a5-ad92-f367c93280b1","Type":"ContainerStarted","Data":"cb603c27c531e01097d003730c1f696a7553915e5e88064a96e1cef68a5b7863"} Jan 22 06:05:54 crc kubenswrapper[4933]: I0122 06:05:54.874291 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a7d72c18-1516-44a5-ad92-f367c93280b1" containerName="ceilometer-central-agent" containerID="cri-o://b29d41c6c3906eb46f5e625db215c2c233737c530c75840950c73cff63017a43" gracePeriod=30 Jan 22 06:05:54 crc kubenswrapper[4933]: I0122 06:05:54.874317 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 22 06:05:54 crc kubenswrapper[4933]: I0122 06:05:54.874331 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a7d72c18-1516-44a5-ad92-f367c93280b1" containerName="proxy-httpd" containerID="cri-o://cb603c27c531e01097d003730c1f696a7553915e5e88064a96e1cef68a5b7863" gracePeriod=30 Jan 22 
06:05:54 crc kubenswrapper[4933]: I0122 06:05:54.874350 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a7d72c18-1516-44a5-ad92-f367c93280b1" containerName="sg-core" containerID="cri-o://89bcc286fb4735a8fa8fb0151a0f607159a60ad7fa26a1b4794fb7ae2e866707" gracePeriod=30 Jan 22 06:05:54 crc kubenswrapper[4933]: I0122 06:05:54.874391 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a7d72c18-1516-44a5-ad92-f367c93280b1" containerName="ceilometer-notification-agent" containerID="cri-o://396cdd5329e8a64271bc121815e8795ec8443f663a5b2fa0cebe713443617e70" gracePeriod=30 Jan 22 06:05:54 crc kubenswrapper[4933]: I0122 06:05:54.878198 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-gwqkb" event={"ID":"c6ae84cf-ec9a-42e4-9c55-035d9accb4b2","Type":"ContainerStarted","Data":"e0226956031a6164e10e35e1b1744abeea73d2f85ab0cdab0974600cd35c965b"} Jan 22 06:05:54 crc kubenswrapper[4933]: I0122 06:05:54.907343 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.806595367 podStartE2EDuration="46.907323866s" podCreationTimestamp="2026-01-22 06:05:08 +0000 UTC" firstStartedPulling="2026-01-22 06:05:10.375709771 +0000 UTC m=+1158.212835124" lastFinishedPulling="2026-01-22 06:05:54.47643827 +0000 UTC m=+1202.313563623" observedRunningTime="2026-01-22 06:05:54.895819778 +0000 UTC m=+1202.732945131" watchObservedRunningTime="2026-01-22 06:05:54.907323866 +0000 UTC m=+1202.744449219" Jan 22 06:05:54 crc kubenswrapper[4933]: I0122 06:05:54.942830 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-gwqkb" podStartSLOduration=3.417315253 podStartE2EDuration="46.942811434s" podCreationTimestamp="2026-01-22 06:05:08 +0000 UTC" firstStartedPulling="2026-01-22 06:05:10.943294818 +0000 UTC m=+1158.780420171" lastFinishedPulling="2026-01-22 06:05:54.468791009 +0000 UTC m=+1202.305916352" observedRunningTime="2026-01-22 06:05:54.935988464 +0000 UTC m=+1202.773113837" watchObservedRunningTime="2026-01-22 06:05:54.942811434 +0000 UTC m=+1202.779936787" Jan 22 06:05:55 crc kubenswrapper[4933]: I0122 06:05:55.893129 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-92ghq" event={"ID":"1086bd39-4637-4123-a7b2-d85d3a603dd5","Type":"ContainerStarted","Data":"f2721000af5a120f7871e7c19c9dc7ce44e29a702f48b5d2b4de79c19cb595fd"} Jan 22 06:05:55 crc kubenswrapper[4933]: I0122 06:05:55.896520 4933 generic.go:334] "Generic (PLEG): container finished" podID="a7d72c18-1516-44a5-ad92-f367c93280b1" containerID="cb603c27c531e01097d003730c1f696a7553915e5e88064a96e1cef68a5b7863" exitCode=0 Jan 22 06:05:55 crc kubenswrapper[4933]: I0122 06:05:55.896576 4933 generic.go:334] "Generic (PLEG): container finished" podID="a7d72c18-1516-44a5-ad92-f367c93280b1" containerID="89bcc286fb4735a8fa8fb0151a0f607159a60ad7fa26a1b4794fb7ae2e866707" exitCode=2 Jan 22 06:05:55 crc kubenswrapper[4933]: I0122 06:05:55.896586 4933 generic.go:334] "Generic (PLEG): container finished" podID="a7d72c18-1516-44a5-ad92-f367c93280b1" containerID="b29d41c6c3906eb46f5e625db215c2c233737c530c75840950c73cff63017a43" exitCode=0 Jan 22 06:05:55 crc kubenswrapper[4933]: I0122 06:05:55.896603 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"a7d72c18-1516-44a5-ad92-f367c93280b1","Type":"ContainerDied","Data":"cb603c27c531e01097d003730c1f696a7553915e5e88064a96e1cef68a5b7863"} Jan 22 06:05:55 crc kubenswrapper[4933]: I0122 06:05:55.896643 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a7d72c18-1516-44a5-ad92-f367c93280b1","Type":"ContainerDied","Data":"89bcc286fb4735a8fa8fb0151a0f607159a60ad7fa26a1b4794fb7ae2e866707"} Jan 22 06:05:55 crc kubenswrapper[4933]: I0122 06:05:55.896653 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a7d72c18-1516-44a5-ad92-f367c93280b1","Type":"ContainerDied","Data":"b29d41c6c3906eb46f5e625db215c2c233737c530c75840950c73cff63017a43"} Jan 22 06:05:55 crc kubenswrapper[4933]: I0122 06:05:55.918280 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-92ghq" podStartSLOduration=3.820629167 podStartE2EDuration="47.918259069s" podCreationTimestamp="2026-01-22 06:05:08 +0000 UTC" firstStartedPulling="2026-01-22 06:05:10.36925618 +0000 UTC m=+1158.206381533" lastFinishedPulling="2026-01-22 06:05:54.466886082 +0000 UTC m=+1202.304011435" observedRunningTime="2026-01-22 06:05:55.912234079 +0000 UTC m=+1203.749359432" watchObservedRunningTime="2026-01-22 06:05:55.918259069 +0000 UTC m=+1203.755384422" Jan 22 06:05:56 crc kubenswrapper[4933]: I0122 06:05:56.907856 4933 generic.go:334] "Generic (PLEG): container finished" podID="c6ae84cf-ec9a-42e4-9c55-035d9accb4b2" containerID="e0226956031a6164e10e35e1b1744abeea73d2f85ab0cdab0974600cd35c965b" exitCode=0 Jan 22 06:05:56 crc kubenswrapper[4933]: I0122 06:05:56.907994 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-gwqkb" event={"ID":"c6ae84cf-ec9a-42e4-9c55-035d9accb4b2","Type":"ContainerDied","Data":"e0226956031a6164e10e35e1b1744abeea73d2f85ab0cdab0974600cd35c965b"} Jan 22 06:05:58 crc kubenswrapper[4933]: I0122 06:05:58.290159 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-gwqkb" Jan 22 06:05:58 crc kubenswrapper[4933]: I0122 06:05:58.374937 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c6ae84cf-ec9a-42e4-9c55-035d9accb4b2-db-sync-config-data\") pod \"c6ae84cf-ec9a-42e4-9c55-035d9accb4b2\" (UID: \"c6ae84cf-ec9a-42e4-9c55-035d9accb4b2\") " Jan 22 06:05:58 crc kubenswrapper[4933]: I0122 06:05:58.375310 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6ae84cf-ec9a-42e4-9c55-035d9accb4b2-combined-ca-bundle\") pod \"c6ae84cf-ec9a-42e4-9c55-035d9accb4b2\" (UID: \"c6ae84cf-ec9a-42e4-9c55-035d9accb4b2\") " Jan 22 06:05:58 crc kubenswrapper[4933]: I0122 06:05:58.375529 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9n4lp\" (UniqueName: \"kubernetes.io/projected/c6ae84cf-ec9a-42e4-9c55-035d9accb4b2-kube-api-access-9n4lp\") pod \"c6ae84cf-ec9a-42e4-9c55-035d9accb4b2\" (UID: \"c6ae84cf-ec9a-42e4-9c55-035d9accb4b2\") " Jan 22 06:05:58 crc kubenswrapper[4933]: I0122 06:05:58.380985 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c6ae84cf-ec9a-42e4-9c55-035d9accb4b2-kube-api-access-9n4lp" (OuterVolumeSpecName: "kube-api-access-9n4lp") pod "c6ae84cf-ec9a-42e4-9c55-035d9accb4b2" (UID: "c6ae84cf-ec9a-42e4-9c55-035d9accb4b2"). 
InnerVolumeSpecName "kube-api-access-9n4lp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:05:58 crc kubenswrapper[4933]: I0122 06:05:58.381029 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6ae84cf-ec9a-42e4-9c55-035d9accb4b2-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "c6ae84cf-ec9a-42e4-9c55-035d9accb4b2" (UID: "c6ae84cf-ec9a-42e4-9c55-035d9accb4b2"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:05:58 crc kubenswrapper[4933]: I0122 06:05:58.401727 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6ae84cf-ec9a-42e4-9c55-035d9accb4b2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c6ae84cf-ec9a-42e4-9c55-035d9accb4b2" (UID: "c6ae84cf-ec9a-42e4-9c55-035d9accb4b2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:05:58 crc kubenswrapper[4933]: I0122 06:05:58.477659 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9n4lp\" (UniqueName: \"kubernetes.io/projected/c6ae84cf-ec9a-42e4-9c55-035d9accb4b2-kube-api-access-9n4lp\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:58 crc kubenswrapper[4933]: I0122 06:05:58.477712 4933 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c6ae84cf-ec9a-42e4-9c55-035d9accb4b2-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:58 crc kubenswrapper[4933]: I0122 06:05:58.477739 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6ae84cf-ec9a-42e4-9c55-035d9accb4b2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:58 crc kubenswrapper[4933]: I0122 06:05:58.932351 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-gwqkb" event={"ID":"c6ae84cf-ec9a-42e4-9c55-035d9accb4b2","Type":"ContainerDied","Data":"fb37ed81c499d94bfb8a688b10004edae6292c43b1faa1238ca7be6a0916daf0"} Jan 22 06:05:58 crc kubenswrapper[4933]: I0122 06:05:58.933731 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fb37ed81c499d94bfb8a688b10004edae6292c43b1faa1238ca7be6a0916daf0" Jan 22 06:05:58 crc kubenswrapper[4933]: I0122 06:05:58.932380 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-gwqkb" Jan 22 06:05:58 crc kubenswrapper[4933]: I0122 06:05:58.934123 4933 generic.go:334] "Generic (PLEG): container finished" podID="1086bd39-4637-4123-a7b2-d85d3a603dd5" containerID="f2721000af5a120f7871e7c19c9dc7ce44e29a702f48b5d2b4de79c19cb595fd" exitCode=0 Jan 22 06:05:58 crc kubenswrapper[4933]: I0122 06:05:58.934159 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-92ghq" event={"ID":"1086bd39-4637-4123-a7b2-d85d3a603dd5","Type":"ContainerDied","Data":"f2721000af5a120f7871e7c19c9dc7ce44e29a702f48b5d2b4de79c19cb595fd"} Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.379804 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.493117 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a7d72c18-1516-44a5-ad92-f367c93280b1-run-httpd\") pod \"a7d72c18-1516-44a5-ad92-f367c93280b1\" (UID: \"a7d72c18-1516-44a5-ad92-f367c93280b1\") " Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.493229 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hwtpf\" (UniqueName: \"kubernetes.io/projected/a7d72c18-1516-44a5-ad92-f367c93280b1-kube-api-access-hwtpf\") pod \"a7d72c18-1516-44a5-ad92-f367c93280b1\" (UID: \"a7d72c18-1516-44a5-ad92-f367c93280b1\") " Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.493368 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7d72c18-1516-44a5-ad92-f367c93280b1-combined-ca-bundle\") pod \"a7d72c18-1516-44a5-ad92-f367c93280b1\" (UID: \"a7d72c18-1516-44a5-ad92-f367c93280b1\") " Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.493778 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a7d72c18-1516-44a5-ad92-f367c93280b1-log-httpd\") pod \"a7d72c18-1516-44a5-ad92-f367c93280b1\" (UID: \"a7d72c18-1516-44a5-ad92-f367c93280b1\") " Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.493912 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a7d72c18-1516-44a5-ad92-f367c93280b1-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "a7d72c18-1516-44a5-ad92-f367c93280b1" (UID: "a7d72c18-1516-44a5-ad92-f367c93280b1"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.494044 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7d72c18-1516-44a5-ad92-f367c93280b1-scripts\") pod \"a7d72c18-1516-44a5-ad92-f367c93280b1\" (UID: \"a7d72c18-1516-44a5-ad92-f367c93280b1\") " Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.494120 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7d72c18-1516-44a5-ad92-f367c93280b1-config-data\") pod \"a7d72c18-1516-44a5-ad92-f367c93280b1\" (UID: \"a7d72c18-1516-44a5-ad92-f367c93280b1\") " Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.494196 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a7d72c18-1516-44a5-ad92-f367c93280b1-sg-core-conf-yaml\") pod \"a7d72c18-1516-44a5-ad92-f367c93280b1\" (UID: \"a7d72c18-1516-44a5-ad92-f367c93280b1\") " Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.495022 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a7d72c18-1516-44a5-ad92-f367c93280b1-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "a7d72c18-1516-44a5-ad92-f367c93280b1" (UID: "a7d72c18-1516-44a5-ad92-f367c93280b1"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.495054 4933 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a7d72c18-1516-44a5-ad92-f367c93280b1-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.501292 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7d72c18-1516-44a5-ad92-f367c93280b1-kube-api-access-hwtpf" (OuterVolumeSpecName: "kube-api-access-hwtpf") pod "a7d72c18-1516-44a5-ad92-f367c93280b1" (UID: "a7d72c18-1516-44a5-ad92-f367c93280b1"). InnerVolumeSpecName "kube-api-access-hwtpf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.502436 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7d72c18-1516-44a5-ad92-f367c93280b1-scripts" (OuterVolumeSpecName: "scripts") pod "a7d72c18-1516-44a5-ad92-f367c93280b1" (UID: "a7d72c18-1516-44a5-ad92-f367c93280b1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.541671 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7d72c18-1516-44a5-ad92-f367c93280b1-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "a7d72c18-1516-44a5-ad92-f367c93280b1" (UID: "a7d72c18-1516-44a5-ad92-f367c93280b1"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.598592 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7d72c18-1516-44a5-ad92-f367c93280b1-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.598631 4933 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a7d72c18-1516-44a5-ad92-f367c93280b1-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.598644 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hwtpf\" (UniqueName: \"kubernetes.io/projected/a7d72c18-1516-44a5-ad92-f367c93280b1-kube-api-access-hwtpf\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.598658 4933 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a7d72c18-1516-44a5-ad92-f367c93280b1-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.618018 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7bdf86f46f-5c2n7"] Jan 22 06:05:59 crc kubenswrapper[4933]: E0122 06:05:59.618412 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7d72c18-1516-44a5-ad92-f367c93280b1" containerName="sg-core" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.618423 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7d72c18-1516-44a5-ad92-f367c93280b1" containerName="sg-core" Jan 22 06:05:59 crc kubenswrapper[4933]: E0122 06:05:59.618432 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7d72c18-1516-44a5-ad92-f367c93280b1" containerName="proxy-httpd" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.618438 4933 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="a7d72c18-1516-44a5-ad92-f367c93280b1" containerName="proxy-httpd" Jan 22 06:05:59 crc kubenswrapper[4933]: E0122 06:05:59.618459 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6ae84cf-ec9a-42e4-9c55-035d9accb4b2" containerName="barbican-db-sync" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.618465 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6ae84cf-ec9a-42e4-9c55-035d9accb4b2" containerName="barbican-db-sync" Jan 22 06:05:59 crc kubenswrapper[4933]: E0122 06:05:59.618474 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db9370fd-7e89-47ab-8238-7d24abeb981f" containerName="dnsmasq-dns" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.618479 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="db9370fd-7e89-47ab-8238-7d24abeb981f" containerName="dnsmasq-dns" Jan 22 06:05:59 crc kubenswrapper[4933]: E0122 06:05:59.618492 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db9370fd-7e89-47ab-8238-7d24abeb981f" containerName="init" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.618497 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="db9370fd-7e89-47ab-8238-7d24abeb981f" containerName="init" Jan 22 06:05:59 crc kubenswrapper[4933]: E0122 06:05:59.618508 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7d72c18-1516-44a5-ad92-f367c93280b1" containerName="ceilometer-notification-agent" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.618513 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7d72c18-1516-44a5-ad92-f367c93280b1" containerName="ceilometer-notification-agent" Jan 22 06:05:59 crc kubenswrapper[4933]: E0122 06:05:59.618526 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7d72c18-1516-44a5-ad92-f367c93280b1" containerName="ceilometer-central-agent" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.618532 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7d72c18-1516-44a5-ad92-f367c93280b1" containerName="ceilometer-central-agent" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.618669 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7d72c18-1516-44a5-ad92-f367c93280b1" containerName="ceilometer-notification-agent" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.618689 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7d72c18-1516-44a5-ad92-f367c93280b1" containerName="sg-core" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.618704 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7d72c18-1516-44a5-ad92-f367c93280b1" containerName="ceilometer-central-agent" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.618722 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="db9370fd-7e89-47ab-8238-7d24abeb981f" containerName="dnsmasq-dns" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.618742 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6ae84cf-ec9a-42e4-9c55-035d9accb4b2" containerName="barbican-db-sync" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.618752 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7d72c18-1516-44a5-ad92-f367c93280b1" containerName="proxy-httpd" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.619644 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7bdf86f46f-5c2n7" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.656033 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7d72c18-1516-44a5-ad92-f367c93280b1-config-data" (OuterVolumeSpecName: "config-data") pod "a7d72c18-1516-44a5-ad92-f367c93280b1" (UID: "a7d72c18-1516-44a5-ad92-f367c93280b1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.689593 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7bdf86f46f-5c2n7"] Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.706249 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7d72c18-1516-44a5-ad92-f367c93280b1-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.709286 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-6448b46975-jx7gp"] Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.711250 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-6448b46975-jx7gp" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.713430 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.717982 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-5lkk6" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.718215 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.726776 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7d72c18-1516-44a5-ad92-f367c93280b1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a7d72c18-1516-44a5-ad92-f367c93280b1" (UID: "a7d72c18-1516-44a5-ad92-f367c93280b1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.726954 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-7c6944456-lk7l7"] Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.728726 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-7c6944456-lk7l7" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.741787 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.768336 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-6448b46975-jx7gp"] Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.778840 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-7c6944456-lk7l7"] Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.811858 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cq2km\" (UniqueName: \"kubernetes.io/projected/a656ee23-9957-4f1a-9fa8-9737afdd32b1-kube-api-access-cq2km\") pod \"dnsmasq-dns-7bdf86f46f-5c2n7\" (UID: \"a656ee23-9957-4f1a-9fa8-9737afdd32b1\") " pod="openstack/dnsmasq-dns-7bdf86f46f-5c2n7" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.811920 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a656ee23-9957-4f1a-9fa8-9737afdd32b1-dns-svc\") pod \"dnsmasq-dns-7bdf86f46f-5c2n7\" (UID: \"a656ee23-9957-4f1a-9fa8-9737afdd32b1\") " pod="openstack/dnsmasq-dns-7bdf86f46f-5c2n7" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.811943 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a656ee23-9957-4f1a-9fa8-9737afdd32b1-ovsdbserver-nb\") pod \"dnsmasq-dns-7bdf86f46f-5c2n7\" (UID: \"a656ee23-9957-4f1a-9fa8-9737afdd32b1\") " pod="openstack/dnsmasq-dns-7bdf86f46f-5c2n7" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.811989 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a656ee23-9957-4f1a-9fa8-9737afdd32b1-ovsdbserver-sb\") pod \"dnsmasq-dns-7bdf86f46f-5c2n7\" (UID: \"a656ee23-9957-4f1a-9fa8-9737afdd32b1\") " pod="openstack/dnsmasq-dns-7bdf86f46f-5c2n7" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.812016 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a656ee23-9957-4f1a-9fa8-9737afdd32b1-dns-swift-storage-0\") pod \"dnsmasq-dns-7bdf86f46f-5c2n7\" (UID: \"a656ee23-9957-4f1a-9fa8-9737afdd32b1\") " pod="openstack/dnsmasq-dns-7bdf86f46f-5c2n7" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.812099 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a656ee23-9957-4f1a-9fa8-9737afdd32b1-config\") pod \"dnsmasq-dns-7bdf86f46f-5c2n7\" (UID: \"a656ee23-9957-4f1a-9fa8-9737afdd32b1\") " pod="openstack/dnsmasq-dns-7bdf86f46f-5c2n7" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.816734 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7d72c18-1516-44a5-ad92-f367c93280b1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.869163 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-5c9bd98c56-qnqgq"] Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.870891 
4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5c9bd98c56-qnqgq" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.878886 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.890045 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5c9bd98c56-qnqgq"] Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.917965 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5505bed5-dba3-4067-b94c-acd00b7c37c7-config-data\") pod \"barbican-worker-6448b46975-jx7gp\" (UID: \"5505bed5-dba3-4067-b94c-acd00b7c37c7\") " pod="openstack/barbican-worker-6448b46975-jx7gp" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.918025 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cq2km\" (UniqueName: \"kubernetes.io/projected/a656ee23-9957-4f1a-9fa8-9737afdd32b1-kube-api-access-cq2km\") pod \"dnsmasq-dns-7bdf86f46f-5c2n7\" (UID: \"a656ee23-9957-4f1a-9fa8-9737afdd32b1\") " pod="openstack/dnsmasq-dns-7bdf86f46f-5c2n7" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.918048 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5505bed5-dba3-4067-b94c-acd00b7c37c7-config-data-custom\") pod \"barbican-worker-6448b46975-jx7gp\" (UID: \"5505bed5-dba3-4067-b94c-acd00b7c37c7\") " pod="openstack/barbican-worker-6448b46975-jx7gp" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.918066 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0fecb571-89ee-4d10-a1e3-e3755946df2b-config-data-custom\") pod \"barbican-keystone-listener-7c6944456-lk7l7\" (UID: \"0fecb571-89ee-4d10-a1e3-e3755946df2b\") " pod="openstack/barbican-keystone-listener-7c6944456-lk7l7" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.918100 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0fecb571-89ee-4d10-a1e3-e3755946df2b-logs\") pod \"barbican-keystone-listener-7c6944456-lk7l7\" (UID: \"0fecb571-89ee-4d10-a1e3-e3755946df2b\") " pod="openstack/barbican-keystone-listener-7c6944456-lk7l7" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.918118 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a656ee23-9957-4f1a-9fa8-9737afdd32b1-ovsdbserver-nb\") pod \"dnsmasq-dns-7bdf86f46f-5c2n7\" (UID: \"a656ee23-9957-4f1a-9fa8-9737afdd32b1\") " pod="openstack/dnsmasq-dns-7bdf86f46f-5c2n7" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.918132 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a656ee23-9957-4f1a-9fa8-9737afdd32b1-dns-svc\") pod \"dnsmasq-dns-7bdf86f46f-5c2n7\" (UID: \"a656ee23-9957-4f1a-9fa8-9737afdd32b1\") " pod="openstack/dnsmasq-dns-7bdf86f46f-5c2n7" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.918158 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/0fecb571-89ee-4d10-a1e3-e3755946df2b-combined-ca-bundle\") pod \"barbican-keystone-listener-7c6944456-lk7l7\" (UID: \"0fecb571-89ee-4d10-a1e3-e3755946df2b\") " pod="openstack/barbican-keystone-listener-7c6944456-lk7l7" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.918181 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rr2vd\" (UniqueName: \"kubernetes.io/projected/5505bed5-dba3-4067-b94c-acd00b7c37c7-kube-api-access-rr2vd\") pod \"barbican-worker-6448b46975-jx7gp\" (UID: \"5505bed5-dba3-4067-b94c-acd00b7c37c7\") " pod="openstack/barbican-worker-6448b46975-jx7gp" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.918198 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a656ee23-9957-4f1a-9fa8-9737afdd32b1-ovsdbserver-sb\") pod \"dnsmasq-dns-7bdf86f46f-5c2n7\" (UID: \"a656ee23-9957-4f1a-9fa8-9737afdd32b1\") " pod="openstack/dnsmasq-dns-7bdf86f46f-5c2n7" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.918225 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5505bed5-dba3-4067-b94c-acd00b7c37c7-logs\") pod \"barbican-worker-6448b46975-jx7gp\" (UID: \"5505bed5-dba3-4067-b94c-acd00b7c37c7\") " pod="openstack/barbican-worker-6448b46975-jx7gp" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.918250 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r2dqw\" (UniqueName: \"kubernetes.io/projected/0fecb571-89ee-4d10-a1e3-e3755946df2b-kube-api-access-r2dqw\") pod \"barbican-keystone-listener-7c6944456-lk7l7\" (UID: \"0fecb571-89ee-4d10-a1e3-e3755946df2b\") " pod="openstack/barbican-keystone-listener-7c6944456-lk7l7" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.918272 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a656ee23-9957-4f1a-9fa8-9737afdd32b1-dns-swift-storage-0\") pod \"dnsmasq-dns-7bdf86f46f-5c2n7\" (UID: \"a656ee23-9957-4f1a-9fa8-9737afdd32b1\") " pod="openstack/dnsmasq-dns-7bdf86f46f-5c2n7" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.918300 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5505bed5-dba3-4067-b94c-acd00b7c37c7-combined-ca-bundle\") pod \"barbican-worker-6448b46975-jx7gp\" (UID: \"5505bed5-dba3-4067-b94c-acd00b7c37c7\") " pod="openstack/barbican-worker-6448b46975-jx7gp" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.918329 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0fecb571-89ee-4d10-a1e3-e3755946df2b-config-data\") pod \"barbican-keystone-listener-7c6944456-lk7l7\" (UID: \"0fecb571-89ee-4d10-a1e3-e3755946df2b\") " pod="openstack/barbican-keystone-listener-7c6944456-lk7l7" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.918366 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a656ee23-9957-4f1a-9fa8-9737afdd32b1-config\") pod \"dnsmasq-dns-7bdf86f46f-5c2n7\" (UID: \"a656ee23-9957-4f1a-9fa8-9737afdd32b1\") " pod="openstack/dnsmasq-dns-7bdf86f46f-5c2n7" Jan 22 06:05:59 crc 
kubenswrapper[4933]: I0122 06:05:59.918978 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a656ee23-9957-4f1a-9fa8-9737afdd32b1-ovsdbserver-nb\") pod \"dnsmasq-dns-7bdf86f46f-5c2n7\" (UID: \"a656ee23-9957-4f1a-9fa8-9737afdd32b1\") " pod="openstack/dnsmasq-dns-7bdf86f46f-5c2n7" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.919062 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a656ee23-9957-4f1a-9fa8-9737afdd32b1-dns-svc\") pod \"dnsmasq-dns-7bdf86f46f-5c2n7\" (UID: \"a656ee23-9957-4f1a-9fa8-9737afdd32b1\") " pod="openstack/dnsmasq-dns-7bdf86f46f-5c2n7" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.919166 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a656ee23-9957-4f1a-9fa8-9737afdd32b1-config\") pod \"dnsmasq-dns-7bdf86f46f-5c2n7\" (UID: \"a656ee23-9957-4f1a-9fa8-9737afdd32b1\") " pod="openstack/dnsmasq-dns-7bdf86f46f-5c2n7" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.919568 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a656ee23-9957-4f1a-9fa8-9737afdd32b1-ovsdbserver-sb\") pod \"dnsmasq-dns-7bdf86f46f-5c2n7\" (UID: \"a656ee23-9957-4f1a-9fa8-9737afdd32b1\") " pod="openstack/dnsmasq-dns-7bdf86f46f-5c2n7" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.919741 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a656ee23-9957-4f1a-9fa8-9737afdd32b1-dns-swift-storage-0\") pod \"dnsmasq-dns-7bdf86f46f-5c2n7\" (UID: \"a656ee23-9957-4f1a-9fa8-9737afdd32b1\") " pod="openstack/dnsmasq-dns-7bdf86f46f-5c2n7" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.944984 4933 generic.go:334] "Generic (PLEG): container finished" podID="a7d72c18-1516-44a5-ad92-f367c93280b1" containerID="396cdd5329e8a64271bc121815e8795ec8443f663a5b2fa0cebe713443617e70" exitCode=0 Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.945190 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.947337 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a7d72c18-1516-44a5-ad92-f367c93280b1","Type":"ContainerDied","Data":"396cdd5329e8a64271bc121815e8795ec8443f663a5b2fa0cebe713443617e70"} Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.947369 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a7d72c18-1516-44a5-ad92-f367c93280b1","Type":"ContainerDied","Data":"ff3e6ec91ffea38b7c3966a2d7f8f278107f99bdd327ca1480363d6b370a90d1"} Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.947391 4933 scope.go:117] "RemoveContainer" containerID="cb603c27c531e01097d003730c1f696a7553915e5e88064a96e1cef68a5b7863" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.957104 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cq2km\" (UniqueName: \"kubernetes.io/projected/a656ee23-9957-4f1a-9fa8-9737afdd32b1-kube-api-access-cq2km\") pod \"dnsmasq-dns-7bdf86f46f-5c2n7\" (UID: \"a656ee23-9957-4f1a-9fa8-9737afdd32b1\") " pod="openstack/dnsmasq-dns-7bdf86f46f-5c2n7" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.984496 4933 scope.go:117] "RemoveContainer" containerID="89bcc286fb4735a8fa8fb0151a0f607159a60ad7fa26a1b4794fb7ae2e866707" Jan 22 06:05:59 crc kubenswrapper[4933]: I0122 06:05:59.989271 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.007434 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.021756 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.022127 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2d683dcb-f19b-4f7c-a6e5-69ec3a93208b-logs\") pod \"barbican-api-5c9bd98c56-qnqgq\" (UID: \"2d683dcb-f19b-4f7c-a6e5-69ec3a93208b\") " pod="openstack/barbican-api-5c9bd98c56-qnqgq" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.022176 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rq7cj\" (UniqueName: \"kubernetes.io/projected/2d683dcb-f19b-4f7c-a6e5-69ec3a93208b-kube-api-access-rq7cj\") pod \"barbican-api-5c9bd98c56-qnqgq\" (UID: \"2d683dcb-f19b-4f7c-a6e5-69ec3a93208b\") " pod="openstack/barbican-api-5c9bd98c56-qnqgq" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.022215 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d683dcb-f19b-4f7c-a6e5-69ec3a93208b-config-data\") pod \"barbican-api-5c9bd98c56-qnqgq\" (UID: \"2d683dcb-f19b-4f7c-a6e5-69ec3a93208b\") " pod="openstack/barbican-api-5c9bd98c56-qnqgq" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.022243 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2d683dcb-f19b-4f7c-a6e5-69ec3a93208b-config-data-custom\") pod \"barbican-api-5c9bd98c56-qnqgq\" (UID: \"2d683dcb-f19b-4f7c-a6e5-69ec3a93208b\") " pod="openstack/barbican-api-5c9bd98c56-qnqgq" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.022282 4933 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5505bed5-dba3-4067-b94c-acd00b7c37c7-config-data\") pod \"barbican-worker-6448b46975-jx7gp\" (UID: \"5505bed5-dba3-4067-b94c-acd00b7c37c7\") " pod="openstack/barbican-worker-6448b46975-jx7gp" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.022301 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d683dcb-f19b-4f7c-a6e5-69ec3a93208b-combined-ca-bundle\") pod \"barbican-api-5c9bd98c56-qnqgq\" (UID: \"2d683dcb-f19b-4f7c-a6e5-69ec3a93208b\") " pod="openstack/barbican-api-5c9bd98c56-qnqgq" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.022335 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5505bed5-dba3-4067-b94c-acd00b7c37c7-config-data-custom\") pod \"barbican-worker-6448b46975-jx7gp\" (UID: \"5505bed5-dba3-4067-b94c-acd00b7c37c7\") " pod="openstack/barbican-worker-6448b46975-jx7gp" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.022351 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0fecb571-89ee-4d10-a1e3-e3755946df2b-config-data-custom\") pod \"barbican-keystone-listener-7c6944456-lk7l7\" (UID: \"0fecb571-89ee-4d10-a1e3-e3755946df2b\") " pod="openstack/barbican-keystone-listener-7c6944456-lk7l7" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.022368 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0fecb571-89ee-4d10-a1e3-e3755946df2b-logs\") pod \"barbican-keystone-listener-7c6944456-lk7l7\" (UID: \"0fecb571-89ee-4d10-a1e3-e3755946df2b\") " pod="openstack/barbican-keystone-listener-7c6944456-lk7l7" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.022390 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fecb571-89ee-4d10-a1e3-e3755946df2b-combined-ca-bundle\") pod \"barbican-keystone-listener-7c6944456-lk7l7\" (UID: \"0fecb571-89ee-4d10-a1e3-e3755946df2b\") " pod="openstack/barbican-keystone-listener-7c6944456-lk7l7" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.022415 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rr2vd\" (UniqueName: \"kubernetes.io/projected/5505bed5-dba3-4067-b94c-acd00b7c37c7-kube-api-access-rr2vd\") pod \"barbican-worker-6448b46975-jx7gp\" (UID: \"5505bed5-dba3-4067-b94c-acd00b7c37c7\") " pod="openstack/barbican-worker-6448b46975-jx7gp" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.022436 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5505bed5-dba3-4067-b94c-acd00b7c37c7-logs\") pod \"barbican-worker-6448b46975-jx7gp\" (UID: \"5505bed5-dba3-4067-b94c-acd00b7c37c7\") " pod="openstack/barbican-worker-6448b46975-jx7gp" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.022454 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r2dqw\" (UniqueName: \"kubernetes.io/projected/0fecb571-89ee-4d10-a1e3-e3755946df2b-kube-api-access-r2dqw\") pod \"barbican-keystone-listener-7c6944456-lk7l7\" (UID: \"0fecb571-89ee-4d10-a1e3-e3755946df2b\") " 
pod="openstack/barbican-keystone-listener-7c6944456-lk7l7" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.022479 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5505bed5-dba3-4067-b94c-acd00b7c37c7-combined-ca-bundle\") pod \"barbican-worker-6448b46975-jx7gp\" (UID: \"5505bed5-dba3-4067-b94c-acd00b7c37c7\") " pod="openstack/barbican-worker-6448b46975-jx7gp" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.022505 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0fecb571-89ee-4d10-a1e3-e3755946df2b-config-data\") pod \"barbican-keystone-listener-7c6944456-lk7l7\" (UID: \"0fecb571-89ee-4d10-a1e3-e3755946df2b\") " pod="openstack/barbican-keystone-listener-7c6944456-lk7l7" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.023979 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.024431 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5505bed5-dba3-4067-b94c-acd00b7c37c7-logs\") pod \"barbican-worker-6448b46975-jx7gp\" (UID: \"5505bed5-dba3-4067-b94c-acd00b7c37c7\") " pod="openstack/barbican-worker-6448b46975-jx7gp" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.024714 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0fecb571-89ee-4d10-a1e3-e3755946df2b-logs\") pod \"barbican-keystone-listener-7c6944456-lk7l7\" (UID: \"0fecb571-89ee-4d10-a1e3-e3755946df2b\") " pod="openstack/barbican-keystone-listener-7c6944456-lk7l7" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.031147 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5505bed5-dba3-4067-b94c-acd00b7c37c7-combined-ca-bundle\") pod \"barbican-worker-6448b46975-jx7gp\" (UID: \"5505bed5-dba3-4067-b94c-acd00b7c37c7\") " pod="openstack/barbican-worker-6448b46975-jx7gp" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.031549 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fecb571-89ee-4d10-a1e3-e3755946df2b-combined-ca-bundle\") pod \"barbican-keystone-listener-7c6944456-lk7l7\" (UID: \"0fecb571-89ee-4d10-a1e3-e3755946df2b\") " pod="openstack/barbican-keystone-listener-7c6944456-lk7l7" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.031679 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5505bed5-dba3-4067-b94c-acd00b7c37c7-config-data-custom\") pod \"barbican-worker-6448b46975-jx7gp\" (UID: \"5505bed5-dba3-4067-b94c-acd00b7c37c7\") " pod="openstack/barbican-worker-6448b46975-jx7gp" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.031897 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.032306 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5505bed5-dba3-4067-b94c-acd00b7c37c7-config-data\") pod \"barbican-worker-6448b46975-jx7gp\" (UID: \"5505bed5-dba3-4067-b94c-acd00b7c37c7\") " pod="openstack/barbican-worker-6448b46975-jx7gp" Jan 22 
06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.032324 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.032950 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0fecb571-89ee-4d10-a1e3-e3755946df2b-config-data\") pod \"barbican-keystone-listener-7c6944456-lk7l7\" (UID: \"0fecb571-89ee-4d10-a1e3-e3755946df2b\") " pod="openstack/barbican-keystone-listener-7c6944456-lk7l7" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.033484 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0fecb571-89ee-4d10-a1e3-e3755946df2b-config-data-custom\") pod \"barbican-keystone-listener-7c6944456-lk7l7\" (UID: \"0fecb571-89ee-4d10-a1e3-e3755946df2b\") " pod="openstack/barbican-keystone-listener-7c6944456-lk7l7" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.040317 4933 scope.go:117] "RemoveContainer" containerID="396cdd5329e8a64271bc121815e8795ec8443f663a5b2fa0cebe713443617e70" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.041326 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.048284 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rr2vd\" (UniqueName: \"kubernetes.io/projected/5505bed5-dba3-4067-b94c-acd00b7c37c7-kube-api-access-rr2vd\") pod \"barbican-worker-6448b46975-jx7gp\" (UID: \"5505bed5-dba3-4067-b94c-acd00b7c37c7\") " pod="openstack/barbican-worker-6448b46975-jx7gp" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.054626 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r2dqw\" (UniqueName: \"kubernetes.io/projected/0fecb571-89ee-4d10-a1e3-e3755946df2b-kube-api-access-r2dqw\") pod \"barbican-keystone-listener-7c6944456-lk7l7\" (UID: \"0fecb571-89ee-4d10-a1e3-e3755946df2b\") " pod="openstack/barbican-keystone-listener-7c6944456-lk7l7" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.071405 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-6448b46975-jx7gp" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.074282 4933 scope.go:117] "RemoveContainer" containerID="b29d41c6c3906eb46f5e625db215c2c233737c530c75840950c73cff63017a43" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.084704 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-7c6944456-lk7l7" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.110311 4933 scope.go:117] "RemoveContainer" containerID="cb603c27c531e01097d003730c1f696a7553915e5e88064a96e1cef68a5b7863" Jan 22 06:06:00 crc kubenswrapper[4933]: E0122 06:06:00.110780 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cb603c27c531e01097d003730c1f696a7553915e5e88064a96e1cef68a5b7863\": container with ID starting with cb603c27c531e01097d003730c1f696a7553915e5e88064a96e1cef68a5b7863 not found: ID does not exist" containerID="cb603c27c531e01097d003730c1f696a7553915e5e88064a96e1cef68a5b7863" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.110826 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb603c27c531e01097d003730c1f696a7553915e5e88064a96e1cef68a5b7863"} err="failed to get container status \"cb603c27c531e01097d003730c1f696a7553915e5e88064a96e1cef68a5b7863\": rpc error: code = NotFound desc = could not find container \"cb603c27c531e01097d003730c1f696a7553915e5e88064a96e1cef68a5b7863\": container with ID starting with cb603c27c531e01097d003730c1f696a7553915e5e88064a96e1cef68a5b7863 not found: ID does not exist" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.110860 4933 scope.go:117] "RemoveContainer" containerID="89bcc286fb4735a8fa8fb0151a0f607159a60ad7fa26a1b4794fb7ae2e866707" Jan 22 06:06:00 crc kubenswrapper[4933]: E0122 06:06:00.111227 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"89bcc286fb4735a8fa8fb0151a0f607159a60ad7fa26a1b4794fb7ae2e866707\": container with ID starting with 89bcc286fb4735a8fa8fb0151a0f607159a60ad7fa26a1b4794fb7ae2e866707 not found: ID does not exist" containerID="89bcc286fb4735a8fa8fb0151a0f607159a60ad7fa26a1b4794fb7ae2e866707" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.111268 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"89bcc286fb4735a8fa8fb0151a0f607159a60ad7fa26a1b4794fb7ae2e866707"} err="failed to get container status \"89bcc286fb4735a8fa8fb0151a0f607159a60ad7fa26a1b4794fb7ae2e866707\": rpc error: code = NotFound desc = could not find container \"89bcc286fb4735a8fa8fb0151a0f607159a60ad7fa26a1b4794fb7ae2e866707\": container with ID starting with 89bcc286fb4735a8fa8fb0151a0f607159a60ad7fa26a1b4794fb7ae2e866707 not found: ID does not exist" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.111297 4933 scope.go:117] "RemoveContainer" containerID="396cdd5329e8a64271bc121815e8795ec8443f663a5b2fa0cebe713443617e70" Jan 22 06:06:00 crc kubenswrapper[4933]: E0122 06:06:00.111662 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"396cdd5329e8a64271bc121815e8795ec8443f663a5b2fa0cebe713443617e70\": container with ID starting with 396cdd5329e8a64271bc121815e8795ec8443f663a5b2fa0cebe713443617e70 not found: ID does not exist" containerID="396cdd5329e8a64271bc121815e8795ec8443f663a5b2fa0cebe713443617e70" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.111689 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"396cdd5329e8a64271bc121815e8795ec8443f663a5b2fa0cebe713443617e70"} err="failed to get container status \"396cdd5329e8a64271bc121815e8795ec8443f663a5b2fa0cebe713443617e70\": rpc error: code 
= NotFound desc = could not find container \"396cdd5329e8a64271bc121815e8795ec8443f663a5b2fa0cebe713443617e70\": container with ID starting with 396cdd5329e8a64271bc121815e8795ec8443f663a5b2fa0cebe713443617e70 not found: ID does not exist" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.111707 4933 scope.go:117] "RemoveContainer" containerID="b29d41c6c3906eb46f5e625db215c2c233737c530c75840950c73cff63017a43" Jan 22 06:06:00 crc kubenswrapper[4933]: E0122 06:06:00.112007 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b29d41c6c3906eb46f5e625db215c2c233737c530c75840950c73cff63017a43\": container with ID starting with b29d41c6c3906eb46f5e625db215c2c233737c530c75840950c73cff63017a43 not found: ID does not exist" containerID="b29d41c6c3906eb46f5e625db215c2c233737c530c75840950c73cff63017a43" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.112031 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b29d41c6c3906eb46f5e625db215c2c233737c530c75840950c73cff63017a43"} err="failed to get container status \"b29d41c6c3906eb46f5e625db215c2c233737c530c75840950c73cff63017a43\": rpc error: code = NotFound desc = could not find container \"b29d41c6c3906eb46f5e625db215c2c233737c530c75840950c73cff63017a43\": container with ID starting with b29d41c6c3906eb46f5e625db215c2c233737c530c75840950c73cff63017a43 not found: ID does not exist" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.126119 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rq7cj\" (UniqueName: \"kubernetes.io/projected/2d683dcb-f19b-4f7c-a6e5-69ec3a93208b-kube-api-access-rq7cj\") pod \"barbican-api-5c9bd98c56-qnqgq\" (UID: \"2d683dcb-f19b-4f7c-a6e5-69ec3a93208b\") " pod="openstack/barbican-api-5c9bd98c56-qnqgq" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.126165 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d683dcb-f19b-4f7c-a6e5-69ec3a93208b-config-data\") pod \"barbican-api-5c9bd98c56-qnqgq\" (UID: \"2d683dcb-f19b-4f7c-a6e5-69ec3a93208b\") " pod="openstack/barbican-api-5c9bd98c56-qnqgq" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.126226 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2d683dcb-f19b-4f7c-a6e5-69ec3a93208b-config-data-custom\") pod \"barbican-api-5c9bd98c56-qnqgq\" (UID: \"2d683dcb-f19b-4f7c-a6e5-69ec3a93208b\") " pod="openstack/barbican-api-5c9bd98c56-qnqgq" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.127356 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d683dcb-f19b-4f7c-a6e5-69ec3a93208b-combined-ca-bundle\") pod \"barbican-api-5c9bd98c56-qnqgq\" (UID: \"2d683dcb-f19b-4f7c-a6e5-69ec3a93208b\") " pod="openstack/barbican-api-5c9bd98c56-qnqgq" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.127470 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2d683dcb-f19b-4f7c-a6e5-69ec3a93208b-logs\") pod \"barbican-api-5c9bd98c56-qnqgq\" (UID: \"2d683dcb-f19b-4f7c-a6e5-69ec3a93208b\") " pod="openstack/barbican-api-5c9bd98c56-qnqgq" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.127991 4933 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2d683dcb-f19b-4f7c-a6e5-69ec3a93208b-logs\") pod \"barbican-api-5c9bd98c56-qnqgq\" (UID: \"2d683dcb-f19b-4f7c-a6e5-69ec3a93208b\") " pod="openstack/barbican-api-5c9bd98c56-qnqgq" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.130994 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2d683dcb-f19b-4f7c-a6e5-69ec3a93208b-config-data-custom\") pod \"barbican-api-5c9bd98c56-qnqgq\" (UID: \"2d683dcb-f19b-4f7c-a6e5-69ec3a93208b\") " pod="openstack/barbican-api-5c9bd98c56-qnqgq" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.131704 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d683dcb-f19b-4f7c-a6e5-69ec3a93208b-combined-ca-bundle\") pod \"barbican-api-5c9bd98c56-qnqgq\" (UID: \"2d683dcb-f19b-4f7c-a6e5-69ec3a93208b\") " pod="openstack/barbican-api-5c9bd98c56-qnqgq" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.131802 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d683dcb-f19b-4f7c-a6e5-69ec3a93208b-config-data\") pod \"barbican-api-5c9bd98c56-qnqgq\" (UID: \"2d683dcb-f19b-4f7c-a6e5-69ec3a93208b\") " pod="openstack/barbican-api-5c9bd98c56-qnqgq" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.148559 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rq7cj\" (UniqueName: \"kubernetes.io/projected/2d683dcb-f19b-4f7c-a6e5-69ec3a93208b-kube-api-access-rq7cj\") pod \"barbican-api-5c9bd98c56-qnqgq\" (UID: \"2d683dcb-f19b-4f7c-a6e5-69ec3a93208b\") " pod="openstack/barbican-api-5c9bd98c56-qnqgq" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.196535 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-5c9bd98c56-qnqgq" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.229456 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-config-data\") pod \"ceilometer-0\" (UID: \"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a\") " pod="openstack/ceilometer-0" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.229543 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-scripts\") pod \"ceilometer-0\" (UID: \"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a\") " pod="openstack/ceilometer-0" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.229577 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a\") " pod="openstack/ceilometer-0" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.229596 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-run-httpd\") pod \"ceilometer-0\" (UID: \"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a\") " pod="openstack/ceilometer-0" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.229625 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7rk2v\" (UniqueName: \"kubernetes.io/projected/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-kube-api-access-7rk2v\") pod \"ceilometer-0\" (UID: \"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a\") " pod="openstack/ceilometer-0" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.229667 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a\") " pod="openstack/ceilometer-0" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.229703 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-log-httpd\") pod \"ceilometer-0\" (UID: \"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a\") " pod="openstack/ceilometer-0" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.247282 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bdf86f46f-5c2n7" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.261751 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-92ghq" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.331253 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-config-data\") pod \"ceilometer-0\" (UID: \"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a\") " pod="openstack/ceilometer-0" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.331325 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-scripts\") pod \"ceilometer-0\" (UID: \"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a\") " pod="openstack/ceilometer-0" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.331349 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-run-httpd\") pod \"ceilometer-0\" (UID: \"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a\") " pod="openstack/ceilometer-0" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.331366 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a\") " pod="openstack/ceilometer-0" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.331386 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7rk2v\" (UniqueName: \"kubernetes.io/projected/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-kube-api-access-7rk2v\") pod \"ceilometer-0\" (UID: \"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a\") " pod="openstack/ceilometer-0" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.331419 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a\") " pod="openstack/ceilometer-0" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.331440 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-log-httpd\") pod \"ceilometer-0\" (UID: \"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a\") " pod="openstack/ceilometer-0" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.331859 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-log-httpd\") pod \"ceilometer-0\" (UID: \"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a\") " pod="openstack/ceilometer-0" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.337460 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-run-httpd\") pod \"ceilometer-0\" (UID: \"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a\") " pod="openstack/ceilometer-0" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.340515 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a\") " 
pod="openstack/ceilometer-0" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.341148 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-scripts\") pod \"ceilometer-0\" (UID: \"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a\") " pod="openstack/ceilometer-0" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.342406 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-config-data\") pod \"ceilometer-0\" (UID: \"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a\") " pod="openstack/ceilometer-0" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.347508 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a\") " pod="openstack/ceilometer-0" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.350435 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7rk2v\" (UniqueName: \"kubernetes.io/projected/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-kube-api-access-7rk2v\") pod \"ceilometer-0\" (UID: \"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a\") " pod="openstack/ceilometer-0" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.359903 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.432744 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1086bd39-4637-4123-a7b2-d85d3a603dd5-config-data\") pod \"1086bd39-4637-4123-a7b2-d85d3a603dd5\" (UID: \"1086bd39-4637-4123-a7b2-d85d3a603dd5\") " Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.433105 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b88r9\" (UniqueName: \"kubernetes.io/projected/1086bd39-4637-4123-a7b2-d85d3a603dd5-kube-api-access-b88r9\") pod \"1086bd39-4637-4123-a7b2-d85d3a603dd5\" (UID: \"1086bd39-4637-4123-a7b2-d85d3a603dd5\") " Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.433196 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1086bd39-4637-4123-a7b2-d85d3a603dd5-etc-machine-id\") pod \"1086bd39-4637-4123-a7b2-d85d3a603dd5\" (UID: \"1086bd39-4637-4123-a7b2-d85d3a603dd5\") " Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.433317 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1086bd39-4637-4123-a7b2-d85d3a603dd5-scripts\") pod \"1086bd39-4637-4123-a7b2-d85d3a603dd5\" (UID: \"1086bd39-4637-4123-a7b2-d85d3a603dd5\") " Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.433381 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1086bd39-4637-4123-a7b2-d85d3a603dd5-combined-ca-bundle\") pod \"1086bd39-4637-4123-a7b2-d85d3a603dd5\" (UID: \"1086bd39-4637-4123-a7b2-d85d3a603dd5\") " Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.433402 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: 
\"kubernetes.io/secret/1086bd39-4637-4123-a7b2-d85d3a603dd5-db-sync-config-data\") pod \"1086bd39-4637-4123-a7b2-d85d3a603dd5\" (UID: \"1086bd39-4637-4123-a7b2-d85d3a603dd5\") " Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.434363 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1086bd39-4637-4123-a7b2-d85d3a603dd5-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "1086bd39-4637-4123-a7b2-d85d3a603dd5" (UID: "1086bd39-4637-4123-a7b2-d85d3a603dd5"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.438969 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1086bd39-4637-4123-a7b2-d85d3a603dd5-kube-api-access-b88r9" (OuterVolumeSpecName: "kube-api-access-b88r9") pod "1086bd39-4637-4123-a7b2-d85d3a603dd5" (UID: "1086bd39-4637-4123-a7b2-d85d3a603dd5"). InnerVolumeSpecName "kube-api-access-b88r9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.443340 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1086bd39-4637-4123-a7b2-d85d3a603dd5-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "1086bd39-4637-4123-a7b2-d85d3a603dd5" (UID: "1086bd39-4637-4123-a7b2-d85d3a603dd5"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.443426 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1086bd39-4637-4123-a7b2-d85d3a603dd5-scripts" (OuterVolumeSpecName: "scripts") pod "1086bd39-4637-4123-a7b2-d85d3a603dd5" (UID: "1086bd39-4637-4123-a7b2-d85d3a603dd5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.502765 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1086bd39-4637-4123-a7b2-d85d3a603dd5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1086bd39-4637-4123-a7b2-d85d3a603dd5" (UID: "1086bd39-4637-4123-a7b2-d85d3a603dd5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.502872 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1086bd39-4637-4123-a7b2-d85d3a603dd5-config-data" (OuterVolumeSpecName: "config-data") pod "1086bd39-4637-4123-a7b2-d85d3a603dd5" (UID: "1086bd39-4637-4123-a7b2-d85d3a603dd5"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.524047 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7d72c18-1516-44a5-ad92-f367c93280b1" path="/var/lib/kubelet/pods/a7d72c18-1516-44a5-ad92-f367c93280b1/volumes" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.536417 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1086bd39-4637-4123-a7b2-d85d3a603dd5-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.536454 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1086bd39-4637-4123-a7b2-d85d3a603dd5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.536466 4933 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1086bd39-4637-4123-a7b2-d85d3a603dd5-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.536475 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1086bd39-4637-4123-a7b2-d85d3a603dd5-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.536488 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b88r9\" (UniqueName: \"kubernetes.io/projected/1086bd39-4637-4123-a7b2-d85d3a603dd5-kube-api-access-b88r9\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.536500 4933 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1086bd39-4637-4123-a7b2-d85d3a603dd5-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.586552 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-6448b46975-jx7gp"] Jan 22 06:06:00 crc kubenswrapper[4933]: W0122 06:06:00.710346 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0fecb571_89ee_4d10_a1e3_e3755946df2b.slice/crio-9ab2c674b5e475c40a5e7792bf4e54e39a93136dd4fe22c3d2e0528252cbef03 WatchSource:0}: Error finding container 9ab2c674b5e475c40a5e7792bf4e54e39a93136dd4fe22c3d2e0528252cbef03: Status 404 returned error can't find the container with id 9ab2c674b5e475c40a5e7792bf4e54e39a93136dd4fe22c3d2e0528252cbef03 Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.733365 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-7c6944456-lk7l7"] Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.852101 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5c9bd98c56-qnqgq"] Jan 22 06:06:00 crc kubenswrapper[4933]: W0122 06:06:00.853617 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2d683dcb_f19b_4f7c_a6e5_69ec3a93208b.slice/crio-4f51c665c42e2aa8e4d95dfbc88fb6a4fda0d3957307a464cba02c0d11625657 WatchSource:0}: Error finding container 4f51c665c42e2aa8e4d95dfbc88fb6a4fda0d3957307a464cba02c0d11625657: Status 404 returned error can't find the container with id 4f51c665c42e2aa8e4d95dfbc88fb6a4fda0d3957307a464cba02c0d11625657 Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 
06:06:00.953252 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5c9bd98c56-qnqgq" event={"ID":"2d683dcb-f19b-4f7c-a6e5-69ec3a93208b","Type":"ContainerStarted","Data":"4f51c665c42e2aa8e4d95dfbc88fb6a4fda0d3957307a464cba02c0d11625657"} Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.954231 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6448b46975-jx7gp" event={"ID":"5505bed5-dba3-4067-b94c-acd00b7c37c7","Type":"ContainerStarted","Data":"ee2bb284bfa725b795ef8face4a2108b16ca4a0f464eff6c1baa136a33a15c0a"} Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.955747 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.956091 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7c6944456-lk7l7" event={"ID":"0fecb571-89ee-4d10-a1e3-e3755946df2b","Type":"ContainerStarted","Data":"9ab2c674b5e475c40a5e7792bf4e54e39a93136dd4fe22c3d2e0528252cbef03"} Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.957881 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-92ghq" event={"ID":"1086bd39-4637-4123-a7b2-d85d3a603dd5","Type":"ContainerDied","Data":"d142b23612449fcd3d950de4f910d8fe2e0b01d2d660279f5adfea23cdafcc7c"} Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.957920 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d142b23612449fcd3d950de4f910d8fe2e0b01d2d660279f5adfea23cdafcc7c" Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.957980 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-92ghq" Jan 22 06:06:00 crc kubenswrapper[4933]: W0122 06:06:00.958797 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podff3d18a9_ac4c_41ab_bd2e_3c361b0f428a.slice/crio-d5804d8c32ca60e82b2c78ccf1929b1e73eb175e5c95007d7cb2d9ade0334748 WatchSource:0}: Error finding container d5804d8c32ca60e82b2c78ccf1929b1e73eb175e5c95007d7cb2d9ade0334748: Status 404 returned error can't find the container with id d5804d8c32ca60e82b2c78ccf1929b1e73eb175e5c95007d7cb2d9ade0334748 Jan 22 06:06:00 crc kubenswrapper[4933]: W0122 06:06:00.983708 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda656ee23_9957_4f1a_9fa8_9737afdd32b1.slice/crio-7bf43300bf18e984e7220e98ac3ad5a7c474eced3f7e934497d6f6bc2677fd5e WatchSource:0}: Error finding container 7bf43300bf18e984e7220e98ac3ad5a7c474eced3f7e934497d6f6bc2677fd5e: Status 404 returned error can't find the container with id 7bf43300bf18e984e7220e98ac3ad5a7c474eced3f7e934497d6f6bc2677fd5e Jan 22 06:06:00 crc kubenswrapper[4933]: I0122 06:06:00.984157 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7bdf86f46f-5c2n7"] Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.187582 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Jan 22 06:06:01 crc kubenswrapper[4933]: E0122 06:06:01.188625 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1086bd39-4637-4123-a7b2-d85d3a603dd5" containerName="cinder-db-sync" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.188696 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="1086bd39-4637-4123-a7b2-d85d3a603dd5" 
containerName="cinder-db-sync" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.189141 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="1086bd39-4637-4123-a7b2-d85d3a603dd5" containerName="cinder-db-sync" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.191071 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.193355 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.193623 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-m2xwl" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.196401 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.196428 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.204753 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.254476 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9654ff73-fb3b-4003-b49a-45c98879a7a1-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"9654ff73-fb3b-4003-b49a-45c98879a7a1\") " pod="openstack/cinder-scheduler-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.254534 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9654ff73-fb3b-4003-b49a-45c98879a7a1-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"9654ff73-fb3b-4003-b49a-45c98879a7a1\") " pod="openstack/cinder-scheduler-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.254565 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9654ff73-fb3b-4003-b49a-45c98879a7a1-config-data\") pod \"cinder-scheduler-0\" (UID: \"9654ff73-fb3b-4003-b49a-45c98879a7a1\") " pod="openstack/cinder-scheduler-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.254592 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v52b9\" (UniqueName: \"kubernetes.io/projected/9654ff73-fb3b-4003-b49a-45c98879a7a1-kube-api-access-v52b9\") pod \"cinder-scheduler-0\" (UID: \"9654ff73-fb3b-4003-b49a-45c98879a7a1\") " pod="openstack/cinder-scheduler-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.254628 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9654ff73-fb3b-4003-b49a-45c98879a7a1-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"9654ff73-fb3b-4003-b49a-45c98879a7a1\") " pod="openstack/cinder-scheduler-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.254680 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9654ff73-fb3b-4003-b49a-45c98879a7a1-scripts\") pod \"cinder-scheduler-0\" (UID: \"9654ff73-fb3b-4003-b49a-45c98879a7a1\") " 
pod="openstack/cinder-scheduler-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.305396 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7bdf86f46f-5c2n7"] Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.331230 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-75bfc9b94f-blh75"] Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.332536 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75bfc9b94f-blh75" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.357410 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9654ff73-fb3b-4003-b49a-45c98879a7a1-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"9654ff73-fb3b-4003-b49a-45c98879a7a1\") " pod="openstack/cinder-scheduler-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.357484 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/86d22942-518c-4716-86a2-b0781a2d92ca-config\") pod \"dnsmasq-dns-75bfc9b94f-blh75\" (UID: \"86d22942-518c-4716-86a2-b0781a2d92ca\") " pod="openstack/dnsmasq-dns-75bfc9b94f-blh75" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.357528 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9654ff73-fb3b-4003-b49a-45c98879a7a1-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"9654ff73-fb3b-4003-b49a-45c98879a7a1\") " pod="openstack/cinder-scheduler-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.357566 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9654ff73-fb3b-4003-b49a-45c98879a7a1-config-data\") pod \"cinder-scheduler-0\" (UID: \"9654ff73-fb3b-4003-b49a-45c98879a7a1\") " pod="openstack/cinder-scheduler-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.357592 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/86d22942-518c-4716-86a2-b0781a2d92ca-dns-svc\") pod \"dnsmasq-dns-75bfc9b94f-blh75\" (UID: \"86d22942-518c-4716-86a2-b0781a2d92ca\") " pod="openstack/dnsmasq-dns-75bfc9b94f-blh75" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.357623 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v52b9\" (UniqueName: \"kubernetes.io/projected/9654ff73-fb3b-4003-b49a-45c98879a7a1-kube-api-access-v52b9\") pod \"cinder-scheduler-0\" (UID: \"9654ff73-fb3b-4003-b49a-45c98879a7a1\") " pod="openstack/cinder-scheduler-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.357679 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9654ff73-fb3b-4003-b49a-45c98879a7a1-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"9654ff73-fb3b-4003-b49a-45c98879a7a1\") " pod="openstack/cinder-scheduler-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.357718 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/86d22942-518c-4716-86a2-b0781a2d92ca-ovsdbserver-sb\") pod \"dnsmasq-dns-75bfc9b94f-blh75\" (UID: \"86d22942-518c-4716-86a2-b0781a2d92ca\") " 
pod="openstack/dnsmasq-dns-75bfc9b94f-blh75" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.357757 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9654ff73-fb3b-4003-b49a-45c98879a7a1-scripts\") pod \"cinder-scheduler-0\" (UID: \"9654ff73-fb3b-4003-b49a-45c98879a7a1\") " pod="openstack/cinder-scheduler-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.357783 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/86d22942-518c-4716-86a2-b0781a2d92ca-dns-swift-storage-0\") pod \"dnsmasq-dns-75bfc9b94f-blh75\" (UID: \"86d22942-518c-4716-86a2-b0781a2d92ca\") " pod="openstack/dnsmasq-dns-75bfc9b94f-blh75" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.357849 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xm5z9\" (UniqueName: \"kubernetes.io/projected/86d22942-518c-4716-86a2-b0781a2d92ca-kube-api-access-xm5z9\") pod \"dnsmasq-dns-75bfc9b94f-blh75\" (UID: \"86d22942-518c-4716-86a2-b0781a2d92ca\") " pod="openstack/dnsmasq-dns-75bfc9b94f-blh75" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.357900 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/86d22942-518c-4716-86a2-b0781a2d92ca-ovsdbserver-nb\") pod \"dnsmasq-dns-75bfc9b94f-blh75\" (UID: \"86d22942-518c-4716-86a2-b0781a2d92ca\") " pod="openstack/dnsmasq-dns-75bfc9b94f-blh75" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.358005 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9654ff73-fb3b-4003-b49a-45c98879a7a1-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"9654ff73-fb3b-4003-b49a-45c98879a7a1\") " pod="openstack/cinder-scheduler-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.366216 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9654ff73-fb3b-4003-b49a-45c98879a7a1-config-data\") pod \"cinder-scheduler-0\" (UID: \"9654ff73-fb3b-4003-b49a-45c98879a7a1\") " pod="openstack/cinder-scheduler-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.368050 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-75bfc9b94f-blh75"] Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.368487 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9654ff73-fb3b-4003-b49a-45c98879a7a1-scripts\") pod \"cinder-scheduler-0\" (UID: \"9654ff73-fb3b-4003-b49a-45c98879a7a1\") " pod="openstack/cinder-scheduler-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.371282 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9654ff73-fb3b-4003-b49a-45c98879a7a1-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"9654ff73-fb3b-4003-b49a-45c98879a7a1\") " pod="openstack/cinder-scheduler-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.382083 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9654ff73-fb3b-4003-b49a-45c98879a7a1-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: 
\"9654ff73-fb3b-4003-b49a-45c98879a7a1\") " pod="openstack/cinder-scheduler-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.386694 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v52b9\" (UniqueName: \"kubernetes.io/projected/9654ff73-fb3b-4003-b49a-45c98879a7a1-kube-api-access-v52b9\") pod \"cinder-scheduler-0\" (UID: \"9654ff73-fb3b-4003-b49a-45c98879a7a1\") " pod="openstack/cinder-scheduler-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.459662 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xm5z9\" (UniqueName: \"kubernetes.io/projected/86d22942-518c-4716-86a2-b0781a2d92ca-kube-api-access-xm5z9\") pod \"dnsmasq-dns-75bfc9b94f-blh75\" (UID: \"86d22942-518c-4716-86a2-b0781a2d92ca\") " pod="openstack/dnsmasq-dns-75bfc9b94f-blh75" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.459720 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/86d22942-518c-4716-86a2-b0781a2d92ca-ovsdbserver-nb\") pod \"dnsmasq-dns-75bfc9b94f-blh75\" (UID: \"86d22942-518c-4716-86a2-b0781a2d92ca\") " pod="openstack/dnsmasq-dns-75bfc9b94f-blh75" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.459745 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/86d22942-518c-4716-86a2-b0781a2d92ca-config\") pod \"dnsmasq-dns-75bfc9b94f-blh75\" (UID: \"86d22942-518c-4716-86a2-b0781a2d92ca\") " pod="openstack/dnsmasq-dns-75bfc9b94f-blh75" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.459793 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/86d22942-518c-4716-86a2-b0781a2d92ca-dns-svc\") pod \"dnsmasq-dns-75bfc9b94f-blh75\" (UID: \"86d22942-518c-4716-86a2-b0781a2d92ca\") " pod="openstack/dnsmasq-dns-75bfc9b94f-blh75" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.459859 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/86d22942-518c-4716-86a2-b0781a2d92ca-ovsdbserver-sb\") pod \"dnsmasq-dns-75bfc9b94f-blh75\" (UID: \"86d22942-518c-4716-86a2-b0781a2d92ca\") " pod="openstack/dnsmasq-dns-75bfc9b94f-blh75" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.459890 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/86d22942-518c-4716-86a2-b0781a2d92ca-dns-swift-storage-0\") pod \"dnsmasq-dns-75bfc9b94f-blh75\" (UID: \"86d22942-518c-4716-86a2-b0781a2d92ca\") " pod="openstack/dnsmasq-dns-75bfc9b94f-blh75" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.460652 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/86d22942-518c-4716-86a2-b0781a2d92ca-config\") pod \"dnsmasq-dns-75bfc9b94f-blh75\" (UID: \"86d22942-518c-4716-86a2-b0781a2d92ca\") " pod="openstack/dnsmasq-dns-75bfc9b94f-blh75" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.460876 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/86d22942-518c-4716-86a2-b0781a2d92ca-ovsdbserver-nb\") pod \"dnsmasq-dns-75bfc9b94f-blh75\" (UID: \"86d22942-518c-4716-86a2-b0781a2d92ca\") " pod="openstack/dnsmasq-dns-75bfc9b94f-blh75" Jan 22 06:06:01 crc 
kubenswrapper[4933]: I0122 06:06:01.461769 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/86d22942-518c-4716-86a2-b0781a2d92ca-dns-svc\") pod \"dnsmasq-dns-75bfc9b94f-blh75\" (UID: \"86d22942-518c-4716-86a2-b0781a2d92ca\") " pod="openstack/dnsmasq-dns-75bfc9b94f-blh75" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.461770 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/86d22942-518c-4716-86a2-b0781a2d92ca-dns-swift-storage-0\") pod \"dnsmasq-dns-75bfc9b94f-blh75\" (UID: \"86d22942-518c-4716-86a2-b0781a2d92ca\") " pod="openstack/dnsmasq-dns-75bfc9b94f-blh75" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.462343 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/86d22942-518c-4716-86a2-b0781a2d92ca-ovsdbserver-sb\") pod \"dnsmasq-dns-75bfc9b94f-blh75\" (UID: \"86d22942-518c-4716-86a2-b0781a2d92ca\") " pod="openstack/dnsmasq-dns-75bfc9b94f-blh75" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.475977 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.477475 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.478215 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xm5z9\" (UniqueName: \"kubernetes.io/projected/86d22942-518c-4716-86a2-b0781a2d92ca-kube-api-access-xm5z9\") pod \"dnsmasq-dns-75bfc9b94f-blh75\" (UID: \"86d22942-518c-4716-86a2-b0781a2d92ca\") " pod="openstack/dnsmasq-dns-75bfc9b94f-blh75" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.485566 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.486314 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.519239 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.561412 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"b9349cd0-4ca8-4508-9a4c-a0951af5a2a8\") " pod="openstack/cinder-api-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.561618 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-config-data\") pod \"cinder-api-0\" (UID: \"b9349cd0-4ca8-4508-9a4c-a0951af5a2a8\") " pod="openstack/cinder-api-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.561775 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-logs\") pod \"cinder-api-0\" (UID: \"b9349cd0-4ca8-4508-9a4c-a0951af5a2a8\") " pod="openstack/cinder-api-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.561896 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jhrb4\" (UniqueName: \"kubernetes.io/projected/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-kube-api-access-jhrb4\") pod \"cinder-api-0\" (UID: \"b9349cd0-4ca8-4508-9a4c-a0951af5a2a8\") " pod="openstack/cinder-api-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.562027 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-config-data-custom\") pod \"cinder-api-0\" (UID: \"b9349cd0-4ca8-4508-9a4c-a0951af5a2a8\") " pod="openstack/cinder-api-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.562194 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-etc-machine-id\") pod \"cinder-api-0\" (UID: \"b9349cd0-4ca8-4508-9a4c-a0951af5a2a8\") " pod="openstack/cinder-api-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.562280 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-scripts\") pod \"cinder-api-0\" (UID: \"b9349cd0-4ca8-4508-9a4c-a0951af5a2a8\") " pod="openstack/cinder-api-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.650553 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-75bfc9b94f-blh75" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.663246 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"b9349cd0-4ca8-4508-9a4c-a0951af5a2a8\") " pod="openstack/cinder-api-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.663298 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-config-data\") pod \"cinder-api-0\" (UID: \"b9349cd0-4ca8-4508-9a4c-a0951af5a2a8\") " pod="openstack/cinder-api-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.663384 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-logs\") pod \"cinder-api-0\" (UID: \"b9349cd0-4ca8-4508-9a4c-a0951af5a2a8\") " pod="openstack/cinder-api-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.663453 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jhrb4\" (UniqueName: \"kubernetes.io/projected/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-kube-api-access-jhrb4\") pod \"cinder-api-0\" (UID: \"b9349cd0-4ca8-4508-9a4c-a0951af5a2a8\") " pod="openstack/cinder-api-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.665281 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-logs\") pod \"cinder-api-0\" (UID: \"b9349cd0-4ca8-4508-9a4c-a0951af5a2a8\") " pod="openstack/cinder-api-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.665772 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-config-data-custom\") pod \"cinder-api-0\" (UID: \"b9349cd0-4ca8-4508-9a4c-a0951af5a2a8\") " pod="openstack/cinder-api-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.665814 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-etc-machine-id\") pod \"cinder-api-0\" (UID: \"b9349cd0-4ca8-4508-9a4c-a0951af5a2a8\") " pod="openstack/cinder-api-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.665850 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-scripts\") pod \"cinder-api-0\" (UID: \"b9349cd0-4ca8-4508-9a4c-a0951af5a2a8\") " pod="openstack/cinder-api-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.666264 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-etc-machine-id\") pod \"cinder-api-0\" (UID: \"b9349cd0-4ca8-4508-9a4c-a0951af5a2a8\") " pod="openstack/cinder-api-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.672474 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-scripts\") pod \"cinder-api-0\" (UID: \"b9349cd0-4ca8-4508-9a4c-a0951af5a2a8\") " 
pod="openstack/cinder-api-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.673603 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-config-data\") pod \"cinder-api-0\" (UID: \"b9349cd0-4ca8-4508-9a4c-a0951af5a2a8\") " pod="openstack/cinder-api-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.677552 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-config-data-custom\") pod \"cinder-api-0\" (UID: \"b9349cd0-4ca8-4508-9a4c-a0951af5a2a8\") " pod="openstack/cinder-api-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.677798 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"b9349cd0-4ca8-4508-9a4c-a0951af5a2a8\") " pod="openstack/cinder-api-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.687535 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jhrb4\" (UniqueName: \"kubernetes.io/projected/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-kube-api-access-jhrb4\") pod \"cinder-api-0\" (UID: \"b9349cd0-4ca8-4508-9a4c-a0951af5a2a8\") " pod="openstack/cinder-api-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.889213 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.975430 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5c9bd98c56-qnqgq" event={"ID":"2d683dcb-f19b-4f7c-a6e5-69ec3a93208b","Type":"ContainerStarted","Data":"d01bdab4b1711810909a920cee5293034b71009acdfd620b98e33f77e1c8919b"} Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.977788 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a","Type":"ContainerStarted","Data":"d5804d8c32ca60e82b2c78ccf1929b1e73eb175e5c95007d7cb2d9ade0334748"} Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.983955 4933 generic.go:334] "Generic (PLEG): container finished" podID="a656ee23-9957-4f1a-9fa8-9737afdd32b1" containerID="6329bba1d64015fce9ecb87bd256103b1af87ead4dd3bda818e81dd3ed2cdf84" exitCode=0 Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.984001 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bdf86f46f-5c2n7" event={"ID":"a656ee23-9957-4f1a-9fa8-9737afdd32b1","Type":"ContainerDied","Data":"6329bba1d64015fce9ecb87bd256103b1af87ead4dd3bda818e81dd3ed2cdf84"} Jan 22 06:06:01 crc kubenswrapper[4933]: I0122 06:06:01.984026 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bdf86f46f-5c2n7" event={"ID":"a656ee23-9957-4f1a-9fa8-9737afdd32b1","Type":"ContainerStarted","Data":"7bf43300bf18e984e7220e98ac3ad5a7c474eced3f7e934497d6f6bc2677fd5e"} Jan 22 06:06:02 crc kubenswrapper[4933]: I0122 06:06:02.008154 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 22 06:06:02 crc kubenswrapper[4933]: W0122 06:06:02.041723 4933 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9654ff73_fb3b_4003_b49a_45c98879a7a1.slice/crio-d47828d66720508abd004a09bd2d2c9a810a576c88e7b432b0cd456c782be480 WatchSource:0}: Error finding container d47828d66720508abd004a09bd2d2c9a810a576c88e7b432b0cd456c782be480: Status 404 returned error can't find the container with id d47828d66720508abd004a09bd2d2c9a810a576c88e7b432b0cd456c782be480
Jan 22 06:06:02 crc kubenswrapper[4933]: I0122 06:06:02.162978 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-75bfc9b94f-blh75"]
Jan 22 06:06:02 crc kubenswrapper[4933]: W0122 06:06:02.164556 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod86d22942_518c_4716_86a2_b0781a2d92ca.slice/crio-9a00094d926c974b70a8ff269ce55628626015f0326b1e66813b44983ea70e85 WatchSource:0}: Error finding container 9a00094d926c974b70a8ff269ce55628626015f0326b1e66813b44983ea70e85: Status 404 returned error can't find the container with id 9a00094d926c974b70a8ff269ce55628626015f0326b1e66813b44983ea70e85
Jan 22 06:06:02 crc kubenswrapper[4933]: I0122 06:06:02.411740 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Jan 22 06:06:02 crc kubenswrapper[4933]: I0122 06:06:02.454643 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bdf86f46f-5c2n7"
Jan 22 06:06:02 crc kubenswrapper[4933]: I0122 06:06:02.481231 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a656ee23-9957-4f1a-9fa8-9737afdd32b1-dns-swift-storage-0\") pod \"a656ee23-9957-4f1a-9fa8-9737afdd32b1\" (UID: \"a656ee23-9957-4f1a-9fa8-9737afdd32b1\") "
Jan 22 06:06:02 crc kubenswrapper[4933]: I0122 06:06:02.481334 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a656ee23-9957-4f1a-9fa8-9737afdd32b1-dns-svc\") pod \"a656ee23-9957-4f1a-9fa8-9737afdd32b1\" (UID: \"a656ee23-9957-4f1a-9fa8-9737afdd32b1\") "
Jan 22 06:06:02 crc kubenswrapper[4933]: I0122 06:06:02.481837 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a656ee23-9957-4f1a-9fa8-9737afdd32b1-config\") pod \"a656ee23-9957-4f1a-9fa8-9737afdd32b1\" (UID: \"a656ee23-9957-4f1a-9fa8-9737afdd32b1\") "
Jan 22 06:06:02 crc kubenswrapper[4933]: I0122 06:06:02.481855 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a656ee23-9957-4f1a-9fa8-9737afdd32b1-ovsdbserver-sb\") pod \"a656ee23-9957-4f1a-9fa8-9737afdd32b1\" (UID: \"a656ee23-9957-4f1a-9fa8-9737afdd32b1\") "
Jan 22 06:06:02 crc kubenswrapper[4933]: I0122 06:06:02.481897 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cq2km\" (UniqueName: \"kubernetes.io/projected/a656ee23-9957-4f1a-9fa8-9737afdd32b1-kube-api-access-cq2km\") pod \"a656ee23-9957-4f1a-9fa8-9737afdd32b1\" (UID: \"a656ee23-9957-4f1a-9fa8-9737afdd32b1\") "
Jan 22 06:06:02 crc kubenswrapper[4933]: I0122 06:06:02.482165 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a656ee23-9957-4f1a-9fa8-9737afdd32b1-ovsdbserver-nb\") pod \"a656ee23-9957-4f1a-9fa8-9737afdd32b1\" (UID: \"a656ee23-9957-4f1a-9fa8-9737afdd32b1\") "
Jan 22 06:06:02 crc kubenswrapper[4933]: I0122 06:06:02.487674 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a656ee23-9957-4f1a-9fa8-9737afdd32b1-kube-api-access-cq2km" (OuterVolumeSpecName: "kube-api-access-cq2km") pod "a656ee23-9957-4f1a-9fa8-9737afdd32b1" (UID: "a656ee23-9957-4f1a-9fa8-9737afdd32b1"). InnerVolumeSpecName "kube-api-access-cq2km". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 06:06:02 crc kubenswrapper[4933]: I0122 06:06:02.543090 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a656ee23-9957-4f1a-9fa8-9737afdd32b1-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a656ee23-9957-4f1a-9fa8-9737afdd32b1" (UID: "a656ee23-9957-4f1a-9fa8-9737afdd32b1"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 06:06:02 crc kubenswrapper[4933]: I0122 06:06:02.555346 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a656ee23-9957-4f1a-9fa8-9737afdd32b1-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a656ee23-9957-4f1a-9fa8-9737afdd32b1" (UID: "a656ee23-9957-4f1a-9fa8-9737afdd32b1"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 06:06:02 crc kubenswrapper[4933]: I0122 06:06:02.566737 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a656ee23-9957-4f1a-9fa8-9737afdd32b1-config" (OuterVolumeSpecName: "config") pod "a656ee23-9957-4f1a-9fa8-9737afdd32b1" (UID: "a656ee23-9957-4f1a-9fa8-9737afdd32b1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 06:06:02 crc kubenswrapper[4933]: I0122 06:06:02.570312 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a656ee23-9957-4f1a-9fa8-9737afdd32b1-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "a656ee23-9957-4f1a-9fa8-9737afdd32b1" (UID: "a656ee23-9957-4f1a-9fa8-9737afdd32b1"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 06:06:02 crc kubenswrapper[4933]: I0122 06:06:02.577311 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a656ee23-9957-4f1a-9fa8-9737afdd32b1-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "a656ee23-9957-4f1a-9fa8-9737afdd32b1" (UID: "a656ee23-9957-4f1a-9fa8-9737afdd32b1"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 06:06:02 crc kubenswrapper[4933]: I0122 06:06:02.583697 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a656ee23-9957-4f1a-9fa8-9737afdd32b1-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Jan 22 06:06:02 crc kubenswrapper[4933]: I0122 06:06:02.583739 4933 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a656ee23-9957-4f1a-9fa8-9737afdd32b1-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Jan 22 06:06:02 crc kubenswrapper[4933]: I0122 06:06:02.583820 4933 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a656ee23-9957-4f1a-9fa8-9737afdd32b1-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 22 06:06:02 crc kubenswrapper[4933]: I0122 06:06:02.583838 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a656ee23-9957-4f1a-9fa8-9737afdd32b1-config\") on node \"crc\" DevicePath \"\""
Jan 22 06:06:02 crc kubenswrapper[4933]: I0122 06:06:02.583850 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a656ee23-9957-4f1a-9fa8-9737afdd32b1-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Jan 22 06:06:02 crc kubenswrapper[4933]: I0122 06:06:02.583861 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cq2km\" (UniqueName: \"kubernetes.io/projected/a656ee23-9957-4f1a-9fa8-9737afdd32b1-kube-api-access-cq2km\") on node \"crc\" DevicePath \"\""
Jan 22 06:06:02 crc kubenswrapper[4933]: W0122 06:06:02.806736 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb9349cd0_4ca8_4508_9a4c_a0951af5a2a8.slice/crio-7cfd757e04ae5da06d3d6d516c2b98a95d532b009bd23f4195ce398860c245de WatchSource:0}: Error finding container 7cfd757e04ae5da06d3d6d516c2b98a95d532b009bd23f4195ce398860c245de: Status 404 returned error can't find the container with id 7cfd757e04ae5da06d3d6d516c2b98a95d532b009bd23f4195ce398860c245de
Jan 22 06:06:02 crc kubenswrapper[4933]: I0122 06:06:02.998410 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a","Type":"ContainerStarted","Data":"fcfa447ba938851d83bd12a97b48467f27657a61dfbaff803278e1ce781d42d5"}
Jan 22 06:06:03 crc kubenswrapper[4933]: I0122 06:06:02.999442 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"9654ff73-fb3b-4003-b49a-45c98879a7a1","Type":"ContainerStarted","Data":"d47828d66720508abd004a09bd2d2c9a810a576c88e7b432b0cd456c782be480"}
Jan 22 06:06:03 crc kubenswrapper[4933]: I0122 06:06:03.002444 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bdf86f46f-5c2n7" event={"ID":"a656ee23-9957-4f1a-9fa8-9737afdd32b1","Type":"ContainerDied","Data":"7bf43300bf18e984e7220e98ac3ad5a7c474eced3f7e934497d6f6bc2677fd5e"}
Jan 22 06:06:03 crc kubenswrapper[4933]: I0122 06:06:03.002496 4933 scope.go:117] "RemoveContainer" containerID="6329bba1d64015fce9ecb87bd256103b1af87ead4dd3bda818e81dd3ed2cdf84"
Jan 22 06:06:03 crc kubenswrapper[4933]: I0122 06:06:03.002799 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bdf86f46f-5c2n7"
Jan 22 06:06:03 crc kubenswrapper[4933]: I0122 06:06:03.007741 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"b9349cd0-4ca8-4508-9a4c-a0951af5a2a8","Type":"ContainerStarted","Data":"7cfd757e04ae5da06d3d6d516c2b98a95d532b009bd23f4195ce398860c245de"}
Jan 22 06:06:03 crc kubenswrapper[4933]: I0122 06:06:03.011356 4933 generic.go:334] "Generic (PLEG): container finished" podID="86d22942-518c-4716-86a2-b0781a2d92ca" containerID="1f225a85114ce01f83ebbce384017c80893d68c6189a9dd8b7f41d67a05d045d" exitCode=0
Jan 22 06:06:03 crc kubenswrapper[4933]: I0122 06:06:03.011401 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75bfc9b94f-blh75" event={"ID":"86d22942-518c-4716-86a2-b0781a2d92ca","Type":"ContainerDied","Data":"1f225a85114ce01f83ebbce384017c80893d68c6189a9dd8b7f41d67a05d045d"}
Jan 22 06:06:03 crc kubenswrapper[4933]: I0122 06:06:03.013200 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75bfc9b94f-blh75" event={"ID":"86d22942-518c-4716-86a2-b0781a2d92ca","Type":"ContainerStarted","Data":"9a00094d926c974b70a8ff269ce55628626015f0326b1e66813b44983ea70e85"}
Jan 22 06:06:03 crc kubenswrapper[4933]: I0122 06:06:03.015196 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5c9bd98c56-qnqgq" event={"ID":"2d683dcb-f19b-4f7c-a6e5-69ec3a93208b","Type":"ContainerStarted","Data":"b688c9df9ddf18e461a754a176dde6318fee2f52bdd4e4de166be8f749491199"}
Jan 22 06:06:03 crc kubenswrapper[4933]: I0122 06:06:03.015366 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5c9bd98c56-qnqgq"
Jan 22 06:06:03 crc kubenswrapper[4933]: I0122 06:06:03.015400 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5c9bd98c56-qnqgq"
Jan 22 06:06:03 crc kubenswrapper[4933]: I0122 06:06:03.056474 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-5c9bd98c56-qnqgq" podStartSLOduration=4.056457118 podStartE2EDuration="4.056457118s" podCreationTimestamp="2026-01-22 06:05:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:06:03.041147705 +0000 UTC m=+1210.878273058" watchObservedRunningTime="2026-01-22 06:06:03.056457118 +0000 UTC m=+1210.893582471"
Jan 22 06:06:03 crc kubenswrapper[4933]: I0122 06:06:03.084575 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7bdf86f46f-5c2n7"]
Jan 22 06:06:03 crc kubenswrapper[4933]: I0122 06:06:03.091766 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7bdf86f46f-5c2n7"]
Jan 22 06:06:04 crc kubenswrapper[4933]: I0122 06:06:04.025591 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a","Type":"ContainerStarted","Data":"18274df194c4d9c5bf0d3aee6ee63f64398c344ac19e30ce8f1aeb774060aae9"}
Jan 22 06:06:04 crc kubenswrapper[4933]: I0122 06:06:04.027298 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6448b46975-jx7gp" event={"ID":"5505bed5-dba3-4067-b94c-acd00b7c37c7","Type":"ContainerStarted","Data":"45526307e9624de914d45bbd929b47ffd667a4044ab9422715456c34fe59622d"}
Jan 22 06:06:04 crc kubenswrapper[4933]: I0122 06:06:04.027343 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6448b46975-jx7gp" event={"ID":"5505bed5-dba3-4067-b94c-acd00b7c37c7","Type":"ContainerStarted","Data":"ae64a47d0a256e71036f25f3770e6938d214f52aa76de1db83e4a3d607be7dbc"}
Jan 22 06:06:04 crc kubenswrapper[4933]: I0122 06:06:04.039249 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7c6944456-lk7l7" event={"ID":"0fecb571-89ee-4d10-a1e3-e3755946df2b","Type":"ContainerStarted","Data":"19349f4fb699d96d982ef68a33cea6a25a5b8d3f3671b4a1b34a92f90876b922"}
Jan 22 06:06:04 crc kubenswrapper[4933]: I0122 06:06:04.039293 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7c6944456-lk7l7" event={"ID":"0fecb571-89ee-4d10-a1e3-e3755946df2b","Type":"ContainerStarted","Data":"11cdb1302043612d4966e8227c66cd55138cd37ef40b24e2659a9776bb49e386"}
Jan 22 06:06:04 crc kubenswrapper[4933]: I0122 06:06:04.043956 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75bfc9b94f-blh75" event={"ID":"86d22942-518c-4716-86a2-b0781a2d92ca","Type":"ContainerStarted","Data":"c9ac846d183cf7936d4607fb4fd576371288fbaf8691ecb2feed707aa3fe2558"}
Jan 22 06:06:04 crc kubenswrapper[4933]: I0122 06:06:04.043988 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-75bfc9b94f-blh75"
Jan 22 06:06:04 crc kubenswrapper[4933]: I0122 06:06:04.073190 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-6448b46975-jx7gp" podStartSLOduration=2.242524424 podStartE2EDuration="5.073176466s" podCreationTimestamp="2026-01-22 06:05:59 +0000 UTC" firstStartedPulling="2026-01-22 06:06:00.584948166 +0000 UTC m=+1208.422073519" lastFinishedPulling="2026-01-22 06:06:03.415600208 +0000 UTC m=+1211.252725561" observedRunningTime="2026-01-22 06:06:04.052386017 +0000 UTC m=+1211.889511360" watchObservedRunningTime="2026-01-22 06:06:04.073176466 +0000 UTC m=+1211.910301819"
Jan 22 06:06:04 crc kubenswrapper[4933]: I0122 06:06:04.080018 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-7c6944456-lk7l7" podStartSLOduration=2.548898913 podStartE2EDuration="5.079999557s" podCreationTimestamp="2026-01-22 06:05:59 +0000 UTC" firstStartedPulling="2026-01-22 06:06:00.716744815 +0000 UTC m=+1208.553870168" lastFinishedPulling="2026-01-22 06:06:03.247845459 +0000 UTC m=+1211.084970812" observedRunningTime="2026-01-22 06:06:04.07048791 +0000 UTC m=+1211.907613263" watchObservedRunningTime="2026-01-22 06:06:04.079999557 +0000 UTC m=+1211.917124910"
Jan 22 06:06:04 crc kubenswrapper[4933]: I0122 06:06:04.094692 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-75bfc9b94f-blh75" podStartSLOduration=3.094673355 podStartE2EDuration="3.094673355s" podCreationTimestamp="2026-01-22 06:06:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:06:04.092580062 +0000 UTC m=+1211.929705415" watchObservedRunningTime="2026-01-22 06:06:04.094673355 +0000 UTC m=+1211.931798708"
Jan 22 06:06:04 crc kubenswrapper[4933]: I0122 06:06:04.461523 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"]
Jan 22 06:06:04 crc kubenswrapper[4933]: I0122 06:06:04.510819 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a656ee23-9957-4f1a-9fa8-9737afdd32b1" path="/var/lib/kubelet/pods/a656ee23-9957-4f1a-9fa8-9737afdd32b1/volumes"
Jan 22 06:06:05 crc kubenswrapper[4933]: I0122 06:06:05.081822 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"9654ff73-fb3b-4003-b49a-45c98879a7a1","Type":"ContainerStarted","Data":"4688290a43c3cf36678f1573f589a3512f5f42ee365373b5e64aa3b27eed1ef4"}
Jan 22 06:06:05 crc kubenswrapper[4933]: I0122 06:06:05.084923 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"b9349cd0-4ca8-4508-9a4c-a0951af5a2a8","Type":"ContainerStarted","Data":"8622e2f03164dd211ab27739e2adc696156772b3afda95d37b177426e80fc5b6"}
Jan 22 06:06:05 crc kubenswrapper[4933]: I0122 06:06:05.253108 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-5fc9d4fd76-k4qh6"
Jan 22 06:06:05 crc kubenswrapper[4933]: I0122 06:06:05.568696 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-856bccf57c-l2f82"]
Jan 22 06:06:05 crc kubenswrapper[4933]: I0122 06:06:05.568936 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-856bccf57c-l2f82" podUID="6541a01b-555e-4734-8eb8-bc63625dd293" containerName="neutron-api" containerID="cri-o://f4e82b1bcf25275851a5aef8022cd4bdd910f6f9ab17a41f7a84149b88a2abdc" gracePeriod=30
Jan 22 06:06:05 crc kubenswrapper[4933]: I0122 06:06:05.569336 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-856bccf57c-l2f82" podUID="6541a01b-555e-4734-8eb8-bc63625dd293" containerName="neutron-httpd" containerID="cri-o://57d4c641b72ffcca676596e412bcc902119cbf32a7b4fb39d8930b32680d0018" gracePeriod=30
Jan 22 06:06:05 crc kubenswrapper[4933]: I0122 06:06:05.654557 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-5f996bcdbf-kwx6s"]
Jan 22 06:06:05 crc kubenswrapper[4933]: E0122 06:06:05.654991 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a656ee23-9957-4f1a-9fa8-9737afdd32b1" containerName="init"
Jan 22 06:06:05 crc kubenswrapper[4933]: I0122 06:06:05.655016 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="a656ee23-9957-4f1a-9fa8-9737afdd32b1" containerName="init"
Jan 22 06:06:05 crc kubenswrapper[4933]: I0122 06:06:05.655289 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="a656ee23-9957-4f1a-9fa8-9737afdd32b1" containerName="init"
Jan 22 06:06:05 crc kubenswrapper[4933]: I0122 06:06:05.656403 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5f996bcdbf-kwx6s"
Jan 22 06:06:05 crc kubenswrapper[4933]: I0122 06:06:05.676882 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5f996bcdbf-kwx6s"]
Jan 22 06:06:05 crc kubenswrapper[4933]: I0122 06:06:05.747811 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe53ac25-75b3-42c3-802f-5359023b26e7-combined-ca-bundle\") pod \"neutron-5f996bcdbf-kwx6s\" (UID: \"fe53ac25-75b3-42c3-802f-5359023b26e7\") " pod="openstack/neutron-5f996bcdbf-kwx6s"
Jan 22 06:06:05 crc kubenswrapper[4933]: I0122 06:06:05.747898 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe53ac25-75b3-42c3-802f-5359023b26e7-ovndb-tls-certs\") pod \"neutron-5f996bcdbf-kwx6s\" (UID: \"fe53ac25-75b3-42c3-802f-5359023b26e7\") " pod="openstack/neutron-5f996bcdbf-kwx6s"
Jan 22 06:06:05 crc kubenswrapper[4933]: I0122 06:06:05.747938 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe53ac25-75b3-42c3-802f-5359023b26e7-public-tls-certs\") pod \"neutron-5f996bcdbf-kwx6s\" (UID: \"fe53ac25-75b3-42c3-802f-5359023b26e7\") " pod="openstack/neutron-5f996bcdbf-kwx6s"
Jan 22 06:06:05 crc kubenswrapper[4933]: I0122 06:06:05.747971 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/fe53ac25-75b3-42c3-802f-5359023b26e7-config\") pod \"neutron-5f996bcdbf-kwx6s\" (UID: \"fe53ac25-75b3-42c3-802f-5359023b26e7\") " pod="openstack/neutron-5f996bcdbf-kwx6s"
Jan 22 06:06:05 crc kubenswrapper[4933]: I0122 06:06:05.748023 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k6nrz\" (UniqueName: \"kubernetes.io/projected/fe53ac25-75b3-42c3-802f-5359023b26e7-kube-api-access-k6nrz\") pod \"neutron-5f996bcdbf-kwx6s\" (UID: \"fe53ac25-75b3-42c3-802f-5359023b26e7\") " pod="openstack/neutron-5f996bcdbf-kwx6s"
Jan 22 06:06:05 crc kubenswrapper[4933]: I0122 06:06:05.748098 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/fe53ac25-75b3-42c3-802f-5359023b26e7-httpd-config\") pod \"neutron-5f996bcdbf-kwx6s\" (UID: \"fe53ac25-75b3-42c3-802f-5359023b26e7\") " pod="openstack/neutron-5f996bcdbf-kwx6s"
Jan 22 06:06:05 crc kubenswrapper[4933]: I0122 06:06:05.748157 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe53ac25-75b3-42c3-802f-5359023b26e7-internal-tls-certs\") pod \"neutron-5f996bcdbf-kwx6s\" (UID: \"fe53ac25-75b3-42c3-802f-5359023b26e7\") " pod="openstack/neutron-5f996bcdbf-kwx6s"
Jan 22 06:06:05 crc kubenswrapper[4933]: I0122 06:06:05.849796 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/fe53ac25-75b3-42c3-802f-5359023b26e7-httpd-config\") pod \"neutron-5f996bcdbf-kwx6s\" (UID: \"fe53ac25-75b3-42c3-802f-5359023b26e7\") " pod="openstack/neutron-5f996bcdbf-kwx6s"
Jan 22 06:06:05 crc kubenswrapper[4933]: I0122 06:06:05.849863 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe53ac25-75b3-42c3-802f-5359023b26e7-internal-tls-certs\") pod \"neutron-5f996bcdbf-kwx6s\" (UID: \"fe53ac25-75b3-42c3-802f-5359023b26e7\") " pod="openstack/neutron-5f996bcdbf-kwx6s"
Jan 22 06:06:05 crc kubenswrapper[4933]: I0122 06:06:05.849906 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe53ac25-75b3-42c3-802f-5359023b26e7-combined-ca-bundle\") pod \"neutron-5f996bcdbf-kwx6s\" (UID: \"fe53ac25-75b3-42c3-802f-5359023b26e7\") " pod="openstack/neutron-5f996bcdbf-kwx6s"
Jan 22 06:06:05 crc kubenswrapper[4933]: I0122 06:06:05.849945 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe53ac25-75b3-42c3-802f-5359023b26e7-ovndb-tls-certs\") pod \"neutron-5f996bcdbf-kwx6s\" (UID: \"fe53ac25-75b3-42c3-802f-5359023b26e7\") " pod="openstack/neutron-5f996bcdbf-kwx6s"
Jan 22 06:06:05 crc kubenswrapper[4933]: I0122 06:06:05.849970 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe53ac25-75b3-42c3-802f-5359023b26e7-public-tls-certs\") pod \"neutron-5f996bcdbf-kwx6s\" (UID: \"fe53ac25-75b3-42c3-802f-5359023b26e7\") " pod="openstack/neutron-5f996bcdbf-kwx6s"
Jan 22 06:06:05 crc kubenswrapper[4933]: I0122 06:06:05.849993 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/fe53ac25-75b3-42c3-802f-5359023b26e7-config\") pod \"neutron-5f996bcdbf-kwx6s\" (UID: \"fe53ac25-75b3-42c3-802f-5359023b26e7\") " pod="openstack/neutron-5f996bcdbf-kwx6s"
Jan 22 06:06:05 crc kubenswrapper[4933]: I0122 06:06:05.850028 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k6nrz\" (UniqueName: \"kubernetes.io/projected/fe53ac25-75b3-42c3-802f-5359023b26e7-kube-api-access-k6nrz\") pod \"neutron-5f996bcdbf-kwx6s\" (UID: \"fe53ac25-75b3-42c3-802f-5359023b26e7\") " pod="openstack/neutron-5f996bcdbf-kwx6s"
Jan 22 06:06:05 crc kubenswrapper[4933]: I0122 06:06:05.857137 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe53ac25-75b3-42c3-802f-5359023b26e7-internal-tls-certs\") pod \"neutron-5f996bcdbf-kwx6s\" (UID: \"fe53ac25-75b3-42c3-802f-5359023b26e7\") " pod="openstack/neutron-5f996bcdbf-kwx6s"
Jan 22 06:06:05 crc kubenswrapper[4933]: I0122 06:06:05.857421 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe53ac25-75b3-42c3-802f-5359023b26e7-combined-ca-bundle\") pod \"neutron-5f996bcdbf-kwx6s\" (UID: \"fe53ac25-75b3-42c3-802f-5359023b26e7\") " pod="openstack/neutron-5f996bcdbf-kwx6s"
Jan 22 06:06:05 crc kubenswrapper[4933]: I0122 06:06:05.858022 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/fe53ac25-75b3-42c3-802f-5359023b26e7-httpd-config\") pod \"neutron-5f996bcdbf-kwx6s\" (UID: \"fe53ac25-75b3-42c3-802f-5359023b26e7\") " pod="openstack/neutron-5f996bcdbf-kwx6s"
Jan 22 06:06:05 crc kubenswrapper[4933]: I0122 06:06:05.858881 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe53ac25-75b3-42c3-802f-5359023b26e7-public-tls-certs\") pod \"neutron-5f996bcdbf-kwx6s\" (UID: \"fe53ac25-75b3-42c3-802f-5359023b26e7\") " pod="openstack/neutron-5f996bcdbf-kwx6s"
Jan 22 06:06:05 crc kubenswrapper[4933]: I0122 06:06:05.860221 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe53ac25-75b3-42c3-802f-5359023b26e7-ovndb-tls-certs\") pod \"neutron-5f996bcdbf-kwx6s\" (UID: \"fe53ac25-75b3-42c3-802f-5359023b26e7\") " pod="openstack/neutron-5f996bcdbf-kwx6s"
Jan 22 06:06:05 crc kubenswrapper[4933]: I0122 06:06:05.863388 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/fe53ac25-75b3-42c3-802f-5359023b26e7-config\") pod \"neutron-5f996bcdbf-kwx6s\" (UID: \"fe53ac25-75b3-42c3-802f-5359023b26e7\") " pod="openstack/neutron-5f996bcdbf-kwx6s"
Jan 22 06:06:05 crc kubenswrapper[4933]: I0122 06:06:05.874180 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k6nrz\" (UniqueName: \"kubernetes.io/projected/fe53ac25-75b3-42c3-802f-5359023b26e7-kube-api-access-k6nrz\") pod \"neutron-5f996bcdbf-kwx6s\" (UID: \"fe53ac25-75b3-42c3-802f-5359023b26e7\") " pod="openstack/neutron-5f996bcdbf-kwx6s"
Jan 22 06:06:05 crc kubenswrapper[4933]: I0122 06:06:05.943339 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-856bccf57c-l2f82" podUID="6541a01b-555e-4734-8eb8-bc63625dd293" containerName="neutron-httpd" probeResult="failure" output="Get \"https://10.217.0.154:9696/\": read tcp 10.217.0.2:42808->10.217.0.154:9696: read: connection reset by peer"
Jan 22 06:06:05 crc kubenswrapper[4933]: I0122 06:06:05.972573 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5f996bcdbf-kwx6s"
Jan 22 06:06:06 crc kubenswrapper[4933]: I0122 06:06:06.094768 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a","Type":"ContainerStarted","Data":"940b655bb0c7de4fcc417570ee7c8fa73e8ae31c8c84741e9fc12144df9a7083"}
Jan 22 06:06:06 crc kubenswrapper[4933]: I0122 06:06:06.542662 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-575b89575b-kkrzb"]
Jan 22 06:06:06 crc kubenswrapper[4933]: I0122 06:06:06.544229 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-575b89575b-kkrzb"
Jan 22 06:06:06 crc kubenswrapper[4933]: I0122 06:06:06.546516 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc"
Jan 22 06:06:06 crc kubenswrapper[4933]: I0122 06:06:06.546717 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc"
Jan 22 06:06:06 crc kubenswrapper[4933]: I0122 06:06:06.562826 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-575b89575b-kkrzb"]
Jan 22 06:06:06 crc kubenswrapper[4933]: I0122 06:06:06.664987 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/91864da0-319b-46e9-b4ef-8ccee4c52d37-internal-tls-certs\") pod \"barbican-api-575b89575b-kkrzb\" (UID: \"91864da0-319b-46e9-b4ef-8ccee4c52d37\") " pod="openstack/barbican-api-575b89575b-kkrzb"
Jan 22 06:06:06 crc kubenswrapper[4933]: I0122 06:06:06.665060 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91864da0-319b-46e9-b4ef-8ccee4c52d37-config-data\") pod \"barbican-api-575b89575b-kkrzb\" (UID: \"91864da0-319b-46e9-b4ef-8ccee4c52d37\") " pod="openstack/barbican-api-575b89575b-kkrzb"
Jan 22 06:06:06 crc kubenswrapper[4933]: I0122 06:06:06.665107 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/91864da0-319b-46e9-b4ef-8ccee4c52d37-logs\") pod \"barbican-api-575b89575b-kkrzb\" (UID: \"91864da0-319b-46e9-b4ef-8ccee4c52d37\") " pod="openstack/barbican-api-575b89575b-kkrzb"
Jan 22 06:06:06 crc kubenswrapper[4933]: I0122 06:06:06.665132 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-29k5l\" (UniqueName: \"kubernetes.io/projected/91864da0-319b-46e9-b4ef-8ccee4c52d37-kube-api-access-29k5l\") pod \"barbican-api-575b89575b-kkrzb\" (UID: \"91864da0-319b-46e9-b4ef-8ccee4c52d37\") " pod="openstack/barbican-api-575b89575b-kkrzb"
Jan 22 06:06:06 crc kubenswrapper[4933]: I0122 06:06:06.665158 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/91864da0-319b-46e9-b4ef-8ccee4c52d37-config-data-custom\") pod \"barbican-api-575b89575b-kkrzb\" (UID: \"91864da0-319b-46e9-b4ef-8ccee4c52d37\") " pod="openstack/barbican-api-575b89575b-kkrzb"
Jan 22 06:06:06 crc kubenswrapper[4933]: I0122 06:06:06.665178 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/91864da0-319b-46e9-b4ef-8ccee4c52d37-public-tls-certs\") pod \"barbican-api-575b89575b-kkrzb\" (UID: \"91864da0-319b-46e9-b4ef-8ccee4c52d37\") " pod="openstack/barbican-api-575b89575b-kkrzb"
Jan 22 06:06:06 crc kubenswrapper[4933]: I0122 06:06:06.665212 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91864da0-319b-46e9-b4ef-8ccee4c52d37-combined-ca-bundle\") pod \"barbican-api-575b89575b-kkrzb\" (UID: \"91864da0-319b-46e9-b4ef-8ccee4c52d37\") " pod="openstack/barbican-api-575b89575b-kkrzb"
Jan 22 06:06:06 crc kubenswrapper[4933]: W0122 06:06:06.732249 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfe53ac25_75b3_42c3_802f_5359023b26e7.slice/crio-92c106dd1cfea34377fcb0355c29ec458885ff5df61981d69adf101c3328fad3 WatchSource:0}: Error finding container 92c106dd1cfea34377fcb0355c29ec458885ff5df61981d69adf101c3328fad3: Status 404 returned error can't find the container with id 92c106dd1cfea34377fcb0355c29ec458885ff5df61981d69adf101c3328fad3
Jan 22 06:06:06 crc kubenswrapper[4933]: I0122 06:06:06.741639 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5f996bcdbf-kwx6s"]
Jan 22 06:06:06 crc kubenswrapper[4933]: I0122 06:06:06.767927 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/91864da0-319b-46e9-b4ef-8ccee4c52d37-logs\") pod \"barbican-api-575b89575b-kkrzb\" (UID: \"91864da0-319b-46e9-b4ef-8ccee4c52d37\") " pod="openstack/barbican-api-575b89575b-kkrzb"
Jan 22 06:06:06 crc kubenswrapper[4933]: I0122 06:06:06.768005 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-29k5l\" (UniqueName: \"kubernetes.io/projected/91864da0-319b-46e9-b4ef-8ccee4c52d37-kube-api-access-29k5l\") pod \"barbican-api-575b89575b-kkrzb\" (UID: \"91864da0-319b-46e9-b4ef-8ccee4c52d37\") " pod="openstack/barbican-api-575b89575b-kkrzb"
Jan 22 06:06:06 crc kubenswrapper[4933]: I0122 06:06:06.768040 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/91864da0-319b-46e9-b4ef-8ccee4c52d37-config-data-custom\") pod \"barbican-api-575b89575b-kkrzb\" (UID: \"91864da0-319b-46e9-b4ef-8ccee4c52d37\") " pod="openstack/barbican-api-575b89575b-kkrzb"
Jan 22 06:06:06 crc kubenswrapper[4933]: I0122 06:06:06.768062 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/91864da0-319b-46e9-b4ef-8ccee4c52d37-public-tls-certs\") pod \"barbican-api-575b89575b-kkrzb\" (UID: \"91864da0-319b-46e9-b4ef-8ccee4c52d37\") " pod="openstack/barbican-api-575b89575b-kkrzb"
Jan 22 06:06:06 crc kubenswrapper[4933]: I0122 06:06:06.768115 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91864da0-319b-46e9-b4ef-8ccee4c52d37-combined-ca-bundle\") pod \"barbican-api-575b89575b-kkrzb\" (UID: \"91864da0-319b-46e9-b4ef-8ccee4c52d37\") " pod="openstack/barbican-api-575b89575b-kkrzb"
Jan 22 06:06:06 crc kubenswrapper[4933]: I0122 06:06:06.768197 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/91864da0-319b-46e9-b4ef-8ccee4c52d37-internal-tls-certs\") pod \"barbican-api-575b89575b-kkrzb\" (UID: \"91864da0-319b-46e9-b4ef-8ccee4c52d37\") " pod="openstack/barbican-api-575b89575b-kkrzb"
Jan 22 06:06:06 crc kubenswrapper[4933]: I0122 06:06:06.768249 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91864da0-319b-46e9-b4ef-8ccee4c52d37-config-data\") pod \"barbican-api-575b89575b-kkrzb\" (UID: \"91864da0-319b-46e9-b4ef-8ccee4c52d37\") " pod="openstack/barbican-api-575b89575b-kkrzb"
Jan 22 06:06:06 crc kubenswrapper[4933]: I0122 06:06:06.780484 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/91864da0-319b-46e9-b4ef-8ccee4c52d37-logs\") pod \"barbican-api-575b89575b-kkrzb\" (UID: \"91864da0-319b-46e9-b4ef-8ccee4c52d37\") " pod="openstack/barbican-api-575b89575b-kkrzb"
Jan 22 06:06:06 crc kubenswrapper[4933]: I0122 06:06:06.784564 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/91864da0-319b-46e9-b4ef-8ccee4c52d37-public-tls-certs\") pod \"barbican-api-575b89575b-kkrzb\" (UID: \"91864da0-319b-46e9-b4ef-8ccee4c52d37\") " pod="openstack/barbican-api-575b89575b-kkrzb"
Jan 22 06:06:06 crc kubenswrapper[4933]: I0122 06:06:06.785109 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/91864da0-319b-46e9-b4ef-8ccee4c52d37-internal-tls-certs\") pod \"barbican-api-575b89575b-kkrzb\" (UID: \"91864da0-319b-46e9-b4ef-8ccee4c52d37\") " pod="openstack/barbican-api-575b89575b-kkrzb"
Jan 22 06:06:06 crc kubenswrapper[4933]: I0122 06:06:06.785185 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91864da0-319b-46e9-b4ef-8ccee4c52d37-config-data\") pod \"barbican-api-575b89575b-kkrzb\" (UID: \"91864da0-319b-46e9-b4ef-8ccee4c52d37\") " pod="openstack/barbican-api-575b89575b-kkrzb"
Jan 22 06:06:06 crc kubenswrapper[4933]: I0122 06:06:06.785741 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91864da0-319b-46e9-b4ef-8ccee4c52d37-combined-ca-bundle\") pod \"barbican-api-575b89575b-kkrzb\" (UID: \"91864da0-319b-46e9-b4ef-8ccee4c52d37\") " pod="openstack/barbican-api-575b89575b-kkrzb"
Jan 22 06:06:06 crc kubenswrapper[4933]: I0122 06:06:06.785906 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/91864da0-319b-46e9-b4ef-8ccee4c52d37-config-data-custom\") pod \"barbican-api-575b89575b-kkrzb\" (UID: \"91864da0-319b-46e9-b4ef-8ccee4c52d37\") " pod="openstack/barbican-api-575b89575b-kkrzb"
Jan 22 06:06:06 crc kubenswrapper[4933]: I0122 06:06:06.786181 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-29k5l\" (UniqueName: \"kubernetes.io/projected/91864da0-319b-46e9-b4ef-8ccee4c52d37-kube-api-access-29k5l\") pod \"barbican-api-575b89575b-kkrzb\" (UID: \"91864da0-319b-46e9-b4ef-8ccee4c52d37\") " pod="openstack/barbican-api-575b89575b-kkrzb"
Jan 22 06:06:06 crc kubenswrapper[4933]: I0122 06:06:06.873643 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-575b89575b-kkrzb"
Jan 22 06:06:07 crc kubenswrapper[4933]: I0122 06:06:07.108611 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"b9349cd0-4ca8-4508-9a4c-a0951af5a2a8","Type":"ContainerStarted","Data":"368668889d693f9de2ccc2afce5cfe8616e8f1d8bf44d05875fb069940c4ec6d"}
Jan 22 06:06:07 crc kubenswrapper[4933]: I0122 06:06:07.109199 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="b9349cd0-4ca8-4508-9a4c-a0951af5a2a8" containerName="cinder-api-log" containerID="cri-o://8622e2f03164dd211ab27739e2adc696156772b3afda95d37b177426e80fc5b6" gracePeriod=30
Jan 22 06:06:07 crc kubenswrapper[4933]: I0122 06:06:07.109436 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0"
Jan 22 06:06:07 crc kubenswrapper[4933]: I0122 06:06:07.109562 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="b9349cd0-4ca8-4508-9a4c-a0951af5a2a8" containerName="cinder-api" containerID="cri-o://368668889d693f9de2ccc2afce5cfe8616e8f1d8bf44d05875fb069940c4ec6d" gracePeriod=30
Jan 22 06:06:07 crc kubenswrapper[4933]: I0122 06:06:07.114787 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5f996bcdbf-kwx6s" event={"ID":"fe53ac25-75b3-42c3-802f-5359023b26e7","Type":"ContainerStarted","Data":"2ae19b8623001ea92970743871f64a042fee6abb1332e83f11c894e81eff91b0"}
Jan 22 06:06:07 crc kubenswrapper[4933]: I0122 06:06:07.114819 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5f996bcdbf-kwx6s" event={"ID":"fe53ac25-75b3-42c3-802f-5359023b26e7","Type":"ContainerStarted","Data":"92c106dd1cfea34377fcb0355c29ec458885ff5df61981d69adf101c3328fad3"}
Jan 22 06:06:07 crc kubenswrapper[4933]: I0122 06:06:07.128123 4933 generic.go:334] "Generic (PLEG): container finished" podID="6541a01b-555e-4734-8eb8-bc63625dd293" containerID="57d4c641b72ffcca676596e412bcc902119cbf32a7b4fb39d8930b32680d0018" exitCode=0
Jan 22 06:06:07 crc kubenswrapper[4933]: I0122 06:06:07.128165 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-856bccf57c-l2f82" event={"ID":"6541a01b-555e-4734-8eb8-bc63625dd293","Type":"ContainerDied","Data":"57d4c641b72ffcca676596e412bcc902119cbf32a7b4fb39d8930b32680d0018"}
Jan 22 06:06:07 crc kubenswrapper[4933]: I0122 06:06:07.140123 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"9654ff73-fb3b-4003-b49a-45c98879a7a1","Type":"ContainerStarted","Data":"8f755668d9241345f086ca8331cf5575ffa315a5b0d5ff45f93ab91543269ac8"}
Jan 22 06:06:07 crc kubenswrapper[4933]: I0122 06:06:07.162654 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=6.162633465 podStartE2EDuration="6.162633465s" podCreationTimestamp="2026-01-22 06:06:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:06:07.139370573 +0000 UTC m=+1214.976495926" watchObservedRunningTime="2026-01-22 06:06:07.162633465 +0000 UTC m=+1214.999758818"
Jan 22 06:06:07 crc kubenswrapper[4933]: I0122 06:06:07.341847 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=4.989977184 podStartE2EDuration="6.341828951s" podCreationTimestamp="2026-01-22 06:06:01 +0000 UTC" firstStartedPulling="2026-01-22 06:06:02.087502205 +0000 UTC m=+1209.924627558" lastFinishedPulling="2026-01-22 06:06:03.439353972 +0000 UTC m=+1211.276479325" observedRunningTime="2026-01-22 06:06:07.176105293 +0000 UTC m=+1215.013230666" watchObservedRunningTime="2026-01-22 06:06:07.341828951 +0000 UTC m=+1215.178954304"
Jan 22 06:06:07 crc kubenswrapper[4933]: I0122 06:06:07.341997 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-575b89575b-kkrzb"]
Jan 22 06:06:07 crc kubenswrapper[4933]: W0122 06:06:07.357634 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod91864da0_319b_46e9_b4ef_8ccee4c52d37.slice/crio-b0eceadb1d255469a928409a36ac9832735a249064e555e1cf2847e6302b19bc WatchSource:0}: Error finding container b0eceadb1d255469a928409a36ac9832735a249064e555e1cf2847e6302b19bc: Status 404 returned error can't find the container with id b0eceadb1d255469a928409a36ac9832735a249064e555e1cf2847e6302b19bc
Jan 22 06:06:07 crc kubenswrapper[4933]: I0122 06:06:07.646466 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Jan 22 06:06:07 crc kubenswrapper[4933]: I0122 06:06:07.793435 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-combined-ca-bundle\") pod \"b9349cd0-4ca8-4508-9a4c-a0951af5a2a8\" (UID: \"b9349cd0-4ca8-4508-9a4c-a0951af5a2a8\") "
Jan 22 06:06:07 crc kubenswrapper[4933]: I0122 06:06:07.793496 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-config-data\") pod \"b9349cd0-4ca8-4508-9a4c-a0951af5a2a8\" (UID: \"b9349cd0-4ca8-4508-9a4c-a0951af5a2a8\") "
Jan 22 06:06:07 crc kubenswrapper[4933]: I0122 06:06:07.793562 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-etc-machine-id\") pod \"b9349cd0-4ca8-4508-9a4c-a0951af5a2a8\" (UID: \"b9349cd0-4ca8-4508-9a4c-a0951af5a2a8\") "
Jan 22 06:06:07 crc kubenswrapper[4933]: I0122 06:06:07.793583 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhrb4\" (UniqueName: \"kubernetes.io/projected/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-kube-api-access-jhrb4\") pod \"b9349cd0-4ca8-4508-9a4c-a0951af5a2a8\" (UID: \"b9349cd0-4ca8-4508-9a4c-a0951af5a2a8\") "
Jan 22 06:06:07 crc kubenswrapper[4933]: I0122 06:06:07.793609 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-scripts\") pod \"b9349cd0-4ca8-4508-9a4c-a0951af5a2a8\" (UID: \"b9349cd0-4ca8-4508-9a4c-a0951af5a2a8\") "
Jan 22 06:06:07 crc kubenswrapper[4933]: I0122 06:06:07.793629 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-config-data-custom\") pod \"b9349cd0-4ca8-4508-9a4c-a0951af5a2a8\" (UID: \"b9349cd0-4ca8-4508-9a4c-a0951af5a2a8\") "
Jan 22 06:06:07 crc kubenswrapper[4933]: I0122 06:06:07.793654 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-logs\") pod \"b9349cd0-4ca8-4508-9a4c-a0951af5a2a8\" (UID: \"b9349cd0-4ca8-4508-9a4c-a0951af5a2a8\") "
Jan 22 06:06:07 crc kubenswrapper[4933]: I0122 06:06:07.794546 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-logs" (OuterVolumeSpecName: "logs") pod "b9349cd0-4ca8-4508-9a4c-a0951af5a2a8" (UID: "b9349cd0-4ca8-4508-9a4c-a0951af5a2a8"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 06:06:07 crc kubenswrapper[4933]: I0122 06:06:07.795179 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "b9349cd0-4ca8-4508-9a4c-a0951af5a2a8" (UID: "b9349cd0-4ca8-4508-9a4c-a0951af5a2a8"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 22 06:06:07 crc kubenswrapper[4933]: I0122 06:06:07.810957 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "b9349cd0-4ca8-4508-9a4c-a0951af5a2a8" (UID: "b9349cd0-4ca8-4508-9a4c-a0951af5a2a8"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:06:07 crc kubenswrapper[4933]: I0122 06:06:07.810999 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-scripts" (OuterVolumeSpecName: "scripts") pod "b9349cd0-4ca8-4508-9a4c-a0951af5a2a8" (UID: "b9349cd0-4ca8-4508-9a4c-a0951af5a2a8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:06:07 crc kubenswrapper[4933]: I0122 06:06:07.812892 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-kube-api-access-jhrb4" (OuterVolumeSpecName: "kube-api-access-jhrb4") pod "b9349cd0-4ca8-4508-9a4c-a0951af5a2a8" (UID: "b9349cd0-4ca8-4508-9a4c-a0951af5a2a8"). InnerVolumeSpecName "kube-api-access-jhrb4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 06:06:07 crc kubenswrapper[4933]: I0122 06:06:07.841889 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b9349cd0-4ca8-4508-9a4c-a0951af5a2a8" (UID: "b9349cd0-4ca8-4508-9a4c-a0951af5a2a8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:06:07 crc kubenswrapper[4933]: I0122 06:06:07.877637 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-config-data" (OuterVolumeSpecName: "config-data") pod "b9349cd0-4ca8-4508-9a4c-a0951af5a2a8" (UID: "b9349cd0-4ca8-4508-9a4c-a0951af5a2a8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:06:07 crc kubenswrapper[4933]: I0122 06:06:07.896809 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 22 06:06:07 crc kubenswrapper[4933]: I0122 06:06:07.897581 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-config-data\") on node \"crc\" DevicePath \"\""
Jan 22 06:06:07 crc kubenswrapper[4933]: I0122 06:06:07.897591 4933 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-etc-machine-id\") on node \"crc\" DevicePath \"\""
Jan 22 06:06:07 crc kubenswrapper[4933]: I0122 06:06:07.897600 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhrb4\" (UniqueName: \"kubernetes.io/projected/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-kube-api-access-jhrb4\") on node \"crc\" DevicePath \"\""
Jan 22 06:06:07 crc kubenswrapper[4933]: I0122 06:06:07.897612 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-scripts\") on node \"crc\" DevicePath \"\""
Jan 22 06:06:07 crc kubenswrapper[4933]: I0122 06:06:07.897620 4933 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-config-data-custom\") on node \"crc\" DevicePath \"\""
Jan 22 06:06:07 crc kubenswrapper[4933]: I0122 06:06:07.897628 4933 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8-logs\") on node \"crc\" DevicePath \"\""
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.157826 4933 generic.go:334] "Generic (PLEG): container finished" podID="b9349cd0-4ca8-4508-9a4c-a0951af5a2a8" containerID="368668889d693f9de2ccc2afce5cfe8616e8f1d8bf44d05875fb069940c4ec6d" exitCode=0
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.157859 4933 generic.go:334] "Generic (PLEG): container finished" podID="b9349cd0-4ca8-4508-9a4c-a0951af5a2a8" containerID="8622e2f03164dd211ab27739e2adc696156772b3afda95d37b177426e80fc5b6" exitCode=143
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.157903 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.157913 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"b9349cd0-4ca8-4508-9a4c-a0951af5a2a8","Type":"ContainerDied","Data":"368668889d693f9de2ccc2afce5cfe8616e8f1d8bf44d05875fb069940c4ec6d"}
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.158027 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"b9349cd0-4ca8-4508-9a4c-a0951af5a2a8","Type":"ContainerDied","Data":"8622e2f03164dd211ab27739e2adc696156772b3afda95d37b177426e80fc5b6"}
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.158042 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"b9349cd0-4ca8-4508-9a4c-a0951af5a2a8","Type":"ContainerDied","Data":"7cfd757e04ae5da06d3d6d516c2b98a95d532b009bd23f4195ce398860c245de"}
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.158060 4933 scope.go:117] "RemoveContainer" containerID="368668889d693f9de2ccc2afce5cfe8616e8f1d8bf44d05875fb069940c4ec6d"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.167793 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5f996bcdbf-kwx6s" event={"ID":"fe53ac25-75b3-42c3-802f-5359023b26e7","Type":"ContainerStarted","Data":"e6d22e9623f38cd68cde6e98c7b0ee2b102f8edb35af1c6667a3464fee551f58"}
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.167990 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-5f996bcdbf-kwx6s"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.173949 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a","Type":"ContainerStarted","Data":"4a1ff673babb55ae0c8b13a685ef7dc0941d53b2e94847d0a4636f75c60b8a64"}
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.174950 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.186705 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-575b89575b-kkrzb" event={"ID":"91864da0-319b-46e9-b4ef-8ccee4c52d37","Type":"ContainerStarted","Data":"c6536d22d6c93b55c99436f1124a25adb4cae4775413fcbf11c9e30b27a18603"}
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.186910 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-575b89575b-kkrzb" event={"ID":"91864da0-319b-46e9-b4ef-8ccee4c52d37","Type":"ContainerStarted","Data":"919264dcf0b82bef1d83226e9c6472a32457bb0515e5a68ff0d276535cb3dcff"}
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.187002 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-575b89575b-kkrzb" event={"ID":"91864da0-319b-46e9-b4ef-8ccee4c52d37","Type":"ContainerStarted","Data":"b0eceadb1d255469a928409a36ac9832735a249064e555e1cf2847e6302b19bc"}
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.187142 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-575b89575b-kkrzb"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.187294 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-575b89575b-kkrzb"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.192608 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-5f996bcdbf-kwx6s" podStartSLOduration=3.192567205 podStartE2EDuration="3.192567205s" podCreationTimestamp="2026-01-22 06:06:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:06:08.189519508 +0000 UTC m=+1216.026644891" watchObservedRunningTime="2026-01-22 06:06:08.192567205 +0000 UTC m=+1216.029692568"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.194297 4933 scope.go:117] "RemoveContainer" containerID="8622e2f03164dd211ab27739e2adc696156772b3afda95d37b177426e80fc5b6"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.215538 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-575b89575b-kkrzb" podStartSLOduration=2.215523079 podStartE2EDuration="2.215523079s" podCreationTimestamp="2026-01-22 06:06:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:06:08.208552175 +0000 UTC m=+1216.045677528" watchObservedRunningTime="2026-01-22 06:06:08.215523079 +0000 UTC m=+1216.052648432"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.222797 4933 scope.go:117] "RemoveContainer" containerID="368668889d693f9de2ccc2afce5cfe8616e8f1d8bf44d05875fb069940c4ec6d"
Jan 22 06:06:08 crc kubenswrapper[4933]: E0122 06:06:08.225586 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"368668889d693f9de2ccc2afce5cfe8616e8f1d8bf44d05875fb069940c4ec6d\": container with ID starting with 368668889d693f9de2ccc2afce5cfe8616e8f1d8bf44d05875fb069940c4ec6d not found: ID does not exist" containerID="368668889d693f9de2ccc2afce5cfe8616e8f1d8bf44d05875fb069940c4ec6d"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.225628 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"368668889d693f9de2ccc2afce5cfe8616e8f1d8bf44d05875fb069940c4ec6d"} err="failed to get container status \"368668889d693f9de2ccc2afce5cfe8616e8f1d8bf44d05875fb069940c4ec6d\": rpc error: code = NotFound desc = could not find container \"368668889d693f9de2ccc2afce5cfe8616e8f1d8bf44d05875fb069940c4ec6d\": container with ID starting with 368668889d693f9de2ccc2afce5cfe8616e8f1d8bf44d05875fb069940c4ec6d not found: ID does not exist"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.225650 4933 scope.go:117] "RemoveContainer" containerID="8622e2f03164dd211ab27739e2adc696156772b3afda95d37b177426e80fc5b6"
Jan 22 06:06:08 crc kubenswrapper[4933]: E0122 06:06:08.226180 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8622e2f03164dd211ab27739e2adc696156772b3afda95d37b177426e80fc5b6\": container with ID starting with 8622e2f03164dd211ab27739e2adc696156772b3afda95d37b177426e80fc5b6 not found: ID does not exist" containerID="8622e2f03164dd211ab27739e2adc696156772b3afda95d37b177426e80fc5b6"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.226206 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8622e2f03164dd211ab27739e2adc696156772b3afda95d37b177426e80fc5b6"} err="failed to get container status \"8622e2f03164dd211ab27739e2adc696156772b3afda95d37b177426e80fc5b6\": rpc error: code = NotFound desc = could not find container \"8622e2f03164dd211ab27739e2adc696156772b3afda95d37b177426e80fc5b6\": container with ID starting with 8622e2f03164dd211ab27739e2adc696156772b3afda95d37b177426e80fc5b6 not found: ID does not exist"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.226219 4933 scope.go:117] "RemoveContainer" containerID="368668889d693f9de2ccc2afce5cfe8616e8f1d8bf44d05875fb069940c4ec6d"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.226534 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"368668889d693f9de2ccc2afce5cfe8616e8f1d8bf44d05875fb069940c4ec6d"} err="failed to get container status \"368668889d693f9de2ccc2afce5cfe8616e8f1d8bf44d05875fb069940c4ec6d\": rpc error: code = NotFound desc = could not find container \"368668889d693f9de2ccc2afce5cfe8616e8f1d8bf44d05875fb069940c4ec6d\": container with ID starting with 368668889d693f9de2ccc2afce5cfe8616e8f1d8bf44d05875fb069940c4ec6d not found: ID does not exist"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.226576 4933 scope.go:117] "RemoveContainer" containerID="8622e2f03164dd211ab27739e2adc696156772b3afda95d37b177426e80fc5b6"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.226940 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8622e2f03164dd211ab27739e2adc696156772b3afda95d37b177426e80fc5b6"} err="failed to get container status \"8622e2f03164dd211ab27739e2adc696156772b3afda95d37b177426e80fc5b6\": rpc error: code = NotFound desc = could not find container \"8622e2f03164dd211ab27739e2adc696156772b3afda95d37b177426e80fc5b6\": container with ID starting with 8622e2f03164dd211ab27739e2adc696156772b3afda95d37b177426e80fc5b6 not found: ID does not exist"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.246202 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.392818548 podStartE2EDuration="9.246179827s" podCreationTimestamp="2026-01-22 06:05:59 +0000 UTC" firstStartedPulling="2026-01-22 06:06:00.961606574 +0000 UTC m=+1208.798731927" lastFinishedPulling="2026-01-22 06:06:07.814967843 +0000 UTC m=+1215.652093206" observedRunningTime="2026-01-22 06:06:08.233144681 +0000 UTC m=+1216.070270034" watchObservedRunningTime="2026-01-22 06:06:08.246179827 +0000 UTC m=+1216.083305180"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.267544 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"]
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.275591 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"]
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.291967 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"]
Jan 22 06:06:08 crc kubenswrapper[4933]: E0122 06:06:08.292451 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9349cd0-4ca8-4508-9a4c-a0951af5a2a8" containerName="cinder-api"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.292474 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9349cd0-4ca8-4508-9a4c-a0951af5a2a8" containerName="cinder-api"
Jan 22 06:06:08 crc kubenswrapper[4933]: E0122 06:06:08.292505 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9349cd0-4ca8-4508-9a4c-a0951af5a2a8" containerName="cinder-api-log"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.292514 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9349cd0-4ca8-4508-9a4c-a0951af5a2a8" containerName="cinder-api-log"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.292725 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9349cd0-4ca8-4508-9a4c-a0951af5a2a8" containerName="cinder-api-log"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.292756 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9349cd0-4ca8-4508-9a4c-a0951af5a2a8" containerName="cinder-api"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.293880 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.295601 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.296434 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.296652 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.301864 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-566788757d-gkrdt"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.308180 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.406450 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/603c9f42-93c4-4268-b513-d2309571ac20-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"603c9f42-93c4-4268-b513-d2309571ac20\") " pod="openstack/cinder-api-0"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.406795 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/603c9f42-93c4-4268-b513-d2309571ac20-config-data-custom\") pod \"cinder-api-0\" (UID: \"603c9f42-93c4-4268-b513-d2309571ac20\") " pod="openstack/cinder-api-0"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.406823 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/603c9f42-93c4-4268-b513-d2309571ac20-etc-machine-id\") pod \"cinder-api-0\" (UID: \"603c9f42-93c4-4268-b513-d2309571ac20\") " pod="openstack/cinder-api-0"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.406908 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/603c9f42-93c4-4268-b513-d2309571ac20-scripts\") pod \"cinder-api-0\" (UID: \"603c9f42-93c4-4268-b513-d2309571ac20\") " pod="openstack/cinder-api-0"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.406931 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mjclj\" (UniqueName: \"kubernetes.io/projected/603c9f42-93c4-4268-b513-d2309571ac20-kube-api-access-mjclj\") pod \"cinder-api-0\" (UID: \"603c9f42-93c4-4268-b513-d2309571ac20\") " pod="openstack/cinder-api-0"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.406974 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/603c9f42-93c4-4268-b513-d2309571ac20-public-tls-certs\") pod \"cinder-api-0\" (UID: \"603c9f42-93c4-4268-b513-d2309571ac20\") " pod="openstack/cinder-api-0"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.407003 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/603c9f42-93c4-4268-b513-d2309571ac20-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"603c9f42-93c4-4268-b513-d2309571ac20\") " pod="openstack/cinder-api-0"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.407098 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/603c9f42-93c4-4268-b513-d2309571ac20-config-data\") pod \"cinder-api-0\" (UID: \"603c9f42-93c4-4268-b513-d2309571ac20\") " pod="openstack/cinder-api-0"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.407152 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/603c9f42-93c4-4268-b513-d2309571ac20-logs\") pod \"cinder-api-0\" (UID: \"603c9f42-93c4-4268-b513-d2309571ac20\") " pod="openstack/cinder-api-0"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.502493 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b9349cd0-4ca8-4508-9a4c-a0951af5a2a8" path="/var/lib/kubelet/pods/b9349cd0-4ca8-4508-9a4c-a0951af5a2a8/volumes"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.508620 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/603c9f42-93c4-4268-b513-d2309571ac20-scripts\") pod \"cinder-api-0\" (UID: \"603c9f42-93c4-4268-b513-d2309571ac20\") " pod="openstack/cinder-api-0"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.508659 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mjclj\" (UniqueName: \"kubernetes.io/projected/603c9f42-93c4-4268-b513-d2309571ac20-kube-api-access-mjclj\") pod \"cinder-api-0\" (UID: \"603c9f42-93c4-4268-b513-d2309571ac20\") " pod="openstack/cinder-api-0"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.508690 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/603c9f42-93c4-4268-b513-d2309571ac20-public-tls-certs\") pod \"cinder-api-0\" (UID: \"603c9f42-93c4-4268-b513-d2309571ac20\") " pod="openstack/cinder-api-0"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.508711 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/603c9f42-93c4-4268-b513-d2309571ac20-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"603c9f42-93c4-4268-b513-d2309571ac20\") " pod="openstack/cinder-api-0"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.508783 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/603c9f42-93c4-4268-b513-d2309571ac20-config-data\") pod \"cinder-api-0\" (UID: \"603c9f42-93c4-4268-b513-d2309571ac20\") " pod="openstack/cinder-api-0"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.508829 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/603c9f42-93c4-4268-b513-d2309571ac20-logs\") pod \"cinder-api-0\" (UID: \"603c9f42-93c4-4268-b513-d2309571ac20\") " pod="openstack/cinder-api-0"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.508868 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/603c9f42-93c4-4268-b513-d2309571ac20-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"603c9f42-93c4-4268-b513-d2309571ac20\") " pod="openstack/cinder-api-0"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.508900 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/603c9f42-93c4-4268-b513-d2309571ac20-config-data-custom\") pod \"cinder-api-0\" (UID: \"603c9f42-93c4-4268-b513-d2309571ac20\") " pod="openstack/cinder-api-0"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.508935 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/603c9f42-93c4-4268-b513-d2309571ac20-etc-machine-id\") pod \"cinder-api-0\" (UID: \"603c9f42-93c4-4268-b513-d2309571ac20\") " pod="openstack/cinder-api-0"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.509007 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/603c9f42-93c4-4268-b513-d2309571ac20-etc-machine-id\") pod \"cinder-api-0\" (UID: \"603c9f42-93c4-4268-b513-d2309571ac20\") " pod="openstack/cinder-api-0"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.509485 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/603c9f42-93c4-4268-b513-d2309571ac20-logs\") pod \"cinder-api-0\" (UID: \"603c9f42-93c4-4268-b513-d2309571ac20\") " pod="openstack/cinder-api-0"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.513585 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/603c9f42-93c4-4268-b513-d2309571ac20-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"603c9f42-93c4-4268-b513-d2309571ac20\") " pod="openstack/cinder-api-0"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.514334 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/603c9f42-93c4-4268-b513-d2309571ac20-config-data\") pod \"cinder-api-0\" (UID: \"603c9f42-93c4-4268-b513-d2309571ac20\") " pod="openstack/cinder-api-0"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.514532 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/603c9f42-93c4-4268-b513-d2309571ac20-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"603c9f42-93c4-4268-b513-d2309571ac20\") " pod="openstack/cinder-api-0"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.514688 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/603c9f42-93c4-4268-b513-d2309571ac20-public-tls-certs\") pod \"cinder-api-0\" (UID: \"603c9f42-93c4-4268-b513-d2309571ac20\") " pod="openstack/cinder-api-0"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.515177 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/603c9f42-93c4-4268-b513-d2309571ac20-config-data-custom\") pod \"cinder-api-0\" (UID: \"603c9f42-93c4-4268-b513-d2309571ac20\") " pod="openstack/cinder-api-0"
Jan 22 06:06:08 crc kubenswrapper[4933]: I0122
06:06:08.521548 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-856bccf57c-l2f82" podUID="6541a01b-555e-4734-8eb8-bc63625dd293" containerName="neutron-httpd" probeResult="failure" output="Get \"https://10.217.0.154:9696/\": dial tcp 10.217.0.154:9696: connect: connection refused" Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.531561 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/603c9f42-93c4-4268-b513-d2309571ac20-scripts\") pod \"cinder-api-0\" (UID: \"603c9f42-93c4-4268-b513-d2309571ac20\") " pod="openstack/cinder-api-0" Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.549551 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mjclj\" (UniqueName: \"kubernetes.io/projected/603c9f42-93c4-4268-b513-d2309571ac20-kube-api-access-mjclj\") pod \"cinder-api-0\" (UID: \"603c9f42-93c4-4268-b513-d2309571ac20\") " pod="openstack/cinder-api-0" Jan 22 06:06:08 crc kubenswrapper[4933]: I0122 06:06:08.619187 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 22 06:06:09 crc kubenswrapper[4933]: I0122 06:06:09.161744 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 22 06:06:09 crc kubenswrapper[4933]: I0122 06:06:09.210501 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"603c9f42-93c4-4268-b513-d2309571ac20","Type":"ContainerStarted","Data":"8aba54981644ff82dc6633776e219d26daca6577f1495bfce8dc2469b5517867"} Jan 22 06:06:10 crc kubenswrapper[4933]: I0122 06:06:10.223319 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"603c9f42-93c4-4268-b513-d2309571ac20","Type":"ContainerStarted","Data":"2f820ff473f51e41089b39b460bd1b0e17ef3dee077f8634fc530dbd7212251b"} Jan 22 06:06:10 crc kubenswrapper[4933]: I0122 06:06:10.942657 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:06:10 crc kubenswrapper[4933]: I0122 06:06:10.943037 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:06:11 crc kubenswrapper[4933]: I0122 06:06:11.233354 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"603c9f42-93c4-4268-b513-d2309571ac20","Type":"ContainerStarted","Data":"a5442678c4d8db8a7d32c5e8c0b8ef799742f21cd0c0918ab7cc4e0588f933fc"} Jan 22 06:06:11 crc kubenswrapper[4933]: I0122 06:06:11.233600 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Jan 22 06:06:11 crc kubenswrapper[4933]: I0122 06:06:11.265423 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.265403667 podStartE2EDuration="3.265403667s" podCreationTimestamp="2026-01-22 06:06:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 
06:06:11.250975696 +0000 UTC m=+1219.088101049" watchObservedRunningTime="2026-01-22 06:06:11.265403667 +0000 UTC m=+1219.102529030" Jan 22 06:06:11 crc kubenswrapper[4933]: I0122 06:06:11.521226 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 22 06:06:11 crc kubenswrapper[4933]: I0122 06:06:11.652298 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-75bfc9b94f-blh75" Jan 22 06:06:11 crc kubenswrapper[4933]: I0122 06:06:11.728608 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b9c8b59c-cbvvm"] Jan 22 06:06:11 crc kubenswrapper[4933]: I0122 06:06:11.728849 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6b9c8b59c-cbvvm" podUID="e98f5ce4-ac57-4a9e-9ffa-8d48a238e064" containerName="dnsmasq-dns" containerID="cri-o://4d6483276b97f7c59ec0cf5441b440eca417184a7ba22e5b4af01d24fc16568e" gracePeriod=10 Jan 22 06:06:11 crc kubenswrapper[4933]: I0122 06:06:11.774042 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5c9bd98c56-qnqgq" Jan 22 06:06:11 crc kubenswrapper[4933]: I0122 06:06:11.829092 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 22 06:06:12 crc kubenswrapper[4933]: I0122 06:06:12.025607 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5c9bd98c56-qnqgq" Jan 22 06:06:12 crc kubenswrapper[4933]: I0122 06:06:12.222439 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b9c8b59c-cbvvm" Jan 22 06:06:12 crc kubenswrapper[4933]: I0122 06:06:12.250595 4933 generic.go:334] "Generic (PLEG): container finished" podID="e98f5ce4-ac57-4a9e-9ffa-8d48a238e064" containerID="4d6483276b97f7c59ec0cf5441b440eca417184a7ba22e5b4af01d24fc16568e" exitCode=0 Jan 22 06:06:12 crc kubenswrapper[4933]: I0122 06:06:12.250694 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b9c8b59c-cbvvm" Jan 22 06:06:12 crc kubenswrapper[4933]: I0122 06:06:12.250720 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b9c8b59c-cbvvm" event={"ID":"e98f5ce4-ac57-4a9e-9ffa-8d48a238e064","Type":"ContainerDied","Data":"4d6483276b97f7c59ec0cf5441b440eca417184a7ba22e5b4af01d24fc16568e"} Jan 22 06:06:12 crc kubenswrapper[4933]: I0122 06:06:12.250774 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b9c8b59c-cbvvm" event={"ID":"e98f5ce4-ac57-4a9e-9ffa-8d48a238e064","Type":"ContainerDied","Data":"88293620d576b74ef169edbb8151035e945734fede38301bbc82ce5fab6843f3"} Jan 22 06:06:12 crc kubenswrapper[4933]: I0122 06:06:12.250848 4933 scope.go:117] "RemoveContainer" containerID="4d6483276b97f7c59ec0cf5441b440eca417184a7ba22e5b4af01d24fc16568e" Jan 22 06:06:12 crc kubenswrapper[4933]: I0122 06:06:12.296608 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 22 06:06:12 crc kubenswrapper[4933]: I0122 06:06:12.308371 4933 scope.go:117] "RemoveContainer" containerID="e8ff61a17541a5e8a5ec72dd298103ce7744ae1ca696ca271d5cc3c0051d62f3" Jan 22 06:06:12 crc kubenswrapper[4933]: I0122 06:06:12.357068 4933 scope.go:117] "RemoveContainer" containerID="4d6483276b97f7c59ec0cf5441b440eca417184a7ba22e5b4af01d24fc16568e" Jan 22 06:06:12 crc kubenswrapper[4933]: E0122 06:06:12.372324 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4d6483276b97f7c59ec0cf5441b440eca417184a7ba22e5b4af01d24fc16568e\": container with ID starting with 4d6483276b97f7c59ec0cf5441b440eca417184a7ba22e5b4af01d24fc16568e not found: ID does not exist" containerID="4d6483276b97f7c59ec0cf5441b440eca417184a7ba22e5b4af01d24fc16568e" Jan 22 06:06:12 crc kubenswrapper[4933]: I0122 06:06:12.372372 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d6483276b97f7c59ec0cf5441b440eca417184a7ba22e5b4af01d24fc16568e"} err="failed to get container status \"4d6483276b97f7c59ec0cf5441b440eca417184a7ba22e5b4af01d24fc16568e\": rpc error: code = NotFound desc = could not find container \"4d6483276b97f7c59ec0cf5441b440eca417184a7ba22e5b4af01d24fc16568e\": container with ID starting with 4d6483276b97f7c59ec0cf5441b440eca417184a7ba22e5b4af01d24fc16568e not found: ID does not exist" Jan 22 06:06:12 crc kubenswrapper[4933]: I0122 06:06:12.372397 4933 scope.go:117] "RemoveContainer" containerID="e8ff61a17541a5e8a5ec72dd298103ce7744ae1ca696ca271d5cc3c0051d62f3" Jan 22 06:06:12 crc kubenswrapper[4933]: E0122 06:06:12.373962 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e8ff61a17541a5e8a5ec72dd298103ce7744ae1ca696ca271d5cc3c0051d62f3\": container with ID starting with e8ff61a17541a5e8a5ec72dd298103ce7744ae1ca696ca271d5cc3c0051d62f3 not found: ID does not exist" containerID="e8ff61a17541a5e8a5ec72dd298103ce7744ae1ca696ca271d5cc3c0051d62f3" Jan 22 06:06:12 crc kubenswrapper[4933]: I0122 06:06:12.373989 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e8ff61a17541a5e8a5ec72dd298103ce7744ae1ca696ca271d5cc3c0051d62f3"} err="failed to get container status \"e8ff61a17541a5e8a5ec72dd298103ce7744ae1ca696ca271d5cc3c0051d62f3\": rpc error: code = NotFound desc = could not find container \"e8ff61a17541a5e8a5ec72dd298103ce7744ae1ca696ca271d5cc3c0051d62f3\": 
container with ID starting with e8ff61a17541a5e8a5ec72dd298103ce7744ae1ca696ca271d5cc3c0051d62f3 not found: ID does not exist" Jan 22 06:06:12 crc kubenswrapper[4933]: I0122 06:06:12.397469 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e98f5ce4-ac57-4a9e-9ffa-8d48a238e064-ovsdbserver-nb\") pod \"e98f5ce4-ac57-4a9e-9ffa-8d48a238e064\" (UID: \"e98f5ce4-ac57-4a9e-9ffa-8d48a238e064\") " Jan 22 06:06:12 crc kubenswrapper[4933]: I0122 06:06:12.397549 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e98f5ce4-ac57-4a9e-9ffa-8d48a238e064-dns-svc\") pod \"e98f5ce4-ac57-4a9e-9ffa-8d48a238e064\" (UID: \"e98f5ce4-ac57-4a9e-9ffa-8d48a238e064\") " Jan 22 06:06:12 crc kubenswrapper[4933]: I0122 06:06:12.397566 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e98f5ce4-ac57-4a9e-9ffa-8d48a238e064-ovsdbserver-sb\") pod \"e98f5ce4-ac57-4a9e-9ffa-8d48a238e064\" (UID: \"e98f5ce4-ac57-4a9e-9ffa-8d48a238e064\") " Jan 22 06:06:12 crc kubenswrapper[4933]: I0122 06:06:12.397582 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e98f5ce4-ac57-4a9e-9ffa-8d48a238e064-dns-swift-storage-0\") pod \"e98f5ce4-ac57-4a9e-9ffa-8d48a238e064\" (UID: \"e98f5ce4-ac57-4a9e-9ffa-8d48a238e064\") " Jan 22 06:06:12 crc kubenswrapper[4933]: I0122 06:06:12.397608 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4ktv\" (UniqueName: \"kubernetes.io/projected/e98f5ce4-ac57-4a9e-9ffa-8d48a238e064-kube-api-access-w4ktv\") pod \"e98f5ce4-ac57-4a9e-9ffa-8d48a238e064\" (UID: \"e98f5ce4-ac57-4a9e-9ffa-8d48a238e064\") " Jan 22 06:06:12 crc kubenswrapper[4933]: I0122 06:06:12.397629 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e98f5ce4-ac57-4a9e-9ffa-8d48a238e064-config\") pod \"e98f5ce4-ac57-4a9e-9ffa-8d48a238e064\" (UID: \"e98f5ce4-ac57-4a9e-9ffa-8d48a238e064\") " Jan 22 06:06:12 crc kubenswrapper[4933]: I0122 06:06:12.420756 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e98f5ce4-ac57-4a9e-9ffa-8d48a238e064-kube-api-access-w4ktv" (OuterVolumeSpecName: "kube-api-access-w4ktv") pod "e98f5ce4-ac57-4a9e-9ffa-8d48a238e064" (UID: "e98f5ce4-ac57-4a9e-9ffa-8d48a238e064"). InnerVolumeSpecName "kube-api-access-w4ktv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:06:12 crc kubenswrapper[4933]: I0122 06:06:12.472907 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e98f5ce4-ac57-4a9e-9ffa-8d48a238e064-config" (OuterVolumeSpecName: "config") pod "e98f5ce4-ac57-4a9e-9ffa-8d48a238e064" (UID: "e98f5ce4-ac57-4a9e-9ffa-8d48a238e064"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:06:12 crc kubenswrapper[4933]: I0122 06:06:12.501526 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4ktv\" (UniqueName: \"kubernetes.io/projected/e98f5ce4-ac57-4a9e-9ffa-8d48a238e064-kube-api-access-w4ktv\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:12 crc kubenswrapper[4933]: I0122 06:06:12.501558 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e98f5ce4-ac57-4a9e-9ffa-8d48a238e064-config\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:12 crc kubenswrapper[4933]: I0122 06:06:12.505790 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e98f5ce4-ac57-4a9e-9ffa-8d48a238e064-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e98f5ce4-ac57-4a9e-9ffa-8d48a238e064" (UID: "e98f5ce4-ac57-4a9e-9ffa-8d48a238e064"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:06:12 crc kubenswrapper[4933]: I0122 06:06:12.505810 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e98f5ce4-ac57-4a9e-9ffa-8d48a238e064-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e98f5ce4-ac57-4a9e-9ffa-8d48a238e064" (UID: "e98f5ce4-ac57-4a9e-9ffa-8d48a238e064"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:06:12 crc kubenswrapper[4933]: I0122 06:06:12.515605 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e98f5ce4-ac57-4a9e-9ffa-8d48a238e064-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e98f5ce4-ac57-4a9e-9ffa-8d48a238e064" (UID: "e98f5ce4-ac57-4a9e-9ffa-8d48a238e064"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:06:12 crc kubenswrapper[4933]: I0122 06:06:12.516038 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e98f5ce4-ac57-4a9e-9ffa-8d48a238e064-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "e98f5ce4-ac57-4a9e-9ffa-8d48a238e064" (UID: "e98f5ce4-ac57-4a9e-9ffa-8d48a238e064"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:06:12 crc kubenswrapper[4933]: I0122 06:06:12.577711 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b9c8b59c-cbvvm"] Jan 22 06:06:12 crc kubenswrapper[4933]: I0122 06:06:12.587223 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6b9c8b59c-cbvvm"] Jan 22 06:06:12 crc kubenswrapper[4933]: I0122 06:06:12.603140 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e98f5ce4-ac57-4a9e-9ffa-8d48a238e064-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:12 crc kubenswrapper[4933]: I0122 06:06:12.603171 4933 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e98f5ce4-ac57-4a9e-9ffa-8d48a238e064-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:12 crc kubenswrapper[4933]: I0122 06:06:12.603181 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e98f5ce4-ac57-4a9e-9ffa-8d48a238e064-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:12 crc kubenswrapper[4933]: I0122 06:06:12.603189 4933 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e98f5ce4-ac57-4a9e-9ffa-8d48a238e064-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:13 crc kubenswrapper[4933]: I0122 06:06:13.259907 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="9654ff73-fb3b-4003-b49a-45c98879a7a1" containerName="cinder-scheduler" containerID="cri-o://4688290a43c3cf36678f1573f589a3512f5f42ee365373b5e64aa3b27eed1ef4" gracePeriod=30 Jan 22 06:06:13 crc kubenswrapper[4933]: I0122 06:06:13.259950 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="9654ff73-fb3b-4003-b49a-45c98879a7a1" containerName="probe" containerID="cri-o://8f755668d9241345f086ca8331cf5575ffa315a5b0d5ff45f93ab91543269ac8" gracePeriod=30 Jan 22 06:06:14 crc kubenswrapper[4933]: I0122 06:06:14.269955 4933 generic.go:334] "Generic (PLEG): container finished" podID="9654ff73-fb3b-4003-b49a-45c98879a7a1" containerID="8f755668d9241345f086ca8331cf5575ffa315a5b0d5ff45f93ab91543269ac8" exitCode=0 Jan 22 06:06:14 crc kubenswrapper[4933]: I0122 06:06:14.270008 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"9654ff73-fb3b-4003-b49a-45c98879a7a1","Type":"ContainerDied","Data":"8f755668d9241345f086ca8331cf5575ffa315a5b0d5ff45f93ab91543269ac8"} Jan 22 06:06:14 crc kubenswrapper[4933]: I0122 06:06:14.504839 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e98f5ce4-ac57-4a9e-9ffa-8d48a238e064" path="/var/lib/kubelet/pods/e98f5ce4-ac57-4a9e-9ffa-8d48a238e064/volumes" Jan 22 06:06:14 crc kubenswrapper[4933]: I0122 06:06:14.527047 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-7d459c58f9-bc2hf" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.269251 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.287097 4933 generic.go:334] "Generic (PLEG): container finished" podID="9654ff73-fb3b-4003-b49a-45c98879a7a1" containerID="4688290a43c3cf36678f1573f589a3512f5f42ee365373b5e64aa3b27eed1ef4" exitCode=0 Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.287144 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"9654ff73-fb3b-4003-b49a-45c98879a7a1","Type":"ContainerDied","Data":"4688290a43c3cf36678f1573f589a3512f5f42ee365373b5e64aa3b27eed1ef4"} Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.287176 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"9654ff73-fb3b-4003-b49a-45c98879a7a1","Type":"ContainerDied","Data":"d47828d66720508abd004a09bd2d2c9a810a576c88e7b432b0cd456c782be480"} Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.287199 4933 scope.go:117] "RemoveContainer" containerID="8f755668d9241345f086ca8331cf5575ffa315a5b0d5ff45f93ab91543269ac8" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.287348 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.320123 4933 scope.go:117] "RemoveContainer" containerID="4688290a43c3cf36678f1573f589a3512f5f42ee365373b5e64aa3b27eed1ef4" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.370868 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9654ff73-fb3b-4003-b49a-45c98879a7a1-etc-machine-id\") pod \"9654ff73-fb3b-4003-b49a-45c98879a7a1\" (UID: \"9654ff73-fb3b-4003-b49a-45c98879a7a1\") " Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.370911 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v52b9\" (UniqueName: \"kubernetes.io/projected/9654ff73-fb3b-4003-b49a-45c98879a7a1-kube-api-access-v52b9\") pod \"9654ff73-fb3b-4003-b49a-45c98879a7a1\" (UID: \"9654ff73-fb3b-4003-b49a-45c98879a7a1\") " Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.370940 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9654ff73-fb3b-4003-b49a-45c98879a7a1-config-data\") pod \"9654ff73-fb3b-4003-b49a-45c98879a7a1\" (UID: \"9654ff73-fb3b-4003-b49a-45c98879a7a1\") " Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.370977 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9654ff73-fb3b-4003-b49a-45c98879a7a1-scripts\") pod \"9654ff73-fb3b-4003-b49a-45c98879a7a1\" (UID: \"9654ff73-fb3b-4003-b49a-45c98879a7a1\") " Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.371013 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9654ff73-fb3b-4003-b49a-45c98879a7a1-config-data-custom\") pod \"9654ff73-fb3b-4003-b49a-45c98879a7a1\" (UID: \"9654ff73-fb3b-4003-b49a-45c98879a7a1\") " Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.371051 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9654ff73-fb3b-4003-b49a-45c98879a7a1-combined-ca-bundle\") pod \"9654ff73-fb3b-4003-b49a-45c98879a7a1\" (UID: 
\"9654ff73-fb3b-4003-b49a-45c98879a7a1\") " Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.372090 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9654ff73-fb3b-4003-b49a-45c98879a7a1-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "9654ff73-fb3b-4003-b49a-45c98879a7a1" (UID: "9654ff73-fb3b-4003-b49a-45c98879a7a1"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.375275 4933 scope.go:117] "RemoveContainer" containerID="8f755668d9241345f086ca8331cf5575ffa315a5b0d5ff45f93ab91543269ac8" Jan 22 06:06:16 crc kubenswrapper[4933]: E0122 06:06:16.387278 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8f755668d9241345f086ca8331cf5575ffa315a5b0d5ff45f93ab91543269ac8\": container with ID starting with 8f755668d9241345f086ca8331cf5575ffa315a5b0d5ff45f93ab91543269ac8 not found: ID does not exist" containerID="8f755668d9241345f086ca8331cf5575ffa315a5b0d5ff45f93ab91543269ac8" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.387329 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f755668d9241345f086ca8331cf5575ffa315a5b0d5ff45f93ab91543269ac8"} err="failed to get container status \"8f755668d9241345f086ca8331cf5575ffa315a5b0d5ff45f93ab91543269ac8\": rpc error: code = NotFound desc = could not find container \"8f755668d9241345f086ca8331cf5575ffa315a5b0d5ff45f93ab91543269ac8\": container with ID starting with 8f755668d9241345f086ca8331cf5575ffa315a5b0d5ff45f93ab91543269ac8 not found: ID does not exist" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.387356 4933 scope.go:117] "RemoveContainer" containerID="4688290a43c3cf36678f1573f589a3512f5f42ee365373b5e64aa3b27eed1ef4" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.387575 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9654ff73-fb3b-4003-b49a-45c98879a7a1-kube-api-access-v52b9" (OuterVolumeSpecName: "kube-api-access-v52b9") pod "9654ff73-fb3b-4003-b49a-45c98879a7a1" (UID: "9654ff73-fb3b-4003-b49a-45c98879a7a1"). InnerVolumeSpecName "kube-api-access-v52b9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.391258 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9654ff73-fb3b-4003-b49a-45c98879a7a1-scripts" (OuterVolumeSpecName: "scripts") pod "9654ff73-fb3b-4003-b49a-45c98879a7a1" (UID: "9654ff73-fb3b-4003-b49a-45c98879a7a1"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:06:16 crc kubenswrapper[4933]: E0122 06:06:16.399247 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4688290a43c3cf36678f1573f589a3512f5f42ee365373b5e64aa3b27eed1ef4\": container with ID starting with 4688290a43c3cf36678f1573f589a3512f5f42ee365373b5e64aa3b27eed1ef4 not found: ID does not exist" containerID="4688290a43c3cf36678f1573f589a3512f5f42ee365373b5e64aa3b27eed1ef4" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.399331 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4688290a43c3cf36678f1573f589a3512f5f42ee365373b5e64aa3b27eed1ef4"} err="failed to get container status \"4688290a43c3cf36678f1573f589a3512f5f42ee365373b5e64aa3b27eed1ef4\": rpc error: code = NotFound desc = could not find container \"4688290a43c3cf36678f1573f589a3512f5f42ee365373b5e64aa3b27eed1ef4\": container with ID starting with 4688290a43c3cf36678f1573f589a3512f5f42ee365373b5e64aa3b27eed1ef4 not found: ID does not exist" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.418335 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9654ff73-fb3b-4003-b49a-45c98879a7a1-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "9654ff73-fb3b-4003-b49a-45c98879a7a1" (UID: "9654ff73-fb3b-4003-b49a-45c98879a7a1"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.455263 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9654ff73-fb3b-4003-b49a-45c98879a7a1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9654ff73-fb3b-4003-b49a-45c98879a7a1" (UID: "9654ff73-fb3b-4003-b49a-45c98879a7a1"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.473676 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9654ff73-fb3b-4003-b49a-45c98879a7a1-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.473707 4933 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9654ff73-fb3b-4003-b49a-45c98879a7a1-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.473719 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9654ff73-fb3b-4003-b49a-45c98879a7a1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.473729 4933 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9654ff73-fb3b-4003-b49a-45c98879a7a1-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.473737 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v52b9\" (UniqueName: \"kubernetes.io/projected/9654ff73-fb3b-4003-b49a-45c98879a7a1-kube-api-access-v52b9\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.522302 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9654ff73-fb3b-4003-b49a-45c98879a7a1-config-data" (OuterVolumeSpecName: "config-data") pod "9654ff73-fb3b-4003-b49a-45c98879a7a1" (UID: "9654ff73-fb3b-4003-b49a-45c98879a7a1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.575660 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9654ff73-fb3b-4003-b49a-45c98879a7a1-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.610803 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.626712 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.665141 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Jan 22 06:06:16 crc kubenswrapper[4933]: E0122 06:06:16.665527 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9654ff73-fb3b-4003-b49a-45c98879a7a1" containerName="probe" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.665547 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="9654ff73-fb3b-4003-b49a-45c98879a7a1" containerName="probe" Jan 22 06:06:16 crc kubenswrapper[4933]: E0122 06:06:16.665570 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e98f5ce4-ac57-4a9e-9ffa-8d48a238e064" containerName="init" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.665576 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="e98f5ce4-ac57-4a9e-9ffa-8d48a238e064" containerName="init" Jan 22 06:06:16 crc kubenswrapper[4933]: E0122 06:06:16.665587 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e98f5ce4-ac57-4a9e-9ffa-8d48a238e064" containerName="dnsmasq-dns" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 
06:06:16.665593 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="e98f5ce4-ac57-4a9e-9ffa-8d48a238e064" containerName="dnsmasq-dns" Jan 22 06:06:16 crc kubenswrapper[4933]: E0122 06:06:16.665813 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9654ff73-fb3b-4003-b49a-45c98879a7a1" containerName="cinder-scheduler" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.665824 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="9654ff73-fb3b-4003-b49a-45c98879a7a1" containerName="cinder-scheduler" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.665987 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="e98f5ce4-ac57-4a9e-9ffa-8d48a238e064" containerName="dnsmasq-dns" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.666016 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="9654ff73-fb3b-4003-b49a-45c98879a7a1" containerName="probe" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.666031 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="9654ff73-fb3b-4003-b49a-45c98879a7a1" containerName="cinder-scheduler" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.666909 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.670717 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.673718 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.778773 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9\") " pod="openstack/cinder-scheduler-0" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.778840 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-scripts\") pod \"cinder-scheduler-0\" (UID: \"0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9\") " pod="openstack/cinder-scheduler-0" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.778882 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9\") " pod="openstack/cinder-scheduler-0" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.778922 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wzz4j\" (UniqueName: \"kubernetes.io/projected/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-kube-api-access-wzz4j\") pod \"cinder-scheduler-0\" (UID: \"0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9\") " pod="openstack/cinder-scheduler-0" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.779108 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-config-data\") pod \"cinder-scheduler-0\" (UID: \"0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9\") " 
pod="openstack/cinder-scheduler-0" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.779364 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9\") " pod="openstack/cinder-scheduler-0" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.880760 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9\") " pod="openstack/cinder-scheduler-0" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.880873 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wzz4j\" (UniqueName: \"kubernetes.io/projected/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-kube-api-access-wzz4j\") pod \"cinder-scheduler-0\" (UID: \"0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9\") " pod="openstack/cinder-scheduler-0" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.880984 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-config-data\") pod \"cinder-scheduler-0\" (UID: \"0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9\") " pod="openstack/cinder-scheduler-0" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.881065 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9\") " pod="openstack/cinder-scheduler-0" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.881220 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9\") " pod="openstack/cinder-scheduler-0" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.881273 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-scripts\") pod \"cinder-scheduler-0\" (UID: \"0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9\") " pod="openstack/cinder-scheduler-0" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.882102 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9\") " pod="openstack/cinder-scheduler-0" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.886482 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-scripts\") pod \"cinder-scheduler-0\" (UID: \"0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9\") " pod="openstack/cinder-scheduler-0" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.886747 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9\") " pod="openstack/cinder-scheduler-0" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.886840 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9\") " pod="openstack/cinder-scheduler-0" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.887121 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-config-data\") pod \"cinder-scheduler-0\" (UID: \"0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9\") " pod="openstack/cinder-scheduler-0" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.901559 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wzz4j\" (UniqueName: \"kubernetes.io/projected/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-kube-api-access-wzz4j\") pod \"cinder-scheduler-0\" (UID: \"0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9\") " pod="openstack/cinder-scheduler-0" Jan 22 06:06:16 crc kubenswrapper[4933]: I0122 06:06:16.990675 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 22 06:06:17 crc kubenswrapper[4933]: I0122 06:06:17.464495 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 22 06:06:18 crc kubenswrapper[4933]: I0122 06:06:18.322006 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9","Type":"ContainerStarted","Data":"e136629b351ca188667a0d656d1460f094277accee28f74429184e904516ae0c"} Jan 22 06:06:18 crc kubenswrapper[4933]: I0122 06:06:18.322469 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9","Type":"ContainerStarted","Data":"2327536d5ea9d48fbf38777fee202924a5b1fae43f6ea007a86d7bd093d310c4"} Jan 22 06:06:18 crc kubenswrapper[4933]: I0122 06:06:18.500420 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9654ff73-fb3b-4003-b49a-45c98879a7a1" path="/var/lib/kubelet/pods/9654ff73-fb3b-4003-b49a-45c98879a7a1/volumes" Jan 22 06:06:18 crc kubenswrapper[4933]: I0122 06:06:18.627455 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-575b89575b-kkrzb" Jan 22 06:06:18 crc kubenswrapper[4933]: I0122 06:06:18.665939 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-575b89575b-kkrzb" Jan 22 06:06:18 crc kubenswrapper[4933]: I0122 06:06:18.725441 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-5c9bd98c56-qnqgq"] Jan 22 06:06:18 crc kubenswrapper[4933]: I0122 06:06:18.725663 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-5c9bd98c56-qnqgq" podUID="2d683dcb-f19b-4f7c-a6e5-69ec3a93208b" containerName="barbican-api-log" containerID="cri-o://d01bdab4b1711810909a920cee5293034b71009acdfd620b98e33f77e1c8919b" gracePeriod=30 Jan 22 06:06:18 crc kubenswrapper[4933]: I0122 06:06:18.726068 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-5c9bd98c56-qnqgq" 
podUID="2d683dcb-f19b-4f7c-a6e5-69ec3a93208b" containerName="barbican-api" containerID="cri-o://b688c9df9ddf18e461a754a176dde6318fee2f52bdd4e4de166be8f749491199" gracePeriod=30 Jan 22 06:06:18 crc kubenswrapper[4933]: I0122 06:06:18.983963 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Jan 22 06:06:18 crc kubenswrapper[4933]: I0122 06:06:18.985773 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 22 06:06:18 crc kubenswrapper[4933]: I0122 06:06:18.988100 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Jan 22 06:06:18 crc kubenswrapper[4933]: I0122 06:06:18.988601 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Jan 22 06:06:18 crc kubenswrapper[4933]: I0122 06:06:18.989325 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-2kc7q" Jan 22 06:06:19 crc kubenswrapper[4933]: I0122 06:06:19.009996 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 22 06:06:19 crc kubenswrapper[4933]: I0122 06:06:19.118269 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/17936edc-037e-4613-89a9-b9edcfed3521-openstack-config-secret\") pod \"openstackclient\" (UID: \"17936edc-037e-4613-89a9-b9edcfed3521\") " pod="openstack/openstackclient" Jan 22 06:06:19 crc kubenswrapper[4933]: I0122 06:06:19.118332 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17936edc-037e-4613-89a9-b9edcfed3521-combined-ca-bundle\") pod \"openstackclient\" (UID: \"17936edc-037e-4613-89a9-b9edcfed3521\") " pod="openstack/openstackclient" Jan 22 06:06:19 crc kubenswrapper[4933]: I0122 06:06:19.118405 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wt9lh\" (UniqueName: \"kubernetes.io/projected/17936edc-037e-4613-89a9-b9edcfed3521-kube-api-access-wt9lh\") pod \"openstackclient\" (UID: \"17936edc-037e-4613-89a9-b9edcfed3521\") " pod="openstack/openstackclient" Jan 22 06:06:19 crc kubenswrapper[4933]: I0122 06:06:19.118430 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/17936edc-037e-4613-89a9-b9edcfed3521-openstack-config\") pod \"openstackclient\" (UID: \"17936edc-037e-4613-89a9-b9edcfed3521\") " pod="openstack/openstackclient" Jan 22 06:06:19 crc kubenswrapper[4933]: I0122 06:06:19.219494 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/17936edc-037e-4613-89a9-b9edcfed3521-openstack-config-secret\") pod \"openstackclient\" (UID: \"17936edc-037e-4613-89a9-b9edcfed3521\") " pod="openstack/openstackclient" Jan 22 06:06:19 crc kubenswrapper[4933]: I0122 06:06:19.219558 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17936edc-037e-4613-89a9-b9edcfed3521-combined-ca-bundle\") pod \"openstackclient\" (UID: \"17936edc-037e-4613-89a9-b9edcfed3521\") " pod="openstack/openstackclient" Jan 22 06:06:19 crc kubenswrapper[4933]: I0122 06:06:19.219629 4933 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wt9lh\" (UniqueName: \"kubernetes.io/projected/17936edc-037e-4613-89a9-b9edcfed3521-kube-api-access-wt9lh\") pod \"openstackclient\" (UID: \"17936edc-037e-4613-89a9-b9edcfed3521\") " pod="openstack/openstackclient" Jan 22 06:06:19 crc kubenswrapper[4933]: I0122 06:06:19.219648 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/17936edc-037e-4613-89a9-b9edcfed3521-openstack-config\") pod \"openstackclient\" (UID: \"17936edc-037e-4613-89a9-b9edcfed3521\") " pod="openstack/openstackclient" Jan 22 06:06:19 crc kubenswrapper[4933]: I0122 06:06:19.220941 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/17936edc-037e-4613-89a9-b9edcfed3521-openstack-config\") pod \"openstackclient\" (UID: \"17936edc-037e-4613-89a9-b9edcfed3521\") " pod="openstack/openstackclient" Jan 22 06:06:19 crc kubenswrapper[4933]: I0122 06:06:19.226417 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/17936edc-037e-4613-89a9-b9edcfed3521-openstack-config-secret\") pod \"openstackclient\" (UID: \"17936edc-037e-4613-89a9-b9edcfed3521\") " pod="openstack/openstackclient" Jan 22 06:06:19 crc kubenswrapper[4933]: I0122 06:06:19.236260 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17936edc-037e-4613-89a9-b9edcfed3521-combined-ca-bundle\") pod \"openstackclient\" (UID: \"17936edc-037e-4613-89a9-b9edcfed3521\") " pod="openstack/openstackclient" Jan 22 06:06:19 crc kubenswrapper[4933]: I0122 06:06:19.261298 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wt9lh\" (UniqueName: \"kubernetes.io/projected/17936edc-037e-4613-89a9-b9edcfed3521-kube-api-access-wt9lh\") pod \"openstackclient\" (UID: \"17936edc-037e-4613-89a9-b9edcfed3521\") " pod="openstack/openstackclient" Jan 22 06:06:19 crc kubenswrapper[4933]: I0122 06:06:19.263282 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Jan 22 06:06:19 crc kubenswrapper[4933]: I0122 06:06:19.263922 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 22 06:06:19 crc kubenswrapper[4933]: I0122 06:06:19.275357 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Jan 22 06:06:19 crc kubenswrapper[4933]: I0122 06:06:19.326961 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Jan 22 06:06:19 crc kubenswrapper[4933]: I0122 06:06:19.328043 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 22 06:06:19 crc kubenswrapper[4933]: I0122 06:06:19.350756 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 22 06:06:19 crc kubenswrapper[4933]: I0122 06:06:19.401180 4933 generic.go:334] "Generic (PLEG): container finished" podID="2d683dcb-f19b-4f7c-a6e5-69ec3a93208b" containerID="d01bdab4b1711810909a920cee5293034b71009acdfd620b98e33f77e1c8919b" exitCode=143 Jan 22 06:06:19 crc kubenswrapper[4933]: I0122 06:06:19.401285 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5c9bd98c56-qnqgq" event={"ID":"2d683dcb-f19b-4f7c-a6e5-69ec3a93208b","Type":"ContainerDied","Data":"d01bdab4b1711810909a920cee5293034b71009acdfd620b98e33f77e1c8919b"} Jan 22 06:06:19 crc kubenswrapper[4933]: I0122 06:06:19.417382 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9","Type":"ContainerStarted","Data":"47d5a47f9c85c2d9dc14b7850a92bcef2cae8e70655cb013dd920c1d39eb6587"} Jan 22 06:06:19 crc kubenswrapper[4933]: I0122 06:06:19.433887 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8m7pt\" (UniqueName: \"kubernetes.io/projected/45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6-kube-api-access-8m7pt\") pod \"openstackclient\" (UID: \"45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6\") " pod="openstack/openstackclient" Jan 22 06:06:19 crc kubenswrapper[4933]: I0122 06:06:19.433971 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6-openstack-config-secret\") pod \"openstackclient\" (UID: \"45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6\") " pod="openstack/openstackclient" Jan 22 06:06:19 crc kubenswrapper[4933]: I0122 06:06:19.434035 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6-openstack-config\") pod \"openstackclient\" (UID: \"45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6\") " pod="openstack/openstackclient" Jan 22 06:06:19 crc kubenswrapper[4933]: I0122 06:06:19.434066 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6-combined-ca-bundle\") pod \"openstackclient\" (UID: \"45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6\") " pod="openstack/openstackclient" Jan 22 06:06:19 crc kubenswrapper[4933]: I0122 06:06:19.450620 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.450599432 podStartE2EDuration="3.450599432s" podCreationTimestamp="2026-01-22 06:06:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:06:19.444316826 +0000 UTC m=+1227.281442189" watchObservedRunningTime="2026-01-22 06:06:19.450599432 +0000 UTC m=+1227.287724785" Jan 22 06:06:19 crc kubenswrapper[4933]: E0122 06:06:19.531770 4933 log.go:32] "RunPodSandbox from runtime service failed" err=< Jan 22 06:06:19 crc kubenswrapper[4933]: rpc error: code = Unknown desc = failed to create pod network sandbox 
k8s_openstackclient_openstack_17936edc-037e-4613-89a9-b9edcfed3521_0(21cfcb3c9a3f00209145776b6ddb7fa2f00e5b405821dc537cb9f59a29d6dcdc): error adding pod openstack_openstackclient to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"21cfcb3c9a3f00209145776b6ddb7fa2f00e5b405821dc537cb9f59a29d6dcdc" Netns:"/var/run/netns/9022d8a8-3ea6-4786-a750-7cf253760bf8" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openstack;K8S_POD_NAME=openstackclient;K8S_POD_INFRA_CONTAINER_ID=21cfcb3c9a3f00209145776b6ddb7fa2f00e5b405821dc537cb9f59a29d6dcdc;K8S_POD_UID=17936edc-037e-4613-89a9-b9edcfed3521" Path:"" ERRORED: error configuring pod [openstack/openstackclient] networking: Multus: [openstack/openstackclient/17936edc-037e-4613-89a9-b9edcfed3521]: expected pod UID "17936edc-037e-4613-89a9-b9edcfed3521" but got "45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6" from Kube API Jan 22 06:06:19 crc kubenswrapper[4933]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Jan 22 06:06:19 crc kubenswrapper[4933]: > Jan 22 06:06:19 crc kubenswrapper[4933]: E0122 06:06:19.531834 4933 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err=< Jan 22 06:06:19 crc kubenswrapper[4933]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_openstackclient_openstack_17936edc-037e-4613-89a9-b9edcfed3521_0(21cfcb3c9a3f00209145776b6ddb7fa2f00e5b405821dc537cb9f59a29d6dcdc): error adding pod openstack_openstackclient to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"21cfcb3c9a3f00209145776b6ddb7fa2f00e5b405821dc537cb9f59a29d6dcdc" Netns:"/var/run/netns/9022d8a8-3ea6-4786-a750-7cf253760bf8" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openstack;K8S_POD_NAME=openstackclient;K8S_POD_INFRA_CONTAINER_ID=21cfcb3c9a3f00209145776b6ddb7fa2f00e5b405821dc537cb9f59a29d6dcdc;K8S_POD_UID=17936edc-037e-4613-89a9-b9edcfed3521" Path:"" ERRORED: error configuring pod [openstack/openstackclient] networking: Multus: [openstack/openstackclient/17936edc-037e-4613-89a9-b9edcfed3521]: expected pod UID "17936edc-037e-4613-89a9-b9edcfed3521" but got "45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6" from Kube API Jan 22 06:06:19 crc kubenswrapper[4933]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Jan 22 06:06:19 crc kubenswrapper[4933]: > pod="openstack/openstackclient" Jan 22 06:06:19 crc kubenswrapper[4933]: I0122 06:06:19.535993 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8m7pt\" (UniqueName: \"kubernetes.io/projected/45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6-kube-api-access-8m7pt\") pod \"openstackclient\" (UID: \"45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6\") " pod="openstack/openstackclient" Jan 22 
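
The two sandbox errors above record a delete/recreate race: the kubelet is still bringing up a sandbox for the old openstackclient UID (17936edc-...) while the API server already serves a replacement pod with the same namespace/name but a new UID (45dfd4b5-...). Multus cross-checks the UID passed in the CNI ADD arguments (K8S_POD_UID) against the live object and rejects the stale request. Below is a minimal Go sketch of that kind of guard; the getLiveUID helper is hypothetical and this is not Multus's actual code, just the shape of the check that produces the logged error.

package main

import "fmt"

// cniArgs carries the pod identity the runtime passes to the CNI plugin on
// ADD (mirroring K8S_POD_NAMESPACE, K8S_POD_NAME and K8S_POD_UID above).
type cniArgs struct {
	Namespace, Name, UID string
}

// getLiveUID stands in for a Kube API lookup of the pod's current UID.
// Hypothetical helper: here it returns the recreated pod's UID, as the
// API did in the logged scenario.
func getLiveUID(namespace, name string) (string, error) {
	return "45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6", nil
}

// validatePodUID rejects an ADD whose UID no longer matches the live pod,
// which is the check behind the "expected pod UID ... but got ..." error.
func validatePodUID(args cniArgs) error {
	live, err := getLiveUID(args.Namespace, args.Name)
	if err != nil {
		return err
	}
	if args.UID != "" && live != args.UID {
		return fmt.Errorf("expected pod UID %q but got %q from Kube API", args.UID, live)
	}
	return nil
}

func main() {
	err := validatePodUID(cniArgs{
		Namespace: "openstack",
		Name:      "openstackclient",
		UID:       "17936edc-037e-4613-89a9-b9edcfed3521",
	})
	fmt.Println(err) // the stale ADD fails; the kubelet retries with the new UID
}
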
Jan 22 06:06:19 crc kubenswrapper[4933]: I0122 06:06:19.538188 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6-openstack-config-secret\") pod \"openstackclient\" (UID: \"45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6\") " pod="openstack/openstackclient"
Jan 22 06:06:19 crc kubenswrapper[4933]: I0122 06:06:19.538396 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6-openstack-config\") pod \"openstackclient\" (UID: \"45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6\") " pod="openstack/openstackclient"
Jan 22 06:06:19 crc kubenswrapper[4933]: I0122 06:06:19.538473 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6-combined-ca-bundle\") pod \"openstackclient\" (UID: \"45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6\") " pod="openstack/openstackclient"
Jan 22 06:06:19 crc kubenswrapper[4933]: I0122 06:06:19.542736 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6-openstack-config\") pod \"openstackclient\" (UID: \"45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6\") " pod="openstack/openstackclient"
Jan 22 06:06:19 crc kubenswrapper[4933]: I0122 06:06:19.546920 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6-combined-ca-bundle\") pod \"openstackclient\" (UID: \"45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6\") " pod="openstack/openstackclient"
Jan 22 06:06:19 crc kubenswrapper[4933]: I0122 06:06:19.560100 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6-openstack-config-secret\") pod \"openstackclient\" (UID: \"45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6\") " pod="openstack/openstackclient"
Jan 22 06:06:19 crc kubenswrapper[4933]: I0122 06:06:19.560480 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8m7pt\" (UniqueName: \"kubernetes.io/projected/45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6-kube-api-access-8m7pt\") pod \"openstackclient\" (UID: \"45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6\") " pod="openstack/openstackclient"
Jan 22 06:06:19 crc kubenswrapper[4933]: I0122 06:06:19.736275 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Jan 22 06:06:20 crc kubenswrapper[4933]: I0122 06:06:20.271667 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"]
Jan 22 06:06:20 crc kubenswrapper[4933]: I0122 06:06:20.424454 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Jan 22 06:06:20 crc kubenswrapper[4933]: I0122 06:06:20.424537 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6","Type":"ContainerStarted","Data":"28dd4a798a838a8a5e69600ddb1b333e55f4b9eb0229ef9741a520c4b3243282"}
Jan 22 06:06:20 crc kubenswrapper[4933]: I0122 06:06:20.426905 4933 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="17936edc-037e-4613-89a9-b9edcfed3521" podUID="45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6"
Jan 22 06:06:20 crc kubenswrapper[4933]: I0122 06:06:20.438203 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Jan 22 06:06:20 crc kubenswrapper[4933]: I0122 06:06:20.555621 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/17936edc-037e-4613-89a9-b9edcfed3521-openstack-config-secret\") pod \"17936edc-037e-4613-89a9-b9edcfed3521\" (UID: \"17936edc-037e-4613-89a9-b9edcfed3521\") "
Jan 22 06:06:20 crc kubenswrapper[4933]: I0122 06:06:20.555824 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wt9lh\" (UniqueName: \"kubernetes.io/projected/17936edc-037e-4613-89a9-b9edcfed3521-kube-api-access-wt9lh\") pod \"17936edc-037e-4613-89a9-b9edcfed3521\" (UID: \"17936edc-037e-4613-89a9-b9edcfed3521\") "
Jan 22 06:06:20 crc kubenswrapper[4933]: I0122 06:06:20.555898 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/17936edc-037e-4613-89a9-b9edcfed3521-openstack-config\") pod \"17936edc-037e-4613-89a9-b9edcfed3521\" (UID: \"17936edc-037e-4613-89a9-b9edcfed3521\") "
Jan 22 06:06:20 crc kubenswrapper[4933]: I0122 06:06:20.555971 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17936edc-037e-4613-89a9-b9edcfed3521-combined-ca-bundle\") pod \"17936edc-037e-4613-89a9-b9edcfed3521\" (UID: \"17936edc-037e-4613-89a9-b9edcfed3521\") "
Jan 22 06:06:20 crc kubenswrapper[4933]: I0122 06:06:20.556437 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/17936edc-037e-4613-89a9-b9edcfed3521-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "17936edc-037e-4613-89a9-b9edcfed3521" (UID: "17936edc-037e-4613-89a9-b9edcfed3521"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 06:06:20 crc kubenswrapper[4933]: I0122 06:06:20.556617 4933 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/17936edc-037e-4613-89a9-b9edcfed3521-openstack-config\") on node \"crc\" DevicePath \"\""
Jan 22 06:06:20 crc kubenswrapper[4933]: I0122 06:06:20.561174 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17936edc-037e-4613-89a9-b9edcfed3521-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "17936edc-037e-4613-89a9-b9edcfed3521" (UID: "17936edc-037e-4613-89a9-b9edcfed3521"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:06:20 crc kubenswrapper[4933]: I0122 06:06:20.561568 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/17936edc-037e-4613-89a9-b9edcfed3521-kube-api-access-wt9lh" (OuterVolumeSpecName: "kube-api-access-wt9lh") pod "17936edc-037e-4613-89a9-b9edcfed3521" (UID: "17936edc-037e-4613-89a9-b9edcfed3521"). InnerVolumeSpecName "kube-api-access-wt9lh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 06:06:20 crc kubenswrapper[4933]: I0122 06:06:20.561712 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17936edc-037e-4613-89a9-b9edcfed3521-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "17936edc-037e-4613-89a9-b9edcfed3521" (UID: "17936edc-037e-4613-89a9-b9edcfed3521"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:06:20 crc kubenswrapper[4933]: I0122 06:06:20.658357 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17936edc-037e-4613-89a9-b9edcfed3521-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 22 06:06:20 crc kubenswrapper[4933]: I0122 06:06:20.658643 4933 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/17936edc-037e-4613-89a9-b9edcfed3521-openstack-config-secret\") on node \"crc\" DevicePath \"\""
Jan 22 06:06:20 crc kubenswrapper[4933]: I0122 06:06:20.658654 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wt9lh\" (UniqueName: \"kubernetes.io/projected/17936edc-037e-4613-89a9-b9edcfed3521-kube-api-access-wt9lh\") on node \"crc\" DevicePath \"\""
Jan 22 06:06:21 crc kubenswrapper[4933]: I0122 06:06:21.366382 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0"
Jan 22 06:06:21 crc kubenswrapper[4933]: I0122 06:06:21.444461 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Jan 22 06:06:21 crc kubenswrapper[4933]: I0122 06:06:21.457907 4933 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="17936edc-037e-4613-89a9-b9edcfed3521" podUID="45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6"
Jan 22 06:06:21 crc kubenswrapper[4933]: I0122 06:06:21.798902 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-67fd8f79cc-pb6vw"]
Jan 22 06:06:21 crc kubenswrapper[4933]: I0122 06:06:21.800594 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-67fd8f79cc-pb6vw"
Need to start a new one" pod="openstack/swift-proxy-67fd8f79cc-pb6vw" Jan 22 06:06:21 crc kubenswrapper[4933]: I0122 06:06:21.803482 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Jan 22 06:06:21 crc kubenswrapper[4933]: I0122 06:06:21.803575 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Jan 22 06:06:21 crc kubenswrapper[4933]: I0122 06:06:21.803738 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Jan 22 06:06:21 crc kubenswrapper[4933]: I0122 06:06:21.818619 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-67fd8f79cc-pb6vw"] Jan 22 06:06:21 crc kubenswrapper[4933]: I0122 06:06:21.880026 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/62dded9b-a123-4bd0-ab4c-8de7680be023-config-data\") pod \"swift-proxy-67fd8f79cc-pb6vw\" (UID: \"62dded9b-a123-4bd0-ab4c-8de7680be023\") " pod="openstack/swift-proxy-67fd8f79cc-pb6vw" Jan 22 06:06:21 crc kubenswrapper[4933]: I0122 06:06:21.880100 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/62dded9b-a123-4bd0-ab4c-8de7680be023-internal-tls-certs\") pod \"swift-proxy-67fd8f79cc-pb6vw\" (UID: \"62dded9b-a123-4bd0-ab4c-8de7680be023\") " pod="openstack/swift-proxy-67fd8f79cc-pb6vw" Jan 22 06:06:21 crc kubenswrapper[4933]: I0122 06:06:21.880209 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zr542\" (UniqueName: \"kubernetes.io/projected/62dded9b-a123-4bd0-ab4c-8de7680be023-kube-api-access-zr542\") pod \"swift-proxy-67fd8f79cc-pb6vw\" (UID: \"62dded9b-a123-4bd0-ab4c-8de7680be023\") " pod="openstack/swift-proxy-67fd8f79cc-pb6vw" Jan 22 06:06:21 crc kubenswrapper[4933]: I0122 06:06:21.880272 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/62dded9b-a123-4bd0-ab4c-8de7680be023-run-httpd\") pod \"swift-proxy-67fd8f79cc-pb6vw\" (UID: \"62dded9b-a123-4bd0-ab4c-8de7680be023\") " pod="openstack/swift-proxy-67fd8f79cc-pb6vw" Jan 22 06:06:21 crc kubenswrapper[4933]: I0122 06:06:21.880296 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/62dded9b-a123-4bd0-ab4c-8de7680be023-log-httpd\") pod \"swift-proxy-67fd8f79cc-pb6vw\" (UID: \"62dded9b-a123-4bd0-ab4c-8de7680be023\") " pod="openstack/swift-proxy-67fd8f79cc-pb6vw" Jan 22 06:06:21 crc kubenswrapper[4933]: I0122 06:06:21.880348 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62dded9b-a123-4bd0-ab4c-8de7680be023-combined-ca-bundle\") pod \"swift-proxy-67fd8f79cc-pb6vw\" (UID: \"62dded9b-a123-4bd0-ab4c-8de7680be023\") " pod="openstack/swift-proxy-67fd8f79cc-pb6vw" Jan 22 06:06:21 crc kubenswrapper[4933]: I0122 06:06:21.880445 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/62dded9b-a123-4bd0-ab4c-8de7680be023-public-tls-certs\") pod \"swift-proxy-67fd8f79cc-pb6vw\" (UID: \"62dded9b-a123-4bd0-ab4c-8de7680be023\") " 
pod="openstack/swift-proxy-67fd8f79cc-pb6vw" Jan 22 06:06:21 crc kubenswrapper[4933]: I0122 06:06:21.880533 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/62dded9b-a123-4bd0-ab4c-8de7680be023-etc-swift\") pod \"swift-proxy-67fd8f79cc-pb6vw\" (UID: \"62dded9b-a123-4bd0-ab4c-8de7680be023\") " pod="openstack/swift-proxy-67fd8f79cc-pb6vw" Jan 22 06:06:21 crc kubenswrapper[4933]: I0122 06:06:21.974669 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5c9bd98c56-qnqgq" podUID="2d683dcb-f19b-4f7c-a6e5-69ec3a93208b" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.159:9311/healthcheck\": read tcp 10.217.0.2:56496->10.217.0.159:9311: read: connection reset by peer" Jan 22 06:06:21 crc kubenswrapper[4933]: I0122 06:06:21.974708 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5c9bd98c56-qnqgq" podUID="2d683dcb-f19b-4f7c-a6e5-69ec3a93208b" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.159:9311/healthcheck\": read tcp 10.217.0.2:56510->10.217.0.159:9311: read: connection reset by peer" Jan 22 06:06:21 crc kubenswrapper[4933]: I0122 06:06:21.982143 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/62dded9b-a123-4bd0-ab4c-8de7680be023-public-tls-certs\") pod \"swift-proxy-67fd8f79cc-pb6vw\" (UID: \"62dded9b-a123-4bd0-ab4c-8de7680be023\") " pod="openstack/swift-proxy-67fd8f79cc-pb6vw" Jan 22 06:06:21 crc kubenswrapper[4933]: I0122 06:06:21.982210 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/62dded9b-a123-4bd0-ab4c-8de7680be023-etc-swift\") pod \"swift-proxy-67fd8f79cc-pb6vw\" (UID: \"62dded9b-a123-4bd0-ab4c-8de7680be023\") " pod="openstack/swift-proxy-67fd8f79cc-pb6vw" Jan 22 06:06:21 crc kubenswrapper[4933]: I0122 06:06:21.982282 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/62dded9b-a123-4bd0-ab4c-8de7680be023-config-data\") pod \"swift-proxy-67fd8f79cc-pb6vw\" (UID: \"62dded9b-a123-4bd0-ab4c-8de7680be023\") " pod="openstack/swift-proxy-67fd8f79cc-pb6vw" Jan 22 06:06:21 crc kubenswrapper[4933]: I0122 06:06:21.982313 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/62dded9b-a123-4bd0-ab4c-8de7680be023-internal-tls-certs\") pod \"swift-proxy-67fd8f79cc-pb6vw\" (UID: \"62dded9b-a123-4bd0-ab4c-8de7680be023\") " pod="openstack/swift-proxy-67fd8f79cc-pb6vw" Jan 22 06:06:21 crc kubenswrapper[4933]: I0122 06:06:21.982340 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zr542\" (UniqueName: \"kubernetes.io/projected/62dded9b-a123-4bd0-ab4c-8de7680be023-kube-api-access-zr542\") pod \"swift-proxy-67fd8f79cc-pb6vw\" (UID: \"62dded9b-a123-4bd0-ab4c-8de7680be023\") " pod="openstack/swift-proxy-67fd8f79cc-pb6vw" Jan 22 06:06:21 crc kubenswrapper[4933]: I0122 06:06:21.982362 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/62dded9b-a123-4bd0-ab4c-8de7680be023-run-httpd\") pod \"swift-proxy-67fd8f79cc-pb6vw\" (UID: \"62dded9b-a123-4bd0-ab4c-8de7680be023\") " 
pod="openstack/swift-proxy-67fd8f79cc-pb6vw" Jan 22 06:06:21 crc kubenswrapper[4933]: I0122 06:06:21.982380 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/62dded9b-a123-4bd0-ab4c-8de7680be023-log-httpd\") pod \"swift-proxy-67fd8f79cc-pb6vw\" (UID: \"62dded9b-a123-4bd0-ab4c-8de7680be023\") " pod="openstack/swift-proxy-67fd8f79cc-pb6vw" Jan 22 06:06:21 crc kubenswrapper[4933]: I0122 06:06:21.982412 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62dded9b-a123-4bd0-ab4c-8de7680be023-combined-ca-bundle\") pod \"swift-proxy-67fd8f79cc-pb6vw\" (UID: \"62dded9b-a123-4bd0-ab4c-8de7680be023\") " pod="openstack/swift-proxy-67fd8f79cc-pb6vw" Jan 22 06:06:21 crc kubenswrapper[4933]: I0122 06:06:21.982846 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/62dded9b-a123-4bd0-ab4c-8de7680be023-run-httpd\") pod \"swift-proxy-67fd8f79cc-pb6vw\" (UID: \"62dded9b-a123-4bd0-ab4c-8de7680be023\") " pod="openstack/swift-proxy-67fd8f79cc-pb6vw" Jan 22 06:06:21 crc kubenswrapper[4933]: I0122 06:06:21.982939 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/62dded9b-a123-4bd0-ab4c-8de7680be023-log-httpd\") pod \"swift-proxy-67fd8f79cc-pb6vw\" (UID: \"62dded9b-a123-4bd0-ab4c-8de7680be023\") " pod="openstack/swift-proxy-67fd8f79cc-pb6vw" Jan 22 06:06:21 crc kubenswrapper[4933]: I0122 06:06:21.987631 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/62dded9b-a123-4bd0-ab4c-8de7680be023-public-tls-certs\") pod \"swift-proxy-67fd8f79cc-pb6vw\" (UID: \"62dded9b-a123-4bd0-ab4c-8de7680be023\") " pod="openstack/swift-proxy-67fd8f79cc-pb6vw" Jan 22 06:06:21 crc kubenswrapper[4933]: I0122 06:06:21.988539 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/62dded9b-a123-4bd0-ab4c-8de7680be023-internal-tls-certs\") pod \"swift-proxy-67fd8f79cc-pb6vw\" (UID: \"62dded9b-a123-4bd0-ab4c-8de7680be023\") " pod="openstack/swift-proxy-67fd8f79cc-pb6vw" Jan 22 06:06:21 crc kubenswrapper[4933]: I0122 06:06:21.991041 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/62dded9b-a123-4bd0-ab4c-8de7680be023-etc-swift\") pod \"swift-proxy-67fd8f79cc-pb6vw\" (UID: \"62dded9b-a123-4bd0-ab4c-8de7680be023\") " pod="openstack/swift-proxy-67fd8f79cc-pb6vw" Jan 22 06:06:21 crc kubenswrapper[4933]: I0122 06:06:21.991152 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 22 06:06:21 crc kubenswrapper[4933]: I0122 06:06:21.991675 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62dded9b-a123-4bd0-ab4c-8de7680be023-combined-ca-bundle\") pod \"swift-proxy-67fd8f79cc-pb6vw\" (UID: \"62dded9b-a123-4bd0-ab4c-8de7680be023\") " pod="openstack/swift-proxy-67fd8f79cc-pb6vw" Jan 22 06:06:22 crc kubenswrapper[4933]: I0122 06:06:22.000390 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/62dded9b-a123-4bd0-ab4c-8de7680be023-config-data\") pod \"swift-proxy-67fd8f79cc-pb6vw\" (UID: 
\"62dded9b-a123-4bd0-ab4c-8de7680be023\") " pod="openstack/swift-proxy-67fd8f79cc-pb6vw" Jan 22 06:06:22 crc kubenswrapper[4933]: I0122 06:06:22.001257 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zr542\" (UniqueName: \"kubernetes.io/projected/62dded9b-a123-4bd0-ab4c-8de7680be023-kube-api-access-zr542\") pod \"swift-proxy-67fd8f79cc-pb6vw\" (UID: \"62dded9b-a123-4bd0-ab4c-8de7680be023\") " pod="openstack/swift-proxy-67fd8f79cc-pb6vw" Jan 22 06:06:22 crc kubenswrapper[4933]: I0122 06:06:22.116128 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-67fd8f79cc-pb6vw" Jan 22 06:06:22 crc kubenswrapper[4933]: I0122 06:06:22.464644 4933 generic.go:334] "Generic (PLEG): container finished" podID="2d683dcb-f19b-4f7c-a6e5-69ec3a93208b" containerID="b688c9df9ddf18e461a754a176dde6318fee2f52bdd4e4de166be8f749491199" exitCode=0 Jan 22 06:06:22 crc kubenswrapper[4933]: I0122 06:06:22.464684 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5c9bd98c56-qnqgq" event={"ID":"2d683dcb-f19b-4f7c-a6e5-69ec3a93208b","Type":"ContainerDied","Data":"b688c9df9ddf18e461a754a176dde6318fee2f52bdd4e4de166be8f749491199"} Jan 22 06:06:22 crc kubenswrapper[4933]: I0122 06:06:22.464708 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5c9bd98c56-qnqgq" event={"ID":"2d683dcb-f19b-4f7c-a6e5-69ec3a93208b","Type":"ContainerDied","Data":"4f51c665c42e2aa8e4d95dfbc88fb6a4fda0d3957307a464cba02c0d11625657"} Jan 22 06:06:22 crc kubenswrapper[4933]: I0122 06:06:22.464718 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4f51c665c42e2aa8e4d95dfbc88fb6a4fda0d3957307a464cba02c0d11625657" Jan 22 06:06:22 crc kubenswrapper[4933]: I0122 06:06:22.478918 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-5c9bd98c56-qnqgq" Jan 22 06:06:22 crc kubenswrapper[4933]: I0122 06:06:22.512643 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="17936edc-037e-4613-89a9-b9edcfed3521" path="/var/lib/kubelet/pods/17936edc-037e-4613-89a9-b9edcfed3521/volumes" Jan 22 06:06:22 crc kubenswrapper[4933]: I0122 06:06:22.619432 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d683dcb-f19b-4f7c-a6e5-69ec3a93208b-config-data\") pod \"2d683dcb-f19b-4f7c-a6e5-69ec3a93208b\" (UID: \"2d683dcb-f19b-4f7c-a6e5-69ec3a93208b\") " Jan 22 06:06:22 crc kubenswrapper[4933]: I0122 06:06:22.619489 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2d683dcb-f19b-4f7c-a6e5-69ec3a93208b-config-data-custom\") pod \"2d683dcb-f19b-4f7c-a6e5-69ec3a93208b\" (UID: \"2d683dcb-f19b-4f7c-a6e5-69ec3a93208b\") " Jan 22 06:06:22 crc kubenswrapper[4933]: I0122 06:06:22.619536 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rq7cj\" (UniqueName: \"kubernetes.io/projected/2d683dcb-f19b-4f7c-a6e5-69ec3a93208b-kube-api-access-rq7cj\") pod \"2d683dcb-f19b-4f7c-a6e5-69ec3a93208b\" (UID: \"2d683dcb-f19b-4f7c-a6e5-69ec3a93208b\") " Jan 22 06:06:22 crc kubenswrapper[4933]: I0122 06:06:22.619581 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d683dcb-f19b-4f7c-a6e5-69ec3a93208b-combined-ca-bundle\") pod \"2d683dcb-f19b-4f7c-a6e5-69ec3a93208b\" (UID: \"2d683dcb-f19b-4f7c-a6e5-69ec3a93208b\") " Jan 22 06:06:22 crc kubenswrapper[4933]: I0122 06:06:22.620764 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2d683dcb-f19b-4f7c-a6e5-69ec3a93208b-logs\") pod \"2d683dcb-f19b-4f7c-a6e5-69ec3a93208b\" (UID: \"2d683dcb-f19b-4f7c-a6e5-69ec3a93208b\") " Jan 22 06:06:22 crc kubenswrapper[4933]: I0122 06:06:22.622589 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2d683dcb-f19b-4f7c-a6e5-69ec3a93208b-logs" (OuterVolumeSpecName: "logs") pod "2d683dcb-f19b-4f7c-a6e5-69ec3a93208b" (UID: "2d683dcb-f19b-4f7c-a6e5-69ec3a93208b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:06:22 crc kubenswrapper[4933]: I0122 06:06:22.624840 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2d683dcb-f19b-4f7c-a6e5-69ec3a93208b-kube-api-access-rq7cj" (OuterVolumeSpecName: "kube-api-access-rq7cj") pod "2d683dcb-f19b-4f7c-a6e5-69ec3a93208b" (UID: "2d683dcb-f19b-4f7c-a6e5-69ec3a93208b"). InnerVolumeSpecName "kube-api-access-rq7cj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:06:22 crc kubenswrapper[4933]: I0122 06:06:22.625664 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2d683dcb-f19b-4f7c-a6e5-69ec3a93208b-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "2d683dcb-f19b-4f7c-a6e5-69ec3a93208b" (UID: "2d683dcb-f19b-4f7c-a6e5-69ec3a93208b"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:06:22 crc kubenswrapper[4933]: I0122 06:06:22.655519 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2d683dcb-f19b-4f7c-a6e5-69ec3a93208b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2d683dcb-f19b-4f7c-a6e5-69ec3a93208b" (UID: "2d683dcb-f19b-4f7c-a6e5-69ec3a93208b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:06:22 crc kubenswrapper[4933]: I0122 06:06:22.685491 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2d683dcb-f19b-4f7c-a6e5-69ec3a93208b-config-data" (OuterVolumeSpecName: "config-data") pod "2d683dcb-f19b-4f7c-a6e5-69ec3a93208b" (UID: "2d683dcb-f19b-4f7c-a6e5-69ec3a93208b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:06:22 crc kubenswrapper[4933]: I0122 06:06:22.724270 4933 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2d683dcb-f19b-4f7c-a6e5-69ec3a93208b-logs\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:22 crc kubenswrapper[4933]: I0122 06:06:22.724316 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d683dcb-f19b-4f7c-a6e5-69ec3a93208b-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:22 crc kubenswrapper[4933]: I0122 06:06:22.724331 4933 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2d683dcb-f19b-4f7c-a6e5-69ec3a93208b-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:22 crc kubenswrapper[4933]: I0122 06:06:22.724345 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rq7cj\" (UniqueName: \"kubernetes.io/projected/2d683dcb-f19b-4f7c-a6e5-69ec3a93208b-kube-api-access-rq7cj\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:22 crc kubenswrapper[4933]: I0122 06:06:22.724357 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d683dcb-f19b-4f7c-a6e5-69ec3a93208b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:22 crc kubenswrapper[4933]: I0122 06:06:22.732173 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-67fd8f79cc-pb6vw"] Jan 22 06:06:22 crc kubenswrapper[4933]: W0122 06:06:22.740132 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod62dded9b_a123_4bd0_ab4c_8de7680be023.slice/crio-0f49032ef93cc426d09840c8c0816a8c8e3a6a7ec82dcc55f9af579a1026986a WatchSource:0}: Error finding container 0f49032ef93cc426d09840c8c0816a8c8e3a6a7ec82dcc55f9af579a1026986a: Status 404 returned error can't find the container with id 0f49032ef93cc426d09840c8c0816a8c8e3a6a7ec82dcc55f9af579a1026986a Jan 22 06:06:23 crc kubenswrapper[4933]: I0122 06:06:23.475035 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-5c9bd98c56-qnqgq" Jan 22 06:06:23 crc kubenswrapper[4933]: I0122 06:06:23.475659 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-67fd8f79cc-pb6vw" event={"ID":"62dded9b-a123-4bd0-ab4c-8de7680be023","Type":"ContainerStarted","Data":"88766015ee280911eb0e2545ab5037d409220a2d72a53bab39fa2849ee4efc90"} Jan 22 06:06:23 crc kubenswrapper[4933]: I0122 06:06:23.475687 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-67fd8f79cc-pb6vw" event={"ID":"62dded9b-a123-4bd0-ab4c-8de7680be023","Type":"ContainerStarted","Data":"23a7c678de86633dca6c9c40d455ce3ac68ebfa5501131949cd43ab694fb34bd"} Jan 22 06:06:23 crc kubenswrapper[4933]: I0122 06:06:23.475700 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-67fd8f79cc-pb6vw" Jan 22 06:06:23 crc kubenswrapper[4933]: I0122 06:06:23.475709 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-67fd8f79cc-pb6vw" event={"ID":"62dded9b-a123-4bd0-ab4c-8de7680be023","Type":"ContainerStarted","Data":"0f49032ef93cc426d09840c8c0816a8c8e3a6a7ec82dcc55f9af579a1026986a"} Jan 22 06:06:23 crc kubenswrapper[4933]: I0122 06:06:23.475727 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-67fd8f79cc-pb6vw" Jan 22 06:06:23 crc kubenswrapper[4933]: I0122 06:06:23.502593 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-67fd8f79cc-pb6vw" podStartSLOduration=2.502580333 podStartE2EDuration="2.502580333s" podCreationTimestamp="2026-01-22 06:06:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:06:23.500453881 +0000 UTC m=+1231.337579224" watchObservedRunningTime="2026-01-22 06:06:23.502580333 +0000 UTC m=+1231.339705686" Jan 22 06:06:23 crc kubenswrapper[4933]: I0122 06:06:23.523942 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-5c9bd98c56-qnqgq"] Jan 22 06:06:23 crc kubenswrapper[4933]: I0122 06:06:23.531766 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-5c9bd98c56-qnqgq"] Jan 22 06:06:23 crc kubenswrapper[4933]: I0122 06:06:23.695388 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:06:23 crc kubenswrapper[4933]: I0122 06:06:23.695954 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a" containerName="proxy-httpd" containerID="cri-o://4a1ff673babb55ae0c8b13a685ef7dc0941d53b2e94847d0a4636f75c60b8a64" gracePeriod=30 Jan 22 06:06:23 crc kubenswrapper[4933]: I0122 06:06:23.696368 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a" containerName="sg-core" containerID="cri-o://940b655bb0c7de4fcc417570ee7c8fa73e8ae31c8c84741e9fc12144df9a7083" gracePeriod=30 Jan 22 06:06:23 crc kubenswrapper[4933]: I0122 06:06:23.696485 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a" containerName="ceilometer-notification-agent" containerID="cri-o://18274df194c4d9c5bf0d3aee6ee63f64398c344ac19e30ce8f1aeb774060aae9" gracePeriod=30 Jan 22 06:06:23 crc kubenswrapper[4933]: I0122 06:06:23.700707 4933 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a" containerName="ceilometer-central-agent" containerID="cri-o://fcfa447ba938851d83bd12a97b48467f27657a61dfbaff803278e1ce781d42d5" gracePeriod=30 Jan 22 06:06:23 crc kubenswrapper[4933]: I0122 06:06:23.705362 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.160:3000/\": EOF" Jan 22 06:06:24 crc kubenswrapper[4933]: I0122 06:06:24.491015 4933 generic.go:334] "Generic (PLEG): container finished" podID="ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a" containerID="4a1ff673babb55ae0c8b13a685ef7dc0941d53b2e94847d0a4636f75c60b8a64" exitCode=0 Jan 22 06:06:24 crc kubenswrapper[4933]: I0122 06:06:24.491595 4933 generic.go:334] "Generic (PLEG): container finished" podID="ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a" containerID="940b655bb0c7de4fcc417570ee7c8fa73e8ae31c8c84741e9fc12144df9a7083" exitCode=2 Jan 22 06:06:24 crc kubenswrapper[4933]: I0122 06:06:24.491607 4933 generic.go:334] "Generic (PLEG): container finished" podID="ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a" containerID="fcfa447ba938851d83bd12a97b48467f27657a61dfbaff803278e1ce781d42d5" exitCode=0 Jan 22 06:06:24 crc kubenswrapper[4933]: I0122 06:06:24.511030 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2d683dcb-f19b-4f7c-a6e5-69ec3a93208b" path="/var/lib/kubelet/pods/2d683dcb-f19b-4f7c-a6e5-69ec3a93208b/volumes" Jan 22 06:06:24 crc kubenswrapper[4933]: I0122 06:06:24.512025 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a","Type":"ContainerDied","Data":"4a1ff673babb55ae0c8b13a685ef7dc0941d53b2e94847d0a4636f75c60b8a64"} Jan 22 06:06:24 crc kubenswrapper[4933]: I0122 06:06:24.512215 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a","Type":"ContainerDied","Data":"940b655bb0c7de4fcc417570ee7c8fa73e8ae31c8c84741e9fc12144df9a7083"} Jan 22 06:06:24 crc kubenswrapper[4933]: I0122 06:06:24.512299 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a","Type":"ContainerDied","Data":"fcfa447ba938851d83bd12a97b48467f27657a61dfbaff803278e1ce781d42d5"} Jan 22 06:06:25 crc kubenswrapper[4933]: I0122 06:06:25.501570 4933 generic.go:334] "Generic (PLEG): container finished" podID="ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a" containerID="18274df194c4d9c5bf0d3aee6ee63f64398c344ac19e30ce8f1aeb774060aae9" exitCode=0 Jan 22 06:06:25 crc kubenswrapper[4933]: I0122 06:06:25.501918 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a","Type":"ContainerDied","Data":"18274df194c4d9c5bf0d3aee6ee63f64398c344ac19e30ce8f1aeb774060aae9"} Jan 22 06:06:27 crc kubenswrapper[4933]: I0122 06:06:27.128386 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-67fd8f79cc-pb6vw" Jan 22 06:06:27 crc kubenswrapper[4933]: I0122 06:06:27.222884 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 22 06:06:30 crc kubenswrapper[4933]: I0122 06:06:30.691153 4933 util.go:48] "No ready sandbox for pod can be found. 
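
"Killing container with a grace period" above means: deliver SIGTERM, give the container up to gracePeriod (30s here) to exit on its own, then escalate to SIGKILL. A process that dies from an unhandled SIGTERM reports exit code 128+15=143, which is what barbican-api logged earlier in this section; ceilometer's containers above instead shut down cleanly (0) or with an ordinary error (2). Below is a library-style sketch of that sequence on a bare Unix PID; the kubelet actually drives this through the CRI, so this is only the shape of the mechanism.

//go:build unix

package grace

import (
	"syscall"
	"time"
)

// killWithGrace sends SIGTERM, polls for exit until the grace period lapses,
// then escalates to SIGKILL. Simplified sketch, not kubelet code.
func killWithGrace(pid int, grace time.Duration) error {
	if err := syscall.Kill(pid, syscall.SIGTERM); err != nil {
		return err
	}
	deadline := time.Now().Add(grace)
	for time.Now().Before(deadline) {
		// Signal 0 only checks liveness; ESRCH means the process is gone.
		if err := syscall.Kill(pid, 0); err == syscall.ESRCH {
			return nil // exited within the grace period (143 if SIGTERM was fatal)
		}
		time.Sleep(100 * time.Millisecond)
	}
	return syscall.Kill(pid, syscall.SIGKILL)
}
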
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 06:06:30 crc kubenswrapper[4933]: I0122 06:06:30.727996 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7rk2v\" (UniqueName: \"kubernetes.io/projected/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-kube-api-access-7rk2v\") pod \"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a\" (UID: \"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a\") " Jan 22 06:06:30 crc kubenswrapper[4933]: I0122 06:06:30.728148 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-config-data\") pod \"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a\" (UID: \"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a\") " Jan 22 06:06:30 crc kubenswrapper[4933]: I0122 06:06:30.728218 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-run-httpd\") pod \"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a\" (UID: \"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a\") " Jan 22 06:06:30 crc kubenswrapper[4933]: I0122 06:06:30.728253 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-sg-core-conf-yaml\") pod \"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a\" (UID: \"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a\") " Jan 22 06:06:30 crc kubenswrapper[4933]: I0122 06:06:30.728286 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-scripts\") pod \"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a\" (UID: \"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a\") " Jan 22 06:06:30 crc kubenswrapper[4933]: I0122 06:06:30.728316 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-log-httpd\") pod \"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a\" (UID: \"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a\") " Jan 22 06:06:30 crc kubenswrapper[4933]: I0122 06:06:30.728363 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-combined-ca-bundle\") pod \"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a\" (UID: \"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a\") " Jan 22 06:06:30 crc kubenswrapper[4933]: I0122 06:06:30.733293 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-kube-api-access-7rk2v" (OuterVolumeSpecName: "kube-api-access-7rk2v") pod "ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a" (UID: "ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a"). InnerVolumeSpecName "kube-api-access-7rk2v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:06:30 crc kubenswrapper[4933]: I0122 06:06:30.735472 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a" (UID: "ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:06:30 crc kubenswrapper[4933]: I0122 06:06:30.738632 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a" (UID: "ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:06:30 crc kubenswrapper[4933]: I0122 06:06:30.746443 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-scripts" (OuterVolumeSpecName: "scripts") pod "ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a" (UID: "ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:06:30 crc kubenswrapper[4933]: I0122 06:06:30.766432 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a" (UID: "ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:06:30 crc kubenswrapper[4933]: I0122 06:06:30.830153 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7rk2v\" (UniqueName: \"kubernetes.io/projected/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-kube-api-access-7rk2v\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:30 crc kubenswrapper[4933]: I0122 06:06:30.830185 4933 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:30 crc kubenswrapper[4933]: I0122 06:06:30.830194 4933 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:30 crc kubenswrapper[4933]: I0122 06:06:30.830203 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:30 crc kubenswrapper[4933]: I0122 06:06:30.830211 4933 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:30 crc kubenswrapper[4933]: I0122 06:06:30.860155 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a" (UID: "ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:06:30 crc kubenswrapper[4933]: I0122 06:06:30.877234 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-config-data" (OuterVolumeSpecName: "config-data") pod "ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a" (UID: "ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:06:30 crc kubenswrapper[4933]: I0122 06:06:30.932312 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:30 crc kubenswrapper[4933]: I0122 06:06:30.932361 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.565951 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a","Type":"ContainerDied","Data":"d5804d8c32ca60e82b2c78ccf1929b1e73eb175e5c95007d7cb2d9ade0334748"} Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.566004 4933 scope.go:117] "RemoveContainer" containerID="4a1ff673babb55ae0c8b13a685ef7dc0941d53b2e94847d0a4636f75c60b8a64" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.566004 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.572257 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6","Type":"ContainerStarted","Data":"8bf7a48a2b547ef8e38d6ce658ae3a575bfebda764938fc766292bd884b6d211"} Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.591641 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.209817496 podStartE2EDuration="12.591618969s" podCreationTimestamp="2026-01-22 06:06:19 +0000 UTC" firstStartedPulling="2026-01-22 06:06:20.297185183 +0000 UTC m=+1228.134310536" lastFinishedPulling="2026-01-22 06:06:30.678986646 +0000 UTC m=+1238.516112009" observedRunningTime="2026-01-22 06:06:31.589800301 +0000 UTC m=+1239.426925664" watchObservedRunningTime="2026-01-22 06:06:31.591618969 +0000 UTC m=+1239.428744342" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.628062 4933 scope.go:117] "RemoveContainer" containerID="940b655bb0c7de4fcc417570ee7c8fa73e8ae31c8c84741e9fc12144df9a7083" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.631306 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.645841 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.652662 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:06:31 crc kubenswrapper[4933]: E0122 06:06:31.655697 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a" containerName="ceilometer-notification-agent" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.655734 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a" containerName="ceilometer-notification-agent" Jan 22 06:06:31 crc kubenswrapper[4933]: E0122 06:06:31.655755 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a" containerName="sg-core" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.655765 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a" 
containerName="sg-core" Jan 22 06:06:31 crc kubenswrapper[4933]: E0122 06:06:31.655777 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a" containerName="ceilometer-central-agent" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.655786 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a" containerName="ceilometer-central-agent" Jan 22 06:06:31 crc kubenswrapper[4933]: E0122 06:06:31.655801 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d683dcb-f19b-4f7c-a6e5-69ec3a93208b" containerName="barbican-api-log" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.655809 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d683dcb-f19b-4f7c-a6e5-69ec3a93208b" containerName="barbican-api-log" Jan 22 06:06:31 crc kubenswrapper[4933]: E0122 06:06:31.655855 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d683dcb-f19b-4f7c-a6e5-69ec3a93208b" containerName="barbican-api" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.655863 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d683dcb-f19b-4f7c-a6e5-69ec3a93208b" containerName="barbican-api" Jan 22 06:06:31 crc kubenswrapper[4933]: E0122 06:06:31.655885 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a" containerName="proxy-httpd" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.655893 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a" containerName="proxy-httpd" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.656206 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d683dcb-f19b-4f7c-a6e5-69ec3a93208b" containerName="barbican-api-log" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.656230 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a" containerName="ceilometer-notification-agent" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.656247 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a" containerName="ceilometer-central-agent" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.656261 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a" containerName="sg-core" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.656276 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d683dcb-f19b-4f7c-a6e5-69ec3a93208b" containerName="barbican-api" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.656289 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a" containerName="proxy-httpd" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.658235 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.659783 4933 scope.go:117] "RemoveContainer" containerID="18274df194c4d9c5bf0d3aee6ee63f64398c344ac19e30ce8f1aeb774060aae9" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.668521 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.668622 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.689024 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.706990 4933 scope.go:117] "RemoveContainer" containerID="fcfa447ba938851d83bd12a97b48467f27657a61dfbaff803278e1ce781d42d5" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.745883 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-config-data\") pod \"ceilometer-0\" (UID: \"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb\") " pod="openstack/ceilometer-0" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.746289 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb\") " pod="openstack/ceilometer-0" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.746372 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bd8j7\" (UniqueName: \"kubernetes.io/projected/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-kube-api-access-bd8j7\") pod \"ceilometer-0\" (UID: \"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb\") " pod="openstack/ceilometer-0" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.746440 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-scripts\") pod \"ceilometer-0\" (UID: \"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb\") " pod="openstack/ceilometer-0" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.746693 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-run-httpd\") pod \"ceilometer-0\" (UID: \"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb\") " pod="openstack/ceilometer-0" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.746743 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb\") " pod="openstack/ceilometer-0" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.746864 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-log-httpd\") pod \"ceilometer-0\" (UID: \"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb\") " pod="openstack/ceilometer-0" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 
06:06:31.848902 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-config-data\") pod \"ceilometer-0\" (UID: \"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb\") " pod="openstack/ceilometer-0" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.848986 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb\") " pod="openstack/ceilometer-0" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.849106 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bd8j7\" (UniqueName: \"kubernetes.io/projected/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-kube-api-access-bd8j7\") pod \"ceilometer-0\" (UID: \"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb\") " pod="openstack/ceilometer-0" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.849222 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-scripts\") pod \"ceilometer-0\" (UID: \"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb\") " pod="openstack/ceilometer-0" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.849387 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-run-httpd\") pod \"ceilometer-0\" (UID: \"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb\") " pod="openstack/ceilometer-0" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.849447 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb\") " pod="openstack/ceilometer-0" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.849577 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-log-httpd\") pod \"ceilometer-0\" (UID: \"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb\") " pod="openstack/ceilometer-0" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.850236 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-run-httpd\") pod \"ceilometer-0\" (UID: \"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb\") " pod="openstack/ceilometer-0" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.850634 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-log-httpd\") pod \"ceilometer-0\" (UID: \"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb\") " pod="openstack/ceilometer-0" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.855984 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb\") " pod="openstack/ceilometer-0" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.858022 4933 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-config-data\") pod \"ceilometer-0\" (UID: \"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb\") " pod="openstack/ceilometer-0" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.861140 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb\") " pod="openstack/ceilometer-0" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.875526 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-scripts\") pod \"ceilometer-0\" (UID: \"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb\") " pod="openstack/ceilometer-0" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.887506 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bd8j7\" (UniqueName: \"kubernetes.io/projected/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-kube-api-access-bd8j7\") pod \"ceilometer-0\" (UID: \"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb\") " pod="openstack/ceilometer-0" Jan 22 06:06:31 crc kubenswrapper[4933]: I0122 06:06:31.981424 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 06:06:32 crc kubenswrapper[4933]: I0122 06:06:32.121818 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-67fd8f79cc-pb6vw" Jan 22 06:06:32 crc kubenswrapper[4933]: I0122 06:06:32.439111 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 06:06:32 crc kubenswrapper[4933]: I0122 06:06:32.439676 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="a93245a9-0cb1-4b24-810e-ad085418a134" containerName="glance-log" containerID="cri-o://b9b670df062fab12248a75e5b85ce25fafa409f7873fbafd6c269b8edfd15364" gracePeriod=30 Jan 22 06:06:32 crc kubenswrapper[4933]: I0122 06:06:32.439735 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="a93245a9-0cb1-4b24-810e-ad085418a134" containerName="glance-httpd" containerID="cri-o://0e3dae9acf13e4c101ed9038cebd3f5dfa8ae112611c63de76c052a41cacaa61" gracePeriod=30 Jan 22 06:06:32 crc kubenswrapper[4933]: W0122 06:06:32.487887 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4c6ad13a_b4ab_4db3_a39f_b110c543d8fb.slice/crio-3706651b4df03e02a688cce2415c18d131d7f7f5f1199cce36acf8d9c366a892 WatchSource:0}: Error finding container 3706651b4df03e02a688cce2415c18d131d7f7f5f1199cce36acf8d9c366a892: Status 404 returned error can't find the container with id 3706651b4df03e02a688cce2415c18d131d7f7f5f1199cce36acf8d9c366a892 Jan 22 06:06:32 crc kubenswrapper[4933]: I0122 06:06:32.488695 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:06:32 crc kubenswrapper[4933]: I0122 06:06:32.501101 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a" path="/var/lib/kubelet/pods/ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a/volumes" Jan 22 06:06:32 crc kubenswrapper[4933]: I0122 06:06:32.585020 4933 generic.go:334] 
"Generic (PLEG): container finished" podID="a93245a9-0cb1-4b24-810e-ad085418a134" containerID="b9b670df062fab12248a75e5b85ce25fafa409f7873fbafd6c269b8edfd15364" exitCode=143 Jan 22 06:06:32 crc kubenswrapper[4933]: I0122 06:06:32.585113 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a93245a9-0cb1-4b24-810e-ad085418a134","Type":"ContainerDied","Data":"b9b670df062fab12248a75e5b85ce25fafa409f7873fbafd6c269b8edfd15364"} Jan 22 06:06:32 crc kubenswrapper[4933]: I0122 06:06:32.586731 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb","Type":"ContainerStarted","Data":"3706651b4df03e02a688cce2415c18d131d7f7f5f1199cce36acf8d9c366a892"} Jan 22 06:06:33 crc kubenswrapper[4933]: I0122 06:06:33.205770 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:06:33 crc kubenswrapper[4933]: I0122 06:06:33.598586 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb","Type":"ContainerStarted","Data":"a2020de58eac7eef7181f4e5147562a2842d385a34d8e7eff987f562ab53c5fe"} Jan 22 06:06:34 crc kubenswrapper[4933]: I0122 06:06:34.608680 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb","Type":"ContainerStarted","Data":"f22b8ced7a10678e9471d69d9050a4dc0c3096a40104936c2b0ef31bef437ff0"} Jan 22 06:06:34 crc kubenswrapper[4933]: I0122 06:06:34.608962 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb","Type":"ContainerStarted","Data":"9cb6ffe51137f02eae74717eddeffb1853f3e9b9878a68197d6fb1c2315be8e7"} Jan 22 06:06:34 crc kubenswrapper[4933]: I0122 06:06:34.898366 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-8sz22"] Jan 22 06:06:34 crc kubenswrapper[4933]: I0122 06:06:34.900006 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-8sz22" Jan 22 06:06:34 crc kubenswrapper[4933]: I0122 06:06:34.911708 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-8sz22"] Jan 22 06:06:34 crc kubenswrapper[4933]: I0122 06:06:34.987702 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 06:06:34 crc kubenswrapper[4933]: I0122 06:06:34.990328 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="f332d598-c2fa-4ed1-9b08-9245f85185af" containerName="glance-log" containerID="cri-o://460c308d9a9fca5408912df645075971fee07fee0e34840d93858c38da9642d9" gracePeriod=30 Jan 22 06:06:34 crc kubenswrapper[4933]: I0122 06:06:34.990430 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="f332d598-c2fa-4ed1-9b08-9245f85185af" containerName="glance-httpd" containerID="cri-o://97322d5aa3a394dfde9104353fcf318859f9b7d5d72a2357a8c11f88a0684f57" gracePeriod=30 Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.010532 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-9cz7d"] Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.013123 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h5tkp\" (UniqueName: \"kubernetes.io/projected/b01a1347-77f8-4f4a-b98b-862d96c7c55c-kube-api-access-h5tkp\") pod \"nova-api-db-create-8sz22\" (UID: \"b01a1347-77f8-4f4a-b98b-862d96c7c55c\") " pod="openstack/nova-api-db-create-8sz22" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.013358 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b01a1347-77f8-4f4a-b98b-862d96c7c55c-operator-scripts\") pod \"nova-api-db-create-8sz22\" (UID: \"b01a1347-77f8-4f4a-b98b-862d96c7c55c\") " pod="openstack/nova-api-db-create-8sz22" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.015756 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-9cz7d" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.021516 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-9cz7d"] Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.045139 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-b23f-account-create-update-sfkx5"] Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.056134 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-b23f-account-create-update-sfkx5" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.059127 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.095945 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-b23f-account-create-update-sfkx5"] Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.132616 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8b9ck\" (UniqueName: \"kubernetes.io/projected/38d9042f-25f0-439a-9911-944297684f27-kube-api-access-8b9ck\") pod \"nova-cell0-db-create-9cz7d\" (UID: \"38d9042f-25f0-439a-9911-944297684f27\") " pod="openstack/nova-cell0-db-create-9cz7d" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.132678 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/38d9042f-25f0-439a-9911-944297684f27-operator-scripts\") pod \"nova-cell0-db-create-9cz7d\" (UID: \"38d9042f-25f0-439a-9911-944297684f27\") " pod="openstack/nova-cell0-db-create-9cz7d" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.132744 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h5tkp\" (UniqueName: \"kubernetes.io/projected/b01a1347-77f8-4f4a-b98b-862d96c7c55c-kube-api-access-h5tkp\") pod \"nova-api-db-create-8sz22\" (UID: \"b01a1347-77f8-4f4a-b98b-862d96c7c55c\") " pod="openstack/nova-api-db-create-8sz22" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.132762 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2qmmj\" (UniqueName: \"kubernetes.io/projected/c15a0f2e-dcbe-4197-bdcd-f50425d09e80-kube-api-access-2qmmj\") pod \"nova-api-b23f-account-create-update-sfkx5\" (UID: \"c15a0f2e-dcbe-4197-bdcd-f50425d09e80\") " pod="openstack/nova-api-b23f-account-create-update-sfkx5" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.132799 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c15a0f2e-dcbe-4197-bdcd-f50425d09e80-operator-scripts\") pod \"nova-api-b23f-account-create-update-sfkx5\" (UID: \"c15a0f2e-dcbe-4197-bdcd-f50425d09e80\") " pod="openstack/nova-api-b23f-account-create-update-sfkx5" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.132841 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b01a1347-77f8-4f4a-b98b-862d96c7c55c-operator-scripts\") pod \"nova-api-db-create-8sz22\" (UID: \"b01a1347-77f8-4f4a-b98b-862d96c7c55c\") " pod="openstack/nova-api-db-create-8sz22" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.133617 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b01a1347-77f8-4f4a-b98b-862d96c7c55c-operator-scripts\") pod \"nova-api-db-create-8sz22\" (UID: \"b01a1347-77f8-4f4a-b98b-862d96c7c55c\") " pod="openstack/nova-api-db-create-8sz22" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.154626 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h5tkp\" (UniqueName: \"kubernetes.io/projected/b01a1347-77f8-4f4a-b98b-862d96c7c55c-kube-api-access-h5tkp\") 
pod \"nova-api-db-create-8sz22\" (UID: \"b01a1347-77f8-4f4a-b98b-862d96c7c55c\") " pod="openstack/nova-api-db-create-8sz22" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.204656 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-dg7dg"] Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.206012 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-dg7dg" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.218618 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-8sz22" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.218661 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-f8f6-account-create-update-4r24j"] Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.226383 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-f8f6-account-create-update-4r24j" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.227386 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-dg7dg"] Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.231535 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.234378 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8b9ck\" (UniqueName: \"kubernetes.io/projected/38d9042f-25f0-439a-9911-944297684f27-kube-api-access-8b9ck\") pod \"nova-cell0-db-create-9cz7d\" (UID: \"38d9042f-25f0-439a-9911-944297684f27\") " pod="openstack/nova-cell0-db-create-9cz7d" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.234423 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lrjvp\" (UniqueName: \"kubernetes.io/projected/b2d004aa-1c9d-428c-b3f3-851b50d53cc1-kube-api-access-lrjvp\") pod \"nova-cell0-f8f6-account-create-update-4r24j\" (UID: \"b2d004aa-1c9d-428c-b3f3-851b50d53cc1\") " pod="openstack/nova-cell0-f8f6-account-create-update-4r24j" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.234466 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/38d9042f-25f0-439a-9911-944297684f27-operator-scripts\") pod \"nova-cell0-db-create-9cz7d\" (UID: \"38d9042f-25f0-439a-9911-944297684f27\") " pod="openstack/nova-cell0-db-create-9cz7d" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.234526 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6lqkd\" (UniqueName: \"kubernetes.io/projected/6c1ba58d-aa1c-49e8-9975-319ff8cbdec5-kube-api-access-6lqkd\") pod \"nova-cell1-db-create-dg7dg\" (UID: \"6c1ba58d-aa1c-49e8-9975-319ff8cbdec5\") " pod="openstack/nova-cell1-db-create-dg7dg" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.234570 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2qmmj\" (UniqueName: \"kubernetes.io/projected/c15a0f2e-dcbe-4197-bdcd-f50425d09e80-kube-api-access-2qmmj\") pod \"nova-api-b23f-account-create-update-sfkx5\" (UID: \"c15a0f2e-dcbe-4197-bdcd-f50425d09e80\") " pod="openstack/nova-api-b23f-account-create-update-sfkx5" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.234597 4933 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b2d004aa-1c9d-428c-b3f3-851b50d53cc1-operator-scripts\") pod \"nova-cell0-f8f6-account-create-update-4r24j\" (UID: \"b2d004aa-1c9d-428c-b3f3-851b50d53cc1\") " pod="openstack/nova-cell0-f8f6-account-create-update-4r24j" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.234641 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c15a0f2e-dcbe-4197-bdcd-f50425d09e80-operator-scripts\") pod \"nova-api-b23f-account-create-update-sfkx5\" (UID: \"c15a0f2e-dcbe-4197-bdcd-f50425d09e80\") " pod="openstack/nova-api-b23f-account-create-update-sfkx5" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.234700 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6c1ba58d-aa1c-49e8-9975-319ff8cbdec5-operator-scripts\") pod \"nova-cell1-db-create-dg7dg\" (UID: \"6c1ba58d-aa1c-49e8-9975-319ff8cbdec5\") " pod="openstack/nova-cell1-db-create-dg7dg" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.237648 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/38d9042f-25f0-439a-9911-944297684f27-operator-scripts\") pod \"nova-cell0-db-create-9cz7d\" (UID: \"38d9042f-25f0-439a-9911-944297684f27\") " pod="openstack/nova-cell0-db-create-9cz7d" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.238409 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c15a0f2e-dcbe-4197-bdcd-f50425d09e80-operator-scripts\") pod \"nova-api-b23f-account-create-update-sfkx5\" (UID: \"c15a0f2e-dcbe-4197-bdcd-f50425d09e80\") " pod="openstack/nova-api-b23f-account-create-update-sfkx5" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.238735 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-f8f6-account-create-update-4r24j"] Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.256670 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8b9ck\" (UniqueName: \"kubernetes.io/projected/38d9042f-25f0-439a-9911-944297684f27-kube-api-access-8b9ck\") pod \"nova-cell0-db-create-9cz7d\" (UID: \"38d9042f-25f0-439a-9911-944297684f27\") " pod="openstack/nova-cell0-db-create-9cz7d" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.278901 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2qmmj\" (UniqueName: \"kubernetes.io/projected/c15a0f2e-dcbe-4197-bdcd-f50425d09e80-kube-api-access-2qmmj\") pod \"nova-api-b23f-account-create-update-sfkx5\" (UID: \"c15a0f2e-dcbe-4197-bdcd-f50425d09e80\") " pod="openstack/nova-api-b23f-account-create-update-sfkx5" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.336082 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6c1ba58d-aa1c-49e8-9975-319ff8cbdec5-operator-scripts\") pod \"nova-cell1-db-create-dg7dg\" (UID: \"6c1ba58d-aa1c-49e8-9975-319ff8cbdec5\") " pod="openstack/nova-cell1-db-create-dg7dg" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.336467 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lrjvp\" (UniqueName: 
\"kubernetes.io/projected/b2d004aa-1c9d-428c-b3f3-851b50d53cc1-kube-api-access-lrjvp\") pod \"nova-cell0-f8f6-account-create-update-4r24j\" (UID: \"b2d004aa-1c9d-428c-b3f3-851b50d53cc1\") " pod="openstack/nova-cell0-f8f6-account-create-update-4r24j" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.336524 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6lqkd\" (UniqueName: \"kubernetes.io/projected/6c1ba58d-aa1c-49e8-9975-319ff8cbdec5-kube-api-access-6lqkd\") pod \"nova-cell1-db-create-dg7dg\" (UID: \"6c1ba58d-aa1c-49e8-9975-319ff8cbdec5\") " pod="openstack/nova-cell1-db-create-dg7dg" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.336557 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b2d004aa-1c9d-428c-b3f3-851b50d53cc1-operator-scripts\") pod \"nova-cell0-f8f6-account-create-update-4r24j\" (UID: \"b2d004aa-1c9d-428c-b3f3-851b50d53cc1\") " pod="openstack/nova-cell0-f8f6-account-create-update-4r24j" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.337441 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b2d004aa-1c9d-428c-b3f3-851b50d53cc1-operator-scripts\") pod \"nova-cell0-f8f6-account-create-update-4r24j\" (UID: \"b2d004aa-1c9d-428c-b3f3-851b50d53cc1\") " pod="openstack/nova-cell0-f8f6-account-create-update-4r24j" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.338007 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6c1ba58d-aa1c-49e8-9975-319ff8cbdec5-operator-scripts\") pod \"nova-cell1-db-create-dg7dg\" (UID: \"6c1ba58d-aa1c-49e8-9975-319ff8cbdec5\") " pod="openstack/nova-cell1-db-create-dg7dg" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.338301 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-9cz7d" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.355642 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6lqkd\" (UniqueName: \"kubernetes.io/projected/6c1ba58d-aa1c-49e8-9975-319ff8cbdec5-kube-api-access-6lqkd\") pod \"nova-cell1-db-create-dg7dg\" (UID: \"6c1ba58d-aa1c-49e8-9975-319ff8cbdec5\") " pod="openstack/nova-cell1-db-create-dg7dg" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.357297 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lrjvp\" (UniqueName: \"kubernetes.io/projected/b2d004aa-1c9d-428c-b3f3-851b50d53cc1-kube-api-access-lrjvp\") pod \"nova-cell0-f8f6-account-create-update-4r24j\" (UID: \"b2d004aa-1c9d-428c-b3f3-851b50d53cc1\") " pod="openstack/nova-cell0-f8f6-account-create-update-4r24j" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.401596 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-b23f-account-create-update-sfkx5" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.402231 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-548e-account-create-update-k4rxd"] Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.403583 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-548e-account-create-update-k4rxd" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.414451 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.431650 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-548e-account-create-update-k4rxd"] Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.529913 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-dg7dg" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.546131 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6hfgh\" (UniqueName: \"kubernetes.io/projected/18be1708-2025-4b39-a74c-fe83cf4744ad-kube-api-access-6hfgh\") pod \"nova-cell1-548e-account-create-update-k4rxd\" (UID: \"18be1708-2025-4b39-a74c-fe83cf4744ad\") " pod="openstack/nova-cell1-548e-account-create-update-k4rxd" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.546206 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/18be1708-2025-4b39-a74c-fe83cf4744ad-operator-scripts\") pod \"nova-cell1-548e-account-create-update-k4rxd\" (UID: \"18be1708-2025-4b39-a74c-fe83cf4744ad\") " pod="openstack/nova-cell1-548e-account-create-update-k4rxd" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.628678 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-f8f6-account-create-update-4r24j" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.642773 4933 generic.go:334] "Generic (PLEG): container finished" podID="f332d598-c2fa-4ed1-9b08-9245f85185af" containerID="460c308d9a9fca5408912df645075971fee07fee0e34840d93858c38da9642d9" exitCode=143 Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.643091 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f332d598-c2fa-4ed1-9b08-9245f85185af","Type":"ContainerDied","Data":"460c308d9a9fca5408912df645075971fee07fee0e34840d93858c38da9642d9"} Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.648605 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6hfgh\" (UniqueName: \"kubernetes.io/projected/18be1708-2025-4b39-a74c-fe83cf4744ad-kube-api-access-6hfgh\") pod \"nova-cell1-548e-account-create-update-k4rxd\" (UID: \"18be1708-2025-4b39-a74c-fe83cf4744ad\") " pod="openstack/nova-cell1-548e-account-create-update-k4rxd" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.648693 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/18be1708-2025-4b39-a74c-fe83cf4744ad-operator-scripts\") pod \"nova-cell1-548e-account-create-update-k4rxd\" (UID: \"18be1708-2025-4b39-a74c-fe83cf4744ad\") " pod="openstack/nova-cell1-548e-account-create-update-k4rxd" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.649590 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/18be1708-2025-4b39-a74c-fe83cf4744ad-operator-scripts\") pod \"nova-cell1-548e-account-create-update-k4rxd\" (UID: \"18be1708-2025-4b39-a74c-fe83cf4744ad\") " pod="openstack/nova-cell1-548e-account-create-update-k4rxd" Jan 22 
06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.674795 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6hfgh\" (UniqueName: \"kubernetes.io/projected/18be1708-2025-4b39-a74c-fe83cf4744ad-kube-api-access-6hfgh\") pod \"nova-cell1-548e-account-create-update-k4rxd\" (UID: \"18be1708-2025-4b39-a74c-fe83cf4744ad\") " pod="openstack/nova-cell1-548e-account-create-update-k4rxd" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.741545 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-548e-account-create-update-k4rxd" Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.933282 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-8sz22"] Jan 22 06:06:35 crc kubenswrapper[4933]: I0122 06:06:35.995980 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-5f996bcdbf-kwx6s" Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.074472 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5fc9d4fd76-k4qh6"] Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.074679 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5fc9d4fd76-k4qh6" podUID="3945a3af-6972-419a-a60a-9f7b4b329fb1" containerName="neutron-api" containerID="cri-o://cfbad903f2c92b82331e9650e7037487d2a1ca46f3cf0f1c99591c5b68a30640" gracePeriod=30 Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.075422 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5fc9d4fd76-k4qh6" podUID="3945a3af-6972-419a-a60a-9f7b4b329fb1" containerName="neutron-httpd" containerID="cri-o://be9888b51e1450feb8c2ff1783a022dd5a5c8ddb8ef679f21b8c63b678dafbea" gracePeriod=30 Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.142521 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-9cz7d"] Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.150337 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-b23f-account-create-update-sfkx5"] Jan 22 06:06:36 crc kubenswrapper[4933]: W0122 06:06:36.202537 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc15a0f2e_dcbe_4197_bdcd_f50425d09e80.slice/crio-54faf9afa6832fba6fca040afdf93dc493f0cbb799a713c32a2bf46f42b677ce WatchSource:0}: Error finding container 54faf9afa6832fba6fca040afdf93dc493f0cbb799a713c32a2bf46f42b677ce: Status 404 returned error can't find the container with id 54faf9afa6832fba6fca040afdf93dc493f0cbb799a713c32a2bf46f42b677ce Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.341937 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-dg7dg"] Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.451770 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-548e-account-create-update-k4rxd"] Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.459480 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-f8f6-account-create-update-4r24j"] Jan 22 06:06:36 crc kubenswrapper[4933]: W0122 06:06:36.485886 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod18be1708_2025_4b39_a74c_fe83cf4744ad.slice/crio-a5016697bd2e034d7a0e5838790cb03339e7b0b52a053d677ddd61bb7e9a6715 WatchSource:0}: 
Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.666353 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-856bccf57c-l2f82_6541a01b-555e-4734-8eb8-bc63625dd293/neutron-api/0.log"
Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.666837 4933 generic.go:334] "Generic (PLEG): container finished" podID="6541a01b-555e-4734-8eb8-bc63625dd293" containerID="f4e82b1bcf25275851a5aef8022cd4bdd910f6f9ab17a41f7a84149b88a2abdc" exitCode=137
Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.666919 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-856bccf57c-l2f82" event={"ID":"6541a01b-555e-4734-8eb8-bc63625dd293","Type":"ContainerDied","Data":"f4e82b1bcf25275851a5aef8022cd4bdd910f6f9ab17a41f7a84149b88a2abdc"}
Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.676374 4933 generic.go:334] "Generic (PLEG): container finished" podID="a93245a9-0cb1-4b24-810e-ad085418a134" containerID="0e3dae9acf13e4c101ed9038cebd3f5dfa8ae112611c63de76c052a41cacaa61" exitCode=0
Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.676419 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a93245a9-0cb1-4b24-810e-ad085418a134","Type":"ContainerDied","Data":"0e3dae9acf13e4c101ed9038cebd3f5dfa8ae112611c63de76c052a41cacaa61"}
Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.681510 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-dg7dg" event={"ID":"6c1ba58d-aa1c-49e8-9975-319ff8cbdec5","Type":"ContainerStarted","Data":"090e33e4d3567f753f608b0bfeb07bd49b6d7637884bebf9129dec720755e05d"}
Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.681553 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-dg7dg" event={"ID":"6c1ba58d-aa1c-49e8-9975-319ff8cbdec5","Type":"ContainerStarted","Data":"f402ce2d6885f4ecbc9bd9faaa00ad91b64ef9fd47bd97b0922934162ae103b8"}
Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.685885 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-b23f-account-create-update-sfkx5" event={"ID":"c15a0f2e-dcbe-4197-bdcd-f50425d09e80","Type":"ContainerStarted","Data":"91d388ea73a08e0801ca20093e51fe25125825118703baeb4840b396ba49b75c"}
Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.685939 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-b23f-account-create-update-sfkx5" event={"ID":"c15a0f2e-dcbe-4197-bdcd-f50425d09e80","Type":"ContainerStarted","Data":"54faf9afa6832fba6fca040afdf93dc493f0cbb799a713c32a2bf46f42b677ce"}
Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.703582 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb","Type":"ContainerStarted","Data":"50d24875f78f8e422b947dad7b0bf36b02b0e1f1d85b4ea95011016a824be601"}
Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.703780 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4c6ad13a-b4ab-4db3-a39f-b110c543d8fb" containerName="proxy-httpd" containerID="cri-o://50d24875f78f8e422b947dad7b0bf36b02b0e1f1d85b4ea95011016a824be601" gracePeriod=30
Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.703799 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4c6ad13a-b4ab-4db3-a39f-b110c543d8fb" containerName="ceilometer-central-agent" containerID="cri-o://a2020de58eac7eef7181f4e5147562a2842d385a34d8e7eff987f562ab53c5fe" gracePeriod=30
Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.703918 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.703975 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4c6ad13a-b4ab-4db3-a39f-b110c543d8fb" containerName="ceilometer-notification-agent" containerID="cri-o://9cb6ffe51137f02eae74717eddeffb1853f3e9b9878a68197d6fb1c2315be8e7" gracePeriod=30
Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.704036 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4c6ad13a-b4ab-4db3-a39f-b110c543d8fb" containerName="sg-core" containerID="cri-o://f22b8ced7a10678e9471d69d9050a4dc0c3096a40104936c2b0ef31bef437ff0" gracePeriod=30
Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.712104 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-db-create-dg7dg" podStartSLOduration=1.712063665 podStartE2EDuration="1.712063665s" podCreationTimestamp="2026-01-22 06:06:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:06:36.69919957 +0000 UTC m=+1244.536324923" watchObservedRunningTime="2026-01-22 06:06:36.712063665 +0000 UTC m=+1244.549189018"
Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.721147 4933 generic.go:334] "Generic (PLEG): container finished" podID="b01a1347-77f8-4f4a-b98b-862d96c7c55c" containerID="3684c1a9364d58aa65ee2109cc86d4b56f3ea349100b97465ad0582a41b254fd" exitCode=0
Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.721239 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-8sz22" event={"ID":"b01a1347-77f8-4f4a-b98b-862d96c7c55c","Type":"ContainerDied","Data":"3684c1a9364d58aa65ee2109cc86d4b56f3ea349100b97465ad0582a41b254fd"}
Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.721288 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-8sz22" event={"ID":"b01a1347-77f8-4f4a-b98b-862d96c7c55c","Type":"ContainerStarted","Data":"caa89398a177d32693976d310615efccf62bc8e462dbaed2f653359fdc0c31c8"}
Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.722252 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-b23f-account-create-update-sfkx5" podStartSLOduration=1.722238924 podStartE2EDuration="1.722238924s" podCreationTimestamp="2026-01-22 06:06:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:06:36.721503099 +0000 UTC m=+1244.558628452" watchObservedRunningTime="2026-01-22 06:06:36.722238924 +0000 UTC m=+1244.559364277"
Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.736238 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-856bccf57c-l2f82_6541a01b-555e-4734-8eb8-bc63625dd293/neutron-api/0.log"
Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.736315 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-856bccf57c-l2f82"
Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.741519 4933 generic.go:334] "Generic (PLEG): container finished" podID="3945a3af-6972-419a-a60a-9f7b4b329fb1" containerID="be9888b51e1450feb8c2ff1783a022dd5a5c8ddb8ef679f21b8c63b678dafbea" exitCode=0
Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.741785 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5fc9d4fd76-k4qh6" event={"ID":"3945a3af-6972-419a-a60a-9f7b4b329fb1","Type":"ContainerDied","Data":"be9888b51e1450feb8c2ff1783a022dd5a5c8ddb8ef679f21b8c63b678dafbea"}
Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.742784 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-f8f6-account-create-update-4r24j" event={"ID":"b2d004aa-1c9d-428c-b3f3-851b50d53cc1","Type":"ContainerStarted","Data":"2a6b9c543d1c23a98ebce8fa37aed6c33a2dac250046978c4374a04b9d596100"}
Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.743543 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-548e-account-create-update-k4rxd" event={"ID":"18be1708-2025-4b39-a74c-fe83cf4744ad","Type":"ContainerStarted","Data":"a5016697bd2e034d7a0e5838790cb03339e7b0b52a053d677ddd61bb7e9a6715"}
Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.745477 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-9cz7d" event={"ID":"38d9042f-25f0-439a-9911-944297684f27","Type":"ContainerStarted","Data":"48c61157f4530cf2b5b3b83a7a41b770aa5408744cfbbb089df03b144095d74c"}
Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.745499 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-9cz7d" event={"ID":"38d9042f-25f0-439a-9911-944297684f27","Type":"ContainerStarted","Data":"42ca4055173a08cf3de67be53b86bf7674a836136a440190ce6d17ad71940e4c"}
Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.751793 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.178289976 podStartE2EDuration="5.751777292s" podCreationTimestamp="2026-01-22 06:06:31 +0000 UTC" firstStartedPulling="2026-01-22 06:06:32.49011039 +0000 UTC m=+1240.327235743" lastFinishedPulling="2026-01-22 06:06:36.063597706 +0000 UTC m=+1243.900723059" observedRunningTime="2026-01-22 06:06:36.751347453 +0000 UTC m=+1244.588472806" watchObservedRunningTime="2026-01-22 06:06:36.751777292 +0000 UTC m=+1244.588902635"
Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.783938 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6541a01b-555e-4734-8eb8-bc63625dd293-ovndb-tls-certs\") pod \"6541a01b-555e-4734-8eb8-bc63625dd293\" (UID: \"6541a01b-555e-4734-8eb8-bc63625dd293\") "
Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.784009 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/6541a01b-555e-4734-8eb8-bc63625dd293-config\") pod \"6541a01b-555e-4734-8eb8-bc63625dd293\" (UID: \"6541a01b-555e-4734-8eb8-bc63625dd293\") "
Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.784044 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6541a01b-555e-4734-8eb8-bc63625dd293-combined-ca-bundle\") pod \"6541a01b-555e-4734-8eb8-bc63625dd293\" (UID: \"6541a01b-555e-4734-8eb8-bc63625dd293\") "
Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.784131 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6541a01b-555e-4734-8eb8-bc63625dd293-httpd-config\") pod \"6541a01b-555e-4734-8eb8-bc63625dd293\" (UID: \"6541a01b-555e-4734-8eb8-bc63625dd293\") "
Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.784173 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sdzjg\" (UniqueName: \"kubernetes.io/projected/6541a01b-555e-4734-8eb8-bc63625dd293-kube-api-access-sdzjg\") pod \"6541a01b-555e-4734-8eb8-bc63625dd293\" (UID: \"6541a01b-555e-4734-8eb8-bc63625dd293\") "
Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.784222 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6541a01b-555e-4734-8eb8-bc63625dd293-internal-tls-certs\") pod \"6541a01b-555e-4734-8eb8-bc63625dd293\" (UID: \"6541a01b-555e-4734-8eb8-bc63625dd293\") "
Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.784337 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6541a01b-555e-4734-8eb8-bc63625dd293-public-tls-certs\") pod \"6541a01b-555e-4734-8eb8-bc63625dd293\" (UID: \"6541a01b-555e-4734-8eb8-bc63625dd293\") "
Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.812476 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6541a01b-555e-4734-8eb8-bc63625dd293-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "6541a01b-555e-4734-8eb8-bc63625dd293" (UID: "6541a01b-555e-4734-8eb8-bc63625dd293"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.827554 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6541a01b-555e-4734-8eb8-bc63625dd293-kube-api-access-sdzjg" (OuterVolumeSpecName: "kube-api-access-sdzjg") pod "6541a01b-555e-4734-8eb8-bc63625dd293" (UID: "6541a01b-555e-4734-8eb8-bc63625dd293"). InnerVolumeSpecName "kube-api-access-sdzjg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.852431 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-db-create-9cz7d" podStartSLOduration=2.852406992 podStartE2EDuration="2.852406992s" podCreationTimestamp="2026-01-22 06:06:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:06:36.816043944 +0000 UTC m=+1244.653169297" watchObservedRunningTime="2026-01-22 06:06:36.852406992 +0000 UTC m=+1244.689532345"
Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.886299 4933 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6541a01b-555e-4734-8eb8-bc63625dd293-httpd-config\") on node \"crc\" DevicePath \"\""
Jan 22 06:06:36 crc kubenswrapper[4933]: I0122 06:06:36.886328 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sdzjg\" (UniqueName: \"kubernetes.io/projected/6541a01b-555e-4734-8eb8-bc63625dd293-kube-api-access-sdzjg\") on node \"crc\" DevicePath \"\""
Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.023569 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6541a01b-555e-4734-8eb8-bc63625dd293-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "6541a01b-555e-4734-8eb8-bc63625dd293" (UID: "6541a01b-555e-4734-8eb8-bc63625dd293"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.072226 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6541a01b-555e-4734-8eb8-bc63625dd293-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6541a01b-555e-4734-8eb8-bc63625dd293" (UID: "6541a01b-555e-4734-8eb8-bc63625dd293"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.092207 4933 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6541a01b-555e-4734-8eb8-bc63625dd293-public-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.092243 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6541a01b-555e-4734-8eb8-bc63625dd293-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.103249 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6541a01b-555e-4734-8eb8-bc63625dd293-config" (OuterVolumeSpecName: "config") pod "6541a01b-555e-4734-8eb8-bc63625dd293" (UID: "6541a01b-555e-4734-8eb8-bc63625dd293"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.116313 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6541a01b-555e-4734-8eb8-bc63625dd293-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "6541a01b-555e-4734-8eb8-bc63625dd293" (UID: "6541a01b-555e-4734-8eb8-bc63625dd293"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.120253 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6541a01b-555e-4734-8eb8-bc63625dd293-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "6541a01b-555e-4734-8eb8-bc63625dd293" (UID: "6541a01b-555e-4734-8eb8-bc63625dd293"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.194186 4933 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6541a01b-555e-4734-8eb8-bc63625dd293-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.194222 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/6541a01b-555e-4734-8eb8-bc63625dd293-config\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.194232 4933 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6541a01b-555e-4734-8eb8-bc63625dd293-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.417640 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.498841 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"a93245a9-0cb1-4b24-810e-ad085418a134\" (UID: \"a93245a9-0cb1-4b24-810e-ad085418a134\") " Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.498975 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a93245a9-0cb1-4b24-810e-ad085418a134-httpd-run\") pod \"a93245a9-0cb1-4b24-810e-ad085418a134\" (UID: \"a93245a9-0cb1-4b24-810e-ad085418a134\") " Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.499152 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a93245a9-0cb1-4b24-810e-ad085418a134-logs\") pod \"a93245a9-0cb1-4b24-810e-ad085418a134\" (UID: \"a93245a9-0cb1-4b24-810e-ad085418a134\") " Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.499222 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a93245a9-0cb1-4b24-810e-ad085418a134-combined-ca-bundle\") pod \"a93245a9-0cb1-4b24-810e-ad085418a134\" (UID: \"a93245a9-0cb1-4b24-810e-ad085418a134\") " Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.499247 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a93245a9-0cb1-4b24-810e-ad085418a134-scripts\") pod \"a93245a9-0cb1-4b24-810e-ad085418a134\" (UID: \"a93245a9-0cb1-4b24-810e-ad085418a134\") " Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.499286 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a93245a9-0cb1-4b24-810e-ad085418a134-config-data\") pod \"a93245a9-0cb1-4b24-810e-ad085418a134\" (UID: \"a93245a9-0cb1-4b24-810e-ad085418a134\") " Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 
06:06:37.499322 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q8dbd\" (UniqueName: \"kubernetes.io/projected/a93245a9-0cb1-4b24-810e-ad085418a134-kube-api-access-q8dbd\") pod \"a93245a9-0cb1-4b24-810e-ad085418a134\" (UID: \"a93245a9-0cb1-4b24-810e-ad085418a134\") " Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.499379 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a93245a9-0cb1-4b24-810e-ad085418a134-public-tls-certs\") pod \"a93245a9-0cb1-4b24-810e-ad085418a134\" (UID: \"a93245a9-0cb1-4b24-810e-ad085418a134\") " Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.499894 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a93245a9-0cb1-4b24-810e-ad085418a134-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "a93245a9-0cb1-4b24-810e-ad085418a134" (UID: "a93245a9-0cb1-4b24-810e-ad085418a134"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.506458 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a93245a9-0cb1-4b24-810e-ad085418a134-kube-api-access-q8dbd" (OuterVolumeSpecName: "kube-api-access-q8dbd") pod "a93245a9-0cb1-4b24-810e-ad085418a134" (UID: "a93245a9-0cb1-4b24-810e-ad085418a134"). InnerVolumeSpecName "kube-api-access-q8dbd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.506730 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "glance") pod "a93245a9-0cb1-4b24-810e-ad085418a134" (UID: "a93245a9-0cb1-4b24-810e-ad085418a134"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.506738 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a93245a9-0cb1-4b24-810e-ad085418a134-logs" (OuterVolumeSpecName: "logs") pod "a93245a9-0cb1-4b24-810e-ad085418a134" (UID: "a93245a9-0cb1-4b24-810e-ad085418a134"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.541025 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a93245a9-0cb1-4b24-810e-ad085418a134-scripts" (OuterVolumeSpecName: "scripts") pod "a93245a9-0cb1-4b24-810e-ad085418a134" (UID: "a93245a9-0cb1-4b24-810e-ad085418a134"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.560431 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a93245a9-0cb1-4b24-810e-ad085418a134-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a93245a9-0cb1-4b24-810e-ad085418a134" (UID: "a93245a9-0cb1-4b24-810e-ad085418a134"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.576440 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a93245a9-0cb1-4b24-810e-ad085418a134-config-data" (OuterVolumeSpecName: "config-data") pod "a93245a9-0cb1-4b24-810e-ad085418a134" (UID: "a93245a9-0cb1-4b24-810e-ad085418a134"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.579323 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a93245a9-0cb1-4b24-810e-ad085418a134-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "a93245a9-0cb1-4b24-810e-ad085418a134" (UID: "a93245a9-0cb1-4b24-810e-ad085418a134"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.601672 4933 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a93245a9-0cb1-4b24-810e-ad085418a134-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.601725 4933 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.601738 4933 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a93245a9-0cb1-4b24-810e-ad085418a134-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.601750 4933 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a93245a9-0cb1-4b24-810e-ad085418a134-logs\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.601765 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a93245a9-0cb1-4b24-810e-ad085418a134-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.601776 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a93245a9-0cb1-4b24-810e-ad085418a134-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.601787 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a93245a9-0cb1-4b24-810e-ad085418a134-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.601799 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q8dbd\" (UniqueName: \"kubernetes.io/projected/a93245a9-0cb1-4b24-810e-ad085418a134-kube-api-access-q8dbd\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.629697 4933 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.703992 4933 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.757502 
4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a93245a9-0cb1-4b24-810e-ad085418a134","Type":"ContainerDied","Data":"75900f9e7c11ae16a5f72cd6dd5795ce7a97d5dbc045fafdda9c320a69d2551a"} Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.757561 4933 scope.go:117] "RemoveContainer" containerID="0e3dae9acf13e4c101ed9038cebd3f5dfa8ae112611c63de76c052a41cacaa61" Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.757705 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.764746 4933 generic.go:334] "Generic (PLEG): container finished" podID="4c6ad13a-b4ab-4db3-a39f-b110c543d8fb" containerID="50d24875f78f8e422b947dad7b0bf36b02b0e1f1d85b4ea95011016a824be601" exitCode=0 Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.764778 4933 generic.go:334] "Generic (PLEG): container finished" podID="4c6ad13a-b4ab-4db3-a39f-b110c543d8fb" containerID="f22b8ced7a10678e9471d69d9050a4dc0c3096a40104936c2b0ef31bef437ff0" exitCode=2 Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.764788 4933 generic.go:334] "Generic (PLEG): container finished" podID="4c6ad13a-b4ab-4db3-a39f-b110c543d8fb" containerID="9cb6ffe51137f02eae74717eddeffb1853f3e9b9878a68197d6fb1c2315be8e7" exitCode=0 Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.764806 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb","Type":"ContainerDied","Data":"50d24875f78f8e422b947dad7b0bf36b02b0e1f1d85b4ea95011016a824be601"} Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.764879 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb","Type":"ContainerDied","Data":"f22b8ced7a10678e9471d69d9050a4dc0c3096a40104936c2b0ef31bef437ff0"} Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.764893 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb","Type":"ContainerDied","Data":"9cb6ffe51137f02eae74717eddeffb1853f3e9b9878a68197d6fb1c2315be8e7"} Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.766867 4933 generic.go:334] "Generic (PLEG): container finished" podID="b2d004aa-1c9d-428c-b3f3-851b50d53cc1" containerID="156296118b1b88d53b2355e6b83afe711457599e35d08ad1921101045b462f90" exitCode=0 Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.766936 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-f8f6-account-create-update-4r24j" event={"ID":"b2d004aa-1c9d-428c-b3f3-851b50d53cc1","Type":"ContainerDied","Data":"156296118b1b88d53b2355e6b83afe711457599e35d08ad1921101045b462f90"} Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.769587 4933 generic.go:334] "Generic (PLEG): container finished" podID="18be1708-2025-4b39-a74c-fe83cf4744ad" containerID="df7de060c1dad8894f50f45e424929c853494018875cf7b6b592ddd7bb3ce606" exitCode=0 Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.769649 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-548e-account-create-update-k4rxd" event={"ID":"18be1708-2025-4b39-a74c-fe83cf4744ad","Type":"ContainerDied","Data":"df7de060c1dad8894f50f45e424929c853494018875cf7b6b592ddd7bb3ce606"} Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.771654 4933 generic.go:334] "Generic (PLEG): container 
finished" podID="6c1ba58d-aa1c-49e8-9975-319ff8cbdec5" containerID="090e33e4d3567f753f608b0bfeb07bd49b6d7637884bebf9129dec720755e05d" exitCode=0 Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.771722 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-dg7dg" event={"ID":"6c1ba58d-aa1c-49e8-9975-319ff8cbdec5","Type":"ContainerDied","Data":"090e33e4d3567f753f608b0bfeb07bd49b6d7637884bebf9129dec720755e05d"} Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.773711 4933 generic.go:334] "Generic (PLEG): container finished" podID="38d9042f-25f0-439a-9911-944297684f27" containerID="48c61157f4530cf2b5b3b83a7a41b770aa5408744cfbbb089df03b144095d74c" exitCode=0 Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.773754 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-9cz7d" event={"ID":"38d9042f-25f0-439a-9911-944297684f27","Type":"ContainerDied","Data":"48c61157f4530cf2b5b3b83a7a41b770aa5408744cfbbb089df03b144095d74c"} Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.776285 4933 generic.go:334] "Generic (PLEG): container finished" podID="c15a0f2e-dcbe-4197-bdcd-f50425d09e80" containerID="91d388ea73a08e0801ca20093e51fe25125825118703baeb4840b396ba49b75c" exitCode=0 Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.776354 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-b23f-account-create-update-sfkx5" event={"ID":"c15a0f2e-dcbe-4197-bdcd-f50425d09e80","Type":"ContainerDied","Data":"91d388ea73a08e0801ca20093e51fe25125825118703baeb4840b396ba49b75c"} Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.778615 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-856bccf57c-l2f82_6541a01b-555e-4734-8eb8-bc63625dd293/neutron-api/0.log" Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.778840 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-856bccf57c-l2f82" Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.780253 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-856bccf57c-l2f82" event={"ID":"6541a01b-555e-4734-8eb8-bc63625dd293","Type":"ContainerDied","Data":"2fa8955d0a6794d8893c22c2a462aa4524ef9380b44afa5d456b1c2baf2e7fe6"} Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.861323 4933 scope.go:117] "RemoveContainer" containerID="b9b670df062fab12248a75e5b85ce25fafa409f7873fbafd6c269b8edfd15364" Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.890140 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.911439 4933 scope.go:117] "RemoveContainer" containerID="57d4c641b72ffcca676596e412bcc902119cbf32a7b4fb39d8930b32680d0018" Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.916995 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.944232 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-856bccf57c-l2f82"] Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.949544 4933 scope.go:117] "RemoveContainer" containerID="f4e82b1bcf25275851a5aef8022cd4bdd910f6f9ab17a41f7a84149b88a2abdc" Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.961676 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-856bccf57c-l2f82"] Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.964946 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 06:06:37 crc kubenswrapper[4933]: E0122 06:06:37.965493 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a93245a9-0cb1-4b24-810e-ad085418a134" containerName="glance-log" Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.965545 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="a93245a9-0cb1-4b24-810e-ad085418a134" containerName="glance-log" Jan 22 06:06:37 crc kubenswrapper[4933]: E0122 06:06:37.965560 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a93245a9-0cb1-4b24-810e-ad085418a134" containerName="glance-httpd" Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.965581 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="a93245a9-0cb1-4b24-810e-ad085418a134" containerName="glance-httpd" Jan 22 06:06:37 crc kubenswrapper[4933]: E0122 06:06:37.965684 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6541a01b-555e-4734-8eb8-bc63625dd293" containerName="neutron-httpd" Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.965693 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="6541a01b-555e-4734-8eb8-bc63625dd293" containerName="neutron-httpd" Jan 22 06:06:37 crc kubenswrapper[4933]: E0122 06:06:37.965710 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6541a01b-555e-4734-8eb8-bc63625dd293" containerName="neutron-api" Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.965716 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="6541a01b-555e-4734-8eb8-bc63625dd293" containerName="neutron-api" Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.965922 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="6541a01b-555e-4734-8eb8-bc63625dd293" containerName="neutron-httpd" Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.965936 4933 
Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.966012 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="6541a01b-555e-4734-8eb8-bc63625dd293" containerName="neutron-api"
Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.966024 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="a93245a9-0cb1-4b24-810e-ad085418a134" containerName="glance-log"
Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.967021 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.970734 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc"
Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.970843 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Jan 22 06:06:37 crc kubenswrapper[4933]: I0122 06:06:37.976794 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 22 06:06:38 crc kubenswrapper[4933]: I0122 06:06:38.123549 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4c8b893-2e30-4273-bbec-7ff7efee686e-config-data\") pod \"glance-default-external-api-0\" (UID: \"b4c8b893-2e30-4273-bbec-7ff7efee686e\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:06:38 crc kubenswrapper[4933]: I0122 06:06:38.123618 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4c8b893-2e30-4273-bbec-7ff7efee686e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"b4c8b893-2e30-4273-bbec-7ff7efee686e\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:06:38 crc kubenswrapper[4933]: I0122 06:06:38.123702 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"b4c8b893-2e30-4273-bbec-7ff7efee686e\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:06:38 crc kubenswrapper[4933]: I0122 06:06:38.123800 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4c8b893-2e30-4273-bbec-7ff7efee686e-scripts\") pod \"glance-default-external-api-0\" (UID: \"b4c8b893-2e30-4273-bbec-7ff7efee686e\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:06:38 crc kubenswrapper[4933]: I0122 06:06:38.123832 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b4c8b893-2e30-4273-bbec-7ff7efee686e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"b4c8b893-2e30-4273-bbec-7ff7efee686e\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:06:38 crc kubenswrapper[4933]: I0122 06:06:38.123945 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-clvhb\" (UniqueName: \"kubernetes.io/projected/b4c8b893-2e30-4273-bbec-7ff7efee686e-kube-api-access-clvhb\") pod \"glance-default-external-api-0\" (UID: \"b4c8b893-2e30-4273-bbec-7ff7efee686e\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:06:38 crc kubenswrapper[4933]: I0122 06:06:38.123987 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b4c8b893-2e30-4273-bbec-7ff7efee686e-logs\") pod \"glance-default-external-api-0\" (UID: \"b4c8b893-2e30-4273-bbec-7ff7efee686e\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:06:38 crc kubenswrapper[4933]: I0122 06:06:38.124055 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4c8b893-2e30-4273-bbec-7ff7efee686e-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"b4c8b893-2e30-4273-bbec-7ff7efee686e\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:06:38 crc kubenswrapper[4933]: I0122 06:06:38.209735 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-8sz22"
Jan 22 06:06:38 crc kubenswrapper[4933]: I0122 06:06:38.225603 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4c8b893-2e30-4273-bbec-7ff7efee686e-scripts\") pod \"glance-default-external-api-0\" (UID: \"b4c8b893-2e30-4273-bbec-7ff7efee686e\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:06:38 crc kubenswrapper[4933]: I0122 06:06:38.225647 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b4c8b893-2e30-4273-bbec-7ff7efee686e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"b4c8b893-2e30-4273-bbec-7ff7efee686e\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:06:38 crc kubenswrapper[4933]: I0122 06:06:38.225694 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-clvhb\" (UniqueName: \"kubernetes.io/projected/b4c8b893-2e30-4273-bbec-7ff7efee686e-kube-api-access-clvhb\") pod \"glance-default-external-api-0\" (UID: \"b4c8b893-2e30-4273-bbec-7ff7efee686e\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:06:38 crc kubenswrapper[4933]: I0122 06:06:38.225718 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b4c8b893-2e30-4273-bbec-7ff7efee686e-logs\") pod \"glance-default-external-api-0\" (UID: \"b4c8b893-2e30-4273-bbec-7ff7efee686e\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:06:38 crc kubenswrapper[4933]: I0122 06:06:38.225752 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4c8b893-2e30-4273-bbec-7ff7efee686e-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"b4c8b893-2e30-4273-bbec-7ff7efee686e\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:06:38 crc kubenswrapper[4933]: I0122 06:06:38.225781 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4c8b893-2e30-4273-bbec-7ff7efee686e-config-data\") pod \"glance-default-external-api-0\" (UID: \"b4c8b893-2e30-4273-bbec-7ff7efee686e\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:06:38 crc kubenswrapper[4933]: I0122 06:06:38.225798 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4c8b893-2e30-4273-bbec-7ff7efee686e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"b4c8b893-2e30-4273-bbec-7ff7efee686e\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:06:38 crc kubenswrapper[4933]: I0122 06:06:38.225838 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"b4c8b893-2e30-4273-bbec-7ff7efee686e\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:06:38 crc kubenswrapper[4933]: I0122 06:06:38.226287 4933 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"b4c8b893-2e30-4273-bbec-7ff7efee686e\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/glance-default-external-api-0"
Jan 22 06:06:38 crc kubenswrapper[4933]: I0122 06:06:38.226473 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b4c8b893-2e30-4273-bbec-7ff7efee686e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"b4c8b893-2e30-4273-bbec-7ff7efee686e\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:06:38 crc kubenswrapper[4933]: I0122 06:06:38.226863 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b4c8b893-2e30-4273-bbec-7ff7efee686e-logs\") pod \"glance-default-external-api-0\" (UID: \"b4c8b893-2e30-4273-bbec-7ff7efee686e\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:06:38 crc kubenswrapper[4933]: I0122 06:06:38.230301 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4c8b893-2e30-4273-bbec-7ff7efee686e-scripts\") pod \"glance-default-external-api-0\" (UID: \"b4c8b893-2e30-4273-bbec-7ff7efee686e\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:06:38 crc kubenswrapper[4933]: I0122 06:06:38.230712 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4c8b893-2e30-4273-bbec-7ff7efee686e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"b4c8b893-2e30-4273-bbec-7ff7efee686e\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:06:38 crc kubenswrapper[4933]: I0122 06:06:38.231218 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4c8b893-2e30-4273-bbec-7ff7efee686e-config-data\") pod \"glance-default-external-api-0\" (UID: \"b4c8b893-2e30-4273-bbec-7ff7efee686e\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:06:38 crc kubenswrapper[4933]: I0122 06:06:38.233815 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4c8b893-2e30-4273-bbec-7ff7efee686e-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"b4c8b893-2e30-4273-bbec-7ff7efee686e\") " pod="openstack/glance-default-external-api-0"
Jan 22 06:06:38 crc kubenswrapper[4933]: I0122 06:06:38.249580 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-clvhb\" (UniqueName: \"kubernetes.io/projected/b4c8b893-2e30-4273-bbec-7ff7efee686e-kube-api-access-clvhb\") pod \"glance-default-external-api-0\" (UID: \"b4c8b893-2e30-4273-bbec-7ff7efee686e\") " pod="openstack/glance-default-external-api-0"
\"glance-default-external-api-0\" (UID: \"b4c8b893-2e30-4273-bbec-7ff7efee686e\") " pod="openstack/glance-default-external-api-0" Jan 22 06:06:38 crc kubenswrapper[4933]: I0122 06:06:38.255963 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"b4c8b893-2e30-4273-bbec-7ff7efee686e\") " pod="openstack/glance-default-external-api-0" Jan 22 06:06:38 crc kubenswrapper[4933]: I0122 06:06:38.310063 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 22 06:06:38 crc kubenswrapper[4933]: I0122 06:06:38.326624 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h5tkp\" (UniqueName: \"kubernetes.io/projected/b01a1347-77f8-4f4a-b98b-862d96c7c55c-kube-api-access-h5tkp\") pod \"b01a1347-77f8-4f4a-b98b-862d96c7c55c\" (UID: \"b01a1347-77f8-4f4a-b98b-862d96c7c55c\") " Jan 22 06:06:38 crc kubenswrapper[4933]: I0122 06:06:38.326794 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b01a1347-77f8-4f4a-b98b-862d96c7c55c-operator-scripts\") pod \"b01a1347-77f8-4f4a-b98b-862d96c7c55c\" (UID: \"b01a1347-77f8-4f4a-b98b-862d96c7c55c\") " Jan 22 06:06:38 crc kubenswrapper[4933]: I0122 06:06:38.327348 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b01a1347-77f8-4f4a-b98b-862d96c7c55c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b01a1347-77f8-4f4a-b98b-862d96c7c55c" (UID: "b01a1347-77f8-4f4a-b98b-862d96c7c55c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:06:38 crc kubenswrapper[4933]: I0122 06:06:38.337248 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b01a1347-77f8-4f4a-b98b-862d96c7c55c-kube-api-access-h5tkp" (OuterVolumeSpecName: "kube-api-access-h5tkp") pod "b01a1347-77f8-4f4a-b98b-862d96c7c55c" (UID: "b01a1347-77f8-4f4a-b98b-862d96c7c55c"). InnerVolumeSpecName "kube-api-access-h5tkp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:06:38 crc kubenswrapper[4933]: I0122 06:06:38.428759 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h5tkp\" (UniqueName: \"kubernetes.io/projected/b01a1347-77f8-4f4a-b98b-862d96c7c55c-kube-api-access-h5tkp\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:38 crc kubenswrapper[4933]: I0122 06:06:38.428794 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b01a1347-77f8-4f4a-b98b-862d96c7c55c-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:38 crc kubenswrapper[4933]: I0122 06:06:38.500472 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6541a01b-555e-4734-8eb8-bc63625dd293" path="/var/lib/kubelet/pods/6541a01b-555e-4734-8eb8-bc63625dd293/volumes" Jan 22 06:06:38 crc kubenswrapper[4933]: I0122 06:06:38.501566 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a93245a9-0cb1-4b24-810e-ad085418a134" path="/var/lib/kubelet/pods/a93245a9-0cb1-4b24-810e-ad085418a134/volumes" Jan 22 06:06:38 crc kubenswrapper[4933]: I0122 06:06:38.807169 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-8sz22" event={"ID":"b01a1347-77f8-4f4a-b98b-862d96c7c55c","Type":"ContainerDied","Data":"caa89398a177d32693976d310615efccf62bc8e462dbaed2f653359fdc0c31c8"} Jan 22 06:06:38 crc kubenswrapper[4933]: I0122 06:06:38.807218 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="caa89398a177d32693976d310615efccf62bc8e462dbaed2f653359fdc0c31c8" Jan 22 06:06:38 crc kubenswrapper[4933]: I0122 06:06:38.807281 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-8sz22" Jan 22 06:06:38 crc kubenswrapper[4933]: I0122 06:06:38.813064 4933 generic.go:334] "Generic (PLEG): container finished" podID="f332d598-c2fa-4ed1-9b08-9245f85185af" containerID="97322d5aa3a394dfde9104353fcf318859f9b7d5d72a2357a8c11f88a0684f57" exitCode=0 Jan 22 06:06:38 crc kubenswrapper[4933]: I0122 06:06:38.813357 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f332d598-c2fa-4ed1-9b08-9245f85185af","Type":"ContainerDied","Data":"97322d5aa3a394dfde9104353fcf318859f9b7d5d72a2357a8c11f88a0684f57"} Jan 22 06:06:38 crc kubenswrapper[4933]: I0122 06:06:38.837166 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 06:06:38 crc kubenswrapper[4933]: W0122 06:06:38.893773 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb4c8b893_2e30_4273_bbec_7ff7efee686e.slice/crio-3daf3c9a01e2b97020adb3f2480965ebf20834cf860553b875b1f8a5e46cb9a5 WatchSource:0}: Error finding container 3daf3c9a01e2b97020adb3f2480965ebf20834cf860553b875b1f8a5e46cb9a5: Status 404 returned error can't find the container with id 3daf3c9a01e2b97020adb3f2480965ebf20834cf860553b875b1f8a5e46cb9a5 Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.068619 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.143586 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f332d598-c2fa-4ed1-9b08-9245f85185af-httpd-run\") pod \"f332d598-c2fa-4ed1-9b08-9245f85185af\" (UID: \"f332d598-c2fa-4ed1-9b08-9245f85185af\") " Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.143630 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"f332d598-c2fa-4ed1-9b08-9245f85185af\" (UID: \"f332d598-c2fa-4ed1-9b08-9245f85185af\") " Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.143701 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z2x7z\" (UniqueName: \"kubernetes.io/projected/f332d598-c2fa-4ed1-9b08-9245f85185af-kube-api-access-z2x7z\") pod \"f332d598-c2fa-4ed1-9b08-9245f85185af\" (UID: \"f332d598-c2fa-4ed1-9b08-9245f85185af\") " Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.143790 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f332d598-c2fa-4ed1-9b08-9245f85185af-scripts\") pod \"f332d598-c2fa-4ed1-9b08-9245f85185af\" (UID: \"f332d598-c2fa-4ed1-9b08-9245f85185af\") " Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.143822 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f332d598-c2fa-4ed1-9b08-9245f85185af-logs\") pod \"f332d598-c2fa-4ed1-9b08-9245f85185af\" (UID: \"f332d598-c2fa-4ed1-9b08-9245f85185af\") " Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.143841 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f332d598-c2fa-4ed1-9b08-9245f85185af-config-data\") pod \"f332d598-c2fa-4ed1-9b08-9245f85185af\" (UID: \"f332d598-c2fa-4ed1-9b08-9245f85185af\") " Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.143933 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f332d598-c2fa-4ed1-9b08-9245f85185af-combined-ca-bundle\") pod \"f332d598-c2fa-4ed1-9b08-9245f85185af\" (UID: \"f332d598-c2fa-4ed1-9b08-9245f85185af\") " Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.143961 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f332d598-c2fa-4ed1-9b08-9245f85185af-internal-tls-certs\") pod \"f332d598-c2fa-4ed1-9b08-9245f85185af\" (UID: \"f332d598-c2fa-4ed1-9b08-9245f85185af\") " Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.150389 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f332d598-c2fa-4ed1-9b08-9245f85185af-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "f332d598-c2fa-4ed1-9b08-9245f85185af" (UID: "f332d598-c2fa-4ed1-9b08-9245f85185af"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.151703 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f332d598-c2fa-4ed1-9b08-9245f85185af-logs" (OuterVolumeSpecName: "logs") pod "f332d598-c2fa-4ed1-9b08-9245f85185af" (UID: "f332d598-c2fa-4ed1-9b08-9245f85185af"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.154713 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "glance") pod "f332d598-c2fa-4ed1-9b08-9245f85185af" (UID: "f332d598-c2fa-4ed1-9b08-9245f85185af"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.161738 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f332d598-c2fa-4ed1-9b08-9245f85185af-scripts" (OuterVolumeSpecName: "scripts") pod "f332d598-c2fa-4ed1-9b08-9245f85185af" (UID: "f332d598-c2fa-4ed1-9b08-9245f85185af"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.163204 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f332d598-c2fa-4ed1-9b08-9245f85185af-kube-api-access-z2x7z" (OuterVolumeSpecName: "kube-api-access-z2x7z") pod "f332d598-c2fa-4ed1-9b08-9245f85185af" (UID: "f332d598-c2fa-4ed1-9b08-9245f85185af"). InnerVolumeSpecName "kube-api-access-z2x7z". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.178408 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f332d598-c2fa-4ed1-9b08-9245f85185af-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f332d598-c2fa-4ed1-9b08-9245f85185af" (UID: "f332d598-c2fa-4ed1-9b08-9245f85185af"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.254001 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-b23f-account-create-update-sfkx5" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.270145 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f332d598-c2fa-4ed1-9b08-9245f85185af-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "f332d598-c2fa-4ed1-9b08-9245f85185af" (UID: "f332d598-c2fa-4ed1-9b08-9245f85185af"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.270833 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z2x7z\" (UniqueName: \"kubernetes.io/projected/f332d598-c2fa-4ed1-9b08-9245f85185af-kube-api-access-z2x7z\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.270865 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f332d598-c2fa-4ed1-9b08-9245f85185af-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.270876 4933 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f332d598-c2fa-4ed1-9b08-9245f85185af-logs\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.270888 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f332d598-c2fa-4ed1-9b08-9245f85185af-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.270898 4933 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f332d598-c2fa-4ed1-9b08-9245f85185af-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.270909 4933 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f332d598-c2fa-4ed1-9b08-9245f85185af-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.270961 4933 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.272023 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f332d598-c2fa-4ed1-9b08-9245f85185af-config-data" (OuterVolumeSpecName: "config-data") pod "f332d598-c2fa-4ed1-9b08-9245f85185af" (UID: "f332d598-c2fa-4ed1-9b08-9245f85185af"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.316765 4933 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.373176 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2qmmj\" (UniqueName: \"kubernetes.io/projected/c15a0f2e-dcbe-4197-bdcd-f50425d09e80-kube-api-access-2qmmj\") pod \"c15a0f2e-dcbe-4197-bdcd-f50425d09e80\" (UID: \"c15a0f2e-dcbe-4197-bdcd-f50425d09e80\") " Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.373399 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c15a0f2e-dcbe-4197-bdcd-f50425d09e80-operator-scripts\") pod \"c15a0f2e-dcbe-4197-bdcd-f50425d09e80\" (UID: \"c15a0f2e-dcbe-4197-bdcd-f50425d09e80\") " Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.373797 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f332d598-c2fa-4ed1-9b08-9245f85185af-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.373810 4933 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.374499 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c15a0f2e-dcbe-4197-bdcd-f50425d09e80-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c15a0f2e-dcbe-4197-bdcd-f50425d09e80" (UID: "c15a0f2e-dcbe-4197-bdcd-f50425d09e80"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.384557 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c15a0f2e-dcbe-4197-bdcd-f50425d09e80-kube-api-access-2qmmj" (OuterVolumeSpecName: "kube-api-access-2qmmj") pod "c15a0f2e-dcbe-4197-bdcd-f50425d09e80" (UID: "c15a0f2e-dcbe-4197-bdcd-f50425d09e80"). InnerVolumeSpecName "kube-api-access-2qmmj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.475741 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c15a0f2e-dcbe-4197-bdcd-f50425d09e80-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.475766 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2qmmj\" (UniqueName: \"kubernetes.io/projected/c15a0f2e-dcbe-4197-bdcd-f50425d09e80-kube-api-access-2qmmj\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.478359 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-f8f6-account-create-update-4r24j" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.484667 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-9cz7d" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.504301 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-548e-account-create-update-k4rxd" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.518268 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-dg7dg" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.576876 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b2d004aa-1c9d-428c-b3f3-851b50d53cc1-operator-scripts\") pod \"b2d004aa-1c9d-428c-b3f3-851b50d53cc1\" (UID: \"b2d004aa-1c9d-428c-b3f3-851b50d53cc1\") " Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.577120 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lrjvp\" (UniqueName: \"kubernetes.io/projected/b2d004aa-1c9d-428c-b3f3-851b50d53cc1-kube-api-access-lrjvp\") pod \"b2d004aa-1c9d-428c-b3f3-851b50d53cc1\" (UID: \"b2d004aa-1c9d-428c-b3f3-851b50d53cc1\") " Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.578741 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b2d004aa-1c9d-428c-b3f3-851b50d53cc1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b2d004aa-1c9d-428c-b3f3-851b50d53cc1" (UID: "b2d004aa-1c9d-428c-b3f3-851b50d53cc1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.583179 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2d004aa-1c9d-428c-b3f3-851b50d53cc1-kube-api-access-lrjvp" (OuterVolumeSpecName: "kube-api-access-lrjvp") pod "b2d004aa-1c9d-428c-b3f3-851b50d53cc1" (UID: "b2d004aa-1c9d-428c-b3f3-851b50d53cc1"). InnerVolumeSpecName "kube-api-access-lrjvp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.678573 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6c1ba58d-aa1c-49e8-9975-319ff8cbdec5-operator-scripts\") pod \"6c1ba58d-aa1c-49e8-9975-319ff8cbdec5\" (UID: \"6c1ba58d-aa1c-49e8-9975-319ff8cbdec5\") " Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.678708 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/18be1708-2025-4b39-a74c-fe83cf4744ad-operator-scripts\") pod \"18be1708-2025-4b39-a74c-fe83cf4744ad\" (UID: \"18be1708-2025-4b39-a74c-fe83cf4744ad\") " Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.678734 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/38d9042f-25f0-439a-9911-944297684f27-operator-scripts\") pod \"38d9042f-25f0-439a-9911-944297684f27\" (UID: \"38d9042f-25f0-439a-9911-944297684f27\") " Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.678755 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6hfgh\" (UniqueName: \"kubernetes.io/projected/18be1708-2025-4b39-a74c-fe83cf4744ad-kube-api-access-6hfgh\") pod \"18be1708-2025-4b39-a74c-fe83cf4744ad\" (UID: \"18be1708-2025-4b39-a74c-fe83cf4744ad\") " Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.678788 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8b9ck\" (UniqueName: \"kubernetes.io/projected/38d9042f-25f0-439a-9911-944297684f27-kube-api-access-8b9ck\") pod \"38d9042f-25f0-439a-9911-944297684f27\" (UID: \"38d9042f-25f0-439a-9911-944297684f27\") " Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.678835 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6lqkd\" (UniqueName: \"kubernetes.io/projected/6c1ba58d-aa1c-49e8-9975-319ff8cbdec5-kube-api-access-6lqkd\") pod \"6c1ba58d-aa1c-49e8-9975-319ff8cbdec5\" (UID: \"6c1ba58d-aa1c-49e8-9975-319ff8cbdec5\") " Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.679324 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lrjvp\" (UniqueName: \"kubernetes.io/projected/b2d004aa-1c9d-428c-b3f3-851b50d53cc1-kube-api-access-lrjvp\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.679338 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b2d004aa-1c9d-428c-b3f3-851b50d53cc1-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.680273 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c1ba58d-aa1c-49e8-9975-319ff8cbdec5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6c1ba58d-aa1c-49e8-9975-319ff8cbdec5" (UID: "6c1ba58d-aa1c-49e8-9975-319ff8cbdec5"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.680458 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/18be1708-2025-4b39-a74c-fe83cf4744ad-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "18be1708-2025-4b39-a74c-fe83cf4744ad" (UID: "18be1708-2025-4b39-a74c-fe83cf4744ad"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.680654 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/38d9042f-25f0-439a-9911-944297684f27-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "38d9042f-25f0-439a-9911-944297684f27" (UID: "38d9042f-25f0-439a-9911-944297684f27"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.685809 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c1ba58d-aa1c-49e8-9975-319ff8cbdec5-kube-api-access-6lqkd" (OuterVolumeSpecName: "kube-api-access-6lqkd") pod "6c1ba58d-aa1c-49e8-9975-319ff8cbdec5" (UID: "6c1ba58d-aa1c-49e8-9975-319ff8cbdec5"). InnerVolumeSpecName "kube-api-access-6lqkd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.685844 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/18be1708-2025-4b39-a74c-fe83cf4744ad-kube-api-access-6hfgh" (OuterVolumeSpecName: "kube-api-access-6hfgh") pod "18be1708-2025-4b39-a74c-fe83cf4744ad" (UID: "18be1708-2025-4b39-a74c-fe83cf4744ad"). InnerVolumeSpecName "kube-api-access-6hfgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.686869 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38d9042f-25f0-439a-9911-944297684f27-kube-api-access-8b9ck" (OuterVolumeSpecName: "kube-api-access-8b9ck") pod "38d9042f-25f0-439a-9911-944297684f27" (UID: "38d9042f-25f0-439a-9911-944297684f27"). InnerVolumeSpecName "kube-api-access-8b9ck". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.781416 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6c1ba58d-aa1c-49e8-9975-319ff8cbdec5-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.781446 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/18be1708-2025-4b39-a74c-fe83cf4744ad-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.781456 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/38d9042f-25f0-439a-9911-944297684f27-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.781465 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6hfgh\" (UniqueName: \"kubernetes.io/projected/18be1708-2025-4b39-a74c-fe83cf4744ad-kube-api-access-6hfgh\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.781475 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8b9ck\" (UniqueName: \"kubernetes.io/projected/38d9042f-25f0-439a-9911-944297684f27-kube-api-access-8b9ck\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.781485 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6lqkd\" (UniqueName: \"kubernetes.io/projected/6c1ba58d-aa1c-49e8-9975-319ff8cbdec5-kube-api-access-6lqkd\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.827118 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f332d598-c2fa-4ed1-9b08-9245f85185af","Type":"ContainerDied","Data":"7b11e623a7157361cfaeb2a2b55d3c72f3c573ad4d40fcc2992cb088ba1d8711"} Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.827189 4933 scope.go:117] "RemoveContainer" containerID="97322d5aa3a394dfde9104353fcf318859f9b7d5d72a2357a8c11f88a0684f57" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.827435 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.837175 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-9cz7d" event={"ID":"38d9042f-25f0-439a-9911-944297684f27","Type":"ContainerDied","Data":"42ca4055173a08cf3de67be53b86bf7674a836136a440190ce6d17ad71940e4c"} Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.837218 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="42ca4055173a08cf3de67be53b86bf7674a836136a440190ce6d17ad71940e4c" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.837292 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-9cz7d" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.841229 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-b23f-account-create-update-sfkx5" event={"ID":"c15a0f2e-dcbe-4197-bdcd-f50425d09e80","Type":"ContainerDied","Data":"54faf9afa6832fba6fca040afdf93dc493f0cbb799a713c32a2bf46f42b677ce"} Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.841287 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="54faf9afa6832fba6fca040afdf93dc493f0cbb799a713c32a2bf46f42b677ce" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.841368 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-b23f-account-create-update-sfkx5" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.857676 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b4c8b893-2e30-4273-bbec-7ff7efee686e","Type":"ContainerStarted","Data":"99672693509f4dfe6f517b698528ee0345ec420a893d4e6ab3cda20b8fb397ea"} Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.857725 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b4c8b893-2e30-4273-bbec-7ff7efee686e","Type":"ContainerStarted","Data":"3daf3c9a01e2b97020adb3f2480965ebf20834cf860553b875b1f8a5e46cb9a5"} Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.860757 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-f8f6-account-create-update-4r24j" event={"ID":"b2d004aa-1c9d-428c-b3f3-851b50d53cc1","Type":"ContainerDied","Data":"2a6b9c543d1c23a98ebce8fa37aed6c33a2dac250046978c4374a04b9d596100"} Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.860790 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2a6b9c543d1c23a98ebce8fa37aed6c33a2dac250046978c4374a04b9d596100" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.860851 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-f8f6-account-create-update-4r24j" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.873833 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-548e-account-create-update-k4rxd" event={"ID":"18be1708-2025-4b39-a74c-fe83cf4744ad","Type":"ContainerDied","Data":"a5016697bd2e034d7a0e5838790cb03339e7b0b52a053d677ddd61bb7e9a6715"} Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.873892 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a5016697bd2e034d7a0e5838790cb03339e7b0b52a053d677ddd61bb7e9a6715" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.873855 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-548e-account-create-update-k4rxd" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.876590 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-dg7dg" event={"ID":"6c1ba58d-aa1c-49e8-9975-319ff8cbdec5","Type":"ContainerDied","Data":"f402ce2d6885f4ecbc9bd9faaa00ad91b64ef9fd47bd97b0922934162ae103b8"} Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.876633 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f402ce2d6885f4ecbc9bd9faaa00ad91b64ef9fd47bd97b0922934162ae103b8" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.876705 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-dg7dg" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.914889 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.932704 4933 scope.go:117] "RemoveContainer" containerID="460c308d9a9fca5408912df645075971fee07fee0e34840d93858c38da9642d9" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.944771 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.959873 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 06:06:39 crc kubenswrapper[4933]: E0122 06:06:39.960250 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b01a1347-77f8-4f4a-b98b-862d96c7c55c" containerName="mariadb-database-create" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.960266 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="b01a1347-77f8-4f4a-b98b-862d96c7c55c" containerName="mariadb-database-create" Jan 22 06:06:39 crc kubenswrapper[4933]: E0122 06:06:39.960295 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c15a0f2e-dcbe-4197-bdcd-f50425d09e80" containerName="mariadb-account-create-update" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.960302 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="c15a0f2e-dcbe-4197-bdcd-f50425d09e80" containerName="mariadb-account-create-update" Jan 22 06:06:39 crc kubenswrapper[4933]: E0122 06:06:39.960315 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c1ba58d-aa1c-49e8-9975-319ff8cbdec5" containerName="mariadb-database-create" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.960321 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c1ba58d-aa1c-49e8-9975-319ff8cbdec5" containerName="mariadb-database-create" Jan 22 06:06:39 crc kubenswrapper[4933]: E0122 06:06:39.960330 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2d004aa-1c9d-428c-b3f3-851b50d53cc1" containerName="mariadb-account-create-update" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.960335 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2d004aa-1c9d-428c-b3f3-851b50d53cc1" containerName="mariadb-account-create-update" Jan 22 06:06:39 crc kubenswrapper[4933]: E0122 06:06:39.960346 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f332d598-c2fa-4ed1-9b08-9245f85185af" containerName="glance-log" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.960351 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="f332d598-c2fa-4ed1-9b08-9245f85185af" containerName="glance-log" Jan 22 06:06:39 crc 
kubenswrapper[4933]: E0122 06:06:39.960364 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38d9042f-25f0-439a-9911-944297684f27" containerName="mariadb-database-create" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.960372 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="38d9042f-25f0-439a-9911-944297684f27" containerName="mariadb-database-create" Jan 22 06:06:39 crc kubenswrapper[4933]: E0122 06:06:39.960383 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f332d598-c2fa-4ed1-9b08-9245f85185af" containerName="glance-httpd" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.960390 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="f332d598-c2fa-4ed1-9b08-9245f85185af" containerName="glance-httpd" Jan 22 06:06:39 crc kubenswrapper[4933]: E0122 06:06:39.960399 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="18be1708-2025-4b39-a74c-fe83cf4744ad" containerName="mariadb-account-create-update" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.960404 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="18be1708-2025-4b39-a74c-fe83cf4744ad" containerName="mariadb-account-create-update" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.960544 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="f332d598-c2fa-4ed1-9b08-9245f85185af" containerName="glance-httpd" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.960557 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2d004aa-1c9d-428c-b3f3-851b50d53cc1" containerName="mariadb-account-create-update" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.960568 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="f332d598-c2fa-4ed1-9b08-9245f85185af" containerName="glance-log" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.960581 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="b01a1347-77f8-4f4a-b98b-862d96c7c55c" containerName="mariadb-database-create" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.960593 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="38d9042f-25f0-439a-9911-944297684f27" containerName="mariadb-database-create" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.960603 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="18be1708-2025-4b39-a74c-fe83cf4744ad" containerName="mariadb-account-create-update" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.960612 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="c15a0f2e-dcbe-4197-bdcd-f50425d09e80" containerName="mariadb-account-create-update" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.960626 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c1ba58d-aa1c-49e8-9975-319ff8cbdec5" containerName="mariadb-database-create" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.961451 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.966043 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.966334 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 22 06:06:39 crc kubenswrapper[4933]: I0122 06:06:39.969470 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 06:06:40 crc kubenswrapper[4933]: I0122 06:06:40.086086 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"a2bcbc4b-30c4-4ec8-81bf-6cba18171506\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:06:40 crc kubenswrapper[4933]: I0122 06:06:40.086161 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x9z6f\" (UniqueName: \"kubernetes.io/projected/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-kube-api-access-x9z6f\") pod \"glance-default-internal-api-0\" (UID: \"a2bcbc4b-30c4-4ec8-81bf-6cba18171506\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:06:40 crc kubenswrapper[4933]: I0122 06:06:40.086362 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-logs\") pod \"glance-default-internal-api-0\" (UID: \"a2bcbc4b-30c4-4ec8-81bf-6cba18171506\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:06:40 crc kubenswrapper[4933]: I0122 06:06:40.086450 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"a2bcbc4b-30c4-4ec8-81bf-6cba18171506\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:06:40 crc kubenswrapper[4933]: I0122 06:06:40.086598 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"a2bcbc4b-30c4-4ec8-81bf-6cba18171506\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:06:40 crc kubenswrapper[4933]: I0122 06:06:40.086692 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-scripts\") pod \"glance-default-internal-api-0\" (UID: \"a2bcbc4b-30c4-4ec8-81bf-6cba18171506\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:06:40 crc kubenswrapper[4933]: I0122 06:06:40.086976 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"a2bcbc4b-30c4-4ec8-81bf-6cba18171506\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:06:40 crc kubenswrapper[4933]: I0122 06:06:40.087042 4933 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-config-data\") pod \"glance-default-internal-api-0\" (UID: \"a2bcbc4b-30c4-4ec8-81bf-6cba18171506\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:06:40 crc kubenswrapper[4933]: I0122 06:06:40.190311 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x9z6f\" (UniqueName: \"kubernetes.io/projected/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-kube-api-access-x9z6f\") pod \"glance-default-internal-api-0\" (UID: \"a2bcbc4b-30c4-4ec8-81bf-6cba18171506\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:06:40 crc kubenswrapper[4933]: I0122 06:06:40.190367 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-logs\") pod \"glance-default-internal-api-0\" (UID: \"a2bcbc4b-30c4-4ec8-81bf-6cba18171506\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:06:40 crc kubenswrapper[4933]: I0122 06:06:40.190391 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"a2bcbc4b-30c4-4ec8-81bf-6cba18171506\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:06:40 crc kubenswrapper[4933]: I0122 06:06:40.190423 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"a2bcbc4b-30c4-4ec8-81bf-6cba18171506\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:06:40 crc kubenswrapper[4933]: I0122 06:06:40.190449 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-scripts\") pod \"glance-default-internal-api-0\" (UID: \"a2bcbc4b-30c4-4ec8-81bf-6cba18171506\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:06:40 crc kubenswrapper[4933]: I0122 06:06:40.190552 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"a2bcbc4b-30c4-4ec8-81bf-6cba18171506\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:06:40 crc kubenswrapper[4933]: I0122 06:06:40.190575 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-config-data\") pod \"glance-default-internal-api-0\" (UID: \"a2bcbc4b-30c4-4ec8-81bf-6cba18171506\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:06:40 crc kubenswrapper[4933]: I0122 06:06:40.190615 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"a2bcbc4b-30c4-4ec8-81bf-6cba18171506\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:06:40 crc kubenswrapper[4933]: I0122 06:06:40.191032 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: 
\"kubernetes.io/empty-dir/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"a2bcbc4b-30c4-4ec8-81bf-6cba18171506\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:06:40 crc kubenswrapper[4933]: I0122 06:06:40.191379 4933 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"a2bcbc4b-30c4-4ec8-81bf-6cba18171506\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-internal-api-0" Jan 22 06:06:40 crc kubenswrapper[4933]: I0122 06:06:40.194409 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-logs\") pod \"glance-default-internal-api-0\" (UID: \"a2bcbc4b-30c4-4ec8-81bf-6cba18171506\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:06:40 crc kubenswrapper[4933]: I0122 06:06:40.203062 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"a2bcbc4b-30c4-4ec8-81bf-6cba18171506\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:06:40 crc kubenswrapper[4933]: I0122 06:06:40.204350 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-config-data\") pod \"glance-default-internal-api-0\" (UID: \"a2bcbc4b-30c4-4ec8-81bf-6cba18171506\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:06:40 crc kubenswrapper[4933]: I0122 06:06:40.208809 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-scripts\") pod \"glance-default-internal-api-0\" (UID: \"a2bcbc4b-30c4-4ec8-81bf-6cba18171506\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:06:40 crc kubenswrapper[4933]: I0122 06:06:40.215260 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"a2bcbc4b-30c4-4ec8-81bf-6cba18171506\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:06:40 crc kubenswrapper[4933]: I0122 06:06:40.215700 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x9z6f\" (UniqueName: \"kubernetes.io/projected/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-kube-api-access-x9z6f\") pod \"glance-default-internal-api-0\" (UID: \"a2bcbc4b-30c4-4ec8-81bf-6cba18171506\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:06:40 crc kubenswrapper[4933]: I0122 06:06:40.244947 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"a2bcbc4b-30c4-4ec8-81bf-6cba18171506\") " pod="openstack/glance-default-internal-api-0" Jan 22 06:06:40 crc kubenswrapper[4933]: I0122 06:06:40.287740 4933 util.go:30] "No sandbox for pod can be found. 
Jan 22 06:06:40 crc kubenswrapper[4933]: I0122 06:06:40.519564 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f332d598-c2fa-4ed1-9b08-9245f85185af" path="/var/lib/kubelet/pods/f332d598-c2fa-4ed1-9b08-9245f85185af/volumes"
Jan 22 06:06:40 crc kubenswrapper[4933]: I0122 06:06:40.807940 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 22 06:06:40 crc kubenswrapper[4933]: I0122 06:06:40.891624 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"a2bcbc4b-30c4-4ec8-81bf-6cba18171506","Type":"ContainerStarted","Data":"aee258747d2f542abd72648829ad18150e8f7017099d71ed9dd21d17eb70bd91"}
Jan 22 06:06:40 crc kubenswrapper[4933]: I0122 06:06:40.897659 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b4c8b893-2e30-4273-bbec-7ff7efee686e","Type":"ContainerStarted","Data":"80ec94b4b71bb2bb6fa66b1913564ba57af12174724640c7e8888c5d410e87a8"}
Jan 22 06:06:40 crc kubenswrapper[4933]: I0122 06:06:40.944568 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 06:06:40 crc kubenswrapper[4933]: I0122 06:06:40.944939 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 06:06:41 crc kubenswrapper[4933]: E0122 06:06:41.751071 4933 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3945a3af_6972_419a_a60a_9f7b4b329fb1.slice/crio-cfbad903f2c92b82331e9650e7037487d2a1ca46f3cf0f1c99591c5b68a30640.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3945a3af_6972_419a_a60a_9f7b4b329fb1.slice/crio-conmon-cfbad903f2c92b82331e9650e7037487d2a1ca46f3cf0f1c99591c5b68a30640.scope\": RecentStats: unable to find data in memory cache]"
Jan 22 06:06:41 crc kubenswrapper[4933]: I0122 06:06:41.917214 4933 generic.go:334] "Generic (PLEG): container finished" podID="3945a3af-6972-419a-a60a-9f7b4b329fb1" containerID="cfbad903f2c92b82331e9650e7037487d2a1ca46f3cf0f1c99591c5b68a30640" exitCode=0
Jan 22 06:06:41 crc kubenswrapper[4933]: I0122 06:06:41.917282 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5fc9d4fd76-k4qh6" event={"ID":"3945a3af-6972-419a-a60a-9f7b4b329fb1","Type":"ContainerDied","Data":"cfbad903f2c92b82331e9650e7037487d2a1ca46f3cf0f1c99591c5b68a30640"}
Jan 22 06:06:41 crc kubenswrapper[4933]: I0122 06:06:41.918406 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"a2bcbc4b-30c4-4ec8-81bf-6cba18171506","Type":"ContainerStarted","Data":"ec11285d9cdded033c8043b14c5171616e5845163e0327ea1a11b2d67c958235"}
Jan 22 06:06:42 crc kubenswrapper[4933]: I0122 06:06:42.175237 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5fc9d4fd76-k4qh6"
Jan 22 06:06:42 crc kubenswrapper[4933]: I0122 06:06:42.216681 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=5.216662063 podStartE2EDuration="5.216662063s" podCreationTimestamp="2026-01-22 06:06:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:06:40.927361392 +0000 UTC m=+1248.764486755" watchObservedRunningTime="2026-01-22 06:06:42.216662063 +0000 UTC m=+1250.053787416"
Jan 22 06:06:42 crc kubenswrapper[4933]: I0122 06:06:42.326435 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/3945a3af-6972-419a-a60a-9f7b4b329fb1-config\") pod \"3945a3af-6972-419a-a60a-9f7b4b329fb1\" (UID: \"3945a3af-6972-419a-a60a-9f7b4b329fb1\") "
Jan 22 06:06:42 crc kubenswrapper[4933]: I0122 06:06:42.326563 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/3945a3af-6972-419a-a60a-9f7b4b329fb1-httpd-config\") pod \"3945a3af-6972-419a-a60a-9f7b4b329fb1\" (UID: \"3945a3af-6972-419a-a60a-9f7b4b329fb1\") "
Jan 22 06:06:42 crc kubenswrapper[4933]: I0122 06:06:42.326675 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3945a3af-6972-419a-a60a-9f7b4b329fb1-ovndb-tls-certs\") pod \"3945a3af-6972-419a-a60a-9f7b4b329fb1\" (UID: \"3945a3af-6972-419a-a60a-9f7b4b329fb1\") "
Jan 22 06:06:42 crc kubenswrapper[4933]: I0122 06:06:42.326767 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3945a3af-6972-419a-a60a-9f7b4b329fb1-combined-ca-bundle\") pod \"3945a3af-6972-419a-a60a-9f7b4b329fb1\" (UID: \"3945a3af-6972-419a-a60a-9f7b4b329fb1\") "
Jan 22 06:06:42 crc kubenswrapper[4933]: I0122 06:06:42.326824 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nv4ln\" (UniqueName: \"kubernetes.io/projected/3945a3af-6972-419a-a60a-9f7b4b329fb1-kube-api-access-nv4ln\") pod \"3945a3af-6972-419a-a60a-9f7b4b329fb1\" (UID: \"3945a3af-6972-419a-a60a-9f7b4b329fb1\") "
Jan 22 06:06:42 crc kubenswrapper[4933]: I0122 06:06:42.332527 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3945a3af-6972-419a-a60a-9f7b4b329fb1-kube-api-access-nv4ln" (OuterVolumeSpecName: "kube-api-access-nv4ln") pod "3945a3af-6972-419a-a60a-9f7b4b329fb1" (UID: "3945a3af-6972-419a-a60a-9f7b4b329fb1"). InnerVolumeSpecName "kube-api-access-nv4ln". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 06:06:42 crc kubenswrapper[4933]: I0122 06:06:42.332621 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3945a3af-6972-419a-a60a-9f7b4b329fb1-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "3945a3af-6972-419a-a60a-9f7b4b329fb1" (UID: "3945a3af-6972-419a-a60a-9f7b4b329fb1"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:06:42 crc kubenswrapper[4933]: I0122 06:06:42.386107 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3945a3af-6972-419a-a60a-9f7b4b329fb1-config" (OuterVolumeSpecName: "config") pod "3945a3af-6972-419a-a60a-9f7b4b329fb1" (UID: "3945a3af-6972-419a-a60a-9f7b4b329fb1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:06:42 crc kubenswrapper[4933]: I0122 06:06:42.404920 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3945a3af-6972-419a-a60a-9f7b4b329fb1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3945a3af-6972-419a-a60a-9f7b4b329fb1" (UID: "3945a3af-6972-419a-a60a-9f7b4b329fb1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:06:42 crc kubenswrapper[4933]: I0122 06:06:42.414270 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3945a3af-6972-419a-a60a-9f7b4b329fb1-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "3945a3af-6972-419a-a60a-9f7b4b329fb1" (UID: "3945a3af-6972-419a-a60a-9f7b4b329fb1"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:06:42 crc kubenswrapper[4933]: I0122 06:06:42.428304 4933 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/3945a3af-6972-419a-a60a-9f7b4b329fb1-httpd-config\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:42 crc kubenswrapper[4933]: I0122 06:06:42.428333 4933 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3945a3af-6972-419a-a60a-9f7b4b329fb1-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:42 crc kubenswrapper[4933]: I0122 06:06:42.428342 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3945a3af-6972-419a-a60a-9f7b4b329fb1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:42 crc kubenswrapper[4933]: I0122 06:06:42.428351 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nv4ln\" (UniqueName: \"kubernetes.io/projected/3945a3af-6972-419a-a60a-9f7b4b329fb1-kube-api-access-nv4ln\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:42 crc kubenswrapper[4933]: I0122 06:06:42.428360 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/3945a3af-6972-419a-a60a-9f7b4b329fb1-config\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:42 crc kubenswrapper[4933]: I0122 06:06:42.929357 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"a2bcbc4b-30c4-4ec8-81bf-6cba18171506","Type":"ContainerStarted","Data":"7e0168931d199a81b487119803e1944a792da4293cd792448c0c7c6cb8c0b855"} Jan 22 06:06:42 crc kubenswrapper[4933]: I0122 06:06:42.930569 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5fc9d4fd76-k4qh6" event={"ID":"3945a3af-6972-419a-a60a-9f7b4b329fb1","Type":"ContainerDied","Data":"86ac22ff6bcdaec6471813af0798064720fa8800080b411bbc4377f4965630e8"} Jan 22 06:06:42 crc kubenswrapper[4933]: I0122 06:06:42.930602 4933 scope.go:117] "RemoveContainer" containerID="be9888b51e1450feb8c2ff1783a022dd5a5c8ddb8ef679f21b8c63b678dafbea" Jan 22 06:06:42 crc kubenswrapper[4933]: I0122 06:06:42.930701 4933 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5fc9d4fd76-k4qh6" Jan 22 06:06:42 crc kubenswrapper[4933]: I0122 06:06:42.955924 4933 scope.go:117] "RemoveContainer" containerID="cfbad903f2c92b82331e9650e7037487d2a1ca46f3cf0f1c99591c5b68a30640" Jan 22 06:06:42 crc kubenswrapper[4933]: I0122 06:06:42.962842 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.9628223609999997 podStartE2EDuration="3.962822361s" podCreationTimestamp="2026-01-22 06:06:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:06:42.957870959 +0000 UTC m=+1250.794996332" watchObservedRunningTime="2026-01-22 06:06:42.962822361 +0000 UTC m=+1250.799947714" Jan 22 06:06:42 crc kubenswrapper[4933]: I0122 06:06:42.997148 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5fc9d4fd76-k4qh6"] Jan 22 06:06:43 crc kubenswrapper[4933]: I0122 06:06:43.009654 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-5fc9d4fd76-k4qh6"] Jan 22 06:06:44 crc kubenswrapper[4933]: I0122 06:06:44.500584 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3945a3af-6972-419a-a60a-9f7b4b329fb1" path="/var/lib/kubelet/pods/3945a3af-6972-419a-a60a-9f7b4b329fb1/volumes" Jan 22 06:06:44 crc kubenswrapper[4933]: I0122 06:06:44.963653 4933 generic.go:334] "Generic (PLEG): container finished" podID="4c6ad13a-b4ab-4db3-a39f-b110c543d8fb" containerID="a2020de58eac7eef7181f4e5147562a2842d385a34d8e7eff987f562ab53c5fe" exitCode=0 Jan 22 06:06:44 crc kubenswrapper[4933]: I0122 06:06:44.963737 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb","Type":"ContainerDied","Data":"a2020de58eac7eef7181f4e5147562a2842d385a34d8e7eff987f562ab53c5fe"} Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.119597 4933 util.go:48] "No ready sandbox for pod can be found. 
Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.172413 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-log-httpd\") pod \"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb\" (UID: \"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb\") "
Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.172461 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-sg-core-conf-yaml\") pod \"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb\" (UID: \"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb\") "
Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.172535 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-combined-ca-bundle\") pod \"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb\" (UID: \"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb\") "
Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.172553 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-run-httpd\") pod \"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb\" (UID: \"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb\") "
Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.172653 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-config-data\") pod \"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb\" (UID: \"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb\") "
Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.172677 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bd8j7\" (UniqueName: \"kubernetes.io/projected/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-kube-api-access-bd8j7\") pod \"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb\" (UID: \"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb\") "
Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.172704 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-scripts\") pod \"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb\" (UID: \"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb\") "
Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.173357 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "4c6ad13a-b4ab-4db3-a39f-b110c543d8fb" (UID: "4c6ad13a-b4ab-4db3-a39f-b110c543d8fb"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.173589 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "4c6ad13a-b4ab-4db3-a39f-b110c543d8fb" (UID: "4c6ad13a-b4ab-4db3-a39f-b110c543d8fb"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.178398 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-kube-api-access-bd8j7" (OuterVolumeSpecName: "kube-api-access-bd8j7") pod "4c6ad13a-b4ab-4db3-a39f-b110c543d8fb" (UID: "4c6ad13a-b4ab-4db3-a39f-b110c543d8fb"). InnerVolumeSpecName "kube-api-access-bd8j7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.189278 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-scripts" (OuterVolumeSpecName: "scripts") pod "4c6ad13a-b4ab-4db3-a39f-b110c543d8fb" (UID: "4c6ad13a-b4ab-4db3-a39f-b110c543d8fb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.203342 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "4c6ad13a-b4ab-4db3-a39f-b110c543d8fb" (UID: "4c6ad13a-b4ab-4db3-a39f-b110c543d8fb"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.275971 4933 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.276013 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bd8j7\" (UniqueName: \"kubernetes.io/projected/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-kube-api-access-bd8j7\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.276027 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.276039 4933 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.276050 4933 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.284198 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4c6ad13a-b4ab-4db3-a39f-b110c543d8fb" (UID: "4c6ad13a-b4ab-4db3-a39f-b110c543d8fb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.305494 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-config-data" (OuterVolumeSpecName: "config-data") pod "4c6ad13a-b4ab-4db3-a39f-b110c543d8fb" (UID: "4c6ad13a-b4ab-4db3-a39f-b110c543d8fb"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.378169 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.378424 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.526345 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-6mkbd"] Jan 22 06:06:45 crc kubenswrapper[4933]: E0122 06:06:45.527560 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c6ad13a-b4ab-4db3-a39f-b110c543d8fb" containerName="proxy-httpd" Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.527646 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c6ad13a-b4ab-4db3-a39f-b110c543d8fb" containerName="proxy-httpd" Jan 22 06:06:45 crc kubenswrapper[4933]: E0122 06:06:45.527713 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c6ad13a-b4ab-4db3-a39f-b110c543d8fb" containerName="ceilometer-notification-agent" Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.527764 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c6ad13a-b4ab-4db3-a39f-b110c543d8fb" containerName="ceilometer-notification-agent" Jan 22 06:06:45 crc kubenswrapper[4933]: E0122 06:06:45.527831 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c6ad13a-b4ab-4db3-a39f-b110c543d8fb" containerName="sg-core" Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.527881 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c6ad13a-b4ab-4db3-a39f-b110c543d8fb" containerName="sg-core" Jan 22 06:06:45 crc kubenswrapper[4933]: E0122 06:06:45.527932 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3945a3af-6972-419a-a60a-9f7b4b329fb1" containerName="neutron-api" Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.527984 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="3945a3af-6972-419a-a60a-9f7b4b329fb1" containerName="neutron-api" Jan 22 06:06:45 crc kubenswrapper[4933]: E0122 06:06:45.528043 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3945a3af-6972-419a-a60a-9f7b4b329fb1" containerName="neutron-httpd" Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.528119 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="3945a3af-6972-419a-a60a-9f7b4b329fb1" containerName="neutron-httpd" Jan 22 06:06:45 crc kubenswrapper[4933]: E0122 06:06:45.528186 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c6ad13a-b4ab-4db3-a39f-b110c543d8fb" containerName="ceilometer-central-agent" Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.528237 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c6ad13a-b4ab-4db3-a39f-b110c543d8fb" containerName="ceilometer-central-agent" Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.528446 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="3945a3af-6972-419a-a60a-9f7b4b329fb1" containerName="neutron-api" Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.528518 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c6ad13a-b4ab-4db3-a39f-b110c543d8fb" containerName="sg-core" Jan 22 06:06:45 
Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.528629 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="3945a3af-6972-419a-a60a-9f7b4b329fb1" containerName="neutron-httpd"
Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.528692 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c6ad13a-b4ab-4db3-a39f-b110c543d8fb" containerName="ceilometer-notification-agent"
Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.528746 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c6ad13a-b4ab-4db3-a39f-b110c543d8fb" containerName="proxy-httpd"
Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.529338 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-6mkbd"
Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.531430 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data"
Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.531645 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts"
Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.531751 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-65dnh"
Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.549383 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-6mkbd"]
Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.591049 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e-config-data\") pod \"nova-cell0-conductor-db-sync-6mkbd\" (UID: \"a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e\") " pod="openstack/nova-cell0-conductor-db-sync-6mkbd"
Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.591132 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-6mkbd\" (UID: \"a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e\") " pod="openstack/nova-cell0-conductor-db-sync-6mkbd"
Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.591263 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e-scripts\") pod \"nova-cell0-conductor-db-sync-6mkbd\" (UID: \"a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e\") " pod="openstack/nova-cell0-conductor-db-sync-6mkbd"
Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.591321 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pllqq\" (UniqueName: \"kubernetes.io/projected/a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e-kube-api-access-pllqq\") pod \"nova-cell0-conductor-db-sync-6mkbd\" (UID: \"a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e\") " pod="openstack/nova-cell0-conductor-db-sync-6mkbd"
Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.693376 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pllqq\" (UniqueName: \"kubernetes.io/projected/a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e-kube-api-access-pllqq\") pod \"nova-cell0-conductor-db-sync-6mkbd\" (UID: \"a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e\") " pod="openstack/nova-cell0-conductor-db-sync-6mkbd"
\"kubernetes.io/projected/a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e-kube-api-access-pllqq\") pod \"nova-cell0-conductor-db-sync-6mkbd\" (UID: \"a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e\") " pod="openstack/nova-cell0-conductor-db-sync-6mkbd" Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.693495 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e-config-data\") pod \"nova-cell0-conductor-db-sync-6mkbd\" (UID: \"a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e\") " pod="openstack/nova-cell0-conductor-db-sync-6mkbd" Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.693543 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-6mkbd\" (UID: \"a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e\") " pod="openstack/nova-cell0-conductor-db-sync-6mkbd" Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.693628 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e-scripts\") pod \"nova-cell0-conductor-db-sync-6mkbd\" (UID: \"a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e\") " pod="openstack/nova-cell0-conductor-db-sync-6mkbd" Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.698911 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e-config-data\") pod \"nova-cell0-conductor-db-sync-6mkbd\" (UID: \"a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e\") " pod="openstack/nova-cell0-conductor-db-sync-6mkbd" Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.701650 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-6mkbd\" (UID: \"a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e\") " pod="openstack/nova-cell0-conductor-db-sync-6mkbd" Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.707244 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e-scripts\") pod \"nova-cell0-conductor-db-sync-6mkbd\" (UID: \"a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e\") " pod="openstack/nova-cell0-conductor-db-sync-6mkbd" Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.720976 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pllqq\" (UniqueName: \"kubernetes.io/projected/a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e-kube-api-access-pllqq\") pod \"nova-cell0-conductor-db-sync-6mkbd\" (UID: \"a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e\") " pod="openstack/nova-cell0-conductor-db-sync-6mkbd" Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.894181 4933 util.go:30] "No sandbox for pod can be found. 
Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.980833 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4c6ad13a-b4ab-4db3-a39f-b110c543d8fb","Type":"ContainerDied","Data":"3706651b4df03e02a688cce2415c18d131d7f7f5f1199cce36acf8d9c366a892"}
Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.980907 4933 scope.go:117] "RemoveContainer" containerID="50d24875f78f8e422b947dad7b0bf36b02b0e1f1d85b4ea95011016a824be601"
Jan 22 06:06:45 crc kubenswrapper[4933]: I0122 06:06:45.980907 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 22 06:06:46 crc kubenswrapper[4933]: I0122 06:06:46.041065 4933 scope.go:117] "RemoveContainer" containerID="f22b8ced7a10678e9471d69d9050a4dc0c3096a40104936c2b0ef31bef437ff0"
Jan 22 06:06:46 crc kubenswrapper[4933]: I0122 06:06:46.053228 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 22 06:06:46 crc kubenswrapper[4933]: I0122 06:06:46.059976 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Jan 22 06:06:46 crc kubenswrapper[4933]: I0122 06:06:46.071917 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Jan 22 06:06:46 crc kubenswrapper[4933]: I0122 06:06:46.074372 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 22 06:06:46 crc kubenswrapper[4933]: I0122 06:06:46.078092 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Jan 22 06:06:46 crc kubenswrapper[4933]: I0122 06:06:46.078284 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Jan 22 06:06:46 crc kubenswrapper[4933]: I0122 06:06:46.082552 4933 scope.go:117] "RemoveContainer" containerID="9cb6ffe51137f02eae74717eddeffb1853f3e9b9878a68197d6fb1c2315be8e7"
Jan 22 06:06:46 crc kubenswrapper[4933]: I0122 06:06:46.090722 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 22 06:06:46 crc kubenswrapper[4933]: I0122 06:06:46.115531 4933 scope.go:117] "RemoveContainer" containerID="a2020de58eac7eef7181f4e5147562a2842d385a34d8e7eff987f562ab53c5fe"
Jan 22 06:06:46 crc kubenswrapper[4933]: I0122 06:06:46.207141 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/26baa321-d8b2-4641-9a4c-0362950faaab-log-httpd\") pod \"ceilometer-0\" (UID: \"26baa321-d8b2-4641-9a4c-0362950faaab\") " pod="openstack/ceilometer-0"
Jan 22 06:06:46 crc kubenswrapper[4933]: I0122 06:06:46.207297 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/26baa321-d8b2-4641-9a4c-0362950faaab-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"26baa321-d8b2-4641-9a4c-0362950faaab\") " pod="openstack/ceilometer-0"
Jan 22 06:06:46 crc kubenswrapper[4933]: I0122 06:06:46.207349 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/26baa321-d8b2-4641-9a4c-0362950faaab-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"26baa321-d8b2-4641-9a4c-0362950faaab\") " pod="openstack/ceilometer-0"
Jan 22 06:06:46 crc kubenswrapper[4933]: I0122 06:06:46.207593 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/26baa321-d8b2-4641-9a4c-0362950faaab-config-data\") pod \"ceilometer-0\" (UID: \"26baa321-d8b2-4641-9a4c-0362950faaab\") " pod="openstack/ceilometer-0"
Jan 22 06:06:46 crc kubenswrapper[4933]: I0122 06:06:46.207690 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/26baa321-d8b2-4641-9a4c-0362950faaab-run-httpd\") pod \"ceilometer-0\" (UID: \"26baa321-d8b2-4641-9a4c-0362950faaab\") " pod="openstack/ceilometer-0"
Jan 22 06:06:46 crc kubenswrapper[4933]: I0122 06:06:46.207750 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b5zz5\" (UniqueName: \"kubernetes.io/projected/26baa321-d8b2-4641-9a4c-0362950faaab-kube-api-access-b5zz5\") pod \"ceilometer-0\" (UID: \"26baa321-d8b2-4641-9a4c-0362950faaab\") " pod="openstack/ceilometer-0"
Jan 22 06:06:46 crc kubenswrapper[4933]: I0122 06:06:46.207791 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/26baa321-d8b2-4641-9a4c-0362950faaab-scripts\") pod \"ceilometer-0\" (UID: \"26baa321-d8b2-4641-9a4c-0362950faaab\") " pod="openstack/ceilometer-0"
Jan 22 06:06:46 crc kubenswrapper[4933]: I0122 06:06:46.309852 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/26baa321-d8b2-4641-9a4c-0362950faaab-config-data\") pod \"ceilometer-0\" (UID: \"26baa321-d8b2-4641-9a4c-0362950faaab\") " pod="openstack/ceilometer-0"
Jan 22 06:06:46 crc kubenswrapper[4933]: I0122 06:06:46.310141 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/26baa321-d8b2-4641-9a4c-0362950faaab-run-httpd\") pod \"ceilometer-0\" (UID: \"26baa321-d8b2-4641-9a4c-0362950faaab\") " pod="openstack/ceilometer-0"
Jan 22 06:06:46 crc kubenswrapper[4933]: I0122 06:06:46.310273 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b5zz5\" (UniqueName: \"kubernetes.io/projected/26baa321-d8b2-4641-9a4c-0362950faaab-kube-api-access-b5zz5\") pod \"ceilometer-0\" (UID: \"26baa321-d8b2-4641-9a4c-0362950faaab\") " pod="openstack/ceilometer-0"
Jan 22 06:06:46 crc kubenswrapper[4933]: I0122 06:06:46.310360 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/26baa321-d8b2-4641-9a4c-0362950faaab-scripts\") pod \"ceilometer-0\" (UID: \"26baa321-d8b2-4641-9a4c-0362950faaab\") " pod="openstack/ceilometer-0"
Jan 22 06:06:46 crc kubenswrapper[4933]: I0122 06:06:46.310487 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/26baa321-d8b2-4641-9a4c-0362950faaab-log-httpd\") pod \"ceilometer-0\" (UID: \"26baa321-d8b2-4641-9a4c-0362950faaab\") " pod="openstack/ceilometer-0"
Jan 22 06:06:46 crc kubenswrapper[4933]: I0122 06:06:46.310615 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/26baa321-d8b2-4641-9a4c-0362950faaab-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"26baa321-d8b2-4641-9a4c-0362950faaab\") " pod="openstack/ceilometer-0"
Jan 22 06:06:46 crc kubenswrapper[4933]: I0122 06:06:46.310696 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/26baa321-d8b2-4641-9a4c-0362950faaab-run-httpd\") pod \"ceilometer-0\" (UID: \"26baa321-d8b2-4641-9a4c-0362950faaab\") " pod="openstack/ceilometer-0"
Jan 22 06:06:46 crc kubenswrapper[4933]: I0122 06:06:46.310810 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/26baa321-d8b2-4641-9a4c-0362950faaab-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"26baa321-d8b2-4641-9a4c-0362950faaab\") " pod="openstack/ceilometer-0"
Jan 22 06:06:46 crc kubenswrapper[4933]: I0122 06:06:46.310902 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/26baa321-d8b2-4641-9a4c-0362950faaab-log-httpd\") pod \"ceilometer-0\" (UID: \"26baa321-d8b2-4641-9a4c-0362950faaab\") " pod="openstack/ceilometer-0"
Jan 22 06:06:46 crc kubenswrapper[4933]: I0122 06:06:46.315697 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/26baa321-d8b2-4641-9a4c-0362950faaab-scripts\") pod \"ceilometer-0\" (UID: \"26baa321-d8b2-4641-9a4c-0362950faaab\") " pod="openstack/ceilometer-0"
Jan 22 06:06:46 crc kubenswrapper[4933]: I0122 06:06:46.316142 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/26baa321-d8b2-4641-9a4c-0362950faaab-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"26baa321-d8b2-4641-9a4c-0362950faaab\") " pod="openstack/ceilometer-0"
Jan 22 06:06:46 crc kubenswrapper[4933]: I0122 06:06:46.316640 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/26baa321-d8b2-4641-9a4c-0362950faaab-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"26baa321-d8b2-4641-9a4c-0362950faaab\") " pod="openstack/ceilometer-0"
Jan 22 06:06:46 crc kubenswrapper[4933]: I0122 06:06:46.317155 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/26baa321-d8b2-4641-9a4c-0362950faaab-config-data\") pod \"ceilometer-0\" (UID: \"26baa321-d8b2-4641-9a4c-0362950faaab\") " pod="openstack/ceilometer-0"
Jan 22 06:06:46 crc kubenswrapper[4933]: I0122 06:06:46.325875 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b5zz5\" (UniqueName: \"kubernetes.io/projected/26baa321-d8b2-4641-9a4c-0362950faaab-kube-api-access-b5zz5\") pod \"ceilometer-0\" (UID: \"26baa321-d8b2-4641-9a4c-0362950faaab\") " pod="openstack/ceilometer-0"
Jan 22 06:06:46 crc kubenswrapper[4933]: I0122 06:06:46.414991 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-6mkbd"]
Jan 22 06:06:46 crc kubenswrapper[4933]: I0122 06:06:46.416870 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 22 06:06:46 crc kubenswrapper[4933]: I0122 06:06:46.503667 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c6ad13a-b4ab-4db3-a39f-b110c543d8fb" path="/var/lib/kubelet/pods/4c6ad13a-b4ab-4db3-a39f-b110c543d8fb/volumes"
Jan 22 06:06:46 crc kubenswrapper[4933]: I0122 06:06:46.876049 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 22 06:06:46 crc kubenswrapper[4933]: I0122 06:06:46.992754 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"26baa321-d8b2-4641-9a4c-0362950faaab","Type":"ContainerStarted","Data":"0be8c6d28519f5a99f9440e5ae3f823a028de024766de1346ab47fd23f2bf0a5"}
Jan 22 06:06:46 crc kubenswrapper[4933]: I0122 06:06:46.994456 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-6mkbd" event={"ID":"a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e","Type":"ContainerStarted","Data":"70930bf73f7e2f9a01d1d06c9405301f0ea758cfa91b7dc478f9aee6ec971c00"}
Jan 22 06:06:47 crc kubenswrapper[4933]: I0122 06:06:47.871666 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 22 06:06:48 crc kubenswrapper[4933]: I0122 06:06:48.010195 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"26baa321-d8b2-4641-9a4c-0362950faaab","Type":"ContainerStarted","Data":"791169228be3da788e8b690cb2d7e94b0162c576f65cd9cd016bba83d999e304"}
Jan 22 06:06:48 crc kubenswrapper[4933]: I0122 06:06:48.310228 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Jan 22 06:06:48 crc kubenswrapper[4933]: I0122 06:06:48.310267 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Jan 22 06:06:48 crc kubenswrapper[4933]: I0122 06:06:48.354494 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Jan 22 06:06:48 crc kubenswrapper[4933]: I0122 06:06:48.359117 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Jan 22 06:06:49 crc kubenswrapper[4933]: I0122 06:06:49.021671 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"26baa321-d8b2-4641-9a4c-0362950faaab","Type":"ContainerStarted","Data":"be2952988b99cc01c12e721c4878c85a71b73ede52b8c43ceddd48b4f419e287"}
Jan 22 06:06:49 crc kubenswrapper[4933]: I0122 06:06:49.021914 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"26baa321-d8b2-4641-9a4c-0362950faaab","Type":"ContainerStarted","Data":"d8e78ee6180e85c13ac5944edf651f15c018ec1bc79745ec52b023525763acc0"}
Jan 22 06:06:49 crc kubenswrapper[4933]: I0122 06:06:49.021930 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Jan 22 06:06:49 crc kubenswrapper[4933]: I0122 06:06:49.021943 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Jan 22 06:06:50 crc kubenswrapper[4933]: I0122 06:06:50.288948 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Jan 22 06:06:50 crc kubenswrapper[4933]: I0122 06:06:50.289216 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
pod="openstack/glance-default-internal-api-0" Jan 22 06:06:50 crc kubenswrapper[4933]: I0122 06:06:50.334953 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 22 06:06:50 crc kubenswrapper[4933]: I0122 06:06:50.339806 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 22 06:06:50 crc kubenswrapper[4933]: I0122 06:06:50.885597 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 22 06:06:50 crc kubenswrapper[4933]: I0122 06:06:50.885958 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 22 06:06:51 crc kubenswrapper[4933]: I0122 06:06:51.041968 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 22 06:06:51 crc kubenswrapper[4933]: I0122 06:06:51.042007 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 22 06:06:52 crc kubenswrapper[4933]: I0122 06:06:52.920767 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 22 06:06:52 crc kubenswrapper[4933]: I0122 06:06:52.921954 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 22 06:06:55 crc kubenswrapper[4933]: I0122 06:06:55.111137 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-6mkbd" event={"ID":"a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e","Type":"ContainerStarted","Data":"37b3e40a183805b926f93991f87b98c3916433b4779fa99bc0dfedd6dbd20491"} Jan 22 06:06:55 crc kubenswrapper[4933]: I0122 06:06:55.113642 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"26baa321-d8b2-4641-9a4c-0362950faaab","Type":"ContainerStarted","Data":"e4f11c4e07280161c9aeca8892f4cfec950f800c3c971d4e64ea4b943258655d"} Jan 22 06:06:55 crc kubenswrapper[4933]: I0122 06:06:55.113802 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="26baa321-d8b2-4641-9a4c-0362950faaab" containerName="ceilometer-central-agent" containerID="cri-o://791169228be3da788e8b690cb2d7e94b0162c576f65cd9cd016bba83d999e304" gracePeriod=30 Jan 22 06:06:55 crc kubenswrapper[4933]: I0122 06:06:55.113862 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="26baa321-d8b2-4641-9a4c-0362950faaab" containerName="proxy-httpd" containerID="cri-o://e4f11c4e07280161c9aeca8892f4cfec950f800c3c971d4e64ea4b943258655d" gracePeriod=30 Jan 22 06:06:55 crc kubenswrapper[4933]: I0122 06:06:55.113896 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="26baa321-d8b2-4641-9a4c-0362950faaab" containerName="sg-core" containerID="cri-o://be2952988b99cc01c12e721c4878c85a71b73ede52b8c43ceddd48b4f419e287" gracePeriod=30 Jan 22 06:06:55 crc kubenswrapper[4933]: I0122 06:06:55.113896 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 22 06:06:55 crc kubenswrapper[4933]: I0122 06:06:55.113925 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="26baa321-d8b2-4641-9a4c-0362950faaab" 
containerName="ceilometer-notification-agent" containerID="cri-o://d8e78ee6180e85c13ac5944edf651f15c018ec1bc79745ec52b023525763acc0" gracePeriod=30 Jan 22 06:06:55 crc kubenswrapper[4933]: I0122 06:06:55.146467 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-6mkbd" podStartSLOduration=2.289424258 podStartE2EDuration="10.146441325s" podCreationTimestamp="2026-01-22 06:06:45 +0000 UTC" firstStartedPulling="2026-01-22 06:06:46.399173755 +0000 UTC m=+1254.236299108" lastFinishedPulling="2026-01-22 06:06:54.256190822 +0000 UTC m=+1262.093316175" observedRunningTime="2026-01-22 06:06:55.12778294 +0000 UTC m=+1262.964908333" watchObservedRunningTime="2026-01-22 06:06:55.146441325 +0000 UTC m=+1262.983566688" Jan 22 06:06:55 crc kubenswrapper[4933]: I0122 06:06:55.156464 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.7850377530000001 podStartE2EDuration="9.15643366s" podCreationTimestamp="2026-01-22 06:06:46 +0000 UTC" firstStartedPulling="2026-01-22 06:06:46.880209871 +0000 UTC m=+1254.717335224" lastFinishedPulling="2026-01-22 06:06:54.251605778 +0000 UTC m=+1262.088731131" observedRunningTime="2026-01-22 06:06:55.151971308 +0000 UTC m=+1262.989096671" watchObservedRunningTime="2026-01-22 06:06:55.15643366 +0000 UTC m=+1262.993559033" Jan 22 06:06:55 crc kubenswrapper[4933]: I0122 06:06:55.946521 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.024544 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/26baa321-d8b2-4641-9a4c-0362950faaab-config-data\") pod \"26baa321-d8b2-4641-9a4c-0362950faaab\" (UID: \"26baa321-d8b2-4641-9a4c-0362950faaab\") " Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.024607 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/26baa321-d8b2-4641-9a4c-0362950faaab-sg-core-conf-yaml\") pod \"26baa321-d8b2-4641-9a4c-0362950faaab\" (UID: \"26baa321-d8b2-4641-9a4c-0362950faaab\") " Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.024677 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/26baa321-d8b2-4641-9a4c-0362950faaab-log-httpd\") pod \"26baa321-d8b2-4641-9a4c-0362950faaab\" (UID: \"26baa321-d8b2-4641-9a4c-0362950faaab\") " Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.024720 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/26baa321-d8b2-4641-9a4c-0362950faaab-combined-ca-bundle\") pod \"26baa321-d8b2-4641-9a4c-0362950faaab\" (UID: \"26baa321-d8b2-4641-9a4c-0362950faaab\") " Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.024757 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/26baa321-d8b2-4641-9a4c-0362950faaab-run-httpd\") pod \"26baa321-d8b2-4641-9a4c-0362950faaab\" (UID: \"26baa321-d8b2-4641-9a4c-0362950faaab\") " Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.024811 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b5zz5\" (UniqueName: 
\"kubernetes.io/projected/26baa321-d8b2-4641-9a4c-0362950faaab-kube-api-access-b5zz5\") pod \"26baa321-d8b2-4641-9a4c-0362950faaab\" (UID: \"26baa321-d8b2-4641-9a4c-0362950faaab\") " Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.024952 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/26baa321-d8b2-4641-9a4c-0362950faaab-scripts\") pod \"26baa321-d8b2-4641-9a4c-0362950faaab\" (UID: \"26baa321-d8b2-4641-9a4c-0362950faaab\") " Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.025269 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/26baa321-d8b2-4641-9a4c-0362950faaab-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "26baa321-d8b2-4641-9a4c-0362950faaab" (UID: "26baa321-d8b2-4641-9a4c-0362950faaab"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.025282 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/26baa321-d8b2-4641-9a4c-0362950faaab-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "26baa321-d8b2-4641-9a4c-0362950faaab" (UID: "26baa321-d8b2-4641-9a4c-0362950faaab"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.025807 4933 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/26baa321-d8b2-4641-9a4c-0362950faaab-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.025835 4933 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/26baa321-d8b2-4641-9a4c-0362950faaab-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.030889 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/26baa321-d8b2-4641-9a4c-0362950faaab-scripts" (OuterVolumeSpecName: "scripts") pod "26baa321-d8b2-4641-9a4c-0362950faaab" (UID: "26baa321-d8b2-4641-9a4c-0362950faaab"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.041052 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/26baa321-d8b2-4641-9a4c-0362950faaab-kube-api-access-b5zz5" (OuterVolumeSpecName: "kube-api-access-b5zz5") pod "26baa321-d8b2-4641-9a4c-0362950faaab" (UID: "26baa321-d8b2-4641-9a4c-0362950faaab"). InnerVolumeSpecName "kube-api-access-b5zz5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.061247 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/26baa321-d8b2-4641-9a4c-0362950faaab-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "26baa321-d8b2-4641-9a4c-0362950faaab" (UID: "26baa321-d8b2-4641-9a4c-0362950faaab"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.101926 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/26baa321-d8b2-4641-9a4c-0362950faaab-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "26baa321-d8b2-4641-9a4c-0362950faaab" (UID: "26baa321-d8b2-4641-9a4c-0362950faaab"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.123570 4933 generic.go:334] "Generic (PLEG): container finished" podID="26baa321-d8b2-4641-9a4c-0362950faaab" containerID="e4f11c4e07280161c9aeca8892f4cfec950f800c3c971d4e64ea4b943258655d" exitCode=0 Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.123604 4933 generic.go:334] "Generic (PLEG): container finished" podID="26baa321-d8b2-4641-9a4c-0362950faaab" containerID="be2952988b99cc01c12e721c4878c85a71b73ede52b8c43ceddd48b4f419e287" exitCode=2 Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.123612 4933 generic.go:334] "Generic (PLEG): container finished" podID="26baa321-d8b2-4641-9a4c-0362950faaab" containerID="d8e78ee6180e85c13ac5944edf651f15c018ec1bc79745ec52b023525763acc0" exitCode=0 Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.123619 4933 generic.go:334] "Generic (PLEG): container finished" podID="26baa321-d8b2-4641-9a4c-0362950faaab" containerID="791169228be3da788e8b690cb2d7e94b0162c576f65cd9cd016bba83d999e304" exitCode=0 Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.124376 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.124851 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"26baa321-d8b2-4641-9a4c-0362950faaab","Type":"ContainerDied","Data":"e4f11c4e07280161c9aeca8892f4cfec950f800c3c971d4e64ea4b943258655d"} Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.124910 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"26baa321-d8b2-4641-9a4c-0362950faaab","Type":"ContainerDied","Data":"be2952988b99cc01c12e721c4878c85a71b73ede52b8c43ceddd48b4f419e287"} Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.124924 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"26baa321-d8b2-4641-9a4c-0362950faaab","Type":"ContainerDied","Data":"d8e78ee6180e85c13ac5944edf651f15c018ec1bc79745ec52b023525763acc0"} Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.124934 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"26baa321-d8b2-4641-9a4c-0362950faaab","Type":"ContainerDied","Data":"791169228be3da788e8b690cb2d7e94b0162c576f65cd9cd016bba83d999e304"} Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.124943 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"26baa321-d8b2-4641-9a4c-0362950faaab","Type":"ContainerDied","Data":"0be8c6d28519f5a99f9440e5ae3f823a028de024766de1346ab47fd23f2bf0a5"} Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.124960 4933 scope.go:117] "RemoveContainer" containerID="e4f11c4e07280161c9aeca8892f4cfec950f800c3c971d4e64ea4b943258655d" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.126883 4933 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/26baa321-d8b2-4641-9a4c-0362950faaab-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.126903 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/26baa321-d8b2-4641-9a4c-0362950faaab-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.126916 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b5zz5\" (UniqueName: \"kubernetes.io/projected/26baa321-d8b2-4641-9a4c-0362950faaab-kube-api-access-b5zz5\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.126928 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/26baa321-d8b2-4641-9a4c-0362950faaab-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.133460 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/26baa321-d8b2-4641-9a4c-0362950faaab-config-data" (OuterVolumeSpecName: "config-data") pod "26baa321-d8b2-4641-9a4c-0362950faaab" (UID: "26baa321-d8b2-4641-9a4c-0362950faaab"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.151846 4933 scope.go:117] "RemoveContainer" containerID="be2952988b99cc01c12e721c4878c85a71b73ede52b8c43ceddd48b4f419e287" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.170510 4933 scope.go:117] "RemoveContainer" containerID="d8e78ee6180e85c13ac5944edf651f15c018ec1bc79745ec52b023525763acc0" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.193290 4933 scope.go:117] "RemoveContainer" containerID="791169228be3da788e8b690cb2d7e94b0162c576f65cd9cd016bba83d999e304" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.210159 4933 scope.go:117] "RemoveContainer" containerID="e4f11c4e07280161c9aeca8892f4cfec950f800c3c971d4e64ea4b943258655d" Jan 22 06:06:56 crc kubenswrapper[4933]: E0122 06:06:56.210813 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e4f11c4e07280161c9aeca8892f4cfec950f800c3c971d4e64ea4b943258655d\": container with ID starting with e4f11c4e07280161c9aeca8892f4cfec950f800c3c971d4e64ea4b943258655d not found: ID does not exist" containerID="e4f11c4e07280161c9aeca8892f4cfec950f800c3c971d4e64ea4b943258655d" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.210865 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e4f11c4e07280161c9aeca8892f4cfec950f800c3c971d4e64ea4b943258655d"} err="failed to get container status \"e4f11c4e07280161c9aeca8892f4cfec950f800c3c971d4e64ea4b943258655d\": rpc error: code = NotFound desc = could not find container \"e4f11c4e07280161c9aeca8892f4cfec950f800c3c971d4e64ea4b943258655d\": container with ID starting with e4f11c4e07280161c9aeca8892f4cfec950f800c3c971d4e64ea4b943258655d not found: ID does not exist" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.210896 4933 scope.go:117] "RemoveContainer" containerID="be2952988b99cc01c12e721c4878c85a71b73ede52b8c43ceddd48b4f419e287" Jan 22 06:06:56 crc kubenswrapper[4933]: E0122 06:06:56.211379 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"be2952988b99cc01c12e721c4878c85a71b73ede52b8c43ceddd48b4f419e287\": container with ID starting with be2952988b99cc01c12e721c4878c85a71b73ede52b8c43ceddd48b4f419e287 not found: ID does not exist" containerID="be2952988b99cc01c12e721c4878c85a71b73ede52b8c43ceddd48b4f419e287" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.211443 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be2952988b99cc01c12e721c4878c85a71b73ede52b8c43ceddd48b4f419e287"} err="failed to get container status \"be2952988b99cc01c12e721c4878c85a71b73ede52b8c43ceddd48b4f419e287\": rpc error: code = NotFound desc = could not find container \"be2952988b99cc01c12e721c4878c85a71b73ede52b8c43ceddd48b4f419e287\": container with ID starting with be2952988b99cc01c12e721c4878c85a71b73ede52b8c43ceddd48b4f419e287 not found: ID does not exist" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.211469 4933 scope.go:117] "RemoveContainer" containerID="d8e78ee6180e85c13ac5944edf651f15c018ec1bc79745ec52b023525763acc0" Jan 22 06:06:56 crc kubenswrapper[4933]: E0122 06:06:56.211936 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d8e78ee6180e85c13ac5944edf651f15c018ec1bc79745ec52b023525763acc0\": container with ID starting with d8e78ee6180e85c13ac5944edf651f15c018ec1bc79745ec52b023525763acc0 not found: ID does not exist" containerID="d8e78ee6180e85c13ac5944edf651f15c018ec1bc79745ec52b023525763acc0" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.211974 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d8e78ee6180e85c13ac5944edf651f15c018ec1bc79745ec52b023525763acc0"} err="failed to get container status \"d8e78ee6180e85c13ac5944edf651f15c018ec1bc79745ec52b023525763acc0\": rpc error: code = NotFound desc = could not find container \"d8e78ee6180e85c13ac5944edf651f15c018ec1bc79745ec52b023525763acc0\": container with ID starting with d8e78ee6180e85c13ac5944edf651f15c018ec1bc79745ec52b023525763acc0 not found: ID does not exist" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.211997 4933 scope.go:117] "RemoveContainer" containerID="791169228be3da788e8b690cb2d7e94b0162c576f65cd9cd016bba83d999e304" Jan 22 06:06:56 crc kubenswrapper[4933]: E0122 06:06:56.212402 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"791169228be3da788e8b690cb2d7e94b0162c576f65cd9cd016bba83d999e304\": container with ID starting with 791169228be3da788e8b690cb2d7e94b0162c576f65cd9cd016bba83d999e304 not found: ID does not exist" containerID="791169228be3da788e8b690cb2d7e94b0162c576f65cd9cd016bba83d999e304" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.212442 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"791169228be3da788e8b690cb2d7e94b0162c576f65cd9cd016bba83d999e304"} err="failed to get container status \"791169228be3da788e8b690cb2d7e94b0162c576f65cd9cd016bba83d999e304\": rpc error: code = NotFound desc = could not find container \"791169228be3da788e8b690cb2d7e94b0162c576f65cd9cd016bba83d999e304\": container with ID starting with 791169228be3da788e8b690cb2d7e94b0162c576f65cd9cd016bba83d999e304 not found: ID does not exist" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.212466 4933 scope.go:117] "RemoveContainer" containerID="e4f11c4e07280161c9aeca8892f4cfec950f800c3c971d4e64ea4b943258655d" Jan 22 06:06:56 crc 
kubenswrapper[4933]: I0122 06:06:56.212772 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e4f11c4e07280161c9aeca8892f4cfec950f800c3c971d4e64ea4b943258655d"} err="failed to get container status \"e4f11c4e07280161c9aeca8892f4cfec950f800c3c971d4e64ea4b943258655d\": rpc error: code = NotFound desc = could not find container \"e4f11c4e07280161c9aeca8892f4cfec950f800c3c971d4e64ea4b943258655d\": container with ID starting with e4f11c4e07280161c9aeca8892f4cfec950f800c3c971d4e64ea4b943258655d not found: ID does not exist" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.212793 4933 scope.go:117] "RemoveContainer" containerID="be2952988b99cc01c12e721c4878c85a71b73ede52b8c43ceddd48b4f419e287" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.213119 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be2952988b99cc01c12e721c4878c85a71b73ede52b8c43ceddd48b4f419e287"} err="failed to get container status \"be2952988b99cc01c12e721c4878c85a71b73ede52b8c43ceddd48b4f419e287\": rpc error: code = NotFound desc = could not find container \"be2952988b99cc01c12e721c4878c85a71b73ede52b8c43ceddd48b4f419e287\": container with ID starting with be2952988b99cc01c12e721c4878c85a71b73ede52b8c43ceddd48b4f419e287 not found: ID does not exist" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.213143 4933 scope.go:117] "RemoveContainer" containerID="d8e78ee6180e85c13ac5944edf651f15c018ec1bc79745ec52b023525763acc0" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.213471 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d8e78ee6180e85c13ac5944edf651f15c018ec1bc79745ec52b023525763acc0"} err="failed to get container status \"d8e78ee6180e85c13ac5944edf651f15c018ec1bc79745ec52b023525763acc0\": rpc error: code = NotFound desc = could not find container \"d8e78ee6180e85c13ac5944edf651f15c018ec1bc79745ec52b023525763acc0\": container with ID starting with d8e78ee6180e85c13ac5944edf651f15c018ec1bc79745ec52b023525763acc0 not found: ID does not exist" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.213494 4933 scope.go:117] "RemoveContainer" containerID="791169228be3da788e8b690cb2d7e94b0162c576f65cd9cd016bba83d999e304" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.213810 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"791169228be3da788e8b690cb2d7e94b0162c576f65cd9cd016bba83d999e304"} err="failed to get container status \"791169228be3da788e8b690cb2d7e94b0162c576f65cd9cd016bba83d999e304\": rpc error: code = NotFound desc = could not find container \"791169228be3da788e8b690cb2d7e94b0162c576f65cd9cd016bba83d999e304\": container with ID starting with 791169228be3da788e8b690cb2d7e94b0162c576f65cd9cd016bba83d999e304 not found: ID does not exist" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.213840 4933 scope.go:117] "RemoveContainer" containerID="e4f11c4e07280161c9aeca8892f4cfec950f800c3c971d4e64ea4b943258655d" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.214174 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e4f11c4e07280161c9aeca8892f4cfec950f800c3c971d4e64ea4b943258655d"} err="failed to get container status \"e4f11c4e07280161c9aeca8892f4cfec950f800c3c971d4e64ea4b943258655d\": rpc error: code = NotFound desc = could not find container \"e4f11c4e07280161c9aeca8892f4cfec950f800c3c971d4e64ea4b943258655d\": container with ID 
starting with e4f11c4e07280161c9aeca8892f4cfec950f800c3c971d4e64ea4b943258655d not found: ID does not exist" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.214200 4933 scope.go:117] "RemoveContainer" containerID="be2952988b99cc01c12e721c4878c85a71b73ede52b8c43ceddd48b4f419e287" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.214688 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be2952988b99cc01c12e721c4878c85a71b73ede52b8c43ceddd48b4f419e287"} err="failed to get container status \"be2952988b99cc01c12e721c4878c85a71b73ede52b8c43ceddd48b4f419e287\": rpc error: code = NotFound desc = could not find container \"be2952988b99cc01c12e721c4878c85a71b73ede52b8c43ceddd48b4f419e287\": container with ID starting with be2952988b99cc01c12e721c4878c85a71b73ede52b8c43ceddd48b4f419e287 not found: ID does not exist" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.214713 4933 scope.go:117] "RemoveContainer" containerID="d8e78ee6180e85c13ac5944edf651f15c018ec1bc79745ec52b023525763acc0" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.215010 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d8e78ee6180e85c13ac5944edf651f15c018ec1bc79745ec52b023525763acc0"} err="failed to get container status \"d8e78ee6180e85c13ac5944edf651f15c018ec1bc79745ec52b023525763acc0\": rpc error: code = NotFound desc = could not find container \"d8e78ee6180e85c13ac5944edf651f15c018ec1bc79745ec52b023525763acc0\": container with ID starting with d8e78ee6180e85c13ac5944edf651f15c018ec1bc79745ec52b023525763acc0 not found: ID does not exist" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.215033 4933 scope.go:117] "RemoveContainer" containerID="791169228be3da788e8b690cb2d7e94b0162c576f65cd9cd016bba83d999e304" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.215329 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"791169228be3da788e8b690cb2d7e94b0162c576f65cd9cd016bba83d999e304"} err="failed to get container status \"791169228be3da788e8b690cb2d7e94b0162c576f65cd9cd016bba83d999e304\": rpc error: code = NotFound desc = could not find container \"791169228be3da788e8b690cb2d7e94b0162c576f65cd9cd016bba83d999e304\": container with ID starting with 791169228be3da788e8b690cb2d7e94b0162c576f65cd9cd016bba83d999e304 not found: ID does not exist" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.215348 4933 scope.go:117] "RemoveContainer" containerID="e4f11c4e07280161c9aeca8892f4cfec950f800c3c971d4e64ea4b943258655d" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.215657 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e4f11c4e07280161c9aeca8892f4cfec950f800c3c971d4e64ea4b943258655d"} err="failed to get container status \"e4f11c4e07280161c9aeca8892f4cfec950f800c3c971d4e64ea4b943258655d\": rpc error: code = NotFound desc = could not find container \"e4f11c4e07280161c9aeca8892f4cfec950f800c3c971d4e64ea4b943258655d\": container with ID starting with e4f11c4e07280161c9aeca8892f4cfec950f800c3c971d4e64ea4b943258655d not found: ID does not exist" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.215681 4933 scope.go:117] "RemoveContainer" containerID="be2952988b99cc01c12e721c4878c85a71b73ede52b8c43ceddd48b4f419e287" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.215979 4933 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"be2952988b99cc01c12e721c4878c85a71b73ede52b8c43ceddd48b4f419e287"} err="failed to get container status \"be2952988b99cc01c12e721c4878c85a71b73ede52b8c43ceddd48b4f419e287\": rpc error: code = NotFound desc = could not find container \"be2952988b99cc01c12e721c4878c85a71b73ede52b8c43ceddd48b4f419e287\": container with ID starting with be2952988b99cc01c12e721c4878c85a71b73ede52b8c43ceddd48b4f419e287 not found: ID does not exist" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.216002 4933 scope.go:117] "RemoveContainer" containerID="d8e78ee6180e85c13ac5944edf651f15c018ec1bc79745ec52b023525763acc0" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.216313 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d8e78ee6180e85c13ac5944edf651f15c018ec1bc79745ec52b023525763acc0"} err="failed to get container status \"d8e78ee6180e85c13ac5944edf651f15c018ec1bc79745ec52b023525763acc0\": rpc error: code = NotFound desc = could not find container \"d8e78ee6180e85c13ac5944edf651f15c018ec1bc79745ec52b023525763acc0\": container with ID starting with d8e78ee6180e85c13ac5944edf651f15c018ec1bc79745ec52b023525763acc0 not found: ID does not exist" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.216338 4933 scope.go:117] "RemoveContainer" containerID="791169228be3da788e8b690cb2d7e94b0162c576f65cd9cd016bba83d999e304" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.216632 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"791169228be3da788e8b690cb2d7e94b0162c576f65cd9cd016bba83d999e304"} err="failed to get container status \"791169228be3da788e8b690cb2d7e94b0162c576f65cd9cd016bba83d999e304\": rpc error: code = NotFound desc = could not find container \"791169228be3da788e8b690cb2d7e94b0162c576f65cd9cd016bba83d999e304\": container with ID starting with 791169228be3da788e8b690cb2d7e94b0162c576f65cd9cd016bba83d999e304 not found: ID does not exist" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.228994 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/26baa321-d8b2-4641-9a4c-0362950faaab-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.518968 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.519355 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.540270 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:06:56 crc kubenswrapper[4933]: E0122 06:06:56.540772 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26baa321-d8b2-4641-9a4c-0362950faaab" containerName="sg-core" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.540791 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="26baa321-d8b2-4641-9a4c-0362950faaab" containerName="sg-core" Jan 22 06:06:56 crc kubenswrapper[4933]: E0122 06:06:56.540809 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26baa321-d8b2-4641-9a4c-0362950faaab" containerName="ceilometer-notification-agent" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.540820 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="26baa321-d8b2-4641-9a4c-0362950faaab" containerName="ceilometer-notification-agent" Jan 22 06:06:56 crc kubenswrapper[4933]: 
E0122 06:06:56.540849 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26baa321-d8b2-4641-9a4c-0362950faaab" containerName="proxy-httpd" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.540861 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="26baa321-d8b2-4641-9a4c-0362950faaab" containerName="proxy-httpd" Jan 22 06:06:56 crc kubenswrapper[4933]: E0122 06:06:56.540873 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26baa321-d8b2-4641-9a4c-0362950faaab" containerName="ceilometer-central-agent" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.540883 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="26baa321-d8b2-4641-9a4c-0362950faaab" containerName="ceilometer-central-agent" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.541153 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="26baa321-d8b2-4641-9a4c-0362950faaab" containerName="ceilometer-central-agent" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.541178 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="26baa321-d8b2-4641-9a4c-0362950faaab" containerName="proxy-httpd" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.541195 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="26baa321-d8b2-4641-9a4c-0362950faaab" containerName="sg-core" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.541215 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="26baa321-d8b2-4641-9a4c-0362950faaab" containerName="ceilometer-notification-agent" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.544840 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.546450 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.547663 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.559182 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.654274 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d1d7390-4868-40b5-ac43-96abd9252e04-config-data\") pod \"ceilometer-0\" (UID: \"0d1d7390-4868-40b5-ac43-96abd9252e04\") " pod="openstack/ceilometer-0" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.654334 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d1d7390-4868-40b5-ac43-96abd9252e04-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0d1d7390-4868-40b5-ac43-96abd9252e04\") " pod="openstack/ceilometer-0" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.654350 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0d1d7390-4868-40b5-ac43-96abd9252e04-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0d1d7390-4868-40b5-ac43-96abd9252e04\") " pod="openstack/ceilometer-0" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.654395 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/0d1d7390-4868-40b5-ac43-96abd9252e04-log-httpd\") pod \"ceilometer-0\" (UID: \"0d1d7390-4868-40b5-ac43-96abd9252e04\") " pod="openstack/ceilometer-0" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.654529 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0d1d7390-4868-40b5-ac43-96abd9252e04-run-httpd\") pod \"ceilometer-0\" (UID: \"0d1d7390-4868-40b5-ac43-96abd9252e04\") " pod="openstack/ceilometer-0" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.654684 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0d1d7390-4868-40b5-ac43-96abd9252e04-scripts\") pod \"ceilometer-0\" (UID: \"0d1d7390-4868-40b5-ac43-96abd9252e04\") " pod="openstack/ceilometer-0" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.654715 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lv2wl\" (UniqueName: \"kubernetes.io/projected/0d1d7390-4868-40b5-ac43-96abd9252e04-kube-api-access-lv2wl\") pod \"ceilometer-0\" (UID: \"0d1d7390-4868-40b5-ac43-96abd9252e04\") " pod="openstack/ceilometer-0" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.757357 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d1d7390-4868-40b5-ac43-96abd9252e04-config-data\") pod \"ceilometer-0\" (UID: \"0d1d7390-4868-40b5-ac43-96abd9252e04\") " pod="openstack/ceilometer-0" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.757492 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d1d7390-4868-40b5-ac43-96abd9252e04-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0d1d7390-4868-40b5-ac43-96abd9252e04\") " pod="openstack/ceilometer-0" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.757532 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0d1d7390-4868-40b5-ac43-96abd9252e04-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0d1d7390-4868-40b5-ac43-96abd9252e04\") " pod="openstack/ceilometer-0" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.757618 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0d1d7390-4868-40b5-ac43-96abd9252e04-log-httpd\") pod \"ceilometer-0\" (UID: \"0d1d7390-4868-40b5-ac43-96abd9252e04\") " pod="openstack/ceilometer-0" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.757698 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0d1d7390-4868-40b5-ac43-96abd9252e04-run-httpd\") pod \"ceilometer-0\" (UID: \"0d1d7390-4868-40b5-ac43-96abd9252e04\") " pod="openstack/ceilometer-0" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.757782 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0d1d7390-4868-40b5-ac43-96abd9252e04-scripts\") pod \"ceilometer-0\" (UID: \"0d1d7390-4868-40b5-ac43-96abd9252e04\") " pod="openstack/ceilometer-0" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.757816 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-lv2wl\" (UniqueName: \"kubernetes.io/projected/0d1d7390-4868-40b5-ac43-96abd9252e04-kube-api-access-lv2wl\") pod \"ceilometer-0\" (UID: \"0d1d7390-4868-40b5-ac43-96abd9252e04\") " pod="openstack/ceilometer-0" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.758204 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0d1d7390-4868-40b5-ac43-96abd9252e04-run-httpd\") pod \"ceilometer-0\" (UID: \"0d1d7390-4868-40b5-ac43-96abd9252e04\") " pod="openstack/ceilometer-0" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.758254 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0d1d7390-4868-40b5-ac43-96abd9252e04-log-httpd\") pod \"ceilometer-0\" (UID: \"0d1d7390-4868-40b5-ac43-96abd9252e04\") " pod="openstack/ceilometer-0" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.763720 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d1d7390-4868-40b5-ac43-96abd9252e04-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0d1d7390-4868-40b5-ac43-96abd9252e04\") " pod="openstack/ceilometer-0" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.763846 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d1d7390-4868-40b5-ac43-96abd9252e04-config-data\") pod \"ceilometer-0\" (UID: \"0d1d7390-4868-40b5-ac43-96abd9252e04\") " pod="openstack/ceilometer-0" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.776729 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0d1d7390-4868-40b5-ac43-96abd9252e04-scripts\") pod \"ceilometer-0\" (UID: \"0d1d7390-4868-40b5-ac43-96abd9252e04\") " pod="openstack/ceilometer-0" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.776843 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0d1d7390-4868-40b5-ac43-96abd9252e04-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0d1d7390-4868-40b5-ac43-96abd9252e04\") " pod="openstack/ceilometer-0" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.779046 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lv2wl\" (UniqueName: \"kubernetes.io/projected/0d1d7390-4868-40b5-ac43-96abd9252e04-kube-api-access-lv2wl\") pod \"ceilometer-0\" (UID: \"0d1d7390-4868-40b5-ac43-96abd9252e04\") " pod="openstack/ceilometer-0" Jan 22 06:06:56 crc kubenswrapper[4933]: I0122 06:06:56.876685 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 06:06:57 crc kubenswrapper[4933]: I0122 06:06:57.327439 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:06:58 crc kubenswrapper[4933]: I0122 06:06:58.140028 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0d1d7390-4868-40b5-ac43-96abd9252e04","Type":"ContainerStarted","Data":"17ef558b2e3ce5d89b1ba0871ddca9c5d4b5af10a700298681e4f6924dbb72cb"} Jan 22 06:06:58 crc kubenswrapper[4933]: I0122 06:06:58.140371 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0d1d7390-4868-40b5-ac43-96abd9252e04","Type":"ContainerStarted","Data":"9aff91099be3334d3b3ee9dd4df6de2c4639c630739aef5454544636d5562e3a"} Jan 22 06:06:58 crc kubenswrapper[4933]: I0122 06:06:58.508032 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="26baa321-d8b2-4641-9a4c-0362950faaab" path="/var/lib/kubelet/pods/26baa321-d8b2-4641-9a4c-0362950faaab/volumes" Jan 22 06:06:59 crc kubenswrapper[4933]: I0122 06:06:59.166723 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0d1d7390-4868-40b5-ac43-96abd9252e04","Type":"ContainerStarted","Data":"4a6af8ab82d0003cf894f64e04d4c95150390778b5de95d3aec191b5bbe1d176"} Jan 22 06:07:00 crc kubenswrapper[4933]: I0122 06:07:00.177206 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0d1d7390-4868-40b5-ac43-96abd9252e04","Type":"ContainerStarted","Data":"4fa912c31ace23d9b1716183d292a9805d5d84af11891c1f88df2e35b8e2b928"} Jan 22 06:07:00 crc kubenswrapper[4933]: I0122 06:07:00.362352 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="ff3d18a9-ac4c-41ab-bd2e-3c361b0f428a" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.160:3000/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 22 06:07:00 crc kubenswrapper[4933]: I0122 06:07:00.546478 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:07:01 crc kubenswrapper[4933]: I0122 06:07:01.187705 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0d1d7390-4868-40b5-ac43-96abd9252e04","Type":"ContainerStarted","Data":"44e972369ea8d679a2a1c1d9b719c971cabb2b238812ce778b99798e366f2f8a"} Jan 22 06:07:01 crc kubenswrapper[4933]: I0122 06:07:01.188469 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 22 06:07:01 crc kubenswrapper[4933]: I0122 06:07:01.188181 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0d1d7390-4868-40b5-ac43-96abd9252e04" containerName="proxy-httpd" containerID="cri-o://44e972369ea8d679a2a1c1d9b719c971cabb2b238812ce778b99798e366f2f8a" gracePeriod=30 Jan 22 06:07:01 crc kubenswrapper[4933]: I0122 06:07:01.188108 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0d1d7390-4868-40b5-ac43-96abd9252e04" containerName="ceilometer-central-agent" containerID="cri-o://17ef558b2e3ce5d89b1ba0871ddca9c5d4b5af10a700298681e4f6924dbb72cb" gracePeriod=30 Jan 22 06:07:01 crc kubenswrapper[4933]: I0122 06:07:01.188258 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0d1d7390-4868-40b5-ac43-96abd9252e04" 
containerName="sg-core" containerID="cri-o://4fa912c31ace23d9b1716183d292a9805d5d84af11891c1f88df2e35b8e2b928" gracePeriod=30 Jan 22 06:07:01 crc kubenswrapper[4933]: I0122 06:07:01.188225 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0d1d7390-4868-40b5-ac43-96abd9252e04" containerName="ceilometer-notification-agent" containerID="cri-o://4a6af8ab82d0003cf894f64e04d4c95150390778b5de95d3aec191b5bbe1d176" gracePeriod=30 Jan 22 06:07:01 crc kubenswrapper[4933]: I0122 06:07:01.214714 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.191371888 podStartE2EDuration="5.214694867s" podCreationTimestamp="2026-01-22 06:06:56 +0000 UTC" firstStartedPulling="2026-01-22 06:06:57.340330842 +0000 UTC m=+1265.177456195" lastFinishedPulling="2026-01-22 06:07:00.363653821 +0000 UTC m=+1268.200779174" observedRunningTime="2026-01-22 06:07:01.21437782 +0000 UTC m=+1269.051503173" watchObservedRunningTime="2026-01-22 06:07:01.214694867 +0000 UTC m=+1269.051820220" Jan 22 06:07:02 crc kubenswrapper[4933]: I0122 06:07:02.200058 4933 generic.go:334] "Generic (PLEG): container finished" podID="0d1d7390-4868-40b5-ac43-96abd9252e04" containerID="44e972369ea8d679a2a1c1d9b719c971cabb2b238812ce778b99798e366f2f8a" exitCode=0 Jan 22 06:07:02 crc kubenswrapper[4933]: I0122 06:07:02.200135 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0d1d7390-4868-40b5-ac43-96abd9252e04","Type":"ContainerDied","Data":"44e972369ea8d679a2a1c1d9b719c971cabb2b238812ce778b99798e366f2f8a"} Jan 22 06:07:02 crc kubenswrapper[4933]: I0122 06:07:02.200146 4933 generic.go:334] "Generic (PLEG): container finished" podID="0d1d7390-4868-40b5-ac43-96abd9252e04" containerID="4fa912c31ace23d9b1716183d292a9805d5d84af11891c1f88df2e35b8e2b928" exitCode=2 Jan 22 06:07:02 crc kubenswrapper[4933]: I0122 06:07:02.200161 4933 generic.go:334] "Generic (PLEG): container finished" podID="0d1d7390-4868-40b5-ac43-96abd9252e04" containerID="4a6af8ab82d0003cf894f64e04d4c95150390778b5de95d3aec191b5bbe1d176" exitCode=0 Jan 22 06:07:02 crc kubenswrapper[4933]: I0122 06:07:02.200174 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0d1d7390-4868-40b5-ac43-96abd9252e04","Type":"ContainerDied","Data":"4fa912c31ace23d9b1716183d292a9805d5d84af11891c1f88df2e35b8e2b928"} Jan 22 06:07:02 crc kubenswrapper[4933]: I0122 06:07:02.200186 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0d1d7390-4868-40b5-ac43-96abd9252e04","Type":"ContainerDied","Data":"4a6af8ab82d0003cf894f64e04d4c95150390778b5de95d3aec191b5bbe1d176"} Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.141973 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.195207 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d1d7390-4868-40b5-ac43-96abd9252e04-config-data\") pod \"0d1d7390-4868-40b5-ac43-96abd9252e04\" (UID: \"0d1d7390-4868-40b5-ac43-96abd9252e04\") " Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.195295 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0d1d7390-4868-40b5-ac43-96abd9252e04-sg-core-conf-yaml\") pod \"0d1d7390-4868-40b5-ac43-96abd9252e04\" (UID: \"0d1d7390-4868-40b5-ac43-96abd9252e04\") " Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.195350 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0d1d7390-4868-40b5-ac43-96abd9252e04-run-httpd\") pod \"0d1d7390-4868-40b5-ac43-96abd9252e04\" (UID: \"0d1d7390-4868-40b5-ac43-96abd9252e04\") " Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.195388 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d1d7390-4868-40b5-ac43-96abd9252e04-combined-ca-bundle\") pod \"0d1d7390-4868-40b5-ac43-96abd9252e04\" (UID: \"0d1d7390-4868-40b5-ac43-96abd9252e04\") " Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.195431 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0d1d7390-4868-40b5-ac43-96abd9252e04-log-httpd\") pod \"0d1d7390-4868-40b5-ac43-96abd9252e04\" (UID: \"0d1d7390-4868-40b5-ac43-96abd9252e04\") " Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.195474 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0d1d7390-4868-40b5-ac43-96abd9252e04-scripts\") pod \"0d1d7390-4868-40b5-ac43-96abd9252e04\" (UID: \"0d1d7390-4868-40b5-ac43-96abd9252e04\") " Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.195526 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lv2wl\" (UniqueName: \"kubernetes.io/projected/0d1d7390-4868-40b5-ac43-96abd9252e04-kube-api-access-lv2wl\") pod \"0d1d7390-4868-40b5-ac43-96abd9252e04\" (UID: \"0d1d7390-4868-40b5-ac43-96abd9252e04\") " Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.195663 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0d1d7390-4868-40b5-ac43-96abd9252e04-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "0d1d7390-4868-40b5-ac43-96abd9252e04" (UID: "0d1d7390-4868-40b5-ac43-96abd9252e04"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.195925 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0d1d7390-4868-40b5-ac43-96abd9252e04-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "0d1d7390-4868-40b5-ac43-96abd9252e04" (UID: "0d1d7390-4868-40b5-ac43-96abd9252e04"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.196140 4933 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0d1d7390-4868-40b5-ac43-96abd9252e04-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.196162 4933 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0d1d7390-4868-40b5-ac43-96abd9252e04-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.204934 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d1d7390-4868-40b5-ac43-96abd9252e04-kube-api-access-lv2wl" (OuterVolumeSpecName: "kube-api-access-lv2wl") pod "0d1d7390-4868-40b5-ac43-96abd9252e04" (UID: "0d1d7390-4868-40b5-ac43-96abd9252e04"). InnerVolumeSpecName "kube-api-access-lv2wl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.207509 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d1d7390-4868-40b5-ac43-96abd9252e04-scripts" (OuterVolumeSpecName: "scripts") pod "0d1d7390-4868-40b5-ac43-96abd9252e04" (UID: "0d1d7390-4868-40b5-ac43-96abd9252e04"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.226544 4933 generic.go:334] "Generic (PLEG): container finished" podID="0d1d7390-4868-40b5-ac43-96abd9252e04" containerID="17ef558b2e3ce5d89b1ba0871ddca9c5d4b5af10a700298681e4f6924dbb72cb" exitCode=0 Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.226616 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0d1d7390-4868-40b5-ac43-96abd9252e04","Type":"ContainerDied","Data":"17ef558b2e3ce5d89b1ba0871ddca9c5d4b5af10a700298681e4f6924dbb72cb"} Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.226657 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0d1d7390-4868-40b5-ac43-96abd9252e04","Type":"ContainerDied","Data":"9aff91099be3334d3b3ee9dd4df6de2c4639c630739aef5454544636d5562e3a"} Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.226682 4933 scope.go:117] "RemoveContainer" containerID="44e972369ea8d679a2a1c1d9b719c971cabb2b238812ce778b99798e366f2f8a" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.226841 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.232086 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d1d7390-4868-40b5-ac43-96abd9252e04-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "0d1d7390-4868-40b5-ac43-96abd9252e04" (UID: "0d1d7390-4868-40b5-ac43-96abd9252e04"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.287861 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d1d7390-4868-40b5-ac43-96abd9252e04-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0d1d7390-4868-40b5-ac43-96abd9252e04" (UID: "0d1d7390-4868-40b5-ac43-96abd9252e04"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.299263 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0d1d7390-4868-40b5-ac43-96abd9252e04-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.299318 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lv2wl\" (UniqueName: \"kubernetes.io/projected/0d1d7390-4868-40b5-ac43-96abd9252e04-kube-api-access-lv2wl\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.299337 4933 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0d1d7390-4868-40b5-ac43-96abd9252e04-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.299353 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d1d7390-4868-40b5-ac43-96abd9252e04-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.305516 4933 scope.go:117] "RemoveContainer" containerID="4fa912c31ace23d9b1716183d292a9805d5d84af11891c1f88df2e35b8e2b928" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.325228 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d1d7390-4868-40b5-ac43-96abd9252e04-config-data" (OuterVolumeSpecName: "config-data") pod "0d1d7390-4868-40b5-ac43-96abd9252e04" (UID: "0d1d7390-4868-40b5-ac43-96abd9252e04"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.337586 4933 scope.go:117] "RemoveContainer" containerID="4a6af8ab82d0003cf894f64e04d4c95150390778b5de95d3aec191b5bbe1d176" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.356041 4933 scope.go:117] "RemoveContainer" containerID="17ef558b2e3ce5d89b1ba0871ddca9c5d4b5af10a700298681e4f6924dbb72cb" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.372558 4933 scope.go:117] "RemoveContainer" containerID="44e972369ea8d679a2a1c1d9b719c971cabb2b238812ce778b99798e366f2f8a" Jan 22 06:07:04 crc kubenswrapper[4933]: E0122 06:07:04.377379 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"44e972369ea8d679a2a1c1d9b719c971cabb2b238812ce778b99798e366f2f8a\": container with ID starting with 44e972369ea8d679a2a1c1d9b719c971cabb2b238812ce778b99798e366f2f8a not found: ID does not exist" containerID="44e972369ea8d679a2a1c1d9b719c971cabb2b238812ce778b99798e366f2f8a" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.377411 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"44e972369ea8d679a2a1c1d9b719c971cabb2b238812ce778b99798e366f2f8a"} err="failed to get container status \"44e972369ea8d679a2a1c1d9b719c971cabb2b238812ce778b99798e366f2f8a\": rpc error: code = NotFound desc = could not find container \"44e972369ea8d679a2a1c1d9b719c971cabb2b238812ce778b99798e366f2f8a\": container with ID starting with 44e972369ea8d679a2a1c1d9b719c971cabb2b238812ce778b99798e366f2f8a not found: ID does not exist" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.377451 4933 scope.go:117] "RemoveContainer" containerID="4fa912c31ace23d9b1716183d292a9805d5d84af11891c1f88df2e35b8e2b928" Jan 22 06:07:04 crc kubenswrapper[4933]: 
E0122 06:07:04.377705 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4fa912c31ace23d9b1716183d292a9805d5d84af11891c1f88df2e35b8e2b928\": container with ID starting with 4fa912c31ace23d9b1716183d292a9805d5d84af11891c1f88df2e35b8e2b928 not found: ID does not exist" containerID="4fa912c31ace23d9b1716183d292a9805d5d84af11891c1f88df2e35b8e2b928" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.377736 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4fa912c31ace23d9b1716183d292a9805d5d84af11891c1f88df2e35b8e2b928"} err="failed to get container status \"4fa912c31ace23d9b1716183d292a9805d5d84af11891c1f88df2e35b8e2b928\": rpc error: code = NotFound desc = could not find container \"4fa912c31ace23d9b1716183d292a9805d5d84af11891c1f88df2e35b8e2b928\": container with ID starting with 4fa912c31ace23d9b1716183d292a9805d5d84af11891c1f88df2e35b8e2b928 not found: ID does not exist" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.377754 4933 scope.go:117] "RemoveContainer" containerID="4a6af8ab82d0003cf894f64e04d4c95150390778b5de95d3aec191b5bbe1d176" Jan 22 06:07:04 crc kubenswrapper[4933]: E0122 06:07:04.377944 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4a6af8ab82d0003cf894f64e04d4c95150390778b5de95d3aec191b5bbe1d176\": container with ID starting with 4a6af8ab82d0003cf894f64e04d4c95150390778b5de95d3aec191b5bbe1d176 not found: ID does not exist" containerID="4a6af8ab82d0003cf894f64e04d4c95150390778b5de95d3aec191b5bbe1d176" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.377982 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4a6af8ab82d0003cf894f64e04d4c95150390778b5de95d3aec191b5bbe1d176"} err="failed to get container status \"4a6af8ab82d0003cf894f64e04d4c95150390778b5de95d3aec191b5bbe1d176\": rpc error: code = NotFound desc = could not find container \"4a6af8ab82d0003cf894f64e04d4c95150390778b5de95d3aec191b5bbe1d176\": container with ID starting with 4a6af8ab82d0003cf894f64e04d4c95150390778b5de95d3aec191b5bbe1d176 not found: ID does not exist" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.377996 4933 scope.go:117] "RemoveContainer" containerID="17ef558b2e3ce5d89b1ba0871ddca9c5d4b5af10a700298681e4f6924dbb72cb" Jan 22 06:07:04 crc kubenswrapper[4933]: E0122 06:07:04.378248 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"17ef558b2e3ce5d89b1ba0871ddca9c5d4b5af10a700298681e4f6924dbb72cb\": container with ID starting with 17ef558b2e3ce5d89b1ba0871ddca9c5d4b5af10a700298681e4f6924dbb72cb not found: ID does not exist" containerID="17ef558b2e3ce5d89b1ba0871ddca9c5d4b5af10a700298681e4f6924dbb72cb" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.378267 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"17ef558b2e3ce5d89b1ba0871ddca9c5d4b5af10a700298681e4f6924dbb72cb"} err="failed to get container status \"17ef558b2e3ce5d89b1ba0871ddca9c5d4b5af10a700298681e4f6924dbb72cb\": rpc error: code = NotFound desc = could not find container \"17ef558b2e3ce5d89b1ba0871ddca9c5d4b5af10a700298681e4f6924dbb72cb\": container with ID starting with 17ef558b2e3ce5d89b1ba0871ddca9c5d4b5af10a700298681e4f6924dbb72cb not found: ID does not exist" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.401578 
4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d1d7390-4868-40b5-ac43-96abd9252e04-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.558002 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.573573 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.586632 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:07:04 crc kubenswrapper[4933]: E0122 06:07:04.587210 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d1d7390-4868-40b5-ac43-96abd9252e04" containerName="ceilometer-notification-agent" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.587242 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d1d7390-4868-40b5-ac43-96abd9252e04" containerName="ceilometer-notification-agent" Jan 22 06:07:04 crc kubenswrapper[4933]: E0122 06:07:04.587283 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d1d7390-4868-40b5-ac43-96abd9252e04" containerName="sg-core" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.587295 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d1d7390-4868-40b5-ac43-96abd9252e04" containerName="sg-core" Jan 22 06:07:04 crc kubenswrapper[4933]: E0122 06:07:04.587326 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d1d7390-4868-40b5-ac43-96abd9252e04" containerName="ceilometer-central-agent" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.587340 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d1d7390-4868-40b5-ac43-96abd9252e04" containerName="ceilometer-central-agent" Jan 22 06:07:04 crc kubenswrapper[4933]: E0122 06:07:04.587366 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d1d7390-4868-40b5-ac43-96abd9252e04" containerName="proxy-httpd" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.587377 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d1d7390-4868-40b5-ac43-96abd9252e04" containerName="proxy-httpd" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.587727 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d1d7390-4868-40b5-ac43-96abd9252e04" containerName="proxy-httpd" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.587751 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d1d7390-4868-40b5-ac43-96abd9252e04" containerName="ceilometer-central-agent" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.587771 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d1d7390-4868-40b5-ac43-96abd9252e04" containerName="sg-core" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.587801 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d1d7390-4868-40b5-ac43-96abd9252e04" containerName="ceilometer-notification-agent" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.603616 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.603735 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.606417 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.606526 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.706788 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-scqlh\" (UniqueName: \"kubernetes.io/projected/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-kube-api-access-scqlh\") pod \"ceilometer-0\" (UID: \"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988\") " pod="openstack/ceilometer-0" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.707016 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-scripts\") pod \"ceilometer-0\" (UID: \"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988\") " pod="openstack/ceilometer-0" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.707052 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-log-httpd\") pod \"ceilometer-0\" (UID: \"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988\") " pod="openstack/ceilometer-0" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.707102 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988\") " pod="openstack/ceilometer-0" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.707184 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-run-httpd\") pod \"ceilometer-0\" (UID: \"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988\") " pod="openstack/ceilometer-0" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.707235 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988\") " pod="openstack/ceilometer-0" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.707262 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-config-data\") pod \"ceilometer-0\" (UID: \"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988\") " pod="openstack/ceilometer-0" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.808956 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-scripts\") pod \"ceilometer-0\" (UID: \"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988\") " pod="openstack/ceilometer-0" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.809202 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-log-httpd\") pod \"ceilometer-0\" (UID: \"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988\") " pod="openstack/ceilometer-0" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.809243 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988\") " pod="openstack/ceilometer-0" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.809274 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-run-httpd\") pod \"ceilometer-0\" (UID: \"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988\") " pod="openstack/ceilometer-0" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.809296 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988\") " pod="openstack/ceilometer-0" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.809320 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-config-data\") pod \"ceilometer-0\" (UID: \"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988\") " pod="openstack/ceilometer-0" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.809366 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-scqlh\" (UniqueName: \"kubernetes.io/projected/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-kube-api-access-scqlh\") pod \"ceilometer-0\" (UID: \"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988\") " pod="openstack/ceilometer-0" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.810128 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-log-httpd\") pod \"ceilometer-0\" (UID: \"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988\") " pod="openstack/ceilometer-0" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.810524 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-run-httpd\") pod \"ceilometer-0\" (UID: \"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988\") " pod="openstack/ceilometer-0" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.815266 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-scripts\") pod \"ceilometer-0\" (UID: \"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988\") " pod="openstack/ceilometer-0" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.815789 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988\") " pod="openstack/ceilometer-0" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.816263 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988\") " pod="openstack/ceilometer-0" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.820718 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-config-data\") pod \"ceilometer-0\" (UID: \"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988\") " pod="openstack/ceilometer-0" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.840044 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-scqlh\" (UniqueName: \"kubernetes.io/projected/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-kube-api-access-scqlh\") pod \"ceilometer-0\" (UID: \"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988\") " pod="openstack/ceilometer-0" Jan 22 06:07:04 crc kubenswrapper[4933]: I0122 06:07:04.920045 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 06:07:05 crc kubenswrapper[4933]: I0122 06:07:05.235577 4933 generic.go:334] "Generic (PLEG): container finished" podID="a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e" containerID="37b3e40a183805b926f93991f87b98c3916433b4779fa99bc0dfedd6dbd20491" exitCode=0 Jan 22 06:07:05 crc kubenswrapper[4933]: I0122 06:07:05.235644 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-6mkbd" event={"ID":"a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e","Type":"ContainerDied","Data":"37b3e40a183805b926f93991f87b98c3916433b4779fa99bc0dfedd6dbd20491"} Jan 22 06:07:05 crc kubenswrapper[4933]: I0122 06:07:05.388869 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:07:05 crc kubenswrapper[4933]: W0122 06:07:05.389350 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7c8f35c1_1dfe_49ed_baaf_62c2a29e0988.slice/crio-8d7f101b176f2bfe22f9c4bf5caf3b2c7b0e56200ceb5a29b885baf832d35d2e WatchSource:0}: Error finding container 8d7f101b176f2bfe22f9c4bf5caf3b2c7b0e56200ceb5a29b885baf832d35d2e: Status 404 returned error can't find the container with id 8d7f101b176f2bfe22f9c4bf5caf3b2c7b0e56200ceb5a29b885baf832d35d2e Jan 22 06:07:06 crc kubenswrapper[4933]: I0122 06:07:06.256672 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988","Type":"ContainerStarted","Data":"3c0f52fe4c2eaed4847457d03b58618cf6c45098bacab8f1363cb2c9df657c71"} Jan 22 06:07:06 crc kubenswrapper[4933]: I0122 06:07:06.256892 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988","Type":"ContainerStarted","Data":"8d7f101b176f2bfe22f9c4bf5caf3b2c7b0e56200ceb5a29b885baf832d35d2e"} Jan 22 06:07:06 crc kubenswrapper[4933]: I0122 06:07:06.508472 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d1d7390-4868-40b5-ac43-96abd9252e04" path="/var/lib/kubelet/pods/0d1d7390-4868-40b5-ac43-96abd9252e04/volumes" Jan 22 06:07:06 crc kubenswrapper[4933]: I0122 06:07:06.591980 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-6mkbd" Jan 22 06:07:06 crc kubenswrapper[4933]: I0122 06:07:06.679581 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e-config-data\") pod \"a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e\" (UID: \"a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e\") " Jan 22 06:07:06 crc kubenswrapper[4933]: I0122 06:07:06.679967 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pllqq\" (UniqueName: \"kubernetes.io/projected/a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e-kube-api-access-pllqq\") pod \"a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e\" (UID: \"a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e\") " Jan 22 06:07:06 crc kubenswrapper[4933]: I0122 06:07:06.679990 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e-scripts\") pod \"a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e\" (UID: \"a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e\") " Jan 22 06:07:06 crc kubenswrapper[4933]: I0122 06:07:06.680014 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e-combined-ca-bundle\") pod \"a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e\" (UID: \"a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e\") " Jan 22 06:07:06 crc kubenswrapper[4933]: I0122 06:07:06.683830 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e-kube-api-access-pllqq" (OuterVolumeSpecName: "kube-api-access-pllqq") pod "a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e" (UID: "a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e"). InnerVolumeSpecName "kube-api-access-pllqq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:07:06 crc kubenswrapper[4933]: I0122 06:07:06.683854 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e-scripts" (OuterVolumeSpecName: "scripts") pod "a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e" (UID: "a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:07:06 crc kubenswrapper[4933]: I0122 06:07:06.704372 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e" (UID: "a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:07:06 crc kubenswrapper[4933]: I0122 06:07:06.708472 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e-config-data" (OuterVolumeSpecName: "config-data") pod "a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e" (UID: "a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:07:06 crc kubenswrapper[4933]: I0122 06:07:06.781766 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:06 crc kubenswrapper[4933]: I0122 06:07:06.781793 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pllqq\" (UniqueName: \"kubernetes.io/projected/a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e-kube-api-access-pllqq\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:06 crc kubenswrapper[4933]: I0122 06:07:06.781802 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:06 crc kubenswrapper[4933]: I0122 06:07:06.781810 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:07 crc kubenswrapper[4933]: I0122 06:07:07.272583 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-6mkbd" Jan 22 06:07:07 crc kubenswrapper[4933]: I0122 06:07:07.272574 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-6mkbd" event={"ID":"a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e","Type":"ContainerDied","Data":"70930bf73f7e2f9a01d1d06c9405301f0ea758cfa91b7dc478f9aee6ec971c00"} Jan 22 06:07:07 crc kubenswrapper[4933]: I0122 06:07:07.272659 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="70930bf73f7e2f9a01d1d06c9405301f0ea758cfa91b7dc478f9aee6ec971c00" Jan 22 06:07:07 crc kubenswrapper[4933]: I0122 06:07:07.275152 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988","Type":"ContainerStarted","Data":"f495170fc7fbbe5e36e8691d31883e00eab4de587306f0702605a87793290838"} Jan 22 06:07:07 crc kubenswrapper[4933]: I0122 06:07:07.366309 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 22 06:07:07 crc kubenswrapper[4933]: E0122 06:07:07.367126 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e" containerName="nova-cell0-conductor-db-sync" Jan 22 06:07:07 crc kubenswrapper[4933]: I0122 06:07:07.367163 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e" containerName="nova-cell0-conductor-db-sync" Jan 22 06:07:07 crc kubenswrapper[4933]: I0122 06:07:07.367671 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e" containerName="nova-cell0-conductor-db-sync" Jan 22 06:07:07 crc kubenswrapper[4933]: I0122 06:07:07.369043 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 22 06:07:07 crc kubenswrapper[4933]: I0122 06:07:07.377439 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 22 06:07:07 crc kubenswrapper[4933]: I0122 06:07:07.377744 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-65dnh" Jan 22 06:07:07 crc kubenswrapper[4933]: I0122 06:07:07.381952 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 22 06:07:07 crc kubenswrapper[4933]: I0122 06:07:07.500843 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a944aa66-1c67-4661-968f-e976494cf1eb-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"a944aa66-1c67-4661-968f-e976494cf1eb\") " pod="openstack/nova-cell0-conductor-0" Jan 22 06:07:07 crc kubenswrapper[4933]: I0122 06:07:07.501036 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z2n8x\" (UniqueName: \"kubernetes.io/projected/a944aa66-1c67-4661-968f-e976494cf1eb-kube-api-access-z2n8x\") pod \"nova-cell0-conductor-0\" (UID: \"a944aa66-1c67-4661-968f-e976494cf1eb\") " pod="openstack/nova-cell0-conductor-0" Jan 22 06:07:07 crc kubenswrapper[4933]: I0122 06:07:07.501498 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a944aa66-1c67-4661-968f-e976494cf1eb-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"a944aa66-1c67-4661-968f-e976494cf1eb\") " pod="openstack/nova-cell0-conductor-0" Jan 22 06:07:07 crc kubenswrapper[4933]: I0122 06:07:07.603536 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a944aa66-1c67-4661-968f-e976494cf1eb-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"a944aa66-1c67-4661-968f-e976494cf1eb\") " pod="openstack/nova-cell0-conductor-0" Jan 22 06:07:07 crc kubenswrapper[4933]: I0122 06:07:07.603649 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z2n8x\" (UniqueName: \"kubernetes.io/projected/a944aa66-1c67-4661-968f-e976494cf1eb-kube-api-access-z2n8x\") pod \"nova-cell0-conductor-0\" (UID: \"a944aa66-1c67-4661-968f-e976494cf1eb\") " pod="openstack/nova-cell0-conductor-0" Jan 22 06:07:07 crc kubenswrapper[4933]: I0122 06:07:07.603841 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a944aa66-1c67-4661-968f-e976494cf1eb-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"a944aa66-1c67-4661-968f-e976494cf1eb\") " pod="openstack/nova-cell0-conductor-0" Jan 22 06:07:07 crc kubenswrapper[4933]: I0122 06:07:07.610355 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a944aa66-1c67-4661-968f-e976494cf1eb-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"a944aa66-1c67-4661-968f-e976494cf1eb\") " pod="openstack/nova-cell0-conductor-0" Jan 22 06:07:07 crc kubenswrapper[4933]: I0122 06:07:07.610414 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a944aa66-1c67-4661-968f-e976494cf1eb-config-data\") pod \"nova-cell0-conductor-0\" 
(UID: \"a944aa66-1c67-4661-968f-e976494cf1eb\") " pod="openstack/nova-cell0-conductor-0" Jan 22 06:07:07 crc kubenswrapper[4933]: I0122 06:07:07.630836 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z2n8x\" (UniqueName: \"kubernetes.io/projected/a944aa66-1c67-4661-968f-e976494cf1eb-kube-api-access-z2n8x\") pod \"nova-cell0-conductor-0\" (UID: \"a944aa66-1c67-4661-968f-e976494cf1eb\") " pod="openstack/nova-cell0-conductor-0" Jan 22 06:07:07 crc kubenswrapper[4933]: I0122 06:07:07.701406 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 22 06:07:08 crc kubenswrapper[4933]: I0122 06:07:08.146932 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 22 06:07:08 crc kubenswrapper[4933]: W0122 06:07:08.160338 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda944aa66_1c67_4661_968f_e976494cf1eb.slice/crio-84c996ed7bae8a32c9c37d0037c5088f03ab24320305ef6bd0084a699814fbc9 WatchSource:0}: Error finding container 84c996ed7bae8a32c9c37d0037c5088f03ab24320305ef6bd0084a699814fbc9: Status 404 returned error can't find the container with id 84c996ed7bae8a32c9c37d0037c5088f03ab24320305ef6bd0084a699814fbc9 Jan 22 06:07:08 crc kubenswrapper[4933]: I0122 06:07:08.289471 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"a944aa66-1c67-4661-968f-e976494cf1eb","Type":"ContainerStarted","Data":"84c996ed7bae8a32c9c37d0037c5088f03ab24320305ef6bd0084a699814fbc9"} Jan 22 06:07:08 crc kubenswrapper[4933]: I0122 06:07:08.292046 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988","Type":"ContainerStarted","Data":"88e82780f3948a6a6c2385978716afab59c4599b0a203d6440d1f86439981c7f"} Jan 22 06:07:09 crc kubenswrapper[4933]: I0122 06:07:09.300666 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"a944aa66-1c67-4661-968f-e976494cf1eb","Type":"ContainerStarted","Data":"3e78db5f3e39d6ac0502c10f14b200e06daa035604cc2c40311b7a3661cf9a95"} Jan 22 06:07:09 crc kubenswrapper[4933]: I0122 06:07:09.301064 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Jan 22 06:07:09 crc kubenswrapper[4933]: I0122 06:07:09.320972 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.320953509 podStartE2EDuration="2.320953509s" podCreationTimestamp="2026-01-22 06:07:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:07:09.315993768 +0000 UTC m=+1277.153119121" watchObservedRunningTime="2026-01-22 06:07:09.320953509 +0000 UTC m=+1277.158078862" Jan 22 06:07:10 crc kubenswrapper[4933]: I0122 06:07:10.943167 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:07:10 crc kubenswrapper[4933]: I0122 06:07:10.943588 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" 
podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:07:10 crc kubenswrapper[4933]: I0122 06:07:10.943669 4933 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" Jan 22 06:07:10 crc kubenswrapper[4933]: I0122 06:07:10.944911 4933 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"55d6c6293cbc3ae4b2571461dfbc5b504ef2bc855f8799fa252e05302735e076"} pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 06:07:10 crc kubenswrapper[4933]: I0122 06:07:10.945063 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" containerID="cri-o://55d6c6293cbc3ae4b2571461dfbc5b504ef2bc855f8799fa252e05302735e076" gracePeriod=600 Jan 22 06:07:11 crc kubenswrapper[4933]: I0122 06:07:11.321175 4933 generic.go:334] "Generic (PLEG): container finished" podID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerID="55d6c6293cbc3ae4b2571461dfbc5b504ef2bc855f8799fa252e05302735e076" exitCode=0 Jan 22 06:07:11 crc kubenswrapper[4933]: I0122 06:07:11.321257 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerDied","Data":"55d6c6293cbc3ae4b2571461dfbc5b504ef2bc855f8799fa252e05302735e076"} Jan 22 06:07:11 crc kubenswrapper[4933]: I0122 06:07:11.321477 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerStarted","Data":"be33ec37115cc1139fae624e9dd3a341abdaf1a2979fabd7ece8f9fddf21ac63"} Jan 22 06:07:11 crc kubenswrapper[4933]: I0122 06:07:11.321516 4933 scope.go:117] "RemoveContainer" containerID="8400a254f9521bb3bb5af6c86bda35345893f7a13920ab409abe36fdefec266d" Jan 22 06:07:14 crc kubenswrapper[4933]: I0122 06:07:14.355706 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988","Type":"ContainerStarted","Data":"11f3d9dd82f57c65075b864eab99bd5cabbd053546d6e7859a9633b88a5506f6"} Jan 22 06:07:14 crc kubenswrapper[4933]: I0122 06:07:14.356397 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 22 06:07:14 crc kubenswrapper[4933]: I0122 06:07:14.398505 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.531627875 podStartE2EDuration="10.398485613s" podCreationTimestamp="2026-01-22 06:07:04 +0000 UTC" firstStartedPulling="2026-01-22 06:07:05.393747209 +0000 UTC m=+1273.230872562" lastFinishedPulling="2026-01-22 06:07:13.260604947 +0000 UTC m=+1281.097730300" observedRunningTime="2026-01-22 06:07:14.380394771 +0000 UTC m=+1282.217520174" watchObservedRunningTime="2026-01-22 06:07:14.398485613 +0000 UTC m=+1282.235610956" Jan 22 06:07:17 crc kubenswrapper[4933]: I0122 06:07:17.748960 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openstack/nova-cell0-conductor-0" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.379911 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-2cdpf"] Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.381691 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-2cdpf" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.385436 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.388539 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.399227 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-2cdpf"] Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.521865 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e591255a-6edc-40de-a50f-2f39d3e9bb59-config-data\") pod \"nova-cell0-cell-mapping-2cdpf\" (UID: \"e591255a-6edc-40de-a50f-2f39d3e9bb59\") " pod="openstack/nova-cell0-cell-mapping-2cdpf" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.521961 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qjk2f\" (UniqueName: \"kubernetes.io/projected/e591255a-6edc-40de-a50f-2f39d3e9bb59-kube-api-access-qjk2f\") pod \"nova-cell0-cell-mapping-2cdpf\" (UID: \"e591255a-6edc-40de-a50f-2f39d3e9bb59\") " pod="openstack/nova-cell0-cell-mapping-2cdpf" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.521999 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e591255a-6edc-40de-a50f-2f39d3e9bb59-scripts\") pod \"nova-cell0-cell-mapping-2cdpf\" (UID: \"e591255a-6edc-40de-a50f-2f39d3e9bb59\") " pod="openstack/nova-cell0-cell-mapping-2cdpf" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.522029 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e591255a-6edc-40de-a50f-2f39d3e9bb59-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-2cdpf\" (UID: \"e591255a-6edc-40de-a50f-2f39d3e9bb59\") " pod="openstack/nova-cell0-cell-mapping-2cdpf" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.571340 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.587854 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.597521 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.635491 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.642041 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e591255a-6edc-40de-a50f-2f39d3e9bb59-config-data\") pod \"nova-cell0-cell-mapping-2cdpf\" (UID: \"e591255a-6edc-40de-a50f-2f39d3e9bb59\") " pod="openstack/nova-cell0-cell-mapping-2cdpf" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.645087 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e591255a-6edc-40de-a50f-2f39d3e9bb59-config-data\") pod \"nova-cell0-cell-mapping-2cdpf\" (UID: \"e591255a-6edc-40de-a50f-2f39d3e9bb59\") " pod="openstack/nova-cell0-cell-mapping-2cdpf" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.663609 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.665099 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.667923 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qjk2f\" (UniqueName: \"kubernetes.io/projected/e591255a-6edc-40de-a50f-2f39d3e9bb59-kube-api-access-qjk2f\") pod \"nova-cell0-cell-mapping-2cdpf\" (UID: \"e591255a-6edc-40de-a50f-2f39d3e9bb59\") " pod="openstack/nova-cell0-cell-mapping-2cdpf" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.668016 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e591255a-6edc-40de-a50f-2f39d3e9bb59-scripts\") pod \"nova-cell0-cell-mapping-2cdpf\" (UID: \"e591255a-6edc-40de-a50f-2f39d3e9bb59\") " pod="openstack/nova-cell0-cell-mapping-2cdpf" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.668100 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e591255a-6edc-40de-a50f-2f39d3e9bb59-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-2cdpf\" (UID: \"e591255a-6edc-40de-a50f-2f39d3e9bb59\") " pod="openstack/nova-cell0-cell-mapping-2cdpf" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.669630 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.674205 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e591255a-6edc-40de-a50f-2f39d3e9bb59-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-2cdpf\" (UID: \"e591255a-6edc-40de-a50f-2f39d3e9bb59\") " pod="openstack/nova-cell0-cell-mapping-2cdpf" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.679503 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e591255a-6edc-40de-a50f-2f39d3e9bb59-scripts\") pod \"nova-cell0-cell-mapping-2cdpf\" (UID: \"e591255a-6edc-40de-a50f-2f39d3e9bb59\") " 
pod="openstack/nova-cell0-cell-mapping-2cdpf" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.697001 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qjk2f\" (UniqueName: \"kubernetes.io/projected/e591255a-6edc-40de-a50f-2f39d3e9bb59-kube-api-access-qjk2f\") pod \"nova-cell0-cell-mapping-2cdpf\" (UID: \"e591255a-6edc-40de-a50f-2f39d3e9bb59\") " pod="openstack/nova-cell0-cell-mapping-2cdpf" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.698605 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.736110 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-2cdpf" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.768146 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.769577 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mcm2r\" (UniqueName: \"kubernetes.io/projected/d7d92b38-1d4a-4d36-8c30-c40f747cef77-kube-api-access-mcm2r\") pod \"nova-api-0\" (UID: \"d7d92b38-1d4a-4d36-8c30-c40f747cef77\") " pod="openstack/nova-api-0" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.769692 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5467292b-d832-437c-9f0f-41441d0da350-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"5467292b-d832-437c-9f0f-41441d0da350\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.769779 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d7d92b38-1d4a-4d36-8c30-c40f747cef77-logs\") pod \"nova-api-0\" (UID: \"d7d92b38-1d4a-4d36-8c30-c40f747cef77\") " pod="openstack/nova-api-0" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.769864 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5467292b-d832-437c-9f0f-41441d0da350-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"5467292b-d832-437c-9f0f-41441d0da350\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.770013 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s4xpd\" (UniqueName: \"kubernetes.io/projected/5467292b-d832-437c-9f0f-41441d0da350-kube-api-access-s4xpd\") pod \"nova-cell1-novncproxy-0\" (UID: \"5467292b-d832-437c-9f0f-41441d0da350\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.769589 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.770226 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7d92b38-1d4a-4d36-8c30-c40f747cef77-config-data\") pod \"nova-api-0\" (UID: \"d7d92b38-1d4a-4d36-8c30-c40f747cef77\") " pod="openstack/nova-api-0" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.770322 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7d92b38-1d4a-4d36-8c30-c40f747cef77-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d7d92b38-1d4a-4d36-8c30-c40f747cef77\") " pod="openstack/nova-api-0" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.782773 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.795419 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.814133 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.815652 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.817272 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.840486 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.874187 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7d92b38-1d4a-4d36-8c30-c40f747cef77-config-data\") pod \"nova-api-0\" (UID: \"d7d92b38-1d4a-4d36-8c30-c40f747cef77\") " pod="openstack/nova-api-0" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.874246 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7d92b38-1d4a-4d36-8c30-c40f747cef77-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d7d92b38-1d4a-4d36-8c30-c40f747cef77\") " pod="openstack/nova-api-0" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.874278 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mcm2r\" (UniqueName: \"kubernetes.io/projected/d7d92b38-1d4a-4d36-8c30-c40f747cef77-kube-api-access-mcm2r\") pod \"nova-api-0\" (UID: \"d7d92b38-1d4a-4d36-8c30-c40f747cef77\") " pod="openstack/nova-api-0" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.874299 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5467292b-d832-437c-9f0f-41441d0da350-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"5467292b-d832-437c-9f0f-41441d0da350\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.874319 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d7d92b38-1d4a-4d36-8c30-c40f747cef77-logs\") pod \"nova-api-0\" (UID: \"d7d92b38-1d4a-4d36-8c30-c40f747cef77\") " pod="openstack/nova-api-0" Jan 22 06:07:18 crc 
kubenswrapper[4933]: I0122 06:07:18.874346 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5467292b-d832-437c-9f0f-41441d0da350-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"5467292b-d832-437c-9f0f-41441d0da350\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.874366 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2p9cf\" (UniqueName: \"kubernetes.io/projected/55aecb5f-899c-40e3-9651-0faedac5f801-kube-api-access-2p9cf\") pod \"nova-scheduler-0\" (UID: \"55aecb5f-899c-40e3-9651-0faedac5f801\") " pod="openstack/nova-scheduler-0" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.874401 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55aecb5f-899c-40e3-9651-0faedac5f801-config-data\") pod \"nova-scheduler-0\" (UID: \"55aecb5f-899c-40e3-9651-0faedac5f801\") " pod="openstack/nova-scheduler-0" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.874456 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55aecb5f-899c-40e3-9651-0faedac5f801-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"55aecb5f-899c-40e3-9651-0faedac5f801\") " pod="openstack/nova-scheduler-0" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.874478 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s4xpd\" (UniqueName: \"kubernetes.io/projected/5467292b-d832-437c-9f0f-41441d0da350-kube-api-access-s4xpd\") pod \"nova-cell1-novncproxy-0\" (UID: \"5467292b-d832-437c-9f0f-41441d0da350\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.874971 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d7d92b38-1d4a-4d36-8c30-c40f747cef77-logs\") pod \"nova-api-0\" (UID: \"d7d92b38-1d4a-4d36-8c30-c40f747cef77\") " pod="openstack/nova-api-0" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.878860 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5467292b-d832-437c-9f0f-41441d0da350-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"5467292b-d832-437c-9f0f-41441d0da350\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.881424 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7d92b38-1d4a-4d36-8c30-c40f747cef77-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d7d92b38-1d4a-4d36-8c30-c40f747cef77\") " pod="openstack/nova-api-0" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.890800 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5467292b-d832-437c-9f0f-41441d0da350-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"5467292b-d832-437c-9f0f-41441d0da350\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.890815 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/d7d92b38-1d4a-4d36-8c30-c40f747cef77-config-data\") pod \"nova-api-0\" (UID: \"d7d92b38-1d4a-4d36-8c30-c40f747cef77\") " pod="openstack/nova-api-0" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.895618 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s4xpd\" (UniqueName: \"kubernetes.io/projected/5467292b-d832-437c-9f0f-41441d0da350-kube-api-access-s4xpd\") pod \"nova-cell1-novncproxy-0\" (UID: \"5467292b-d832-437c-9f0f-41441d0da350\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.898430 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mcm2r\" (UniqueName: \"kubernetes.io/projected/d7d92b38-1d4a-4d36-8c30-c40f747cef77-kube-api-access-mcm2r\") pod \"nova-api-0\" (UID: \"d7d92b38-1d4a-4d36-8c30-c40f747cef77\") " pod="openstack/nova-api-0" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.919404 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-557bbc7df7-xxdtg"] Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.921029 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-557bbc7df7-xxdtg" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.941905 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-557bbc7df7-xxdtg"] Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.961877 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.976099 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba039abf-7ec7-43bb-ad72-e2abd0afe439-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"ba039abf-7ec7-43bb-ad72-e2abd0afe439\") " pod="openstack/nova-metadata-0" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.976194 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba039abf-7ec7-43bb-ad72-e2abd0afe439-logs\") pod \"nova-metadata-0\" (UID: \"ba039abf-7ec7-43bb-ad72-e2abd0afe439\") " pod="openstack/nova-metadata-0" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.976307 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-78wqq\" (UniqueName: \"kubernetes.io/projected/ba039abf-7ec7-43bb-ad72-e2abd0afe439-kube-api-access-78wqq\") pod \"nova-metadata-0\" (UID: \"ba039abf-7ec7-43bb-ad72-e2abd0afe439\") " pod="openstack/nova-metadata-0" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.976375 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2p9cf\" (UniqueName: \"kubernetes.io/projected/55aecb5f-899c-40e3-9651-0faedac5f801-kube-api-access-2p9cf\") pod \"nova-scheduler-0\" (UID: \"55aecb5f-899c-40e3-9651-0faedac5f801\") " pod="openstack/nova-scheduler-0" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.976480 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55aecb5f-899c-40e3-9651-0faedac5f801-config-data\") pod \"nova-scheduler-0\" (UID: \"55aecb5f-899c-40e3-9651-0faedac5f801\") " pod="openstack/nova-scheduler-0" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.976611 4933 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba039abf-7ec7-43bb-ad72-e2abd0afe439-config-data\") pod \"nova-metadata-0\" (UID: \"ba039abf-7ec7-43bb-ad72-e2abd0afe439\") " pod="openstack/nova-metadata-0" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.976641 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55aecb5f-899c-40e3-9651-0faedac5f801-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"55aecb5f-899c-40e3-9651-0faedac5f801\") " pod="openstack/nova-scheduler-0" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.981796 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55aecb5f-899c-40e3-9651-0faedac5f801-config-data\") pod \"nova-scheduler-0\" (UID: \"55aecb5f-899c-40e3-9651-0faedac5f801\") " pod="openstack/nova-scheduler-0" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.982413 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55aecb5f-899c-40e3-9651-0faedac5f801-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"55aecb5f-899c-40e3-9651-0faedac5f801\") " pod="openstack/nova-scheduler-0" Jan 22 06:07:18 crc kubenswrapper[4933]: I0122 06:07:18.997505 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2p9cf\" (UniqueName: \"kubernetes.io/projected/55aecb5f-899c-40e3-9651-0faedac5f801-kube-api-access-2p9cf\") pod \"nova-scheduler-0\" (UID: \"55aecb5f-899c-40e3-9651-0faedac5f801\") " pod="openstack/nova-scheduler-0" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.080239 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0206df56-62b3-4d6a-87d3-2819fec42c00-ovsdbserver-sb\") pod \"dnsmasq-dns-557bbc7df7-xxdtg\" (UID: \"0206df56-62b3-4d6a-87d3-2819fec42c00\") " pod="openstack/dnsmasq-dns-557bbc7df7-xxdtg" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.080294 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ct5k4\" (UniqueName: \"kubernetes.io/projected/0206df56-62b3-4d6a-87d3-2819fec42c00-kube-api-access-ct5k4\") pod \"dnsmasq-dns-557bbc7df7-xxdtg\" (UID: \"0206df56-62b3-4d6a-87d3-2819fec42c00\") " pod="openstack/dnsmasq-dns-557bbc7df7-xxdtg" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.080325 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-78wqq\" (UniqueName: \"kubernetes.io/projected/ba039abf-7ec7-43bb-ad72-e2abd0afe439-kube-api-access-78wqq\") pod \"nova-metadata-0\" (UID: \"ba039abf-7ec7-43bb-ad72-e2abd0afe439\") " pod="openstack/nova-metadata-0" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.080353 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0206df56-62b3-4d6a-87d3-2819fec42c00-config\") pod \"dnsmasq-dns-557bbc7df7-xxdtg\" (UID: \"0206df56-62b3-4d6a-87d3-2819fec42c00\") " pod="openstack/dnsmasq-dns-557bbc7df7-xxdtg" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.080414 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/ba039abf-7ec7-43bb-ad72-e2abd0afe439-config-data\") pod \"nova-metadata-0\" (UID: \"ba039abf-7ec7-43bb-ad72-e2abd0afe439\") " pod="openstack/nova-metadata-0" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.080433 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0206df56-62b3-4d6a-87d3-2819fec42c00-dns-swift-storage-0\") pod \"dnsmasq-dns-557bbc7df7-xxdtg\" (UID: \"0206df56-62b3-4d6a-87d3-2819fec42c00\") " pod="openstack/dnsmasq-dns-557bbc7df7-xxdtg" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.080458 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba039abf-7ec7-43bb-ad72-e2abd0afe439-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"ba039abf-7ec7-43bb-ad72-e2abd0afe439\") " pod="openstack/nova-metadata-0" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.080486 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0206df56-62b3-4d6a-87d3-2819fec42c00-ovsdbserver-nb\") pod \"dnsmasq-dns-557bbc7df7-xxdtg\" (UID: \"0206df56-62b3-4d6a-87d3-2819fec42c00\") " pod="openstack/dnsmasq-dns-557bbc7df7-xxdtg" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.084054 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0206df56-62b3-4d6a-87d3-2819fec42c00-dns-svc\") pod \"dnsmasq-dns-557bbc7df7-xxdtg\" (UID: \"0206df56-62b3-4d6a-87d3-2819fec42c00\") " pod="openstack/dnsmasq-dns-557bbc7df7-xxdtg" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.084175 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba039abf-7ec7-43bb-ad72-e2abd0afe439-logs\") pod \"nova-metadata-0\" (UID: \"ba039abf-7ec7-43bb-ad72-e2abd0afe439\") " pod="openstack/nova-metadata-0" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.087661 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba039abf-7ec7-43bb-ad72-e2abd0afe439-logs\") pod \"nova-metadata-0\" (UID: \"ba039abf-7ec7-43bb-ad72-e2abd0afe439\") " pod="openstack/nova-metadata-0" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.092585 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba039abf-7ec7-43bb-ad72-e2abd0afe439-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"ba039abf-7ec7-43bb-ad72-e2abd0afe439\") " pod="openstack/nova-metadata-0" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.092989 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba039abf-7ec7-43bb-ad72-e2abd0afe439-config-data\") pod \"nova-metadata-0\" (UID: \"ba039abf-7ec7-43bb-ad72-e2abd0afe439\") " pod="openstack/nova-metadata-0" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.103272 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-78wqq\" (UniqueName: \"kubernetes.io/projected/ba039abf-7ec7-43bb-ad72-e2abd0afe439-kube-api-access-78wqq\") pod \"nova-metadata-0\" (UID: \"ba039abf-7ec7-43bb-ad72-e2abd0afe439\") " pod="openstack/nova-metadata-0" Jan 22 06:07:19 crc 
kubenswrapper[4933]: I0122 06:07:19.174257 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.185511 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0206df56-62b3-4d6a-87d3-2819fec42c00-dns-svc\") pod \"dnsmasq-dns-557bbc7df7-xxdtg\" (UID: \"0206df56-62b3-4d6a-87d3-2819fec42c00\") " pod="openstack/dnsmasq-dns-557bbc7df7-xxdtg" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.185598 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0206df56-62b3-4d6a-87d3-2819fec42c00-ovsdbserver-sb\") pod \"dnsmasq-dns-557bbc7df7-xxdtg\" (UID: \"0206df56-62b3-4d6a-87d3-2819fec42c00\") " pod="openstack/dnsmasq-dns-557bbc7df7-xxdtg" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.185627 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ct5k4\" (UniqueName: \"kubernetes.io/projected/0206df56-62b3-4d6a-87d3-2819fec42c00-kube-api-access-ct5k4\") pod \"dnsmasq-dns-557bbc7df7-xxdtg\" (UID: \"0206df56-62b3-4d6a-87d3-2819fec42c00\") " pod="openstack/dnsmasq-dns-557bbc7df7-xxdtg" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.185661 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0206df56-62b3-4d6a-87d3-2819fec42c00-config\") pod \"dnsmasq-dns-557bbc7df7-xxdtg\" (UID: \"0206df56-62b3-4d6a-87d3-2819fec42c00\") " pod="openstack/dnsmasq-dns-557bbc7df7-xxdtg" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.185713 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0206df56-62b3-4d6a-87d3-2819fec42c00-dns-swift-storage-0\") pod \"dnsmasq-dns-557bbc7df7-xxdtg\" (UID: \"0206df56-62b3-4d6a-87d3-2819fec42c00\") " pod="openstack/dnsmasq-dns-557bbc7df7-xxdtg" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.185751 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0206df56-62b3-4d6a-87d3-2819fec42c00-ovsdbserver-nb\") pod \"dnsmasq-dns-557bbc7df7-xxdtg\" (UID: \"0206df56-62b3-4d6a-87d3-2819fec42c00\") " pod="openstack/dnsmasq-dns-557bbc7df7-xxdtg" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.186749 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0206df56-62b3-4d6a-87d3-2819fec42c00-ovsdbserver-nb\") pod \"dnsmasq-dns-557bbc7df7-xxdtg\" (UID: \"0206df56-62b3-4d6a-87d3-2819fec42c00\") " pod="openstack/dnsmasq-dns-557bbc7df7-xxdtg" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.187305 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0206df56-62b3-4d6a-87d3-2819fec42c00-dns-svc\") pod \"dnsmasq-dns-557bbc7df7-xxdtg\" (UID: \"0206df56-62b3-4d6a-87d3-2819fec42c00\") " pod="openstack/dnsmasq-dns-557bbc7df7-xxdtg" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.187840 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0206df56-62b3-4d6a-87d3-2819fec42c00-ovsdbserver-sb\") pod \"dnsmasq-dns-557bbc7df7-xxdtg\" (UID: 
\"0206df56-62b3-4d6a-87d3-2819fec42c00\") " pod="openstack/dnsmasq-dns-557bbc7df7-xxdtg" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.187864 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0206df56-62b3-4d6a-87d3-2819fec42c00-config\") pod \"dnsmasq-dns-557bbc7df7-xxdtg\" (UID: \"0206df56-62b3-4d6a-87d3-2819fec42c00\") " pod="openstack/dnsmasq-dns-557bbc7df7-xxdtg" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.188430 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0206df56-62b3-4d6a-87d3-2819fec42c00-dns-swift-storage-0\") pod \"dnsmasq-dns-557bbc7df7-xxdtg\" (UID: \"0206df56-62b3-4d6a-87d3-2819fec42c00\") " pod="openstack/dnsmasq-dns-557bbc7df7-xxdtg" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.192597 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.202243 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.213311 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ct5k4\" (UniqueName: \"kubernetes.io/projected/0206df56-62b3-4d6a-87d3-2819fec42c00-kube-api-access-ct5k4\") pod \"dnsmasq-dns-557bbc7df7-xxdtg\" (UID: \"0206df56-62b3-4d6a-87d3-2819fec42c00\") " pod="openstack/dnsmasq-dns-557bbc7df7-xxdtg" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.262147 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-557bbc7df7-xxdtg" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.307983 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-2cdpf"] Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.433375 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-2cdpf" event={"ID":"e591255a-6edc-40de-a50f-2f39d3e9bb59","Type":"ContainerStarted","Data":"eecf5e6aa6b53e3a1692471b572464acf0d30fb0569d608fb61ead76de27253b"} Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.467611 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.476772 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-x8sfh"] Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.478299 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-x8sfh" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.481709 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.481766 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.491743 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9266f02b-3fef-4566-a9df-9b570f24d845-scripts\") pod \"nova-cell1-conductor-db-sync-x8sfh\" (UID: \"9266f02b-3fef-4566-a9df-9b570f24d845\") " pod="openstack/nova-cell1-conductor-db-sync-x8sfh" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.497433 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9266f02b-3fef-4566-a9df-9b570f24d845-config-data\") pod \"nova-cell1-conductor-db-sync-x8sfh\" (UID: \"9266f02b-3fef-4566-a9df-9b570f24d845\") " pod="openstack/nova-cell1-conductor-db-sync-x8sfh" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.498047 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9266f02b-3fef-4566-a9df-9b570f24d845-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-x8sfh\" (UID: \"9266f02b-3fef-4566-a9df-9b570f24d845\") " pod="openstack/nova-cell1-conductor-db-sync-x8sfh" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.498183 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-78phw\" (UniqueName: \"kubernetes.io/projected/9266f02b-3fef-4566-a9df-9b570f24d845-kube-api-access-78phw\") pod \"nova-cell1-conductor-db-sync-x8sfh\" (UID: \"9266f02b-3fef-4566-a9df-9b570f24d845\") " pod="openstack/nova-cell1-conductor-db-sync-x8sfh" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.503700 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-x8sfh"] Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.556345 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.600335 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9266f02b-3fef-4566-a9df-9b570f24d845-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-x8sfh\" (UID: \"9266f02b-3fef-4566-a9df-9b570f24d845\") " pod="openstack/nova-cell1-conductor-db-sync-x8sfh" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.600538 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-78phw\" (UniqueName: \"kubernetes.io/projected/9266f02b-3fef-4566-a9df-9b570f24d845-kube-api-access-78phw\") pod \"nova-cell1-conductor-db-sync-x8sfh\" (UID: \"9266f02b-3fef-4566-a9df-9b570f24d845\") " pod="openstack/nova-cell1-conductor-db-sync-x8sfh" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.600620 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9266f02b-3fef-4566-a9df-9b570f24d845-scripts\") pod \"nova-cell1-conductor-db-sync-x8sfh\" (UID: 
\"9266f02b-3fef-4566-a9df-9b570f24d845\") " pod="openstack/nova-cell1-conductor-db-sync-x8sfh" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.600646 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9266f02b-3fef-4566-a9df-9b570f24d845-config-data\") pod \"nova-cell1-conductor-db-sync-x8sfh\" (UID: \"9266f02b-3fef-4566-a9df-9b570f24d845\") " pod="openstack/nova-cell1-conductor-db-sync-x8sfh" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.606388 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9266f02b-3fef-4566-a9df-9b570f24d845-config-data\") pod \"nova-cell1-conductor-db-sync-x8sfh\" (UID: \"9266f02b-3fef-4566-a9df-9b570f24d845\") " pod="openstack/nova-cell1-conductor-db-sync-x8sfh" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.612541 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9266f02b-3fef-4566-a9df-9b570f24d845-scripts\") pod \"nova-cell1-conductor-db-sync-x8sfh\" (UID: \"9266f02b-3fef-4566-a9df-9b570f24d845\") " pod="openstack/nova-cell1-conductor-db-sync-x8sfh" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.613030 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9266f02b-3fef-4566-a9df-9b570f24d845-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-x8sfh\" (UID: \"9266f02b-3fef-4566-a9df-9b570f24d845\") " pod="openstack/nova-cell1-conductor-db-sync-x8sfh" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.619290 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-78phw\" (UniqueName: \"kubernetes.io/projected/9266f02b-3fef-4566-a9df-9b570f24d845-kube-api-access-78phw\") pod \"nova-cell1-conductor-db-sync-x8sfh\" (UID: \"9266f02b-3fef-4566-a9df-9b570f24d845\") " pod="openstack/nova-cell1-conductor-db-sync-x8sfh" Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.827818 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-x8sfh" Jan 22 06:07:19 crc kubenswrapper[4933]: W0122 06:07:19.837307 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod55aecb5f_899c_40e3_9651_0faedac5f801.slice/crio-9cfd15393b7e1a71b468baabe1e2e0935069bf33d0506df2b515aa3d9762d5c9 WatchSource:0}: Error finding container 9cfd15393b7e1a71b468baabe1e2e0935069bf33d0506df2b515aa3d9762d5c9: Status 404 returned error can't find the container with id 9cfd15393b7e1a71b468baabe1e2e0935069bf33d0506df2b515aa3d9762d5c9 Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.853890 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.919555 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 06:07:19 crc kubenswrapper[4933]: I0122 06:07:19.997547 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-557bbc7df7-xxdtg"] Jan 22 06:07:20 crc kubenswrapper[4933]: W0122 06:07:20.012602 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0206df56_62b3_4d6a_87d3_2819fec42c00.slice/crio-b0e34ad26ad2323dab47af2977a844caa7ef407dc67659a452f2b11588c07d5e WatchSource:0}: Error finding container b0e34ad26ad2323dab47af2977a844caa7ef407dc67659a452f2b11588c07d5e: Status 404 returned error can't find the container with id b0e34ad26ad2323dab47af2977a844caa7ef407dc67659a452f2b11588c07d5e Jan 22 06:07:20 crc kubenswrapper[4933]: I0122 06:07:20.336756 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-x8sfh"] Jan 22 06:07:20 crc kubenswrapper[4933]: W0122 06:07:20.345409 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9266f02b_3fef_4566_a9df_9b570f24d845.slice/crio-5507701f3394ae59cee1301e7193d193945cf8de1097ef531c79eada3cc4f65f WatchSource:0}: Error finding container 5507701f3394ae59cee1301e7193d193945cf8de1097ef531c79eada3cc4f65f: Status 404 returned error can't find the container with id 5507701f3394ae59cee1301e7193d193945cf8de1097ef531c79eada3cc4f65f Jan 22 06:07:20 crc kubenswrapper[4933]: I0122 06:07:20.447823 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-2cdpf" event={"ID":"e591255a-6edc-40de-a50f-2f39d3e9bb59","Type":"ContainerStarted","Data":"17cce92623155f7d8d1837e57204a6dfb1d65a9db999fe215e989e68c83edf20"} Jan 22 06:07:20 crc kubenswrapper[4933]: I0122 06:07:20.449578 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ba039abf-7ec7-43bb-ad72-e2abd0afe439","Type":"ContainerStarted","Data":"efbed6b40df2f8cae8a1f558d55aea9a73af23f1a7b96a2a23f2b5d8035b6bd1"} Jan 22 06:07:20 crc kubenswrapper[4933]: I0122 06:07:20.451833 4933 generic.go:334] "Generic (PLEG): container finished" podID="0206df56-62b3-4d6a-87d3-2819fec42c00" containerID="7be37068f9187d5ae31e28aa151800275f8ca0245ea157c1c68ec74d1cd203a1" exitCode=0 Jan 22 06:07:20 crc kubenswrapper[4933]: I0122 06:07:20.452130 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-557bbc7df7-xxdtg" event={"ID":"0206df56-62b3-4d6a-87d3-2819fec42c00","Type":"ContainerDied","Data":"7be37068f9187d5ae31e28aa151800275f8ca0245ea157c1c68ec74d1cd203a1"} Jan 22 06:07:20 crc 
kubenswrapper[4933]: I0122 06:07:20.452201 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-557bbc7df7-xxdtg" event={"ID":"0206df56-62b3-4d6a-87d3-2819fec42c00","Type":"ContainerStarted","Data":"b0e34ad26ad2323dab47af2977a844caa7ef407dc67659a452f2b11588c07d5e"} Jan 22 06:07:20 crc kubenswrapper[4933]: I0122 06:07:20.455571 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-x8sfh" event={"ID":"9266f02b-3fef-4566-a9df-9b570f24d845","Type":"ContainerStarted","Data":"5507701f3394ae59cee1301e7193d193945cf8de1097ef531c79eada3cc4f65f"} Jan 22 06:07:20 crc kubenswrapper[4933]: I0122 06:07:20.458253 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"5467292b-d832-437c-9f0f-41441d0da350","Type":"ContainerStarted","Data":"f917335bf16d6fbe5b484234a7213aa3f38a311d890e4b01783c8d240a895605"} Jan 22 06:07:20 crc kubenswrapper[4933]: I0122 06:07:20.459808 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"55aecb5f-899c-40e3-9651-0faedac5f801","Type":"ContainerStarted","Data":"9cfd15393b7e1a71b468baabe1e2e0935069bf33d0506df2b515aa3d9762d5c9"} Jan 22 06:07:20 crc kubenswrapper[4933]: I0122 06:07:20.487191 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d7d92b38-1d4a-4d36-8c30-c40f747cef77","Type":"ContainerStarted","Data":"530ba728027c0b9cea62bf7cdd0a6a958475c8a2dace288d946f2bed2f664093"} Jan 22 06:07:20 crc kubenswrapper[4933]: I0122 06:07:20.496274 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-2cdpf" podStartSLOduration=2.49625336 podStartE2EDuration="2.49625336s" podCreationTimestamp="2026-01-22 06:07:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:07:20.463129388 +0000 UTC m=+1288.300254751" watchObservedRunningTime="2026-01-22 06:07:20.49625336 +0000 UTC m=+1288.333378713" Jan 22 06:07:21 crc kubenswrapper[4933]: I0122 06:07:21.516361 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-557bbc7df7-xxdtg" event={"ID":"0206df56-62b3-4d6a-87d3-2819fec42c00","Type":"ContainerStarted","Data":"2e704c0292519dfd8a922c623d94f8f05284f973119ecb1d6554c36fb2c2f49f"} Jan 22 06:07:21 crc kubenswrapper[4933]: I0122 06:07:21.517664 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-557bbc7df7-xxdtg" Jan 22 06:07:21 crc kubenswrapper[4933]: I0122 06:07:21.521582 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-x8sfh" event={"ID":"9266f02b-3fef-4566-a9df-9b570f24d845","Type":"ContainerStarted","Data":"cb606c1f6f736102fcf0079ab6c6d4f8465529171bfeb5a249a5ba85855f1bce"} Jan 22 06:07:21 crc kubenswrapper[4933]: I0122 06:07:21.533663 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-557bbc7df7-xxdtg" podStartSLOduration=3.533645269 podStartE2EDuration="3.533645269s" podCreationTimestamp="2026-01-22 06:07:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:07:21.532630408 +0000 UTC m=+1289.369755761" watchObservedRunningTime="2026-01-22 06:07:21.533645269 +0000 UTC m=+1289.370770622" Jan 22 06:07:21 crc kubenswrapper[4933]: I0122 06:07:21.560320 4933 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-x8sfh" podStartSLOduration=2.560294137 podStartE2EDuration="2.560294137s" podCreationTimestamp="2026-01-22 06:07:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:07:21.554262803 +0000 UTC m=+1289.391388176" watchObservedRunningTime="2026-01-22 06:07:21.560294137 +0000 UTC m=+1289.397419490" Jan 22 06:07:22 crc kubenswrapper[4933]: I0122 06:07:22.096384 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 06:07:22 crc kubenswrapper[4933]: I0122 06:07:22.104733 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 22 06:07:23 crc kubenswrapper[4933]: I0122 06:07:23.550144 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ba039abf-7ec7-43bb-ad72-e2abd0afe439","Type":"ContainerStarted","Data":"5b9c14d83c800061f8051520912f10415507b56ca2e8aee72a7d1ee8a0ebee79"} Jan 22 06:07:23 crc kubenswrapper[4933]: I0122 06:07:23.550197 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ba039abf-7ec7-43bb-ad72-e2abd0afe439","Type":"ContainerStarted","Data":"c5ed2708b994d64972c77f751483b57c4a7c6592049bb6b8066c9ae126dd2fe3"} Jan 22 06:07:23 crc kubenswrapper[4933]: I0122 06:07:23.550360 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="ba039abf-7ec7-43bb-ad72-e2abd0afe439" containerName="nova-metadata-log" containerID="cri-o://c5ed2708b994d64972c77f751483b57c4a7c6592049bb6b8066c9ae126dd2fe3" gracePeriod=30 Jan 22 06:07:23 crc kubenswrapper[4933]: I0122 06:07:23.550874 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="ba039abf-7ec7-43bb-ad72-e2abd0afe439" containerName="nova-metadata-metadata" containerID="cri-o://5b9c14d83c800061f8051520912f10415507b56ca2e8aee72a7d1ee8a0ebee79" gracePeriod=30 Jan 22 06:07:23 crc kubenswrapper[4933]: I0122 06:07:23.555369 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"5467292b-d832-437c-9f0f-41441d0da350","Type":"ContainerStarted","Data":"61f6493fabb950de8e2af3aae3dd7bf181fc3faf59f89e9a6161a8bab9d020e0"} Jan 22 06:07:23 crc kubenswrapper[4933]: I0122 06:07:23.555421 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="5467292b-d832-437c-9f0f-41441d0da350" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://61f6493fabb950de8e2af3aae3dd7bf181fc3faf59f89e9a6161a8bab9d020e0" gracePeriod=30 Jan 22 06:07:23 crc kubenswrapper[4933]: I0122 06:07:23.560145 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"55aecb5f-899c-40e3-9651-0faedac5f801","Type":"ContainerStarted","Data":"b22c6743199404c4213ddd23786b457d932b0b9434a7c83fd6bb4caba5288b2f"} Jan 22 06:07:23 crc kubenswrapper[4933]: I0122 06:07:23.564892 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d7d92b38-1d4a-4d36-8c30-c40f747cef77","Type":"ContainerStarted","Data":"c4ede8958fee860753a7531c5a9ee477d9d8860375af7d1500aed807573e8e28"} Jan 22 06:07:23 crc kubenswrapper[4933]: I0122 06:07:23.564949 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-api-0" event={"ID":"d7d92b38-1d4a-4d36-8c30-c40f747cef77","Type":"ContainerStarted","Data":"e3d8d815687c37963ff352278f6cf1f413a293e625d5ac101a5c5cfcc10d8fdb"} Jan 22 06:07:23 crc kubenswrapper[4933]: I0122 06:07:23.579423 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.638966305 podStartE2EDuration="5.579401019s" podCreationTimestamp="2026-01-22 06:07:18 +0000 UTC" firstStartedPulling="2026-01-22 06:07:19.930984372 +0000 UTC m=+1287.768109725" lastFinishedPulling="2026-01-22 06:07:22.871419056 +0000 UTC m=+1290.708544439" observedRunningTime="2026-01-22 06:07:23.571212851 +0000 UTC m=+1291.408338204" watchObservedRunningTime="2026-01-22 06:07:23.579401019 +0000 UTC m=+1291.416526382" Jan 22 06:07:23 crc kubenswrapper[4933]: I0122 06:07:23.599057 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.229848659 podStartE2EDuration="5.599037173s" podCreationTimestamp="2026-01-22 06:07:18 +0000 UTC" firstStartedPulling="2026-01-22 06:07:19.502778904 +0000 UTC m=+1287.339904257" lastFinishedPulling="2026-01-22 06:07:22.871967418 +0000 UTC m=+1290.709092771" observedRunningTime="2026-01-22 06:07:23.587371974 +0000 UTC m=+1291.424497337" watchObservedRunningTime="2026-01-22 06:07:23.599037173 +0000 UTC m=+1291.436162526" Jan 22 06:07:23 crc kubenswrapper[4933]: I0122 06:07:23.616125 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.304578117 podStartE2EDuration="5.616107534s" podCreationTimestamp="2026-01-22 06:07:18 +0000 UTC" firstStartedPulling="2026-01-22 06:07:19.560435401 +0000 UTC m=+1287.397560754" lastFinishedPulling="2026-01-22 06:07:22.871964818 +0000 UTC m=+1290.709090171" observedRunningTime="2026-01-22 06:07:23.601976534 +0000 UTC m=+1291.439101887" watchObservedRunningTime="2026-01-22 06:07:23.616107534 +0000 UTC m=+1291.453232887" Jan 22 06:07:23 crc kubenswrapper[4933]: I0122 06:07:23.618291 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.59738963 podStartE2EDuration="5.618272269s" podCreationTimestamp="2026-01-22 06:07:18 +0000 UTC" firstStartedPulling="2026-01-22 06:07:19.856436269 +0000 UTC m=+1287.693561632" lastFinishedPulling="2026-01-22 06:07:22.877318888 +0000 UTC m=+1290.714444271" observedRunningTime="2026-01-22 06:07:23.617091575 +0000 UTC m=+1291.454216928" watchObservedRunningTime="2026-01-22 06:07:23.618272269 +0000 UTC m=+1291.455397612" Jan 22 06:07:24 crc kubenswrapper[4933]: I0122 06:07:24.175349 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Jan 22 06:07:24 crc kubenswrapper[4933]: I0122 06:07:24.193042 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 22 06:07:24 crc kubenswrapper[4933]: I0122 06:07:24.202797 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 22 06:07:24 crc kubenswrapper[4933]: I0122 06:07:24.202863 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 22 06:07:24 crc kubenswrapper[4933]: I0122 06:07:24.592908 4933 generic.go:334] "Generic (PLEG): container finished" podID="ba039abf-7ec7-43bb-ad72-e2abd0afe439" containerID="c5ed2708b994d64972c77f751483b57c4a7c6592049bb6b8066c9ae126dd2fe3" 
exitCode=143 Jan 22 06:07:24 crc kubenswrapper[4933]: I0122 06:07:24.593006 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ba039abf-7ec7-43bb-ad72-e2abd0afe439","Type":"ContainerDied","Data":"c5ed2708b994d64972c77f751483b57c4a7c6592049bb6b8066c9ae126dd2fe3"} Jan 22 06:07:27 crc kubenswrapper[4933]: I0122 06:07:27.625925 4933 generic.go:334] "Generic (PLEG): container finished" podID="e591255a-6edc-40de-a50f-2f39d3e9bb59" containerID="17cce92623155f7d8d1837e57204a6dfb1d65a9db999fe215e989e68c83edf20" exitCode=0 Jan 22 06:07:27 crc kubenswrapper[4933]: I0122 06:07:27.626388 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-2cdpf" event={"ID":"e591255a-6edc-40de-a50f-2f39d3e9bb59","Type":"ContainerDied","Data":"17cce92623155f7d8d1837e57204a6dfb1d65a9db999fe215e989e68c83edf20"} Jan 22 06:07:27 crc kubenswrapper[4933]: I0122 06:07:27.629057 4933 generic.go:334] "Generic (PLEG): container finished" podID="9266f02b-3fef-4566-a9df-9b570f24d845" containerID="cb606c1f6f736102fcf0079ab6c6d4f8465529171bfeb5a249a5ba85855f1bce" exitCode=0 Jan 22 06:07:27 crc kubenswrapper[4933]: I0122 06:07:27.629150 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-x8sfh" event={"ID":"9266f02b-3fef-4566-a9df-9b570f24d845","Type":"ContainerDied","Data":"cb606c1f6f736102fcf0079ab6c6d4f8465529171bfeb5a249a5ba85855f1bce"} Jan 22 06:07:28 crc kubenswrapper[4933]: I0122 06:07:28.963556 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 22 06:07:28 crc kubenswrapper[4933]: I0122 06:07:28.963969 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.123679 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-x8sfh" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.129325 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-2cdpf" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.192987 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.221824 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.253612 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9266f02b-3fef-4566-a9df-9b570f24d845-config-data\") pod \"9266f02b-3fef-4566-a9df-9b570f24d845\" (UID: \"9266f02b-3fef-4566-a9df-9b570f24d845\") " Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.253803 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qjk2f\" (UniqueName: \"kubernetes.io/projected/e591255a-6edc-40de-a50f-2f39d3e9bb59-kube-api-access-qjk2f\") pod \"e591255a-6edc-40de-a50f-2f39d3e9bb59\" (UID: \"e591255a-6edc-40de-a50f-2f39d3e9bb59\") " Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.253832 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e591255a-6edc-40de-a50f-2f39d3e9bb59-scripts\") pod \"e591255a-6edc-40de-a50f-2f39d3e9bb59\" (UID: \"e591255a-6edc-40de-a50f-2f39d3e9bb59\") " Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.253878 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e591255a-6edc-40de-a50f-2f39d3e9bb59-config-data\") pod \"e591255a-6edc-40de-a50f-2f39d3e9bb59\" (UID: \"e591255a-6edc-40de-a50f-2f39d3e9bb59\") " Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.253919 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9266f02b-3fef-4566-a9df-9b570f24d845-combined-ca-bundle\") pod \"9266f02b-3fef-4566-a9df-9b570f24d845\" (UID: \"9266f02b-3fef-4566-a9df-9b570f24d845\") " Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.253955 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e591255a-6edc-40de-a50f-2f39d3e9bb59-combined-ca-bundle\") pod \"e591255a-6edc-40de-a50f-2f39d3e9bb59\" (UID: \"e591255a-6edc-40de-a50f-2f39d3e9bb59\") " Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.253993 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9266f02b-3fef-4566-a9df-9b570f24d845-scripts\") pod \"9266f02b-3fef-4566-a9df-9b570f24d845\" (UID: \"9266f02b-3fef-4566-a9df-9b570f24d845\") " Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.254035 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-78phw\" (UniqueName: \"kubernetes.io/projected/9266f02b-3fef-4566-a9df-9b570f24d845-kube-api-access-78phw\") pod \"9266f02b-3fef-4566-a9df-9b570f24d845\" (UID: \"9266f02b-3fef-4566-a9df-9b570f24d845\") " Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.262410 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e591255a-6edc-40de-a50f-2f39d3e9bb59-kube-api-access-qjk2f" (OuterVolumeSpecName: "kube-api-access-qjk2f") pod "e591255a-6edc-40de-a50f-2f39d3e9bb59" (UID: 
"e591255a-6edc-40de-a50f-2f39d3e9bb59"). InnerVolumeSpecName "kube-api-access-qjk2f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.264794 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e591255a-6edc-40de-a50f-2f39d3e9bb59-scripts" (OuterVolumeSpecName: "scripts") pod "e591255a-6edc-40de-a50f-2f39d3e9bb59" (UID: "e591255a-6edc-40de-a50f-2f39d3e9bb59"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.267246 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-557bbc7df7-xxdtg" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.279069 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9266f02b-3fef-4566-a9df-9b570f24d845-kube-api-access-78phw" (OuterVolumeSpecName: "kube-api-access-78phw") pod "9266f02b-3fef-4566-a9df-9b570f24d845" (UID: "9266f02b-3fef-4566-a9df-9b570f24d845"). InnerVolumeSpecName "kube-api-access-78phw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.279226 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9266f02b-3fef-4566-a9df-9b570f24d845-scripts" (OuterVolumeSpecName: "scripts") pod "9266f02b-3fef-4566-a9df-9b570f24d845" (UID: "9266f02b-3fef-4566-a9df-9b570f24d845"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.354331 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9266f02b-3fef-4566-a9df-9b570f24d845-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9266f02b-3fef-4566-a9df-9b570f24d845" (UID: "9266f02b-3fef-4566-a9df-9b570f24d845"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.355978 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e591255a-6edc-40de-a50f-2f39d3e9bb59-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e591255a-6edc-40de-a50f-2f39d3e9bb59" (UID: "e591255a-6edc-40de-a50f-2f39d3e9bb59"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.357872 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9266f02b-3fef-4566-a9df-9b570f24d845-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.357898 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e591255a-6edc-40de-a50f-2f39d3e9bb59-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.357908 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9266f02b-3fef-4566-a9df-9b570f24d845-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.357917 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-78phw\" (UniqueName: \"kubernetes.io/projected/9266f02b-3fef-4566-a9df-9b570f24d845-kube-api-access-78phw\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.357927 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qjk2f\" (UniqueName: \"kubernetes.io/projected/e591255a-6edc-40de-a50f-2f39d3e9bb59-kube-api-access-qjk2f\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.357935 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e591255a-6edc-40de-a50f-2f39d3e9bb59-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.368579 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e591255a-6edc-40de-a50f-2f39d3e9bb59-config-data" (OuterVolumeSpecName: "config-data") pod "e591255a-6edc-40de-a50f-2f39d3e9bb59" (UID: "e591255a-6edc-40de-a50f-2f39d3e9bb59"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.369769 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75bfc9b94f-blh75"] Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.370235 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-75bfc9b94f-blh75" podUID="86d22942-518c-4716-86a2-b0781a2d92ca" containerName="dnsmasq-dns" containerID="cri-o://c9ac846d183cf7936d4607fb4fd576371288fbaf8691ecb2feed707aa3fe2558" gracePeriod=10 Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.391062 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9266f02b-3fef-4566-a9df-9b570f24d845-config-data" (OuterVolumeSpecName: "config-data") pod "9266f02b-3fef-4566-a9df-9b570f24d845" (UID: "9266f02b-3fef-4566-a9df-9b570f24d845"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.460252 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e591255a-6edc-40de-a50f-2f39d3e9bb59-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.460281 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9266f02b-3fef-4566-a9df-9b570f24d845-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.654860 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-2cdpf" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.655054 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-2cdpf" event={"ID":"e591255a-6edc-40de-a50f-2f39d3e9bb59","Type":"ContainerDied","Data":"eecf5e6aa6b53e3a1692471b572464acf0d30fb0569d608fb61ead76de27253b"} Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.655568 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eecf5e6aa6b53e3a1692471b572464acf0d30fb0569d608fb61ead76de27253b" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.659168 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-x8sfh" event={"ID":"9266f02b-3fef-4566-a9df-9b570f24d845","Type":"ContainerDied","Data":"5507701f3394ae59cee1301e7193d193945cf8de1097ef531c79eada3cc4f65f"} Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.659218 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5507701f3394ae59cee1301e7193d193945cf8de1097ef531c79eada3cc4f65f" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.659278 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-x8sfh" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.702641 4933 generic.go:334] "Generic (PLEG): container finished" podID="86d22942-518c-4716-86a2-b0781a2d92ca" containerID="c9ac846d183cf7936d4607fb4fd576371288fbaf8691ecb2feed707aa3fe2558" exitCode=0 Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.703562 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75bfc9b94f-blh75" event={"ID":"86d22942-518c-4716-86a2-b0781a2d92ca","Type":"ContainerDied","Data":"c9ac846d183cf7936d4607fb4fd576371288fbaf8691ecb2feed707aa3fe2558"} Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.750196 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.755514 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 22 06:07:29 crc kubenswrapper[4933]: E0122 06:07:29.756152 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9266f02b-3fef-4566-a9df-9b570f24d845" containerName="nova-cell1-conductor-db-sync" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.756198 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="9266f02b-3fef-4566-a9df-9b570f24d845" containerName="nova-cell1-conductor-db-sync" Jan 22 06:07:29 crc kubenswrapper[4933]: E0122 06:07:29.756242 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e591255a-6edc-40de-a50f-2f39d3e9bb59" containerName="nova-manage" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.756251 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="e591255a-6edc-40de-a50f-2f39d3e9bb59" containerName="nova-manage" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.756562 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="e591255a-6edc-40de-a50f-2f39d3e9bb59" containerName="nova-manage" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.756611 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="9266f02b-3fef-4566-a9df-9b570f24d845" containerName="nova-cell1-conductor-db-sync" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.759402 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.773918 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.782902 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.794508 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kp2dj\" (UniqueName: \"kubernetes.io/projected/2a218455-793d-4ccf-880a-d89b28e98b2d-kube-api-access-kp2dj\") pod \"nova-cell1-conductor-0\" (UID: \"2a218455-793d-4ccf-880a-d89b28e98b2d\") " pod="openstack/nova-cell1-conductor-0" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.796241 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a218455-793d-4ccf-880a-d89b28e98b2d-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"2a218455-793d-4ccf-880a-d89b28e98b2d\") " pod="openstack/nova-cell1-conductor-0" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.796362 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a218455-793d-4ccf-880a-d89b28e98b2d-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"2a218455-793d-4ccf-880a-d89b28e98b2d\") " pod="openstack/nova-cell1-conductor-0" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.873842 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.874354 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="d7d92b38-1d4a-4d36-8c30-c40f747cef77" containerName="nova-api-log" containerID="cri-o://e3d8d815687c37963ff352278f6cf1f413a293e625d5ac101a5c5cfcc10d8fdb" gracePeriod=30 Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.875033 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="d7d92b38-1d4a-4d36-8c30-c40f747cef77" containerName="nova-api-api" containerID="cri-o://c4ede8958fee860753a7531c5a9ee477d9d8860375af7d1500aed807573e8e28" gracePeriod=30 Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.881702 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="d7d92b38-1d4a-4d36-8c30-c40f747cef77" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.186:8774/\": EOF" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.882190 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="d7d92b38-1d4a-4d36-8c30-c40f747cef77" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.186:8774/\": EOF" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.910983 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a218455-793d-4ccf-880a-d89b28e98b2d-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"2a218455-793d-4ccf-880a-d89b28e98b2d\") " pod="openstack/nova-cell1-conductor-0" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.911019 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/2a218455-793d-4ccf-880a-d89b28e98b2d-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"2a218455-793d-4ccf-880a-d89b28e98b2d\") " pod="openstack/nova-cell1-conductor-0" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.911398 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kp2dj\" (UniqueName: \"kubernetes.io/projected/2a218455-793d-4ccf-880a-d89b28e98b2d-kube-api-access-kp2dj\") pod \"nova-cell1-conductor-0\" (UID: \"2a218455-793d-4ccf-880a-d89b28e98b2d\") " pod="openstack/nova-cell1-conductor-0" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.917715 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a218455-793d-4ccf-880a-d89b28e98b2d-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"2a218455-793d-4ccf-880a-d89b28e98b2d\") " pod="openstack/nova-cell1-conductor-0" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.919489 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a218455-793d-4ccf-880a-d89b28e98b2d-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"2a218455-793d-4ccf-880a-d89b28e98b2d\") " pod="openstack/nova-cell1-conductor-0" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.927866 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75bfc9b94f-blh75" Jan 22 06:07:29 crc kubenswrapper[4933]: I0122 06:07:29.933620 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kp2dj\" (UniqueName: \"kubernetes.io/projected/2a218455-793d-4ccf-880a-d89b28e98b2d-kube-api-access-kp2dj\") pod \"nova-cell1-conductor-0\" (UID: \"2a218455-793d-4ccf-880a-d89b28e98b2d\") " pod="openstack/nova-cell1-conductor-0" Jan 22 06:07:30 crc kubenswrapper[4933]: I0122 06:07:30.017712 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xm5z9\" (UniqueName: \"kubernetes.io/projected/86d22942-518c-4716-86a2-b0781a2d92ca-kube-api-access-xm5z9\") pod \"86d22942-518c-4716-86a2-b0781a2d92ca\" (UID: \"86d22942-518c-4716-86a2-b0781a2d92ca\") " Jan 22 06:07:30 crc kubenswrapper[4933]: I0122 06:07:30.017768 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/86d22942-518c-4716-86a2-b0781a2d92ca-dns-svc\") pod \"86d22942-518c-4716-86a2-b0781a2d92ca\" (UID: \"86d22942-518c-4716-86a2-b0781a2d92ca\") " Jan 22 06:07:30 crc kubenswrapper[4933]: I0122 06:07:30.017853 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/86d22942-518c-4716-86a2-b0781a2d92ca-ovsdbserver-nb\") pod \"86d22942-518c-4716-86a2-b0781a2d92ca\" (UID: \"86d22942-518c-4716-86a2-b0781a2d92ca\") " Jan 22 06:07:30 crc kubenswrapper[4933]: I0122 06:07:30.017922 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/86d22942-518c-4716-86a2-b0781a2d92ca-config\") pod \"86d22942-518c-4716-86a2-b0781a2d92ca\" (UID: \"86d22942-518c-4716-86a2-b0781a2d92ca\") " Jan 22 06:07:30 crc kubenswrapper[4933]: I0122 06:07:30.017971 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: 
\"kubernetes.io/configmap/86d22942-518c-4716-86a2-b0781a2d92ca-dns-swift-storage-0\") pod \"86d22942-518c-4716-86a2-b0781a2d92ca\" (UID: \"86d22942-518c-4716-86a2-b0781a2d92ca\") " Jan 22 06:07:30 crc kubenswrapper[4933]: I0122 06:07:30.017988 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/86d22942-518c-4716-86a2-b0781a2d92ca-ovsdbserver-sb\") pod \"86d22942-518c-4716-86a2-b0781a2d92ca\" (UID: \"86d22942-518c-4716-86a2-b0781a2d92ca\") " Jan 22 06:07:30 crc kubenswrapper[4933]: I0122 06:07:30.040386 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/86d22942-518c-4716-86a2-b0781a2d92ca-kube-api-access-xm5z9" (OuterVolumeSpecName: "kube-api-access-xm5z9") pod "86d22942-518c-4716-86a2-b0781a2d92ca" (UID: "86d22942-518c-4716-86a2-b0781a2d92ca"). InnerVolumeSpecName "kube-api-access-xm5z9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:07:30 crc kubenswrapper[4933]: I0122 06:07:30.074486 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86d22942-518c-4716-86a2-b0781a2d92ca-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "86d22942-518c-4716-86a2-b0781a2d92ca" (UID: "86d22942-518c-4716-86a2-b0781a2d92ca"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:07:30 crc kubenswrapper[4933]: I0122 06:07:30.076625 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86d22942-518c-4716-86a2-b0781a2d92ca-config" (OuterVolumeSpecName: "config") pod "86d22942-518c-4716-86a2-b0781a2d92ca" (UID: "86d22942-518c-4716-86a2-b0781a2d92ca"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:07:30 crc kubenswrapper[4933]: I0122 06:07:30.089537 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86d22942-518c-4716-86a2-b0781a2d92ca-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "86d22942-518c-4716-86a2-b0781a2d92ca" (UID: "86d22942-518c-4716-86a2-b0781a2d92ca"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:07:30 crc kubenswrapper[4933]: I0122 06:07:30.090373 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86d22942-518c-4716-86a2-b0781a2d92ca-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "86d22942-518c-4716-86a2-b0781a2d92ca" (UID: "86d22942-518c-4716-86a2-b0781a2d92ca"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:07:30 crc kubenswrapper[4933]: I0122 06:07:30.095771 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86d22942-518c-4716-86a2-b0781a2d92ca-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "86d22942-518c-4716-86a2-b0781a2d92ca" (UID: "86d22942-518c-4716-86a2-b0781a2d92ca"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:07:30 crc kubenswrapper[4933]: I0122 06:07:30.120117 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/86d22942-518c-4716-86a2-b0781a2d92ca-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:30 crc kubenswrapper[4933]: I0122 06:07:30.120168 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/86d22942-518c-4716-86a2-b0781a2d92ca-config\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:30 crc kubenswrapper[4933]: I0122 06:07:30.120181 4933 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/86d22942-518c-4716-86a2-b0781a2d92ca-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:30 crc kubenswrapper[4933]: I0122 06:07:30.120194 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/86d22942-518c-4716-86a2-b0781a2d92ca-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:30 crc kubenswrapper[4933]: I0122 06:07:30.120205 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xm5z9\" (UniqueName: \"kubernetes.io/projected/86d22942-518c-4716-86a2-b0781a2d92ca-kube-api-access-xm5z9\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:30 crc kubenswrapper[4933]: I0122 06:07:30.120216 4933 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/86d22942-518c-4716-86a2-b0781a2d92ca-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:30 crc kubenswrapper[4933]: I0122 06:07:30.171933 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 22 06:07:30 crc kubenswrapper[4933]: I0122 06:07:30.284814 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 06:07:30 crc kubenswrapper[4933]: W0122 06:07:30.668585 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2a218455_793d_4ccf_880a_d89b28e98b2d.slice/crio-e4722e0c49280f70153bac74392b77a6e18349fe1106d512c16e872d54701fc8 WatchSource:0}: Error finding container e4722e0c49280f70153bac74392b77a6e18349fe1106d512c16e872d54701fc8: Status 404 returned error can't find the container with id e4722e0c49280f70153bac74392b77a6e18349fe1106d512c16e872d54701fc8 Jan 22 06:07:30 crc kubenswrapper[4933]: I0122 06:07:30.669469 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 22 06:07:30 crc kubenswrapper[4933]: I0122 06:07:30.712967 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"2a218455-793d-4ccf-880a-d89b28e98b2d","Type":"ContainerStarted","Data":"e4722e0c49280f70153bac74392b77a6e18349fe1106d512c16e872d54701fc8"} Jan 22 06:07:30 crc kubenswrapper[4933]: I0122 06:07:30.716192 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75bfc9b94f-blh75" event={"ID":"86d22942-518c-4716-86a2-b0781a2d92ca","Type":"ContainerDied","Data":"9a00094d926c974b70a8ff269ce55628626015f0326b1e66813b44983ea70e85"} Jan 22 06:07:30 crc kubenswrapper[4933]: I0122 06:07:30.716244 4933 scope.go:117] "RemoveContainer" containerID="c9ac846d183cf7936d4607fb4fd576371288fbaf8691ecb2feed707aa3fe2558" Jan 22 06:07:30 crc kubenswrapper[4933]: I0122 06:07:30.716418 4933 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75bfc9b94f-blh75" Jan 22 06:07:30 crc kubenswrapper[4933]: I0122 06:07:30.723942 4933 generic.go:334] "Generic (PLEG): container finished" podID="d7d92b38-1d4a-4d36-8c30-c40f747cef77" containerID="e3d8d815687c37963ff352278f6cf1f413a293e625d5ac101a5c5cfcc10d8fdb" exitCode=143 Jan 22 06:07:30 crc kubenswrapper[4933]: I0122 06:07:30.724238 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d7d92b38-1d4a-4d36-8c30-c40f747cef77","Type":"ContainerDied","Data":"e3d8d815687c37963ff352278f6cf1f413a293e625d5ac101a5c5cfcc10d8fdb"} Jan 22 06:07:30 crc kubenswrapper[4933]: I0122 06:07:30.745809 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75bfc9b94f-blh75"] Jan 22 06:07:30 crc kubenswrapper[4933]: I0122 06:07:30.754973 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-75bfc9b94f-blh75"] Jan 22 06:07:30 crc kubenswrapper[4933]: I0122 06:07:30.756205 4933 scope.go:117] "RemoveContainer" containerID="1f225a85114ce01f83ebbce384017c80893d68c6189a9dd8b7f41d67a05d045d" Jan 22 06:07:31 crc kubenswrapper[4933]: I0122 06:07:31.732963 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"2a218455-793d-4ccf-880a-d89b28e98b2d","Type":"ContainerStarted","Data":"38208bba151b5aef67b3039fba534cb60f5a8a95e9c63a84f708a2f3f493fde6"} Jan 22 06:07:31 crc kubenswrapper[4933]: I0122 06:07:31.733029 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Jan 22 06:07:31 crc kubenswrapper[4933]: I0122 06:07:31.734921 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="55aecb5f-899c-40e3-9651-0faedac5f801" containerName="nova-scheduler-scheduler" containerID="cri-o://b22c6743199404c4213ddd23786b457d932b0b9434a7c83fd6bb4caba5288b2f" gracePeriod=30 Jan 22 06:07:31 crc kubenswrapper[4933]: I0122 06:07:31.750272 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.750253082 podStartE2EDuration="2.750253082s" podCreationTimestamp="2026-01-22 06:07:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:07:31.74534562 +0000 UTC m=+1299.582470973" watchObservedRunningTime="2026-01-22 06:07:31.750253082 +0000 UTC m=+1299.587378435" Jan 22 06:07:32 crc kubenswrapper[4933]: I0122 06:07:32.500580 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="86d22942-518c-4716-86a2-b0781a2d92ca" path="/var/lib/kubelet/pods/86d22942-518c-4716-86a2-b0781a2d92ca/volumes" Jan 22 06:07:34 crc kubenswrapper[4933]: E0122 06:07:34.196308 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b22c6743199404c4213ddd23786b457d932b0b9434a7c83fd6bb4caba5288b2f" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 22 06:07:34 crc kubenswrapper[4933]: E0122 06:07:34.198693 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" 
containerID="b22c6743199404c4213ddd23786b457d932b0b9434a7c83fd6bb4caba5288b2f" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 22 06:07:34 crc kubenswrapper[4933]: E0122 06:07:34.200474 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b22c6743199404c4213ddd23786b457d932b0b9434a7c83fd6bb4caba5288b2f" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 22 06:07:34 crc kubenswrapper[4933]: E0122 06:07:34.200537 4933 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="55aecb5f-899c-40e3-9651-0faedac5f801" containerName="nova-scheduler-scheduler" Jan 22 06:07:34 crc kubenswrapper[4933]: I0122 06:07:34.771353 4933 generic.go:334] "Generic (PLEG): container finished" podID="55aecb5f-899c-40e3-9651-0faedac5f801" containerID="b22c6743199404c4213ddd23786b457d932b0b9434a7c83fd6bb4caba5288b2f" exitCode=0 Jan 22 06:07:34 crc kubenswrapper[4933]: I0122 06:07:34.771584 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"55aecb5f-899c-40e3-9651-0faedac5f801","Type":"ContainerDied","Data":"b22c6743199404c4213ddd23786b457d932b0b9434a7c83fd6bb4caba5288b2f"} Jan 22 06:07:34 crc kubenswrapper[4933]: I0122 06:07:34.771679 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"55aecb5f-899c-40e3-9651-0faedac5f801","Type":"ContainerDied","Data":"9cfd15393b7e1a71b468baabe1e2e0935069bf33d0506df2b515aa3d9762d5c9"} Jan 22 06:07:34 crc kubenswrapper[4933]: I0122 06:07:34.771695 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9cfd15393b7e1a71b468baabe1e2e0935069bf33d0506df2b515aa3d9762d5c9" Jan 22 06:07:34 crc kubenswrapper[4933]: I0122 06:07:34.831577 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 22 06:07:34 crc kubenswrapper[4933]: I0122 06:07:34.925013 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 22 06:07:34 crc kubenswrapper[4933]: I0122 06:07:34.963654 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2p9cf\" (UniqueName: \"kubernetes.io/projected/55aecb5f-899c-40e3-9651-0faedac5f801-kube-api-access-2p9cf\") pod \"55aecb5f-899c-40e3-9651-0faedac5f801\" (UID: \"55aecb5f-899c-40e3-9651-0faedac5f801\") " Jan 22 06:07:34 crc kubenswrapper[4933]: I0122 06:07:34.963891 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55aecb5f-899c-40e3-9651-0faedac5f801-combined-ca-bundle\") pod \"55aecb5f-899c-40e3-9651-0faedac5f801\" (UID: \"55aecb5f-899c-40e3-9651-0faedac5f801\") " Jan 22 06:07:34 crc kubenswrapper[4933]: I0122 06:07:34.963948 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55aecb5f-899c-40e3-9651-0faedac5f801-config-data\") pod \"55aecb5f-899c-40e3-9651-0faedac5f801\" (UID: \"55aecb5f-899c-40e3-9651-0faedac5f801\") " Jan 22 06:07:34 crc kubenswrapper[4933]: I0122 06:07:34.970319 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55aecb5f-899c-40e3-9651-0faedac5f801-kube-api-access-2p9cf" (OuterVolumeSpecName: "kube-api-access-2p9cf") pod "55aecb5f-899c-40e3-9651-0faedac5f801" (UID: "55aecb5f-899c-40e3-9651-0faedac5f801"). InnerVolumeSpecName "kube-api-access-2p9cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:07:34 crc kubenswrapper[4933]: I0122 06:07:34.995155 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55aecb5f-899c-40e3-9651-0faedac5f801-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "55aecb5f-899c-40e3-9651-0faedac5f801" (UID: "55aecb5f-899c-40e3-9651-0faedac5f801"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:07:34 crc kubenswrapper[4933]: I0122 06:07:34.998860 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55aecb5f-899c-40e3-9651-0faedac5f801-config-data" (OuterVolumeSpecName: "config-data") pod "55aecb5f-899c-40e3-9651-0faedac5f801" (UID: "55aecb5f-899c-40e3-9651-0faedac5f801"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:07:35 crc kubenswrapper[4933]: I0122 06:07:35.065754 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55aecb5f-899c-40e3-9651-0faedac5f801-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:35 crc kubenswrapper[4933]: I0122 06:07:35.065795 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2p9cf\" (UniqueName: \"kubernetes.io/projected/55aecb5f-899c-40e3-9651-0faedac5f801-kube-api-access-2p9cf\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:35 crc kubenswrapper[4933]: I0122 06:07:35.065810 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55aecb5f-899c-40e3-9651-0faedac5f801-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:35 crc kubenswrapper[4933]: I0122 06:07:35.257677 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Jan 22 06:07:35 crc kubenswrapper[4933]: I0122 06:07:35.783139 4933 generic.go:334] "Generic (PLEG): container finished" podID="d7d92b38-1d4a-4d36-8c30-c40f747cef77" containerID="c4ede8958fee860753a7531c5a9ee477d9d8860375af7d1500aed807573e8e28" exitCode=0 Jan 22 06:07:35 crc kubenswrapper[4933]: I0122 06:07:35.783224 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 22 06:07:35 crc kubenswrapper[4933]: I0122 06:07:35.783267 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d7d92b38-1d4a-4d36-8c30-c40f747cef77","Type":"ContainerDied","Data":"c4ede8958fee860753a7531c5a9ee477d9d8860375af7d1500aed807573e8e28"} Jan 22 06:07:35 crc kubenswrapper[4933]: I0122 06:07:35.824538 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 06:07:35 crc kubenswrapper[4933]: I0122 06:07:35.837584 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 06:07:35 crc kubenswrapper[4933]: I0122 06:07:35.848542 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 06:07:35 crc kubenswrapper[4933]: E0122 06:07:35.849362 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86d22942-518c-4716-86a2-b0781a2d92ca" containerName="dnsmasq-dns" Jan 22 06:07:35 crc kubenswrapper[4933]: I0122 06:07:35.849383 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="86d22942-518c-4716-86a2-b0781a2d92ca" containerName="dnsmasq-dns" Jan 22 06:07:35 crc kubenswrapper[4933]: E0122 06:07:35.849419 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86d22942-518c-4716-86a2-b0781a2d92ca" containerName="init" Jan 22 06:07:35 crc kubenswrapper[4933]: I0122 06:07:35.850614 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="86d22942-518c-4716-86a2-b0781a2d92ca" containerName="init" Jan 22 06:07:35 crc kubenswrapper[4933]: E0122 06:07:35.850723 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55aecb5f-899c-40e3-9651-0faedac5f801" containerName="nova-scheduler-scheduler" Jan 22 06:07:35 crc kubenswrapper[4933]: I0122 06:07:35.850749 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="55aecb5f-899c-40e3-9651-0faedac5f801" containerName="nova-scheduler-scheduler" Jan 22 06:07:35 crc kubenswrapper[4933]: I0122 06:07:35.851527 4933 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="86d22942-518c-4716-86a2-b0781a2d92ca" containerName="dnsmasq-dns" Jan 22 06:07:35 crc kubenswrapper[4933]: I0122 06:07:35.851559 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="55aecb5f-899c-40e3-9651-0faedac5f801" containerName="nova-scheduler-scheduler" Jan 22 06:07:35 crc kubenswrapper[4933]: I0122 06:07:35.852504 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 22 06:07:35 crc kubenswrapper[4933]: I0122 06:07:35.886336 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 22 06:07:35 crc kubenswrapper[4933]: I0122 06:07:35.897327 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 06:07:35 crc kubenswrapper[4933]: I0122 06:07:35.981594 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 22 06:07:35 crc kubenswrapper[4933]: I0122 06:07:35.993055 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b3c1d90-1d9a-485f-a6b3-4c527cd173bb-config-data\") pod \"nova-scheduler-0\" (UID: \"9b3c1d90-1d9a-485f-a6b3-4c527cd173bb\") " pod="openstack/nova-scheduler-0" Jan 22 06:07:35 crc kubenswrapper[4933]: I0122 06:07:35.993194 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b3c1d90-1d9a-485f-a6b3-4c527cd173bb-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"9b3c1d90-1d9a-485f-a6b3-4c527cd173bb\") " pod="openstack/nova-scheduler-0" Jan 22 06:07:35 crc kubenswrapper[4933]: I0122 06:07:35.993226 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6pnb9\" (UniqueName: \"kubernetes.io/projected/9b3c1d90-1d9a-485f-a6b3-4c527cd173bb-kube-api-access-6pnb9\") pod \"nova-scheduler-0\" (UID: \"9b3c1d90-1d9a-485f-a6b3-4c527cd173bb\") " pod="openstack/nova-scheduler-0" Jan 22 06:07:36 crc kubenswrapper[4933]: I0122 06:07:36.094488 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7d92b38-1d4a-4d36-8c30-c40f747cef77-combined-ca-bundle\") pod \"d7d92b38-1d4a-4d36-8c30-c40f747cef77\" (UID: \"d7d92b38-1d4a-4d36-8c30-c40f747cef77\") " Jan 22 06:07:36 crc kubenswrapper[4933]: I0122 06:07:36.094853 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mcm2r\" (UniqueName: \"kubernetes.io/projected/d7d92b38-1d4a-4d36-8c30-c40f747cef77-kube-api-access-mcm2r\") pod \"d7d92b38-1d4a-4d36-8c30-c40f747cef77\" (UID: \"d7d92b38-1d4a-4d36-8c30-c40f747cef77\") " Jan 22 06:07:36 crc kubenswrapper[4933]: I0122 06:07:36.094974 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7d92b38-1d4a-4d36-8c30-c40f747cef77-config-data\") pod \"d7d92b38-1d4a-4d36-8c30-c40f747cef77\" (UID: \"d7d92b38-1d4a-4d36-8c30-c40f747cef77\") " Jan 22 06:07:36 crc kubenswrapper[4933]: I0122 06:07:36.095135 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d7d92b38-1d4a-4d36-8c30-c40f747cef77-logs\") pod \"d7d92b38-1d4a-4d36-8c30-c40f747cef77\" (UID: \"d7d92b38-1d4a-4d36-8c30-c40f747cef77\") " Jan 22 06:07:36 crc 
kubenswrapper[4933]: I0122 06:07:36.095601 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d7d92b38-1d4a-4d36-8c30-c40f747cef77-logs" (OuterVolumeSpecName: "logs") pod "d7d92b38-1d4a-4d36-8c30-c40f747cef77" (UID: "d7d92b38-1d4a-4d36-8c30-c40f747cef77"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:07:36 crc kubenswrapper[4933]: I0122 06:07:36.095718 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b3c1d90-1d9a-485f-a6b3-4c527cd173bb-config-data\") pod \"nova-scheduler-0\" (UID: \"9b3c1d90-1d9a-485f-a6b3-4c527cd173bb\") " pod="openstack/nova-scheduler-0" Jan 22 06:07:36 crc kubenswrapper[4933]: I0122 06:07:36.095906 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b3c1d90-1d9a-485f-a6b3-4c527cd173bb-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"9b3c1d90-1d9a-485f-a6b3-4c527cd173bb\") " pod="openstack/nova-scheduler-0" Jan 22 06:07:36 crc kubenswrapper[4933]: I0122 06:07:36.096059 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6pnb9\" (UniqueName: \"kubernetes.io/projected/9b3c1d90-1d9a-485f-a6b3-4c527cd173bb-kube-api-access-6pnb9\") pod \"nova-scheduler-0\" (UID: \"9b3c1d90-1d9a-485f-a6b3-4c527cd173bb\") " pod="openstack/nova-scheduler-0" Jan 22 06:07:36 crc kubenswrapper[4933]: I0122 06:07:36.096292 4933 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d7d92b38-1d4a-4d36-8c30-c40f747cef77-logs\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:36 crc kubenswrapper[4933]: I0122 06:07:36.100122 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d7d92b38-1d4a-4d36-8c30-c40f747cef77-kube-api-access-mcm2r" (OuterVolumeSpecName: "kube-api-access-mcm2r") pod "d7d92b38-1d4a-4d36-8c30-c40f747cef77" (UID: "d7d92b38-1d4a-4d36-8c30-c40f747cef77"). InnerVolumeSpecName "kube-api-access-mcm2r". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:07:36 crc kubenswrapper[4933]: I0122 06:07:36.112566 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b3c1d90-1d9a-485f-a6b3-4c527cd173bb-config-data\") pod \"nova-scheduler-0\" (UID: \"9b3c1d90-1d9a-485f-a6b3-4c527cd173bb\") " pod="openstack/nova-scheduler-0" Jan 22 06:07:36 crc kubenswrapper[4933]: I0122 06:07:36.114778 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6pnb9\" (UniqueName: \"kubernetes.io/projected/9b3c1d90-1d9a-485f-a6b3-4c527cd173bb-kube-api-access-6pnb9\") pod \"nova-scheduler-0\" (UID: \"9b3c1d90-1d9a-485f-a6b3-4c527cd173bb\") " pod="openstack/nova-scheduler-0" Jan 22 06:07:36 crc kubenswrapper[4933]: I0122 06:07:36.120640 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b3c1d90-1d9a-485f-a6b3-4c527cd173bb-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"9b3c1d90-1d9a-485f-a6b3-4c527cd173bb\") " pod="openstack/nova-scheduler-0" Jan 22 06:07:36 crc kubenswrapper[4933]: I0122 06:07:36.129064 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7d92b38-1d4a-4d36-8c30-c40f747cef77-config-data" (OuterVolumeSpecName: "config-data") pod "d7d92b38-1d4a-4d36-8c30-c40f747cef77" (UID: "d7d92b38-1d4a-4d36-8c30-c40f747cef77"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:07:36 crc kubenswrapper[4933]: I0122 06:07:36.131192 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7d92b38-1d4a-4d36-8c30-c40f747cef77-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d7d92b38-1d4a-4d36-8c30-c40f747cef77" (UID: "d7d92b38-1d4a-4d36-8c30-c40f747cef77"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:07:36 crc kubenswrapper[4933]: I0122 06:07:36.200277 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mcm2r\" (UniqueName: \"kubernetes.io/projected/d7d92b38-1d4a-4d36-8c30-c40f747cef77-kube-api-access-mcm2r\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:36 crc kubenswrapper[4933]: I0122 06:07:36.200308 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7d92b38-1d4a-4d36-8c30-c40f747cef77-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:36 crc kubenswrapper[4933]: I0122 06:07:36.200317 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7d92b38-1d4a-4d36-8c30-c40f747cef77-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:36 crc kubenswrapper[4933]: I0122 06:07:36.274839 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 22 06:07:36 crc kubenswrapper[4933]: I0122 06:07:36.507669 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="55aecb5f-899c-40e3-9651-0faedac5f801" path="/var/lib/kubelet/pods/55aecb5f-899c-40e3-9651-0faedac5f801/volumes" Jan 22 06:07:36 crc kubenswrapper[4933]: I0122 06:07:36.763771 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 06:07:36 crc kubenswrapper[4933]: W0122 06:07:36.771401 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9b3c1d90_1d9a_485f_a6b3_4c527cd173bb.slice/crio-ba2809474f3529e5eecfbed195dfd81581924c20108b4ba107f210aaef379c89 WatchSource:0}: Error finding container ba2809474f3529e5eecfbed195dfd81581924c20108b4ba107f210aaef379c89: Status 404 returned error can't find the container with id ba2809474f3529e5eecfbed195dfd81581924c20108b4ba107f210aaef379c89 Jan 22 06:07:36 crc kubenswrapper[4933]: I0122 06:07:36.796561 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9b3c1d90-1d9a-485f-a6b3-4c527cd173bb","Type":"ContainerStarted","Data":"ba2809474f3529e5eecfbed195dfd81581924c20108b4ba107f210aaef379c89"} Jan 22 06:07:36 crc kubenswrapper[4933]: I0122 06:07:36.798565 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d7d92b38-1d4a-4d36-8c30-c40f747cef77","Type":"ContainerDied","Data":"530ba728027c0b9cea62bf7cdd0a6a958475c8a2dace288d946f2bed2f664093"} Jan 22 06:07:36 crc kubenswrapper[4933]: I0122 06:07:36.798607 4933 scope.go:117] "RemoveContainer" containerID="c4ede8958fee860753a7531c5a9ee477d9d8860375af7d1500aed807573e8e28" Jan 22 06:07:36 crc kubenswrapper[4933]: I0122 06:07:36.798621 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 22 06:07:36 crc kubenswrapper[4933]: I0122 06:07:36.911981 4933 scope.go:117] "RemoveContainer" containerID="e3d8d815687c37963ff352278f6cf1f413a293e625d5ac101a5c5cfcc10d8fdb" Jan 22 06:07:36 crc kubenswrapper[4933]: I0122 06:07:36.937013 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 22 06:07:36 crc kubenswrapper[4933]: I0122 06:07:36.952648 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 22 06:07:36 crc kubenswrapper[4933]: I0122 06:07:36.966159 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 22 06:07:36 crc kubenswrapper[4933]: E0122 06:07:36.968912 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7d92b38-1d4a-4d36-8c30-c40f747cef77" containerName="nova-api-api" Jan 22 06:07:36 crc kubenswrapper[4933]: I0122 06:07:36.969008 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7d92b38-1d4a-4d36-8c30-c40f747cef77" containerName="nova-api-api" Jan 22 06:07:36 crc kubenswrapper[4933]: E0122 06:07:36.969159 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7d92b38-1d4a-4d36-8c30-c40f747cef77" containerName="nova-api-log" Jan 22 06:07:36 crc kubenswrapper[4933]: I0122 06:07:36.969275 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7d92b38-1d4a-4d36-8c30-c40f747cef77" containerName="nova-api-log" Jan 22 06:07:36 crc kubenswrapper[4933]: I0122 06:07:36.969614 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="d7d92b38-1d4a-4d36-8c30-c40f747cef77" containerName="nova-api-log" Jan 22 06:07:36 crc kubenswrapper[4933]: I0122 06:07:36.969702 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="d7d92b38-1d4a-4d36-8c30-c40f747cef77" containerName="nova-api-api" Jan 22 06:07:36 crc kubenswrapper[4933]: I0122 06:07:36.971020 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 22 06:07:36 crc kubenswrapper[4933]: I0122 06:07:36.978905 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 22 06:07:36 crc kubenswrapper[4933]: I0122 06:07:36.979839 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 22 06:07:37 crc kubenswrapper[4933]: I0122 06:07:37.121596 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-brpdk\" (UniqueName: \"kubernetes.io/projected/d2cec7e1-f904-41dd-9ce9-0ad88b68e717-kube-api-access-brpdk\") pod \"nova-api-0\" (UID: \"d2cec7e1-f904-41dd-9ce9-0ad88b68e717\") " pod="openstack/nova-api-0" Jan 22 06:07:37 crc kubenswrapper[4933]: I0122 06:07:37.121670 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d2cec7e1-f904-41dd-9ce9-0ad88b68e717-config-data\") pod \"nova-api-0\" (UID: \"d2cec7e1-f904-41dd-9ce9-0ad88b68e717\") " pod="openstack/nova-api-0" Jan 22 06:07:37 crc kubenswrapper[4933]: I0122 06:07:37.121726 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d2cec7e1-f904-41dd-9ce9-0ad88b68e717-logs\") pod \"nova-api-0\" (UID: \"d2cec7e1-f904-41dd-9ce9-0ad88b68e717\") " pod="openstack/nova-api-0" Jan 22 06:07:37 crc kubenswrapper[4933]: I0122 06:07:37.122037 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2cec7e1-f904-41dd-9ce9-0ad88b68e717-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d2cec7e1-f904-41dd-9ce9-0ad88b68e717\") " pod="openstack/nova-api-0" Jan 22 06:07:37 crc kubenswrapper[4933]: I0122 06:07:37.223475 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2cec7e1-f904-41dd-9ce9-0ad88b68e717-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d2cec7e1-f904-41dd-9ce9-0ad88b68e717\") " pod="openstack/nova-api-0" Jan 22 06:07:37 crc kubenswrapper[4933]: I0122 06:07:37.223822 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-brpdk\" (UniqueName: \"kubernetes.io/projected/d2cec7e1-f904-41dd-9ce9-0ad88b68e717-kube-api-access-brpdk\") pod \"nova-api-0\" (UID: \"d2cec7e1-f904-41dd-9ce9-0ad88b68e717\") " pod="openstack/nova-api-0" Jan 22 06:07:37 crc kubenswrapper[4933]: I0122 06:07:37.224121 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d2cec7e1-f904-41dd-9ce9-0ad88b68e717-config-data\") pod \"nova-api-0\" (UID: \"d2cec7e1-f904-41dd-9ce9-0ad88b68e717\") " pod="openstack/nova-api-0" Jan 22 06:07:37 crc kubenswrapper[4933]: I0122 06:07:37.224250 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d2cec7e1-f904-41dd-9ce9-0ad88b68e717-logs\") pod \"nova-api-0\" (UID: \"d2cec7e1-f904-41dd-9ce9-0ad88b68e717\") " pod="openstack/nova-api-0" Jan 22 06:07:37 crc kubenswrapper[4933]: I0122 06:07:37.224710 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d2cec7e1-f904-41dd-9ce9-0ad88b68e717-logs\") pod \"nova-api-0\" (UID: \"d2cec7e1-f904-41dd-9ce9-0ad88b68e717\") " 
pod="openstack/nova-api-0" Jan 22 06:07:37 crc kubenswrapper[4933]: I0122 06:07:37.236909 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d2cec7e1-f904-41dd-9ce9-0ad88b68e717-config-data\") pod \"nova-api-0\" (UID: \"d2cec7e1-f904-41dd-9ce9-0ad88b68e717\") " pod="openstack/nova-api-0" Jan 22 06:07:37 crc kubenswrapper[4933]: I0122 06:07:37.242665 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2cec7e1-f904-41dd-9ce9-0ad88b68e717-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d2cec7e1-f904-41dd-9ce9-0ad88b68e717\") " pod="openstack/nova-api-0" Jan 22 06:07:37 crc kubenswrapper[4933]: I0122 06:07:37.248272 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-brpdk\" (UniqueName: \"kubernetes.io/projected/d2cec7e1-f904-41dd-9ce9-0ad88b68e717-kube-api-access-brpdk\") pod \"nova-api-0\" (UID: \"d2cec7e1-f904-41dd-9ce9-0ad88b68e717\") " pod="openstack/nova-api-0" Jan 22 06:07:37 crc kubenswrapper[4933]: I0122 06:07:37.287504 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 22 06:07:37 crc kubenswrapper[4933]: I0122 06:07:37.750207 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 22 06:07:37 crc kubenswrapper[4933]: W0122 06:07:37.752491 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd2cec7e1_f904_41dd_9ce9_0ad88b68e717.slice/crio-30291a5d5f08d3a68ba37ec9b2c8ae9dbb3c2a877affd39263d4c4b4dab998c5 WatchSource:0}: Error finding container 30291a5d5f08d3a68ba37ec9b2c8ae9dbb3c2a877affd39263d4c4b4dab998c5: Status 404 returned error can't find the container with id 30291a5d5f08d3a68ba37ec9b2c8ae9dbb3c2a877affd39263d4c4b4dab998c5 Jan 22 06:07:37 crc kubenswrapper[4933]: I0122 06:07:37.808961 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9b3c1d90-1d9a-485f-a6b3-4c527cd173bb","Type":"ContainerStarted","Data":"4d64563ea4c05a55275d0208eb9b3d7076bbf3ac8685166071692de0f119dcbf"} Jan 22 06:07:37 crc kubenswrapper[4933]: I0122 06:07:37.811109 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d2cec7e1-f904-41dd-9ce9-0ad88b68e717","Type":"ContainerStarted","Data":"30291a5d5f08d3a68ba37ec9b2c8ae9dbb3c2a877affd39263d4c4b4dab998c5"} Jan 22 06:07:37 crc kubenswrapper[4933]: I0122 06:07:37.829663 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.8296459130000002 podStartE2EDuration="2.829645913s" podCreationTimestamp="2026-01-22 06:07:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:07:37.825734312 +0000 UTC m=+1305.662859665" watchObservedRunningTime="2026-01-22 06:07:37.829645913 +0000 UTC m=+1305.666771266" Jan 22 06:07:38 crc kubenswrapper[4933]: I0122 06:07:38.502413 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d7d92b38-1d4a-4d36-8c30-c40f747cef77" path="/var/lib/kubelet/pods/d7d92b38-1d4a-4d36-8c30-c40f747cef77/volumes" Jan 22 06:07:38 crc kubenswrapper[4933]: I0122 06:07:38.820418 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" 
event={"ID":"d2cec7e1-f904-41dd-9ce9-0ad88b68e717","Type":"ContainerStarted","Data":"912e5c54d9b4f5e3b7fc021ca697f8cdbd9d313b028fda518e1e1ce6f83761cd"} Jan 22 06:07:38 crc kubenswrapper[4933]: I0122 06:07:38.820466 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d2cec7e1-f904-41dd-9ce9-0ad88b68e717","Type":"ContainerStarted","Data":"9679f838ab691ab0b7cf0f16a706dd64da7b9ac46ada87fb962e9985a04f62db"} Jan 22 06:07:38 crc kubenswrapper[4933]: I0122 06:07:38.850569 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.850550832 podStartE2EDuration="2.850550832s" podCreationTimestamp="2026-01-22 06:07:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:07:38.850100353 +0000 UTC m=+1306.687225726" watchObservedRunningTime="2026-01-22 06:07:38.850550832 +0000 UTC m=+1306.687676205" Jan 22 06:07:39 crc kubenswrapper[4933]: I0122 06:07:39.346429 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 22 06:07:39 crc kubenswrapper[4933]: I0122 06:07:39.346631 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="3070f00c-a8be-4606-bf64-53d3e321b329" containerName="kube-state-metrics" containerID="cri-o://cf6f2f928d77621dd900f7415a2f45d2131a39b6c456968dd6db4e3f7965e128" gracePeriod=30 Jan 22 06:07:39 crc kubenswrapper[4933]: I0122 06:07:39.830774 4933 generic.go:334] "Generic (PLEG): container finished" podID="3070f00c-a8be-4606-bf64-53d3e321b329" containerID="cf6f2f928d77621dd900f7415a2f45d2131a39b6c456968dd6db4e3f7965e128" exitCode=2 Jan 22 06:07:39 crc kubenswrapper[4933]: I0122 06:07:39.831014 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"3070f00c-a8be-4606-bf64-53d3e321b329","Type":"ContainerDied","Data":"cf6f2f928d77621dd900f7415a2f45d2131a39b6c456968dd6db4e3f7965e128"} Jan 22 06:07:39 crc kubenswrapper[4933]: I0122 06:07:39.831126 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"3070f00c-a8be-4606-bf64-53d3e321b329","Type":"ContainerDied","Data":"3968ebf4a8c88e1084d1555dc30a5668e20fb9c746eaad03f4f86caf6cb2efba"} Jan 22 06:07:39 crc kubenswrapper[4933]: I0122 06:07:39.831142 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3968ebf4a8c88e1084d1555dc30a5668e20fb9c746eaad03f4f86caf6cb2efba" Jan 22 06:07:39 crc kubenswrapper[4933]: I0122 06:07:39.857649 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 22 06:07:39 crc kubenswrapper[4933]: I0122 06:07:39.973315 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h8wcz\" (UniqueName: \"kubernetes.io/projected/3070f00c-a8be-4606-bf64-53d3e321b329-kube-api-access-h8wcz\") pod \"3070f00c-a8be-4606-bf64-53d3e321b329\" (UID: \"3070f00c-a8be-4606-bf64-53d3e321b329\") " Jan 22 06:07:39 crc kubenswrapper[4933]: I0122 06:07:39.979827 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3070f00c-a8be-4606-bf64-53d3e321b329-kube-api-access-h8wcz" (OuterVolumeSpecName: "kube-api-access-h8wcz") pod "3070f00c-a8be-4606-bf64-53d3e321b329" (UID: "3070f00c-a8be-4606-bf64-53d3e321b329"). InnerVolumeSpecName "kube-api-access-h8wcz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:07:40 crc kubenswrapper[4933]: I0122 06:07:40.076570 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h8wcz\" (UniqueName: \"kubernetes.io/projected/3070f00c-a8be-4606-bf64-53d3e321b329-kube-api-access-h8wcz\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:40 crc kubenswrapper[4933]: I0122 06:07:40.838262 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 22 06:07:40 crc kubenswrapper[4933]: I0122 06:07:40.867869 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 22 06:07:40 crc kubenswrapper[4933]: I0122 06:07:40.876862 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 22 06:07:40 crc kubenswrapper[4933]: I0122 06:07:40.894205 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Jan 22 06:07:40 crc kubenswrapper[4933]: E0122 06:07:40.894731 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3070f00c-a8be-4606-bf64-53d3e321b329" containerName="kube-state-metrics" Jan 22 06:07:40 crc kubenswrapper[4933]: I0122 06:07:40.894762 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="3070f00c-a8be-4606-bf64-53d3e321b329" containerName="kube-state-metrics" Jan 22 06:07:40 crc kubenswrapper[4933]: I0122 06:07:40.895273 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="3070f00c-a8be-4606-bf64-53d3e321b329" containerName="kube-state-metrics" Jan 22 06:07:40 crc kubenswrapper[4933]: I0122 06:07:40.896147 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 22 06:07:40 crc kubenswrapper[4933]: I0122 06:07:40.899355 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Jan 22 06:07:40 crc kubenswrapper[4933]: I0122 06:07:40.900045 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Jan 22 06:07:40 crc kubenswrapper[4933]: I0122 06:07:40.904127 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 22 06:07:40 crc kubenswrapper[4933]: I0122 06:07:40.991526 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/4886348b-6078-41dc-8fab-a8e2e1c4898d-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"4886348b-6078-41dc-8fab-a8e2e1c4898d\") " pod="openstack/kube-state-metrics-0" Jan 22 06:07:40 crc kubenswrapper[4933]: I0122 06:07:40.991590 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/4886348b-6078-41dc-8fab-a8e2e1c4898d-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"4886348b-6078-41dc-8fab-a8e2e1c4898d\") " pod="openstack/kube-state-metrics-0" Jan 22 06:07:40 crc kubenswrapper[4933]: I0122 06:07:40.991750 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lffj7\" (UniqueName: \"kubernetes.io/projected/4886348b-6078-41dc-8fab-a8e2e1c4898d-kube-api-access-lffj7\") pod \"kube-state-metrics-0\" (UID: \"4886348b-6078-41dc-8fab-a8e2e1c4898d\") " pod="openstack/kube-state-metrics-0" Jan 22 06:07:40 crc kubenswrapper[4933]: 
I0122 06:07:40.991979 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4886348b-6078-41dc-8fab-a8e2e1c4898d-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"4886348b-6078-41dc-8fab-a8e2e1c4898d\") " pod="openstack/kube-state-metrics-0" Jan 22 06:07:41 crc kubenswrapper[4933]: I0122 06:07:41.093945 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/4886348b-6078-41dc-8fab-a8e2e1c4898d-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"4886348b-6078-41dc-8fab-a8e2e1c4898d\") " pod="openstack/kube-state-metrics-0" Jan 22 06:07:41 crc kubenswrapper[4933]: I0122 06:07:41.094057 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lffj7\" (UniqueName: \"kubernetes.io/projected/4886348b-6078-41dc-8fab-a8e2e1c4898d-kube-api-access-lffj7\") pod \"kube-state-metrics-0\" (UID: \"4886348b-6078-41dc-8fab-a8e2e1c4898d\") " pod="openstack/kube-state-metrics-0" Jan 22 06:07:41 crc kubenswrapper[4933]: I0122 06:07:41.094128 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4886348b-6078-41dc-8fab-a8e2e1c4898d-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"4886348b-6078-41dc-8fab-a8e2e1c4898d\") " pod="openstack/kube-state-metrics-0" Jan 22 06:07:41 crc kubenswrapper[4933]: I0122 06:07:41.094174 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/4886348b-6078-41dc-8fab-a8e2e1c4898d-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"4886348b-6078-41dc-8fab-a8e2e1c4898d\") " pod="openstack/kube-state-metrics-0" Jan 22 06:07:41 crc kubenswrapper[4933]: I0122 06:07:41.098406 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/4886348b-6078-41dc-8fab-a8e2e1c4898d-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"4886348b-6078-41dc-8fab-a8e2e1c4898d\") " pod="openstack/kube-state-metrics-0" Jan 22 06:07:41 crc kubenswrapper[4933]: I0122 06:07:41.099597 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4886348b-6078-41dc-8fab-a8e2e1c4898d-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"4886348b-6078-41dc-8fab-a8e2e1c4898d\") " pod="openstack/kube-state-metrics-0" Jan 22 06:07:41 crc kubenswrapper[4933]: I0122 06:07:41.099661 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/4886348b-6078-41dc-8fab-a8e2e1c4898d-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"4886348b-6078-41dc-8fab-a8e2e1c4898d\") " pod="openstack/kube-state-metrics-0" Jan 22 06:07:41 crc kubenswrapper[4933]: I0122 06:07:41.114786 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lffj7\" (UniqueName: \"kubernetes.io/projected/4886348b-6078-41dc-8fab-a8e2e1c4898d-kube-api-access-lffj7\") pod \"kube-state-metrics-0\" (UID: \"4886348b-6078-41dc-8fab-a8e2e1c4898d\") " pod="openstack/kube-state-metrics-0" Jan 22 06:07:41 crc kubenswrapper[4933]: I0122 06:07:41.194202 4933 kubelet.go:2437] "SyncLoop DELETE" 
source="api" pods=["openstack/ceilometer-0"] Jan 22 06:07:41 crc kubenswrapper[4933]: I0122 06:07:41.194459 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7c8f35c1-1dfe-49ed-baaf-62c2a29e0988" containerName="ceilometer-central-agent" containerID="cri-o://3c0f52fe4c2eaed4847457d03b58618cf6c45098bacab8f1363cb2c9df657c71" gracePeriod=30 Jan 22 06:07:41 crc kubenswrapper[4933]: I0122 06:07:41.194527 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7c8f35c1-1dfe-49ed-baaf-62c2a29e0988" containerName="proxy-httpd" containerID="cri-o://11f3d9dd82f57c65075b864eab99bd5cabbd053546d6e7859a9633b88a5506f6" gracePeriod=30 Jan 22 06:07:41 crc kubenswrapper[4933]: I0122 06:07:41.194562 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7c8f35c1-1dfe-49ed-baaf-62c2a29e0988" containerName="ceilometer-notification-agent" containerID="cri-o://f495170fc7fbbe5e36e8691d31883e00eab4de587306f0702605a87793290838" gracePeriod=30 Jan 22 06:07:41 crc kubenswrapper[4933]: I0122 06:07:41.194566 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7c8f35c1-1dfe-49ed-baaf-62c2a29e0988" containerName="sg-core" containerID="cri-o://88e82780f3948a6a6c2385978716afab59c4599b0a203d6440d1f86439981c7f" gracePeriod=30 Jan 22 06:07:41 crc kubenswrapper[4933]: I0122 06:07:41.275450 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 22 06:07:41 crc kubenswrapper[4933]: I0122 06:07:41.275764 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 22 06:07:41 crc kubenswrapper[4933]: I0122 06:07:41.776549 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 22 06:07:41 crc kubenswrapper[4933]: W0122 06:07:41.780194 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4886348b_6078_41dc_8fab_a8e2e1c4898d.slice/crio-6b733f58820b28605f33186c29501971ea92d7a0b8029f812c88f4b5fc33485f WatchSource:0}: Error finding container 6b733f58820b28605f33186c29501971ea92d7a0b8029f812c88f4b5fc33485f: Status 404 returned error can't find the container with id 6b733f58820b28605f33186c29501971ea92d7a0b8029f812c88f4b5fc33485f Jan 22 06:07:41 crc kubenswrapper[4933]: I0122 06:07:41.848497 4933 generic.go:334] "Generic (PLEG): container finished" podID="7c8f35c1-1dfe-49ed-baaf-62c2a29e0988" containerID="11f3d9dd82f57c65075b864eab99bd5cabbd053546d6e7859a9633b88a5506f6" exitCode=0 Jan 22 06:07:41 crc kubenswrapper[4933]: I0122 06:07:41.848532 4933 generic.go:334] "Generic (PLEG): container finished" podID="7c8f35c1-1dfe-49ed-baaf-62c2a29e0988" containerID="88e82780f3948a6a6c2385978716afab59c4599b0a203d6440d1f86439981c7f" exitCode=2 Jan 22 06:07:41 crc kubenswrapper[4933]: I0122 06:07:41.848540 4933 generic.go:334] "Generic (PLEG): container finished" podID="7c8f35c1-1dfe-49ed-baaf-62c2a29e0988" containerID="3c0f52fe4c2eaed4847457d03b58618cf6c45098bacab8f1363cb2c9df657c71" exitCode=0 Jan 22 06:07:41 crc kubenswrapper[4933]: I0122 06:07:41.848551 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988","Type":"ContainerDied","Data":"11f3d9dd82f57c65075b864eab99bd5cabbd053546d6e7859a9633b88a5506f6"} Jan 22 
06:07:41 crc kubenswrapper[4933]: I0122 06:07:41.848613 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988","Type":"ContainerDied","Data":"88e82780f3948a6a6c2385978716afab59c4599b0a203d6440d1f86439981c7f"} Jan 22 06:07:41 crc kubenswrapper[4933]: I0122 06:07:41.848628 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988","Type":"ContainerDied","Data":"3c0f52fe4c2eaed4847457d03b58618cf6c45098bacab8f1363cb2c9df657c71"} Jan 22 06:07:41 crc kubenswrapper[4933]: I0122 06:07:41.849975 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"4886348b-6078-41dc-8fab-a8e2e1c4898d","Type":"ContainerStarted","Data":"6b733f58820b28605f33186c29501971ea92d7a0b8029f812c88f4b5fc33485f"} Jan 22 06:07:42 crc kubenswrapper[4933]: I0122 06:07:42.504341 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3070f00c-a8be-4606-bf64-53d3e321b329" path="/var/lib/kubelet/pods/3070f00c-a8be-4606-bf64-53d3e321b329/volumes" Jan 22 06:07:42 crc kubenswrapper[4933]: I0122 06:07:42.861299 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"4886348b-6078-41dc-8fab-a8e2e1c4898d","Type":"ContainerStarted","Data":"35c37c5d066e31beb5f68171ba88451347a51fbe1f8550fd13c3cdca949ee224"} Jan 22 06:07:42 crc kubenswrapper[4933]: I0122 06:07:42.862039 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 22 06:07:42 crc kubenswrapper[4933]: I0122 06:07:42.863694 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 06:07:42 crc kubenswrapper[4933]: I0122 06:07:42.865369 4933 generic.go:334] "Generic (PLEG): container finished" podID="7c8f35c1-1dfe-49ed-baaf-62c2a29e0988" containerID="f495170fc7fbbe5e36e8691d31883e00eab4de587306f0702605a87793290838" exitCode=0 Jan 22 06:07:42 crc kubenswrapper[4933]: I0122 06:07:42.865409 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988","Type":"ContainerDied","Data":"f495170fc7fbbe5e36e8691d31883e00eab4de587306f0702605a87793290838"} Jan 22 06:07:42 crc kubenswrapper[4933]: I0122 06:07:42.865434 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988","Type":"ContainerDied","Data":"8d7f101b176f2bfe22f9c4bf5caf3b2c7b0e56200ceb5a29b885baf832d35d2e"} Jan 22 06:07:42 crc kubenswrapper[4933]: I0122 06:07:42.865455 4933 scope.go:117] "RemoveContainer" containerID="11f3d9dd82f57c65075b864eab99bd5cabbd053546d6e7859a9633b88a5506f6" Jan 22 06:07:42 crc kubenswrapper[4933]: I0122 06:07:42.888058 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.503464911 podStartE2EDuration="2.888032342s" podCreationTimestamp="2026-01-22 06:07:40 +0000 UTC" firstStartedPulling="2026-01-22 06:07:41.782247066 +0000 UTC m=+1309.619372419" lastFinishedPulling="2026-01-22 06:07:42.166814477 +0000 UTC m=+1310.003939850" observedRunningTime="2026-01-22 06:07:42.882774904 +0000 UTC m=+1310.719900287" watchObservedRunningTime="2026-01-22 06:07:42.888032342 +0000 UTC m=+1310.725157725" Jan 22 06:07:42 crc kubenswrapper[4933]: I0122 06:07:42.905448 4933 scope.go:117] "RemoveContainer" 
containerID="88e82780f3948a6a6c2385978716afab59c4599b0a203d6440d1f86439981c7f" Jan 22 06:07:42 crc kubenswrapper[4933]: I0122 06:07:42.929756 4933 scope.go:117] "RemoveContainer" containerID="f495170fc7fbbe5e36e8691d31883e00eab4de587306f0702605a87793290838" Jan 22 06:07:42 crc kubenswrapper[4933]: I0122 06:07:42.931688 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-scqlh\" (UniqueName: \"kubernetes.io/projected/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-kube-api-access-scqlh\") pod \"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988\" (UID: \"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988\") " Jan 22 06:07:42 crc kubenswrapper[4933]: I0122 06:07:42.931814 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-sg-core-conf-yaml\") pod \"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988\" (UID: \"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988\") " Jan 22 06:07:42 crc kubenswrapper[4933]: I0122 06:07:42.931871 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-log-httpd\") pod \"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988\" (UID: \"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988\") " Jan 22 06:07:42 crc kubenswrapper[4933]: I0122 06:07:42.931901 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-run-httpd\") pod \"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988\" (UID: \"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988\") " Jan 22 06:07:42 crc kubenswrapper[4933]: I0122 06:07:42.932000 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-config-data\") pod \"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988\" (UID: \"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988\") " Jan 22 06:07:42 crc kubenswrapper[4933]: I0122 06:07:42.932160 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-scripts\") pod \"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988\" (UID: \"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988\") " Jan 22 06:07:42 crc kubenswrapper[4933]: I0122 06:07:42.932221 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-combined-ca-bundle\") pod \"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988\" (UID: \"7c8f35c1-1dfe-49ed-baaf-62c2a29e0988\") " Jan 22 06:07:42 crc kubenswrapper[4933]: I0122 06:07:42.934270 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "7c8f35c1-1dfe-49ed-baaf-62c2a29e0988" (UID: "7c8f35c1-1dfe-49ed-baaf-62c2a29e0988"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:07:42 crc kubenswrapper[4933]: I0122 06:07:42.934788 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "7c8f35c1-1dfe-49ed-baaf-62c2a29e0988" (UID: "7c8f35c1-1dfe-49ed-baaf-62c2a29e0988"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:07:42 crc kubenswrapper[4933]: I0122 06:07:42.939239 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-kube-api-access-scqlh" (OuterVolumeSpecName: "kube-api-access-scqlh") pod "7c8f35c1-1dfe-49ed-baaf-62c2a29e0988" (UID: "7c8f35c1-1dfe-49ed-baaf-62c2a29e0988"). InnerVolumeSpecName "kube-api-access-scqlh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:07:42 crc kubenswrapper[4933]: I0122 06:07:42.942841 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-scripts" (OuterVolumeSpecName: "scripts") pod "7c8f35c1-1dfe-49ed-baaf-62c2a29e0988" (UID: "7c8f35c1-1dfe-49ed-baaf-62c2a29e0988"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:07:42 crc kubenswrapper[4933]: I0122 06:07:42.963135 4933 scope.go:117] "RemoveContainer" containerID="3c0f52fe4c2eaed4847457d03b58618cf6c45098bacab8f1363cb2c9df657c71" Jan 22 06:07:42 crc kubenswrapper[4933]: I0122 06:07:42.987110 4933 scope.go:117] "RemoveContainer" containerID="11f3d9dd82f57c65075b864eab99bd5cabbd053546d6e7859a9633b88a5506f6" Jan 22 06:07:42 crc kubenswrapper[4933]: I0122 06:07:42.987195 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "7c8f35c1-1dfe-49ed-baaf-62c2a29e0988" (UID: "7c8f35c1-1dfe-49ed-baaf-62c2a29e0988"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:07:42 crc kubenswrapper[4933]: E0122 06:07:42.987617 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"11f3d9dd82f57c65075b864eab99bd5cabbd053546d6e7859a9633b88a5506f6\": container with ID starting with 11f3d9dd82f57c65075b864eab99bd5cabbd053546d6e7859a9633b88a5506f6 not found: ID does not exist" containerID="11f3d9dd82f57c65075b864eab99bd5cabbd053546d6e7859a9633b88a5506f6" Jan 22 06:07:42 crc kubenswrapper[4933]: I0122 06:07:42.987656 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"11f3d9dd82f57c65075b864eab99bd5cabbd053546d6e7859a9633b88a5506f6"} err="failed to get container status \"11f3d9dd82f57c65075b864eab99bd5cabbd053546d6e7859a9633b88a5506f6\": rpc error: code = NotFound desc = could not find container \"11f3d9dd82f57c65075b864eab99bd5cabbd053546d6e7859a9633b88a5506f6\": container with ID starting with 11f3d9dd82f57c65075b864eab99bd5cabbd053546d6e7859a9633b88a5506f6 not found: ID does not exist" Jan 22 06:07:42 crc kubenswrapper[4933]: I0122 06:07:42.987682 4933 scope.go:117] "RemoveContainer" containerID="88e82780f3948a6a6c2385978716afab59c4599b0a203d6440d1f86439981c7f" Jan 22 06:07:42 crc kubenswrapper[4933]: E0122 06:07:42.988007 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"88e82780f3948a6a6c2385978716afab59c4599b0a203d6440d1f86439981c7f\": container with ID starting with 88e82780f3948a6a6c2385978716afab59c4599b0a203d6440d1f86439981c7f not found: ID does not exist" containerID="88e82780f3948a6a6c2385978716afab59c4599b0a203d6440d1f86439981c7f" Jan 22 06:07:42 crc kubenswrapper[4933]: I0122 06:07:42.988030 4933 pod_container_deletor.go:53] "DeleteContainer 
returned error" containerID={"Type":"cri-o","ID":"88e82780f3948a6a6c2385978716afab59c4599b0a203d6440d1f86439981c7f"} err="failed to get container status \"88e82780f3948a6a6c2385978716afab59c4599b0a203d6440d1f86439981c7f\": rpc error: code = NotFound desc = could not find container \"88e82780f3948a6a6c2385978716afab59c4599b0a203d6440d1f86439981c7f\": container with ID starting with 88e82780f3948a6a6c2385978716afab59c4599b0a203d6440d1f86439981c7f not found: ID does not exist" Jan 22 06:07:42 crc kubenswrapper[4933]: I0122 06:07:42.988045 4933 scope.go:117] "RemoveContainer" containerID="f495170fc7fbbe5e36e8691d31883e00eab4de587306f0702605a87793290838" Jan 22 06:07:42 crc kubenswrapper[4933]: E0122 06:07:42.988431 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f495170fc7fbbe5e36e8691d31883e00eab4de587306f0702605a87793290838\": container with ID starting with f495170fc7fbbe5e36e8691d31883e00eab4de587306f0702605a87793290838 not found: ID does not exist" containerID="f495170fc7fbbe5e36e8691d31883e00eab4de587306f0702605a87793290838" Jan 22 06:07:42 crc kubenswrapper[4933]: I0122 06:07:42.988454 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f495170fc7fbbe5e36e8691d31883e00eab4de587306f0702605a87793290838"} err="failed to get container status \"f495170fc7fbbe5e36e8691d31883e00eab4de587306f0702605a87793290838\": rpc error: code = NotFound desc = could not find container \"f495170fc7fbbe5e36e8691d31883e00eab4de587306f0702605a87793290838\": container with ID starting with f495170fc7fbbe5e36e8691d31883e00eab4de587306f0702605a87793290838 not found: ID does not exist" Jan 22 06:07:42 crc kubenswrapper[4933]: I0122 06:07:42.988468 4933 scope.go:117] "RemoveContainer" containerID="3c0f52fe4c2eaed4847457d03b58618cf6c45098bacab8f1363cb2c9df657c71" Jan 22 06:07:42 crc kubenswrapper[4933]: E0122 06:07:42.988734 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3c0f52fe4c2eaed4847457d03b58618cf6c45098bacab8f1363cb2c9df657c71\": container with ID starting with 3c0f52fe4c2eaed4847457d03b58618cf6c45098bacab8f1363cb2c9df657c71 not found: ID does not exist" containerID="3c0f52fe4c2eaed4847457d03b58618cf6c45098bacab8f1363cb2c9df657c71" Jan 22 06:07:42 crc kubenswrapper[4933]: I0122 06:07:42.988775 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c0f52fe4c2eaed4847457d03b58618cf6c45098bacab8f1363cb2c9df657c71"} err="failed to get container status \"3c0f52fe4c2eaed4847457d03b58618cf6c45098bacab8f1363cb2c9df657c71\": rpc error: code = NotFound desc = could not find container \"3c0f52fe4c2eaed4847457d03b58618cf6c45098bacab8f1363cb2c9df657c71\": container with ID starting with 3c0f52fe4c2eaed4847457d03b58618cf6c45098bacab8f1363cb2c9df657c71 not found: ID does not exist" Jan 22 06:07:43 crc kubenswrapper[4933]: I0122 06:07:43.016990 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7c8f35c1-1dfe-49ed-baaf-62c2a29e0988" (UID: "7c8f35c1-1dfe-49ed-baaf-62c2a29e0988"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:07:43 crc kubenswrapper[4933]: I0122 06:07:43.033957 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-scqlh\" (UniqueName: \"kubernetes.io/projected/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-kube-api-access-scqlh\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:43 crc kubenswrapper[4933]: I0122 06:07:43.033984 4933 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:43 crc kubenswrapper[4933]: I0122 06:07:43.033993 4933 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:43 crc kubenswrapper[4933]: I0122 06:07:43.034001 4933 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:43 crc kubenswrapper[4933]: I0122 06:07:43.034011 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:43 crc kubenswrapper[4933]: I0122 06:07:43.034019 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:43 crc kubenswrapper[4933]: I0122 06:07:43.043949 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-config-data" (OuterVolumeSpecName: "config-data") pod "7c8f35c1-1dfe-49ed-baaf-62c2a29e0988" (UID: "7c8f35c1-1dfe-49ed-baaf-62c2a29e0988"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:07:43 crc kubenswrapper[4933]: I0122 06:07:43.135311 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:43 crc kubenswrapper[4933]: I0122 06:07:43.876396 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 06:07:43 crc kubenswrapper[4933]: I0122 06:07:43.908567 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:07:43 crc kubenswrapper[4933]: I0122 06:07:43.917023 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:07:43 crc kubenswrapper[4933]: I0122 06:07:43.947299 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:07:43 crc kubenswrapper[4933]: E0122 06:07:43.947973 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c8f35c1-1dfe-49ed-baaf-62c2a29e0988" containerName="proxy-httpd" Jan 22 06:07:43 crc kubenswrapper[4933]: I0122 06:07:43.948018 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c8f35c1-1dfe-49ed-baaf-62c2a29e0988" containerName="proxy-httpd" Jan 22 06:07:43 crc kubenswrapper[4933]: E0122 06:07:43.948096 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c8f35c1-1dfe-49ed-baaf-62c2a29e0988" containerName="sg-core" Jan 22 06:07:43 crc kubenswrapper[4933]: I0122 06:07:43.948109 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c8f35c1-1dfe-49ed-baaf-62c2a29e0988" containerName="sg-core" Jan 22 06:07:43 crc kubenswrapper[4933]: E0122 06:07:43.948206 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c8f35c1-1dfe-49ed-baaf-62c2a29e0988" containerName="ceilometer-notification-agent" Jan 22 06:07:43 crc kubenswrapper[4933]: I0122 06:07:43.948217 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c8f35c1-1dfe-49ed-baaf-62c2a29e0988" containerName="ceilometer-notification-agent" Jan 22 06:07:43 crc kubenswrapper[4933]: E0122 06:07:43.948232 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c8f35c1-1dfe-49ed-baaf-62c2a29e0988" containerName="ceilometer-central-agent" Jan 22 06:07:43 crc kubenswrapper[4933]: I0122 06:07:43.948240 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c8f35c1-1dfe-49ed-baaf-62c2a29e0988" containerName="ceilometer-central-agent" Jan 22 06:07:43 crc kubenswrapper[4933]: I0122 06:07:43.948456 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c8f35c1-1dfe-49ed-baaf-62c2a29e0988" containerName="proxy-httpd" Jan 22 06:07:43 crc kubenswrapper[4933]: I0122 06:07:43.948477 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c8f35c1-1dfe-49ed-baaf-62c2a29e0988" containerName="ceilometer-central-agent" Jan 22 06:07:43 crc kubenswrapper[4933]: I0122 06:07:43.948489 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c8f35c1-1dfe-49ed-baaf-62c2a29e0988" containerName="sg-core" Jan 22 06:07:43 crc kubenswrapper[4933]: I0122 06:07:43.948512 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c8f35c1-1dfe-49ed-baaf-62c2a29e0988" containerName="ceilometer-notification-agent" Jan 22 06:07:43 crc kubenswrapper[4933]: I0122 06:07:43.954520 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 06:07:43 crc kubenswrapper[4933]: I0122 06:07:43.958650 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 22 06:07:43 crc kubenswrapper[4933]: I0122 06:07:43.958984 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Jan 22 06:07:43 crc kubenswrapper[4933]: I0122 06:07:43.959185 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 22 06:07:43 crc kubenswrapper[4933]: I0122 06:07:43.974195 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:07:44 crc kubenswrapper[4933]: I0122 06:07:44.047618 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d083996a-9177-451f-ab93-ea1beffeece6-log-httpd\") pod \"ceilometer-0\" (UID: \"d083996a-9177-451f-ab93-ea1beffeece6\") " pod="openstack/ceilometer-0" Jan 22 06:07:44 crc kubenswrapper[4933]: I0122 06:07:44.047718 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kqwmd\" (UniqueName: \"kubernetes.io/projected/d083996a-9177-451f-ab93-ea1beffeece6-kube-api-access-kqwmd\") pod \"ceilometer-0\" (UID: \"d083996a-9177-451f-ab93-ea1beffeece6\") " pod="openstack/ceilometer-0" Jan 22 06:07:44 crc kubenswrapper[4933]: I0122 06:07:44.047753 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d083996a-9177-451f-ab93-ea1beffeece6-config-data\") pod \"ceilometer-0\" (UID: \"d083996a-9177-451f-ab93-ea1beffeece6\") " pod="openstack/ceilometer-0" Jan 22 06:07:44 crc kubenswrapper[4933]: I0122 06:07:44.047773 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/d083996a-9177-451f-ab93-ea1beffeece6-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"d083996a-9177-451f-ab93-ea1beffeece6\") " pod="openstack/ceilometer-0" Jan 22 06:07:44 crc kubenswrapper[4933]: I0122 06:07:44.047797 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d083996a-9177-451f-ab93-ea1beffeece6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d083996a-9177-451f-ab93-ea1beffeece6\") " pod="openstack/ceilometer-0" Jan 22 06:07:44 crc kubenswrapper[4933]: I0122 06:07:44.047816 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d083996a-9177-451f-ab93-ea1beffeece6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d083996a-9177-451f-ab93-ea1beffeece6\") " pod="openstack/ceilometer-0" Jan 22 06:07:44 crc kubenswrapper[4933]: I0122 06:07:44.047855 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d083996a-9177-451f-ab93-ea1beffeece6-run-httpd\") pod \"ceilometer-0\" (UID: \"d083996a-9177-451f-ab93-ea1beffeece6\") " pod="openstack/ceilometer-0" Jan 22 06:07:44 crc kubenswrapper[4933]: I0122 06:07:44.047890 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/d083996a-9177-451f-ab93-ea1beffeece6-scripts\") pod \"ceilometer-0\" (UID: \"d083996a-9177-451f-ab93-ea1beffeece6\") " pod="openstack/ceilometer-0" Jan 22 06:07:44 crc kubenswrapper[4933]: I0122 06:07:44.150893 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kqwmd\" (UniqueName: \"kubernetes.io/projected/d083996a-9177-451f-ab93-ea1beffeece6-kube-api-access-kqwmd\") pod \"ceilometer-0\" (UID: \"d083996a-9177-451f-ab93-ea1beffeece6\") " pod="openstack/ceilometer-0" Jan 22 06:07:44 crc kubenswrapper[4933]: I0122 06:07:44.150968 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d083996a-9177-451f-ab93-ea1beffeece6-config-data\") pod \"ceilometer-0\" (UID: \"d083996a-9177-451f-ab93-ea1beffeece6\") " pod="openstack/ceilometer-0" Jan 22 06:07:44 crc kubenswrapper[4933]: I0122 06:07:44.151000 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/d083996a-9177-451f-ab93-ea1beffeece6-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"d083996a-9177-451f-ab93-ea1beffeece6\") " pod="openstack/ceilometer-0" Jan 22 06:07:44 crc kubenswrapper[4933]: I0122 06:07:44.151031 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d083996a-9177-451f-ab93-ea1beffeece6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d083996a-9177-451f-ab93-ea1beffeece6\") " pod="openstack/ceilometer-0" Jan 22 06:07:44 crc kubenswrapper[4933]: I0122 06:07:44.151066 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d083996a-9177-451f-ab93-ea1beffeece6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d083996a-9177-451f-ab93-ea1beffeece6\") " pod="openstack/ceilometer-0" Jan 22 06:07:44 crc kubenswrapper[4933]: I0122 06:07:44.151133 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d083996a-9177-451f-ab93-ea1beffeece6-run-httpd\") pod \"ceilometer-0\" (UID: \"d083996a-9177-451f-ab93-ea1beffeece6\") " pod="openstack/ceilometer-0" Jan 22 06:07:44 crc kubenswrapper[4933]: I0122 06:07:44.151159 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d083996a-9177-451f-ab93-ea1beffeece6-scripts\") pod \"ceilometer-0\" (UID: \"d083996a-9177-451f-ab93-ea1beffeece6\") " pod="openstack/ceilometer-0" Jan 22 06:07:44 crc kubenswrapper[4933]: I0122 06:07:44.151228 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d083996a-9177-451f-ab93-ea1beffeece6-log-httpd\") pod \"ceilometer-0\" (UID: \"d083996a-9177-451f-ab93-ea1beffeece6\") " pod="openstack/ceilometer-0" Jan 22 06:07:44 crc kubenswrapper[4933]: I0122 06:07:44.151823 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d083996a-9177-451f-ab93-ea1beffeece6-log-httpd\") pod \"ceilometer-0\" (UID: \"d083996a-9177-451f-ab93-ea1beffeece6\") " pod="openstack/ceilometer-0" Jan 22 06:07:44 crc kubenswrapper[4933]: I0122 06:07:44.166189 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/d083996a-9177-451f-ab93-ea1beffeece6-run-httpd\") pod \"ceilometer-0\" (UID: \"d083996a-9177-451f-ab93-ea1beffeece6\") " pod="openstack/ceilometer-0" Jan 22 06:07:44 crc kubenswrapper[4933]: I0122 06:07:44.168979 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/d083996a-9177-451f-ab93-ea1beffeece6-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"d083996a-9177-451f-ab93-ea1beffeece6\") " pod="openstack/ceilometer-0" Jan 22 06:07:44 crc kubenswrapper[4933]: I0122 06:07:44.169690 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d083996a-9177-451f-ab93-ea1beffeece6-config-data\") pod \"ceilometer-0\" (UID: \"d083996a-9177-451f-ab93-ea1beffeece6\") " pod="openstack/ceilometer-0" Jan 22 06:07:44 crc kubenswrapper[4933]: I0122 06:07:44.171474 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d083996a-9177-451f-ab93-ea1beffeece6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d083996a-9177-451f-ab93-ea1beffeece6\") " pod="openstack/ceilometer-0" Jan 22 06:07:44 crc kubenswrapper[4933]: I0122 06:07:44.172278 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d083996a-9177-451f-ab93-ea1beffeece6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d083996a-9177-451f-ab93-ea1beffeece6\") " pod="openstack/ceilometer-0" Jan 22 06:07:44 crc kubenswrapper[4933]: I0122 06:07:44.175070 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kqwmd\" (UniqueName: \"kubernetes.io/projected/d083996a-9177-451f-ab93-ea1beffeece6-kube-api-access-kqwmd\") pod \"ceilometer-0\" (UID: \"d083996a-9177-451f-ab93-ea1beffeece6\") " pod="openstack/ceilometer-0" Jan 22 06:07:44 crc kubenswrapper[4933]: I0122 06:07:44.175227 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d083996a-9177-451f-ab93-ea1beffeece6-scripts\") pod \"ceilometer-0\" (UID: \"d083996a-9177-451f-ab93-ea1beffeece6\") " pod="openstack/ceilometer-0" Jan 22 06:07:44 crc kubenswrapper[4933]: I0122 06:07:44.291022 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 06:07:44 crc kubenswrapper[4933]: I0122 06:07:44.503944 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7c8f35c1-1dfe-49ed-baaf-62c2a29e0988" path="/var/lib/kubelet/pods/7c8f35c1-1dfe-49ed-baaf-62c2a29e0988/volumes" Jan 22 06:07:44 crc kubenswrapper[4933]: I0122 06:07:44.755824 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:07:44 crc kubenswrapper[4933]: I0122 06:07:44.788593 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/kube-state-metrics-0" podUID="3070f00c-a8be-4606-bf64-53d3e321b329" containerName="kube-state-metrics" probeResult="failure" output="Get \"http://10.217.0.107:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 22 06:07:44 crc kubenswrapper[4933]: I0122 06:07:44.889378 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d083996a-9177-451f-ab93-ea1beffeece6","Type":"ContainerStarted","Data":"b2c3a2c1e8fd82480c088af0cec0ef6935b24a21fa68c346f564f63b608518e6"} Jan 22 06:07:45 crc kubenswrapper[4933]: I0122 06:07:45.903588 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d083996a-9177-451f-ab93-ea1beffeece6","Type":"ContainerStarted","Data":"1fd752b9d239217899d7fef10c605fee1db8714a148703fe3f1509fbfeb62517"} Jan 22 06:07:46 crc kubenswrapper[4933]: I0122 06:07:46.275916 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 22 06:07:46 crc kubenswrapper[4933]: I0122 06:07:46.303829 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 22 06:07:46 crc kubenswrapper[4933]: I0122 06:07:46.922900 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d083996a-9177-451f-ab93-ea1beffeece6","Type":"ContainerStarted","Data":"e7ae00579f01c286af0183394bf4c6e8839f69cb8994d083e06abfd88a61c69a"} Jan 22 06:07:46 crc kubenswrapper[4933]: I0122 06:07:46.954066 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 22 06:07:47 crc kubenswrapper[4933]: I0122 06:07:47.288291 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 22 06:07:47 crc kubenswrapper[4933]: I0122 06:07:47.288584 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 22 06:07:47 crc kubenswrapper[4933]: I0122 06:07:47.934238 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d083996a-9177-451f-ab93-ea1beffeece6","Type":"ContainerStarted","Data":"f4312982663741b11b776a870286db1dabd8cedf9122e8e19cf6d7c96825fc87"} Jan 22 06:07:48 crc kubenswrapper[4933]: I0122 06:07:48.372400 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="d2cec7e1-f904-41dd-9ce9-0ad88b68e717" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.194:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 22 06:07:48 crc kubenswrapper[4933]: I0122 06:07:48.372419 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="d2cec7e1-f904-41dd-9ce9-0ad88b68e717" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.194:8774/\": context deadline exceeded 
(Client.Timeout exceeded while awaiting headers)" Jan 22 06:07:48 crc kubenswrapper[4933]: I0122 06:07:48.947044 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d083996a-9177-451f-ab93-ea1beffeece6","Type":"ContainerStarted","Data":"2683f7e09e142a5fba46404b1d9ea7e4c153717249c0dac778cbb2aba50b8637"} Jan 22 06:07:48 crc kubenswrapper[4933]: I0122 06:07:48.948212 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 22 06:07:48 crc kubenswrapper[4933]: I0122 06:07:48.990571 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.679622824 podStartE2EDuration="5.990549439s" podCreationTimestamp="2026-01-22 06:07:43 +0000 UTC" firstStartedPulling="2026-01-22 06:07:44.755599967 +0000 UTC m=+1312.592725320" lastFinishedPulling="2026-01-22 06:07:48.066526582 +0000 UTC m=+1315.903651935" observedRunningTime="2026-01-22 06:07:48.978546842 +0000 UTC m=+1316.815672195" watchObservedRunningTime="2026-01-22 06:07:48.990549439 +0000 UTC m=+1316.827674802" Jan 22 06:07:51 crc kubenswrapper[4933]: I0122 06:07:51.296475 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 22 06:07:53 crc kubenswrapper[4933]: I0122 06:07:53.998174 4933 generic.go:334] "Generic (PLEG): container finished" podID="5467292b-d832-437c-9f0f-41441d0da350" containerID="61f6493fabb950de8e2af3aae3dd7bf181fc3faf59f89e9a6161a8bab9d020e0" exitCode=137 Jan 22 06:07:53 crc kubenswrapper[4933]: I0122 06:07:53.998400 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"5467292b-d832-437c-9f0f-41441d0da350","Type":"ContainerDied","Data":"61f6493fabb950de8e2af3aae3dd7bf181fc3faf59f89e9a6161a8bab9d020e0"} Jan 22 06:07:53 crc kubenswrapper[4933]: I0122 06:07:53.998846 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"5467292b-d832-437c-9f0f-41441d0da350","Type":"ContainerDied","Data":"f917335bf16d6fbe5b484234a7213aa3f38a311d890e4b01783c8d240a895605"} Jan 22 06:07:53 crc kubenswrapper[4933]: I0122 06:07:53.998864 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f917335bf16d6fbe5b484234a7213aa3f38a311d890e4b01783c8d240a895605" Jan 22 06:07:54 crc kubenswrapper[4933]: I0122 06:07:54.002315 4933 generic.go:334] "Generic (PLEG): container finished" podID="ba039abf-7ec7-43bb-ad72-e2abd0afe439" containerID="5b9c14d83c800061f8051520912f10415507b56ca2e8aee72a7d1ee8a0ebee79" exitCode=137 Jan 22 06:07:54 crc kubenswrapper[4933]: I0122 06:07:54.002343 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ba039abf-7ec7-43bb-ad72-e2abd0afe439","Type":"ContainerDied","Data":"5b9c14d83c800061f8051520912f10415507b56ca2e8aee72a7d1ee8a0ebee79"} Jan 22 06:07:54 crc kubenswrapper[4933]: I0122 06:07:54.002358 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ba039abf-7ec7-43bb-ad72-e2abd0afe439","Type":"ContainerDied","Data":"efbed6b40df2f8cae8a1f558d55aea9a73af23f1a7b96a2a23f2b5d8035b6bd1"} Jan 22 06:07:54 crc kubenswrapper[4933]: I0122 06:07:54.002367 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="efbed6b40df2f8cae8a1f558d55aea9a73af23f1a7b96a2a23f2b5d8035b6bd1" Jan 22 06:07:54 crc kubenswrapper[4933]: I0122 06:07:54.068248 4933 util.go:48] "No ready 
sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 22 06:07:54 crc kubenswrapper[4933]: I0122 06:07:54.071357 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 22 06:07:54 crc kubenswrapper[4933]: I0122 06:07:54.143643 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4xpd\" (UniqueName: \"kubernetes.io/projected/5467292b-d832-437c-9f0f-41441d0da350-kube-api-access-s4xpd\") pod \"5467292b-d832-437c-9f0f-41441d0da350\" (UID: \"5467292b-d832-437c-9f0f-41441d0da350\") " Jan 22 06:07:54 crc kubenswrapper[4933]: I0122 06:07:54.143690 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-78wqq\" (UniqueName: \"kubernetes.io/projected/ba039abf-7ec7-43bb-ad72-e2abd0afe439-kube-api-access-78wqq\") pod \"ba039abf-7ec7-43bb-ad72-e2abd0afe439\" (UID: \"ba039abf-7ec7-43bb-ad72-e2abd0afe439\") " Jan 22 06:07:54 crc kubenswrapper[4933]: I0122 06:07:54.143807 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5467292b-d832-437c-9f0f-41441d0da350-combined-ca-bundle\") pod \"5467292b-d832-437c-9f0f-41441d0da350\" (UID: \"5467292b-d832-437c-9f0f-41441d0da350\") " Jan 22 06:07:54 crc kubenswrapper[4933]: I0122 06:07:54.143895 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba039abf-7ec7-43bb-ad72-e2abd0afe439-logs\") pod \"ba039abf-7ec7-43bb-ad72-e2abd0afe439\" (UID: \"ba039abf-7ec7-43bb-ad72-e2abd0afe439\") " Jan 22 06:07:54 crc kubenswrapper[4933]: I0122 06:07:54.143941 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba039abf-7ec7-43bb-ad72-e2abd0afe439-config-data\") pod \"ba039abf-7ec7-43bb-ad72-e2abd0afe439\" (UID: \"ba039abf-7ec7-43bb-ad72-e2abd0afe439\") " Jan 22 06:07:54 crc kubenswrapper[4933]: I0122 06:07:54.143995 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba039abf-7ec7-43bb-ad72-e2abd0afe439-combined-ca-bundle\") pod \"ba039abf-7ec7-43bb-ad72-e2abd0afe439\" (UID: \"ba039abf-7ec7-43bb-ad72-e2abd0afe439\") " Jan 22 06:07:54 crc kubenswrapper[4933]: I0122 06:07:54.144053 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5467292b-d832-437c-9f0f-41441d0da350-config-data\") pod \"5467292b-d832-437c-9f0f-41441d0da350\" (UID: \"5467292b-d832-437c-9f0f-41441d0da350\") " Jan 22 06:07:54 crc kubenswrapper[4933]: I0122 06:07:54.144507 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ba039abf-7ec7-43bb-ad72-e2abd0afe439-logs" (OuterVolumeSpecName: "logs") pod "ba039abf-7ec7-43bb-ad72-e2abd0afe439" (UID: "ba039abf-7ec7-43bb-ad72-e2abd0afe439"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:07:54 crc kubenswrapper[4933]: I0122 06:07:54.150188 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba039abf-7ec7-43bb-ad72-e2abd0afe439-kube-api-access-78wqq" (OuterVolumeSpecName: "kube-api-access-78wqq") pod "ba039abf-7ec7-43bb-ad72-e2abd0afe439" (UID: "ba039abf-7ec7-43bb-ad72-e2abd0afe439"). 
InnerVolumeSpecName "kube-api-access-78wqq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:07:54 crc kubenswrapper[4933]: I0122 06:07:54.150616 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5467292b-d832-437c-9f0f-41441d0da350-kube-api-access-s4xpd" (OuterVolumeSpecName: "kube-api-access-s4xpd") pod "5467292b-d832-437c-9f0f-41441d0da350" (UID: "5467292b-d832-437c-9f0f-41441d0da350"). InnerVolumeSpecName "kube-api-access-s4xpd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:07:54 crc kubenswrapper[4933]: I0122 06:07:54.170318 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5467292b-d832-437c-9f0f-41441d0da350-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5467292b-d832-437c-9f0f-41441d0da350" (UID: "5467292b-d832-437c-9f0f-41441d0da350"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:07:54 crc kubenswrapper[4933]: I0122 06:07:54.170803 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba039abf-7ec7-43bb-ad72-e2abd0afe439-config-data" (OuterVolumeSpecName: "config-data") pod "ba039abf-7ec7-43bb-ad72-e2abd0afe439" (UID: "ba039abf-7ec7-43bb-ad72-e2abd0afe439"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:07:54 crc kubenswrapper[4933]: I0122 06:07:54.174354 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba039abf-7ec7-43bb-ad72-e2abd0afe439-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ba039abf-7ec7-43bb-ad72-e2abd0afe439" (UID: "ba039abf-7ec7-43bb-ad72-e2abd0afe439"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:07:54 crc kubenswrapper[4933]: I0122 06:07:54.186738 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5467292b-d832-437c-9f0f-41441d0da350-config-data" (OuterVolumeSpecName: "config-data") pod "5467292b-d832-437c-9f0f-41441d0da350" (UID: "5467292b-d832-437c-9f0f-41441d0da350"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:07:54 crc kubenswrapper[4933]: I0122 06:07:54.246823 4933 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba039abf-7ec7-43bb-ad72-e2abd0afe439-logs\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:54 crc kubenswrapper[4933]: I0122 06:07:54.247125 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba039abf-7ec7-43bb-ad72-e2abd0afe439-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:54 crc kubenswrapper[4933]: I0122 06:07:54.247218 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba039abf-7ec7-43bb-ad72-e2abd0afe439-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:54 crc kubenswrapper[4933]: I0122 06:07:54.247323 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5467292b-d832-437c-9f0f-41441d0da350-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:54 crc kubenswrapper[4933]: I0122 06:07:54.247399 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4xpd\" (UniqueName: \"kubernetes.io/projected/5467292b-d832-437c-9f0f-41441d0da350-kube-api-access-s4xpd\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:54 crc kubenswrapper[4933]: I0122 06:07:54.247472 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-78wqq\" (UniqueName: \"kubernetes.io/projected/ba039abf-7ec7-43bb-ad72-e2abd0afe439-kube-api-access-78wqq\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:54 crc kubenswrapper[4933]: I0122 06:07:54.247543 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5467292b-d832-437c-9f0f-41441d0da350-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.012518 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.013117 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.049084 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.061027 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.076713 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.089157 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.099918 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 22 06:07:55 crc kubenswrapper[4933]: E0122 06:07:55.100551 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5467292b-d832-437c-9f0f-41441d0da350" containerName="nova-cell1-novncproxy-novncproxy" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.100583 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="5467292b-d832-437c-9f0f-41441d0da350" containerName="nova-cell1-novncproxy-novncproxy" Jan 22 06:07:55 crc kubenswrapper[4933]: E0122 06:07:55.100614 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba039abf-7ec7-43bb-ad72-e2abd0afe439" containerName="nova-metadata-log" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.100629 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba039abf-7ec7-43bb-ad72-e2abd0afe439" containerName="nova-metadata-log" Jan 22 06:07:55 crc kubenswrapper[4933]: E0122 06:07:55.100652 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba039abf-7ec7-43bb-ad72-e2abd0afe439" containerName="nova-metadata-metadata" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.100666 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba039abf-7ec7-43bb-ad72-e2abd0afe439" containerName="nova-metadata-metadata" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.101039 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba039abf-7ec7-43bb-ad72-e2abd0afe439" containerName="nova-metadata-log" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.101113 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba039abf-7ec7-43bb-ad72-e2abd0afe439" containerName="nova-metadata-metadata" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.101132 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="5467292b-d832-437c-9f0f-41441d0da350" containerName="nova-cell1-novncproxy-novncproxy" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.102773 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.107144 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.108240 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.108576 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.109722 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.119298 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.119545 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.120804 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.121033 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.131655 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.163564 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/16fb5bb3-209b-4796-9dec-493ff7db88c5-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"16fb5bb3-209b-4796-9dec-493ff7db88c5\") " pod="openstack/nova-metadata-0" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.163636 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/16fb5bb3-209b-4796-9dec-493ff7db88c5-logs\") pod \"nova-metadata-0\" (UID: \"16fb5bb3-209b-4796-9dec-493ff7db88c5\") " pod="openstack/nova-metadata-0" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.163693 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-njkfk\" (UniqueName: \"kubernetes.io/projected/eb84c6f9-457d-46df-a4de-b5bfe612e945-kube-api-access-njkfk\") pod \"nova-cell1-novncproxy-0\" (UID: \"eb84c6f9-457d-46df-a4de-b5bfe612e945\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.163721 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb84c6f9-457d-46df-a4de-b5bfe612e945-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"eb84c6f9-457d-46df-a4de-b5bfe612e945\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.163744 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16fb5bb3-209b-4796-9dec-493ff7db88c5-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"16fb5bb3-209b-4796-9dec-493ff7db88c5\") " pod="openstack/nova-metadata-0" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.163819 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16fb5bb3-209b-4796-9dec-493ff7db88c5-config-data\") pod \"nova-metadata-0\" (UID: \"16fb5bb3-209b-4796-9dec-493ff7db88c5\") " pod="openstack/nova-metadata-0" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.163848 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m84mj\" (UniqueName: \"kubernetes.io/projected/16fb5bb3-209b-4796-9dec-493ff7db88c5-kube-api-access-m84mj\") pod \"nova-metadata-0\" (UID: 
\"16fb5bb3-209b-4796-9dec-493ff7db88c5\") " pod="openstack/nova-metadata-0" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.163917 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/eb84c6f9-457d-46df-a4de-b5bfe612e945-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"eb84c6f9-457d-46df-a4de-b5bfe612e945\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.163956 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/eb84c6f9-457d-46df-a4de-b5bfe612e945-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"eb84c6f9-457d-46df-a4de-b5bfe612e945\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.163996 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb84c6f9-457d-46df-a4de-b5bfe612e945-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"eb84c6f9-457d-46df-a4de-b5bfe612e945\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.264636 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/16fb5bb3-209b-4796-9dec-493ff7db88c5-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"16fb5bb3-209b-4796-9dec-493ff7db88c5\") " pod="openstack/nova-metadata-0" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.264901 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/16fb5bb3-209b-4796-9dec-493ff7db88c5-logs\") pod \"nova-metadata-0\" (UID: \"16fb5bb3-209b-4796-9dec-493ff7db88c5\") " pod="openstack/nova-metadata-0" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.264930 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-njkfk\" (UniqueName: \"kubernetes.io/projected/eb84c6f9-457d-46df-a4de-b5bfe612e945-kube-api-access-njkfk\") pod \"nova-cell1-novncproxy-0\" (UID: \"eb84c6f9-457d-46df-a4de-b5bfe612e945\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.264947 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb84c6f9-457d-46df-a4de-b5bfe612e945-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"eb84c6f9-457d-46df-a4de-b5bfe612e945\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.264964 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16fb5bb3-209b-4796-9dec-493ff7db88c5-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"16fb5bb3-209b-4796-9dec-493ff7db88c5\") " pod="openstack/nova-metadata-0" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.265009 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16fb5bb3-209b-4796-9dec-493ff7db88c5-config-data\") pod \"nova-metadata-0\" (UID: \"16fb5bb3-209b-4796-9dec-493ff7db88c5\") " pod="openstack/nova-metadata-0" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 
06:07:55.265026 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m84mj\" (UniqueName: \"kubernetes.io/projected/16fb5bb3-209b-4796-9dec-493ff7db88c5-kube-api-access-m84mj\") pod \"nova-metadata-0\" (UID: \"16fb5bb3-209b-4796-9dec-493ff7db88c5\") " pod="openstack/nova-metadata-0" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.265062 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/eb84c6f9-457d-46df-a4de-b5bfe612e945-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"eb84c6f9-457d-46df-a4de-b5bfe612e945\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.265120 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/eb84c6f9-457d-46df-a4de-b5bfe612e945-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"eb84c6f9-457d-46df-a4de-b5bfe612e945\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.265141 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb84c6f9-457d-46df-a4de-b5bfe612e945-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"eb84c6f9-457d-46df-a4de-b5bfe612e945\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.265854 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/16fb5bb3-209b-4796-9dec-493ff7db88c5-logs\") pod \"nova-metadata-0\" (UID: \"16fb5bb3-209b-4796-9dec-493ff7db88c5\") " pod="openstack/nova-metadata-0" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.268762 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/eb84c6f9-457d-46df-a4de-b5bfe612e945-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"eb84c6f9-457d-46df-a4de-b5bfe612e945\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.268770 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/16fb5bb3-209b-4796-9dec-493ff7db88c5-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"16fb5bb3-209b-4796-9dec-493ff7db88c5\") " pod="openstack/nova-metadata-0" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.268837 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16fb5bb3-209b-4796-9dec-493ff7db88c5-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"16fb5bb3-209b-4796-9dec-493ff7db88c5\") " pod="openstack/nova-metadata-0" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.270215 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/eb84c6f9-457d-46df-a4de-b5bfe612e945-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"eb84c6f9-457d-46df-a4de-b5bfe612e945\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.270884 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/16fb5bb3-209b-4796-9dec-493ff7db88c5-config-data\") pod \"nova-metadata-0\" (UID: \"16fb5bb3-209b-4796-9dec-493ff7db88c5\") " pod="openstack/nova-metadata-0" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.274163 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb84c6f9-457d-46df-a4de-b5bfe612e945-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"eb84c6f9-457d-46df-a4de-b5bfe612e945\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.276375 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb84c6f9-457d-46df-a4de-b5bfe612e945-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"eb84c6f9-457d-46df-a4de-b5bfe612e945\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.283718 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-njkfk\" (UniqueName: \"kubernetes.io/projected/eb84c6f9-457d-46df-a4de-b5bfe612e945-kube-api-access-njkfk\") pod \"nova-cell1-novncproxy-0\" (UID: \"eb84c6f9-457d-46df-a4de-b5bfe612e945\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.285933 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m84mj\" (UniqueName: \"kubernetes.io/projected/16fb5bb3-209b-4796-9dec-493ff7db88c5-kube-api-access-m84mj\") pod \"nova-metadata-0\" (UID: \"16fb5bb3-209b-4796-9dec-493ff7db88c5\") " pod="openstack/nova-metadata-0" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.438907 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.445439 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.826117 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 22 06:07:55 crc kubenswrapper[4933]: I0122 06:07:55.920060 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 06:07:55 crc kubenswrapper[4933]: W0122 06:07:55.927248 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod16fb5bb3_209b_4796_9dec_493ff7db88c5.slice/crio-20cdf7b7e747fbfe47522131c2c4bb45c6cf78d62e6d1b18bcd891c4ed76100b WatchSource:0}: Error finding container 20cdf7b7e747fbfe47522131c2c4bb45c6cf78d62e6d1b18bcd891c4ed76100b: Status 404 returned error can't find the container with id 20cdf7b7e747fbfe47522131c2c4bb45c6cf78d62e6d1b18bcd891c4ed76100b Jan 22 06:07:56 crc kubenswrapper[4933]: I0122 06:07:56.022144 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"eb84c6f9-457d-46df-a4de-b5bfe612e945","Type":"ContainerStarted","Data":"10bf0fafd478f81ff195714f9c84a05cbe144498335ff4791c6fbfcd8b9b7275"} Jan 22 06:07:56 crc kubenswrapper[4933]: I0122 06:07:56.024110 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"16fb5bb3-209b-4796-9dec-493ff7db88c5","Type":"ContainerStarted","Data":"20cdf7b7e747fbfe47522131c2c4bb45c6cf78d62e6d1b18bcd891c4ed76100b"} Jan 22 06:07:56 crc kubenswrapper[4933]: I0122 06:07:56.526657 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5467292b-d832-437c-9f0f-41441d0da350" path="/var/lib/kubelet/pods/5467292b-d832-437c-9f0f-41441d0da350/volumes" Jan 22 06:07:56 crc kubenswrapper[4933]: I0122 06:07:56.528222 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ba039abf-7ec7-43bb-ad72-e2abd0afe439" path="/var/lib/kubelet/pods/ba039abf-7ec7-43bb-ad72-e2abd0afe439/volumes" Jan 22 06:07:57 crc kubenswrapper[4933]: I0122 06:07:57.039991 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"16fb5bb3-209b-4796-9dec-493ff7db88c5","Type":"ContainerStarted","Data":"e6b160829dfa4020312bc79db77e4d8bdf72c9ef8472b02748a3dc1d4c48fcc6"} Jan 22 06:07:57 crc kubenswrapper[4933]: I0122 06:07:57.040057 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"16fb5bb3-209b-4796-9dec-493ff7db88c5","Type":"ContainerStarted","Data":"0e444f954278dd5be49a970362dc635837a430bf7bfaec305702acf410b09db5"} Jan 22 06:07:57 crc kubenswrapper[4933]: I0122 06:07:57.043502 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"eb84c6f9-457d-46df-a4de-b5bfe612e945","Type":"ContainerStarted","Data":"e4c2907944b8cddfe0450299fb215823e03077dd25b111c02c49498256c142f1"} Jan 22 06:07:57 crc kubenswrapper[4933]: I0122 06:07:57.077267 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.07723274 podStartE2EDuration="2.07723274s" podCreationTimestamp="2026-01-22 06:07:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:07:57.066746783 +0000 UTC m=+1324.903872176" watchObservedRunningTime="2026-01-22 06:07:57.07723274 +0000 UTC m=+1324.914358163" Jan 22 06:07:57 crc kubenswrapper[4933]: I0122 
06:07:57.098820 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.098799653 podStartE2EDuration="2.098799653s" podCreationTimestamp="2026-01-22 06:07:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:07:57.088401669 +0000 UTC m=+1324.925527052" watchObservedRunningTime="2026-01-22 06:07:57.098799653 +0000 UTC m=+1324.935925006" Jan 22 06:07:57 crc kubenswrapper[4933]: I0122 06:07:57.292199 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 22 06:07:57 crc kubenswrapper[4933]: I0122 06:07:57.293546 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 22 06:07:57 crc kubenswrapper[4933]: I0122 06:07:57.293628 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 22 06:07:57 crc kubenswrapper[4933]: I0122 06:07:57.299837 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 22 06:07:58 crc kubenswrapper[4933]: I0122 06:07:58.055872 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 22 06:07:58 crc kubenswrapper[4933]: I0122 06:07:58.062959 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 22 06:07:58 crc kubenswrapper[4933]: I0122 06:07:58.274559 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5ddd577785-b24fs"] Jan 22 06:07:58 crc kubenswrapper[4933]: I0122 06:07:58.275959 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ddd577785-b24fs" Jan 22 06:07:58 crc kubenswrapper[4933]: I0122 06:07:58.299352 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5ddd577785-b24fs"] Jan 22 06:07:58 crc kubenswrapper[4933]: I0122 06:07:58.332042 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q5t62\" (UniqueName: \"kubernetes.io/projected/810720aa-b861-48e5-bd66-b1544f4f683a-kube-api-access-q5t62\") pod \"dnsmasq-dns-5ddd577785-b24fs\" (UID: \"810720aa-b861-48e5-bd66-b1544f4f683a\") " pod="openstack/dnsmasq-dns-5ddd577785-b24fs" Jan 22 06:07:58 crc kubenswrapper[4933]: I0122 06:07:58.332137 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/810720aa-b861-48e5-bd66-b1544f4f683a-dns-svc\") pod \"dnsmasq-dns-5ddd577785-b24fs\" (UID: \"810720aa-b861-48e5-bd66-b1544f4f683a\") " pod="openstack/dnsmasq-dns-5ddd577785-b24fs" Jan 22 06:07:58 crc kubenswrapper[4933]: I0122 06:07:58.332159 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/810720aa-b861-48e5-bd66-b1544f4f683a-ovsdbserver-sb\") pod \"dnsmasq-dns-5ddd577785-b24fs\" (UID: \"810720aa-b861-48e5-bd66-b1544f4f683a\") " pod="openstack/dnsmasq-dns-5ddd577785-b24fs" Jan 22 06:07:58 crc kubenswrapper[4933]: I0122 06:07:58.332179 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/810720aa-b861-48e5-bd66-b1544f4f683a-dns-swift-storage-0\") pod \"dnsmasq-dns-5ddd577785-b24fs\" (UID: 
\"810720aa-b861-48e5-bd66-b1544f4f683a\") " pod="openstack/dnsmasq-dns-5ddd577785-b24fs" Jan 22 06:07:58 crc kubenswrapper[4933]: I0122 06:07:58.332271 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/810720aa-b861-48e5-bd66-b1544f4f683a-config\") pod \"dnsmasq-dns-5ddd577785-b24fs\" (UID: \"810720aa-b861-48e5-bd66-b1544f4f683a\") " pod="openstack/dnsmasq-dns-5ddd577785-b24fs" Jan 22 06:07:58 crc kubenswrapper[4933]: I0122 06:07:58.332300 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/810720aa-b861-48e5-bd66-b1544f4f683a-ovsdbserver-nb\") pod \"dnsmasq-dns-5ddd577785-b24fs\" (UID: \"810720aa-b861-48e5-bd66-b1544f4f683a\") " pod="openstack/dnsmasq-dns-5ddd577785-b24fs" Jan 22 06:07:58 crc kubenswrapper[4933]: I0122 06:07:58.434039 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/810720aa-b861-48e5-bd66-b1544f4f683a-ovsdbserver-nb\") pod \"dnsmasq-dns-5ddd577785-b24fs\" (UID: \"810720aa-b861-48e5-bd66-b1544f4f683a\") " pod="openstack/dnsmasq-dns-5ddd577785-b24fs" Jan 22 06:07:58 crc kubenswrapper[4933]: I0122 06:07:58.434184 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q5t62\" (UniqueName: \"kubernetes.io/projected/810720aa-b861-48e5-bd66-b1544f4f683a-kube-api-access-q5t62\") pod \"dnsmasq-dns-5ddd577785-b24fs\" (UID: \"810720aa-b861-48e5-bd66-b1544f4f683a\") " pod="openstack/dnsmasq-dns-5ddd577785-b24fs" Jan 22 06:07:58 crc kubenswrapper[4933]: I0122 06:07:58.434243 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/810720aa-b861-48e5-bd66-b1544f4f683a-dns-svc\") pod \"dnsmasq-dns-5ddd577785-b24fs\" (UID: \"810720aa-b861-48e5-bd66-b1544f4f683a\") " pod="openstack/dnsmasq-dns-5ddd577785-b24fs" Jan 22 06:07:58 crc kubenswrapper[4933]: I0122 06:07:58.434262 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/810720aa-b861-48e5-bd66-b1544f4f683a-ovsdbserver-sb\") pod \"dnsmasq-dns-5ddd577785-b24fs\" (UID: \"810720aa-b861-48e5-bd66-b1544f4f683a\") " pod="openstack/dnsmasq-dns-5ddd577785-b24fs" Jan 22 06:07:58 crc kubenswrapper[4933]: I0122 06:07:58.434282 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/810720aa-b861-48e5-bd66-b1544f4f683a-dns-swift-storage-0\") pod \"dnsmasq-dns-5ddd577785-b24fs\" (UID: \"810720aa-b861-48e5-bd66-b1544f4f683a\") " pod="openstack/dnsmasq-dns-5ddd577785-b24fs" Jan 22 06:07:58 crc kubenswrapper[4933]: I0122 06:07:58.434935 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/810720aa-b861-48e5-bd66-b1544f4f683a-ovsdbserver-nb\") pod \"dnsmasq-dns-5ddd577785-b24fs\" (UID: \"810720aa-b861-48e5-bd66-b1544f4f683a\") " pod="openstack/dnsmasq-dns-5ddd577785-b24fs" Jan 22 06:07:58 crc kubenswrapper[4933]: I0122 06:07:58.434942 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/810720aa-b861-48e5-bd66-b1544f4f683a-ovsdbserver-sb\") pod \"dnsmasq-dns-5ddd577785-b24fs\" (UID: 
\"810720aa-b861-48e5-bd66-b1544f4f683a\") " pod="openstack/dnsmasq-dns-5ddd577785-b24fs" Jan 22 06:07:58 crc kubenswrapper[4933]: I0122 06:07:58.435140 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/810720aa-b861-48e5-bd66-b1544f4f683a-dns-swift-storage-0\") pod \"dnsmasq-dns-5ddd577785-b24fs\" (UID: \"810720aa-b861-48e5-bd66-b1544f4f683a\") " pod="openstack/dnsmasq-dns-5ddd577785-b24fs" Jan 22 06:07:58 crc kubenswrapper[4933]: I0122 06:07:58.435219 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/810720aa-b861-48e5-bd66-b1544f4f683a-dns-svc\") pod \"dnsmasq-dns-5ddd577785-b24fs\" (UID: \"810720aa-b861-48e5-bd66-b1544f4f683a\") " pod="openstack/dnsmasq-dns-5ddd577785-b24fs" Jan 22 06:07:58 crc kubenswrapper[4933]: I0122 06:07:58.435227 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/810720aa-b861-48e5-bd66-b1544f4f683a-config\") pod \"dnsmasq-dns-5ddd577785-b24fs\" (UID: \"810720aa-b861-48e5-bd66-b1544f4f683a\") " pod="openstack/dnsmasq-dns-5ddd577785-b24fs" Jan 22 06:07:58 crc kubenswrapper[4933]: I0122 06:07:58.435822 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/810720aa-b861-48e5-bd66-b1544f4f683a-config\") pod \"dnsmasq-dns-5ddd577785-b24fs\" (UID: \"810720aa-b861-48e5-bd66-b1544f4f683a\") " pod="openstack/dnsmasq-dns-5ddd577785-b24fs" Jan 22 06:07:58 crc kubenswrapper[4933]: I0122 06:07:58.460847 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q5t62\" (UniqueName: \"kubernetes.io/projected/810720aa-b861-48e5-bd66-b1544f4f683a-kube-api-access-q5t62\") pod \"dnsmasq-dns-5ddd577785-b24fs\" (UID: \"810720aa-b861-48e5-bd66-b1544f4f683a\") " pod="openstack/dnsmasq-dns-5ddd577785-b24fs" Jan 22 06:07:58 crc kubenswrapper[4933]: I0122 06:07:58.604717 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5ddd577785-b24fs" Jan 22 06:07:59 crc kubenswrapper[4933]: I0122 06:07:59.110885 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5ddd577785-b24fs"] Jan 22 06:08:00 crc kubenswrapper[4933]: I0122 06:08:00.073633 4933 generic.go:334] "Generic (PLEG): container finished" podID="810720aa-b861-48e5-bd66-b1544f4f683a" containerID="1d8140b090d28240c97f862586cc231f7cc391744e9a708646c8fbb1d6dc838a" exitCode=0 Jan 22 06:08:00 crc kubenswrapper[4933]: I0122 06:08:00.073726 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ddd577785-b24fs" event={"ID":"810720aa-b861-48e5-bd66-b1544f4f683a","Type":"ContainerDied","Data":"1d8140b090d28240c97f862586cc231f7cc391744e9a708646c8fbb1d6dc838a"} Jan 22 06:08:00 crc kubenswrapper[4933]: I0122 06:08:00.073976 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ddd577785-b24fs" event={"ID":"810720aa-b861-48e5-bd66-b1544f4f683a","Type":"ContainerStarted","Data":"3c3bbc0de597556753dea50b495143395c55d843ef905670d4ed10e80d26ec19"} Jan 22 06:08:00 crc kubenswrapper[4933]: I0122 06:08:00.439993 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 22 06:08:00 crc kubenswrapper[4933]: I0122 06:08:00.440045 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 22 06:08:00 crc kubenswrapper[4933]: I0122 06:08:00.446585 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Jan 22 06:08:00 crc kubenswrapper[4933]: I0122 06:08:00.455275 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:08:00 crc kubenswrapper[4933]: I0122 06:08:00.455809 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d083996a-9177-451f-ab93-ea1beffeece6" containerName="sg-core" containerID="cri-o://f4312982663741b11b776a870286db1dabd8cedf9122e8e19cf6d7c96825fc87" gracePeriod=30 Jan 22 06:08:00 crc kubenswrapper[4933]: I0122 06:08:00.455937 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d083996a-9177-451f-ab93-ea1beffeece6" containerName="ceilometer-central-agent" containerID="cri-o://1fd752b9d239217899d7fef10c605fee1db8714a148703fe3f1509fbfeb62517" gracePeriod=30 Jan 22 06:08:00 crc kubenswrapper[4933]: I0122 06:08:00.455837 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d083996a-9177-451f-ab93-ea1beffeece6" containerName="ceilometer-notification-agent" containerID="cri-o://e7ae00579f01c286af0183394bf4c6e8839f69cb8994d083e06abfd88a61c69a" gracePeriod=30 Jan 22 06:08:00 crc kubenswrapper[4933]: I0122 06:08:00.455944 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d083996a-9177-451f-ab93-ea1beffeece6" containerName="proxy-httpd" containerID="cri-o://2683f7e09e142a5fba46404b1d9ea7e4c153717249c0dac778cbb2aba50b8637" gracePeriod=30 Jan 22 06:08:00 crc kubenswrapper[4933]: I0122 06:08:00.469300 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="d083996a-9177-451f-ab93-ea1beffeece6" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.196:3000/\": EOF" Jan 22 06:08:01 crc kubenswrapper[4933]: I0122 06:08:01.085830 4933 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/dnsmasq-dns-5ddd577785-b24fs" event={"ID":"810720aa-b861-48e5-bd66-b1544f4f683a","Type":"ContainerStarted","Data":"be973f0c711a601539c68dbefa84d0f75359d8153ac8c7d3bb7dc0a944ae2556"} Jan 22 06:08:01 crc kubenswrapper[4933]: I0122 06:08:01.086285 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5ddd577785-b24fs" Jan 22 06:08:01 crc kubenswrapper[4933]: I0122 06:08:01.097670 4933 generic.go:334] "Generic (PLEG): container finished" podID="d083996a-9177-451f-ab93-ea1beffeece6" containerID="2683f7e09e142a5fba46404b1d9ea7e4c153717249c0dac778cbb2aba50b8637" exitCode=0 Jan 22 06:08:01 crc kubenswrapper[4933]: I0122 06:08:01.097722 4933 generic.go:334] "Generic (PLEG): container finished" podID="d083996a-9177-451f-ab93-ea1beffeece6" containerID="f4312982663741b11b776a870286db1dabd8cedf9122e8e19cf6d7c96825fc87" exitCode=2 Jan 22 06:08:01 crc kubenswrapper[4933]: I0122 06:08:01.097732 4933 generic.go:334] "Generic (PLEG): container finished" podID="d083996a-9177-451f-ab93-ea1beffeece6" containerID="1fd752b9d239217899d7fef10c605fee1db8714a148703fe3f1509fbfeb62517" exitCode=0 Jan 22 06:08:01 crc kubenswrapper[4933]: I0122 06:08:01.097764 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d083996a-9177-451f-ab93-ea1beffeece6","Type":"ContainerDied","Data":"2683f7e09e142a5fba46404b1d9ea7e4c153717249c0dac778cbb2aba50b8637"} Jan 22 06:08:01 crc kubenswrapper[4933]: I0122 06:08:01.097816 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d083996a-9177-451f-ab93-ea1beffeece6","Type":"ContainerDied","Data":"f4312982663741b11b776a870286db1dabd8cedf9122e8e19cf6d7c96825fc87"} Jan 22 06:08:01 crc kubenswrapper[4933]: I0122 06:08:01.097830 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d083996a-9177-451f-ab93-ea1beffeece6","Type":"ContainerDied","Data":"1fd752b9d239217899d7fef10c605fee1db8714a148703fe3f1509fbfeb62517"} Jan 22 06:08:01 crc kubenswrapper[4933]: I0122 06:08:01.134449 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5ddd577785-b24fs" podStartSLOduration=3.134429154 podStartE2EDuration="3.134429154s" podCreationTimestamp="2026-01-22 06:07:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:08:01.122183653 +0000 UTC m=+1328.959309026" watchObservedRunningTime="2026-01-22 06:08:01.134429154 +0000 UTC m=+1328.971554507" Jan 22 06:08:01 crc kubenswrapper[4933]: I0122 06:08:01.366745 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 22 06:08:01 crc kubenswrapper[4933]: I0122 06:08:01.366959 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="d2cec7e1-f904-41dd-9ce9-0ad88b68e717" containerName="nova-api-log" containerID="cri-o://9679f838ab691ab0b7cf0f16a706dd64da7b9ac46ada87fb962e9985a04f62db" gracePeriod=30 Jan 22 06:08:01 crc kubenswrapper[4933]: I0122 06:08:01.367119 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="d2cec7e1-f904-41dd-9ce9-0ad88b68e717" containerName="nova-api-api" containerID="cri-o://912e5c54d9b4f5e3b7fc021ca697f8cdbd9d313b028fda518e1e1ce6f83761cd" gracePeriod=30 Jan 22 06:08:02 crc kubenswrapper[4933]: I0122 06:08:02.108664 4933 generic.go:334] "Generic (PLEG): 
container finished" podID="d2cec7e1-f904-41dd-9ce9-0ad88b68e717" containerID="9679f838ab691ab0b7cf0f16a706dd64da7b9ac46ada87fb962e9985a04f62db" exitCode=143 Jan 22 06:08:02 crc kubenswrapper[4933]: I0122 06:08:02.108760 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d2cec7e1-f904-41dd-9ce9-0ad88b68e717","Type":"ContainerDied","Data":"9679f838ab691ab0b7cf0f16a706dd64da7b9ac46ada87fb962e9985a04f62db"} Jan 22 06:08:02 crc kubenswrapper[4933]: I0122 06:08:02.790542 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 06:08:02 crc kubenswrapper[4933]: I0122 06:08:02.930603 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/d083996a-9177-451f-ab93-ea1beffeece6-ceilometer-tls-certs\") pod \"d083996a-9177-451f-ab93-ea1beffeece6\" (UID: \"d083996a-9177-451f-ab93-ea1beffeece6\") " Jan 22 06:08:02 crc kubenswrapper[4933]: I0122 06:08:02.930949 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d083996a-9177-451f-ab93-ea1beffeece6-config-data\") pod \"d083996a-9177-451f-ab93-ea1beffeece6\" (UID: \"d083996a-9177-451f-ab93-ea1beffeece6\") " Jan 22 06:08:02 crc kubenswrapper[4933]: I0122 06:08:02.931003 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d083996a-9177-451f-ab93-ea1beffeece6-sg-core-conf-yaml\") pod \"d083996a-9177-451f-ab93-ea1beffeece6\" (UID: \"d083996a-9177-451f-ab93-ea1beffeece6\") " Jan 22 06:08:02 crc kubenswrapper[4933]: I0122 06:08:02.931132 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d083996a-9177-451f-ab93-ea1beffeece6-scripts\") pod \"d083996a-9177-451f-ab93-ea1beffeece6\" (UID: \"d083996a-9177-451f-ab93-ea1beffeece6\") " Jan 22 06:08:02 crc kubenswrapper[4933]: I0122 06:08:02.931249 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d083996a-9177-451f-ab93-ea1beffeece6-log-httpd\") pod \"d083996a-9177-451f-ab93-ea1beffeece6\" (UID: \"d083996a-9177-451f-ab93-ea1beffeece6\") " Jan 22 06:08:02 crc kubenswrapper[4933]: I0122 06:08:02.931299 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d083996a-9177-451f-ab93-ea1beffeece6-run-httpd\") pod \"d083996a-9177-451f-ab93-ea1beffeece6\" (UID: \"d083996a-9177-451f-ab93-ea1beffeece6\") " Jan 22 06:08:02 crc kubenswrapper[4933]: I0122 06:08:02.931355 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kqwmd\" (UniqueName: \"kubernetes.io/projected/d083996a-9177-451f-ab93-ea1beffeece6-kube-api-access-kqwmd\") pod \"d083996a-9177-451f-ab93-ea1beffeece6\" (UID: \"d083996a-9177-451f-ab93-ea1beffeece6\") " Jan 22 06:08:02 crc kubenswrapper[4933]: I0122 06:08:02.931379 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d083996a-9177-451f-ab93-ea1beffeece6-combined-ca-bundle\") pod \"d083996a-9177-451f-ab93-ea1beffeece6\" (UID: \"d083996a-9177-451f-ab93-ea1beffeece6\") " Jan 22 06:08:02 crc kubenswrapper[4933]: I0122 06:08:02.932043 4933 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d083996a-9177-451f-ab93-ea1beffeece6-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "d083996a-9177-451f-ab93-ea1beffeece6" (UID: "d083996a-9177-451f-ab93-ea1beffeece6"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:08:02 crc kubenswrapper[4933]: I0122 06:08:02.932439 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d083996a-9177-451f-ab93-ea1beffeece6-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "d083996a-9177-451f-ab93-ea1beffeece6" (UID: "d083996a-9177-451f-ab93-ea1beffeece6"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:08:02 crc kubenswrapper[4933]: I0122 06:08:02.937423 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d083996a-9177-451f-ab93-ea1beffeece6-scripts" (OuterVolumeSpecName: "scripts") pod "d083996a-9177-451f-ab93-ea1beffeece6" (UID: "d083996a-9177-451f-ab93-ea1beffeece6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:08:02 crc kubenswrapper[4933]: I0122 06:08:02.939295 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d083996a-9177-451f-ab93-ea1beffeece6-kube-api-access-kqwmd" (OuterVolumeSpecName: "kube-api-access-kqwmd") pod "d083996a-9177-451f-ab93-ea1beffeece6" (UID: "d083996a-9177-451f-ab93-ea1beffeece6"). InnerVolumeSpecName "kube-api-access-kqwmd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.038048 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d083996a-9177-451f-ab93-ea1beffeece6-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "d083996a-9177-451f-ab93-ea1beffeece6" (UID: "d083996a-9177-451f-ab93-ea1beffeece6"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.048175 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d083996a-9177-451f-ab93-ea1beffeece6-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.048211 4933 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d083996a-9177-451f-ab93-ea1beffeece6-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.048224 4933 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d083996a-9177-451f-ab93-ea1beffeece6-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.048360 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kqwmd\" (UniqueName: \"kubernetes.io/projected/d083996a-9177-451f-ab93-ea1beffeece6-kube-api-access-kqwmd\") on node \"crc\" DevicePath \"\"" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.048377 4933 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d083996a-9177-451f-ab93-ea1beffeece6-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.121192 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d083996a-9177-451f-ab93-ea1beffeece6-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "d083996a-9177-451f-ab93-ea1beffeece6" (UID: "d083996a-9177-451f-ab93-ea1beffeece6"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.137568 4933 generic.go:334] "Generic (PLEG): container finished" podID="d083996a-9177-451f-ab93-ea1beffeece6" containerID="e7ae00579f01c286af0183394bf4c6e8839f69cb8994d083e06abfd88a61c69a" exitCode=0 Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.137614 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d083996a-9177-451f-ab93-ea1beffeece6","Type":"ContainerDied","Data":"e7ae00579f01c286af0183394bf4c6e8839f69cb8994d083e06abfd88a61c69a"} Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.137640 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d083996a-9177-451f-ab93-ea1beffeece6","Type":"ContainerDied","Data":"b2c3a2c1e8fd82480c088af0cec0ef6935b24a21fa68c346f564f63b608518e6"} Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.137657 4933 scope.go:117] "RemoveContainer" containerID="2683f7e09e142a5fba46404b1d9ea7e4c153717249c0dac778cbb2aba50b8637" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.137805 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.141551 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d083996a-9177-451f-ab93-ea1beffeece6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d083996a-9177-451f-ab93-ea1beffeece6" (UID: "d083996a-9177-451f-ab93-ea1beffeece6"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.153779 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d083996a-9177-451f-ab93-ea1beffeece6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.153806 4933 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/d083996a-9177-451f-ab93-ea1beffeece6-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.188726 4933 scope.go:117] "RemoveContainer" containerID="f4312982663741b11b776a870286db1dabd8cedf9122e8e19cf6d7c96825fc87" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.226239 4933 scope.go:117] "RemoveContainer" containerID="e7ae00579f01c286af0183394bf4c6e8839f69cb8994d083e06abfd88a61c69a" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.229240 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d083996a-9177-451f-ab93-ea1beffeece6-config-data" (OuterVolumeSpecName: "config-data") pod "d083996a-9177-451f-ab93-ea1beffeece6" (UID: "d083996a-9177-451f-ab93-ea1beffeece6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.252653 4933 scope.go:117] "RemoveContainer" containerID="1fd752b9d239217899d7fef10c605fee1db8714a148703fe3f1509fbfeb62517" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.254956 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d083996a-9177-451f-ab93-ea1beffeece6-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.300305 4933 scope.go:117] "RemoveContainer" containerID="2683f7e09e142a5fba46404b1d9ea7e4c153717249c0dac778cbb2aba50b8637" Jan 22 06:08:03 crc kubenswrapper[4933]: E0122 06:08:03.301050 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2683f7e09e142a5fba46404b1d9ea7e4c153717249c0dac778cbb2aba50b8637\": container with ID starting with 2683f7e09e142a5fba46404b1d9ea7e4c153717249c0dac778cbb2aba50b8637 not found: ID does not exist" containerID="2683f7e09e142a5fba46404b1d9ea7e4c153717249c0dac778cbb2aba50b8637" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.301215 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2683f7e09e142a5fba46404b1d9ea7e4c153717249c0dac778cbb2aba50b8637"} err="failed to get container status \"2683f7e09e142a5fba46404b1d9ea7e4c153717249c0dac778cbb2aba50b8637\": rpc error: code = NotFound desc = could not find container \"2683f7e09e142a5fba46404b1d9ea7e4c153717249c0dac778cbb2aba50b8637\": container with ID starting with 2683f7e09e142a5fba46404b1d9ea7e4c153717249c0dac778cbb2aba50b8637 not found: ID does not exist" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.301338 4933 scope.go:117] "RemoveContainer" containerID="f4312982663741b11b776a870286db1dabd8cedf9122e8e19cf6d7c96825fc87" Jan 22 06:08:03 crc kubenswrapper[4933]: E0122 06:08:03.301719 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f4312982663741b11b776a870286db1dabd8cedf9122e8e19cf6d7c96825fc87\": container with ID starting with 
f4312982663741b11b776a870286db1dabd8cedf9122e8e19cf6d7c96825fc87 not found: ID does not exist" containerID="f4312982663741b11b776a870286db1dabd8cedf9122e8e19cf6d7c96825fc87" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.301852 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f4312982663741b11b776a870286db1dabd8cedf9122e8e19cf6d7c96825fc87"} err="failed to get container status \"f4312982663741b11b776a870286db1dabd8cedf9122e8e19cf6d7c96825fc87\": rpc error: code = NotFound desc = could not find container \"f4312982663741b11b776a870286db1dabd8cedf9122e8e19cf6d7c96825fc87\": container with ID starting with f4312982663741b11b776a870286db1dabd8cedf9122e8e19cf6d7c96825fc87 not found: ID does not exist" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.301973 4933 scope.go:117] "RemoveContainer" containerID="e7ae00579f01c286af0183394bf4c6e8839f69cb8994d083e06abfd88a61c69a" Jan 22 06:08:03 crc kubenswrapper[4933]: E0122 06:08:03.302337 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e7ae00579f01c286af0183394bf4c6e8839f69cb8994d083e06abfd88a61c69a\": container with ID starting with e7ae00579f01c286af0183394bf4c6e8839f69cb8994d083e06abfd88a61c69a not found: ID does not exist" containerID="e7ae00579f01c286af0183394bf4c6e8839f69cb8994d083e06abfd88a61c69a" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.302462 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e7ae00579f01c286af0183394bf4c6e8839f69cb8994d083e06abfd88a61c69a"} err="failed to get container status \"e7ae00579f01c286af0183394bf4c6e8839f69cb8994d083e06abfd88a61c69a\": rpc error: code = NotFound desc = could not find container \"e7ae00579f01c286af0183394bf4c6e8839f69cb8994d083e06abfd88a61c69a\": container with ID starting with e7ae00579f01c286af0183394bf4c6e8839f69cb8994d083e06abfd88a61c69a not found: ID does not exist" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.302557 4933 scope.go:117] "RemoveContainer" containerID="1fd752b9d239217899d7fef10c605fee1db8714a148703fe3f1509fbfeb62517" Jan 22 06:08:03 crc kubenswrapper[4933]: E0122 06:08:03.303097 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1fd752b9d239217899d7fef10c605fee1db8714a148703fe3f1509fbfeb62517\": container with ID starting with 1fd752b9d239217899d7fef10c605fee1db8714a148703fe3f1509fbfeb62517 not found: ID does not exist" containerID="1fd752b9d239217899d7fef10c605fee1db8714a148703fe3f1509fbfeb62517" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.303247 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1fd752b9d239217899d7fef10c605fee1db8714a148703fe3f1509fbfeb62517"} err="failed to get container status \"1fd752b9d239217899d7fef10c605fee1db8714a148703fe3f1509fbfeb62517\": rpc error: code = NotFound desc = could not find container \"1fd752b9d239217899d7fef10c605fee1db8714a148703fe3f1509fbfeb62517\": container with ID starting with 1fd752b9d239217899d7fef10c605fee1db8714a148703fe3f1509fbfeb62517 not found: ID does not exist" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.472887 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.481980 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:08:03 
crc kubenswrapper[4933]: I0122 06:08:03.493480 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:08:03 crc kubenswrapper[4933]: E0122 06:08:03.493870 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d083996a-9177-451f-ab93-ea1beffeece6" containerName="sg-core" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.493884 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="d083996a-9177-451f-ab93-ea1beffeece6" containerName="sg-core" Jan 22 06:08:03 crc kubenswrapper[4933]: E0122 06:08:03.493894 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d083996a-9177-451f-ab93-ea1beffeece6" containerName="proxy-httpd" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.493901 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="d083996a-9177-451f-ab93-ea1beffeece6" containerName="proxy-httpd" Jan 22 06:08:03 crc kubenswrapper[4933]: E0122 06:08:03.493917 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d083996a-9177-451f-ab93-ea1beffeece6" containerName="ceilometer-notification-agent" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.493927 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="d083996a-9177-451f-ab93-ea1beffeece6" containerName="ceilometer-notification-agent" Jan 22 06:08:03 crc kubenswrapper[4933]: E0122 06:08:03.493964 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d083996a-9177-451f-ab93-ea1beffeece6" containerName="ceilometer-central-agent" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.493971 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="d083996a-9177-451f-ab93-ea1beffeece6" containerName="ceilometer-central-agent" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.494141 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="d083996a-9177-451f-ab93-ea1beffeece6" containerName="ceilometer-notification-agent" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.494175 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="d083996a-9177-451f-ab93-ea1beffeece6" containerName="ceilometer-central-agent" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.494182 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="d083996a-9177-451f-ab93-ea1beffeece6" containerName="sg-core" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.494194 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="d083996a-9177-451f-ab93-ea1beffeece6" containerName="proxy-httpd" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.495881 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.498857 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.499056 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.502110 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.513135 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.660563 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c5d9249f-f81c-4306-9bfe-3cdff0db79ed\") " pod="openstack/ceilometer-0" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.661473 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fdx4x\" (UniqueName: \"kubernetes.io/projected/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-kube-api-access-fdx4x\") pod \"ceilometer-0\" (UID: \"c5d9249f-f81c-4306-9bfe-3cdff0db79ed\") " pod="openstack/ceilometer-0" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.661552 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-log-httpd\") pod \"ceilometer-0\" (UID: \"c5d9249f-f81c-4306-9bfe-3cdff0db79ed\") " pod="openstack/ceilometer-0" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.661735 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c5d9249f-f81c-4306-9bfe-3cdff0db79ed\") " pod="openstack/ceilometer-0" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.661876 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-scripts\") pod \"ceilometer-0\" (UID: \"c5d9249f-f81c-4306-9bfe-3cdff0db79ed\") " pod="openstack/ceilometer-0" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.662025 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-config-data\") pod \"ceilometer-0\" (UID: \"c5d9249f-f81c-4306-9bfe-3cdff0db79ed\") " pod="openstack/ceilometer-0" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.662186 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c5d9249f-f81c-4306-9bfe-3cdff0db79ed\") " pod="openstack/ceilometer-0" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.662585 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-run-httpd\") pod \"ceilometer-0\" (UID: \"c5d9249f-f81c-4306-9bfe-3cdff0db79ed\") " pod="openstack/ceilometer-0" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.764645 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fdx4x\" (UniqueName: \"kubernetes.io/projected/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-kube-api-access-fdx4x\") pod \"ceilometer-0\" (UID: \"c5d9249f-f81c-4306-9bfe-3cdff0db79ed\") " pod="openstack/ceilometer-0" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.764696 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-log-httpd\") pod \"ceilometer-0\" (UID: \"c5d9249f-f81c-4306-9bfe-3cdff0db79ed\") " pod="openstack/ceilometer-0" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.764736 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c5d9249f-f81c-4306-9bfe-3cdff0db79ed\") " pod="openstack/ceilometer-0" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.764762 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-scripts\") pod \"ceilometer-0\" (UID: \"c5d9249f-f81c-4306-9bfe-3cdff0db79ed\") " pod="openstack/ceilometer-0" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.764797 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-config-data\") pod \"ceilometer-0\" (UID: \"c5d9249f-f81c-4306-9bfe-3cdff0db79ed\") " pod="openstack/ceilometer-0" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.764828 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c5d9249f-f81c-4306-9bfe-3cdff0db79ed\") " pod="openstack/ceilometer-0" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.764849 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-run-httpd\") pod \"ceilometer-0\" (UID: \"c5d9249f-f81c-4306-9bfe-3cdff0db79ed\") " pod="openstack/ceilometer-0" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.764908 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c5d9249f-f81c-4306-9bfe-3cdff0db79ed\") " pod="openstack/ceilometer-0" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.765487 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-log-httpd\") pod \"ceilometer-0\" (UID: \"c5d9249f-f81c-4306-9bfe-3cdff0db79ed\") " pod="openstack/ceilometer-0" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.765738 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-run-httpd\") pod \"ceilometer-0\" (UID: \"c5d9249f-f81c-4306-9bfe-3cdff0db79ed\") " pod="openstack/ceilometer-0" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.771313 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c5d9249f-f81c-4306-9bfe-3cdff0db79ed\") " pod="openstack/ceilometer-0" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.773358 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-scripts\") pod \"ceilometer-0\" (UID: \"c5d9249f-f81c-4306-9bfe-3cdff0db79ed\") " pod="openstack/ceilometer-0" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.777235 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c5d9249f-f81c-4306-9bfe-3cdff0db79ed\") " pod="openstack/ceilometer-0" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.777880 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c5d9249f-f81c-4306-9bfe-3cdff0db79ed\") " pod="openstack/ceilometer-0" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.779357 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-config-data\") pod \"ceilometer-0\" (UID: \"c5d9249f-f81c-4306-9bfe-3cdff0db79ed\") " pod="openstack/ceilometer-0" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.782478 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fdx4x\" (UniqueName: \"kubernetes.io/projected/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-kube-api-access-fdx4x\") pod \"ceilometer-0\" (UID: \"c5d9249f-f81c-4306-9bfe-3cdff0db79ed\") " pod="openstack/ceilometer-0" Jan 22 06:08:03 crc kubenswrapper[4933]: I0122 06:08:03.824286 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 06:08:04 crc kubenswrapper[4933]: I0122 06:08:04.303024 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:08:04 crc kubenswrapper[4933]: I0122 06:08:04.317589 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:08:04 crc kubenswrapper[4933]: I0122 06:08:04.327557 4933 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 06:08:04 crc kubenswrapper[4933]: I0122 06:08:04.502288 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d083996a-9177-451f-ab93-ea1beffeece6" path="/var/lib/kubelet/pods/d083996a-9177-451f-ab93-ea1beffeece6/volumes" Jan 22 06:08:05 crc kubenswrapper[4933]: I0122 06:08:05.156732 4933 generic.go:334] "Generic (PLEG): container finished" podID="d2cec7e1-f904-41dd-9ce9-0ad88b68e717" containerID="912e5c54d9b4f5e3b7fc021ca697f8cdbd9d313b028fda518e1e1ce6f83761cd" exitCode=0 Jan 22 06:08:05 crc kubenswrapper[4933]: I0122 06:08:05.156818 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d2cec7e1-f904-41dd-9ce9-0ad88b68e717","Type":"ContainerDied","Data":"912e5c54d9b4f5e3b7fc021ca697f8cdbd9d313b028fda518e1e1ce6f83761cd"} Jan 22 06:08:05 crc kubenswrapper[4933]: I0122 06:08:05.158266 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c5d9249f-f81c-4306-9bfe-3cdff0db79ed","Type":"ContainerStarted","Data":"578aa44aa3132a9357b820e94381532ddb8bff44dbc1364537365707d07bcff4"} Jan 22 06:08:05 crc kubenswrapper[4933]: I0122 06:08:05.158301 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c5d9249f-f81c-4306-9bfe-3cdff0db79ed","Type":"ContainerStarted","Data":"9a0adea55dd03ff2d6346191e0241bed6eb10cdf63e91f64eebb1454bfcf71d0"} Jan 22 06:08:05 crc kubenswrapper[4933]: I0122 06:08:05.439394 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 22 06:08:05 crc kubenswrapper[4933]: I0122 06:08:05.439460 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 22 06:08:05 crc kubenswrapper[4933]: I0122 06:08:05.445913 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Jan 22 06:08:05 crc kubenswrapper[4933]: I0122 06:08:05.471536 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Jan 22 06:08:05 crc kubenswrapper[4933]: I0122 06:08:05.532212 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 22 06:08:05 crc kubenswrapper[4933]: I0122 06:08:05.703746 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d2cec7e1-f904-41dd-9ce9-0ad88b68e717-config-data\") pod \"d2cec7e1-f904-41dd-9ce9-0ad88b68e717\" (UID: \"d2cec7e1-f904-41dd-9ce9-0ad88b68e717\") " Jan 22 06:08:05 crc kubenswrapper[4933]: I0122 06:08:05.704139 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2cec7e1-f904-41dd-9ce9-0ad88b68e717-combined-ca-bundle\") pod \"d2cec7e1-f904-41dd-9ce9-0ad88b68e717\" (UID: \"d2cec7e1-f904-41dd-9ce9-0ad88b68e717\") " Jan 22 06:08:05 crc kubenswrapper[4933]: I0122 06:08:05.704194 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-brpdk\" (UniqueName: \"kubernetes.io/projected/d2cec7e1-f904-41dd-9ce9-0ad88b68e717-kube-api-access-brpdk\") pod \"d2cec7e1-f904-41dd-9ce9-0ad88b68e717\" (UID: \"d2cec7e1-f904-41dd-9ce9-0ad88b68e717\") " Jan 22 06:08:05 crc kubenswrapper[4933]: I0122 06:08:05.704340 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d2cec7e1-f904-41dd-9ce9-0ad88b68e717-logs\") pod \"d2cec7e1-f904-41dd-9ce9-0ad88b68e717\" (UID: \"d2cec7e1-f904-41dd-9ce9-0ad88b68e717\") " Jan 22 06:08:05 crc kubenswrapper[4933]: I0122 06:08:05.705250 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d2cec7e1-f904-41dd-9ce9-0ad88b68e717-logs" (OuterVolumeSpecName: "logs") pod "d2cec7e1-f904-41dd-9ce9-0ad88b68e717" (UID: "d2cec7e1-f904-41dd-9ce9-0ad88b68e717"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:08:05 crc kubenswrapper[4933]: I0122 06:08:05.712779 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d2cec7e1-f904-41dd-9ce9-0ad88b68e717-kube-api-access-brpdk" (OuterVolumeSpecName: "kube-api-access-brpdk") pod "d2cec7e1-f904-41dd-9ce9-0ad88b68e717" (UID: "d2cec7e1-f904-41dd-9ce9-0ad88b68e717"). InnerVolumeSpecName "kube-api-access-brpdk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:08:05 crc kubenswrapper[4933]: I0122 06:08:05.748063 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d2cec7e1-f904-41dd-9ce9-0ad88b68e717-config-data" (OuterVolumeSpecName: "config-data") pod "d2cec7e1-f904-41dd-9ce9-0ad88b68e717" (UID: "d2cec7e1-f904-41dd-9ce9-0ad88b68e717"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:08:05 crc kubenswrapper[4933]: I0122 06:08:05.763296 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d2cec7e1-f904-41dd-9ce9-0ad88b68e717-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d2cec7e1-f904-41dd-9ce9-0ad88b68e717" (UID: "d2cec7e1-f904-41dd-9ce9-0ad88b68e717"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:08:05 crc kubenswrapper[4933]: I0122 06:08:05.806623 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-brpdk\" (UniqueName: \"kubernetes.io/projected/d2cec7e1-f904-41dd-9ce9-0ad88b68e717-kube-api-access-brpdk\") on node \"crc\" DevicePath \"\"" Jan 22 06:08:05 crc kubenswrapper[4933]: I0122 06:08:05.806689 4933 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d2cec7e1-f904-41dd-9ce9-0ad88b68e717-logs\") on node \"crc\" DevicePath \"\"" Jan 22 06:08:05 crc kubenswrapper[4933]: I0122 06:08:05.806703 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d2cec7e1-f904-41dd-9ce9-0ad88b68e717-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:08:05 crc kubenswrapper[4933]: I0122 06:08:05.806714 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2cec7e1-f904-41dd-9ce9-0ad88b68e717-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.178330 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c5d9249f-f81c-4306-9bfe-3cdff0db79ed","Type":"ContainerStarted","Data":"5c6fd9b0792931299b86c0ea51952034b99e604f97726f9b576398d78d93a906"} Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.183538 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.184572 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d2cec7e1-f904-41dd-9ce9-0ad88b68e717","Type":"ContainerDied","Data":"30291a5d5f08d3a68ba37ec9b2c8ae9dbb3c2a877affd39263d4c4b4dab998c5"} Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.184615 4933 scope.go:117] "RemoveContainer" containerID="912e5c54d9b4f5e3b7fc021ca697f8cdbd9d313b028fda518e1e1ce6f83761cd" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.205588 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.227248 4933 scope.go:117] "RemoveContainer" containerID="9679f838ab691ab0b7cf0f16a706dd64da7b9ac46ada87fb962e9985a04f62db" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.248547 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.268228 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.278647 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 22 06:08:06 crc kubenswrapper[4933]: E0122 06:08:06.279039 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2cec7e1-f904-41dd-9ce9-0ad88b68e717" containerName="nova-api-log" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.279056 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2cec7e1-f904-41dd-9ce9-0ad88b68e717" containerName="nova-api-log" Jan 22 06:08:06 crc kubenswrapper[4933]: E0122 06:08:06.279084 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2cec7e1-f904-41dd-9ce9-0ad88b68e717" containerName="nova-api-api" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.279091 4933 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="d2cec7e1-f904-41dd-9ce9-0ad88b68e717" containerName="nova-api-api" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.279236 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="d2cec7e1-f904-41dd-9ce9-0ad88b68e717" containerName="nova-api-api" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.279257 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="d2cec7e1-f904-41dd-9ce9-0ad88b68e717" containerName="nova-api-log" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.280256 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.290502 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.290579 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.290764 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.308434 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.428169 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/999e0454-e871-4274-b6e6-f83b7d4b9734-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"999e0454-e871-4274-b6e6-f83b7d4b9734\") " pod="openstack/nova-api-0" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.428241 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2ldmq\" (UniqueName: \"kubernetes.io/projected/999e0454-e871-4274-b6e6-f83b7d4b9734-kube-api-access-2ldmq\") pod \"nova-api-0\" (UID: \"999e0454-e871-4274-b6e6-f83b7d4b9734\") " pod="openstack/nova-api-0" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.428272 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/999e0454-e871-4274-b6e6-f83b7d4b9734-public-tls-certs\") pod \"nova-api-0\" (UID: \"999e0454-e871-4274-b6e6-f83b7d4b9734\") " pod="openstack/nova-api-0" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.428310 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/999e0454-e871-4274-b6e6-f83b7d4b9734-config-data\") pod \"nova-api-0\" (UID: \"999e0454-e871-4274-b6e6-f83b7d4b9734\") " pod="openstack/nova-api-0" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.428390 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/999e0454-e871-4274-b6e6-f83b7d4b9734-internal-tls-certs\") pod \"nova-api-0\" (UID: \"999e0454-e871-4274-b6e6-f83b7d4b9734\") " pod="openstack/nova-api-0" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.428418 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/999e0454-e871-4274-b6e6-f83b7d4b9734-logs\") pod \"nova-api-0\" (UID: \"999e0454-e871-4274-b6e6-f83b7d4b9734\") " pod="openstack/nova-api-0" Jan 22 06:08:06 crc 
kubenswrapper[4933]: I0122 06:08:06.477258 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="16fb5bb3-209b-4796-9dec-493ff7db88c5" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.197:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.477417 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="16fb5bb3-209b-4796-9dec-493ff7db88c5" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.197:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.481162 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-v7dl8"] Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.482332 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-v7dl8" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.486875 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.487718 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.516908 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d2cec7e1-f904-41dd-9ce9-0ad88b68e717" path="/var/lib/kubelet/pods/d2cec7e1-f904-41dd-9ce9-0ad88b68e717/volumes" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.519960 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-v7dl8"] Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.531750 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/999e0454-e871-4274-b6e6-f83b7d4b9734-config-data\") pod \"nova-api-0\" (UID: \"999e0454-e871-4274-b6e6-f83b7d4b9734\") " pod="openstack/nova-api-0" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.531870 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/999e0454-e871-4274-b6e6-f83b7d4b9734-internal-tls-certs\") pod \"nova-api-0\" (UID: \"999e0454-e871-4274-b6e6-f83b7d4b9734\") " pod="openstack/nova-api-0" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.531931 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/999e0454-e871-4274-b6e6-f83b7d4b9734-logs\") pod \"nova-api-0\" (UID: \"999e0454-e871-4274-b6e6-f83b7d4b9734\") " pod="openstack/nova-api-0" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.531959 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/999e0454-e871-4274-b6e6-f83b7d4b9734-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"999e0454-e871-4274-b6e6-f83b7d4b9734\") " pod="openstack/nova-api-0" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.532004 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2ldmq\" (UniqueName: \"kubernetes.io/projected/999e0454-e871-4274-b6e6-f83b7d4b9734-kube-api-access-2ldmq\") pod \"nova-api-0\" (UID: 
\"999e0454-e871-4274-b6e6-f83b7d4b9734\") " pod="openstack/nova-api-0" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.532030 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/999e0454-e871-4274-b6e6-f83b7d4b9734-public-tls-certs\") pod \"nova-api-0\" (UID: \"999e0454-e871-4274-b6e6-f83b7d4b9734\") " pod="openstack/nova-api-0" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.533827 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/999e0454-e871-4274-b6e6-f83b7d4b9734-logs\") pod \"nova-api-0\" (UID: \"999e0454-e871-4274-b6e6-f83b7d4b9734\") " pod="openstack/nova-api-0" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.537706 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/999e0454-e871-4274-b6e6-f83b7d4b9734-internal-tls-certs\") pod \"nova-api-0\" (UID: \"999e0454-e871-4274-b6e6-f83b7d4b9734\") " pod="openstack/nova-api-0" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.539284 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/999e0454-e871-4274-b6e6-f83b7d4b9734-public-tls-certs\") pod \"nova-api-0\" (UID: \"999e0454-e871-4274-b6e6-f83b7d4b9734\") " pod="openstack/nova-api-0" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.549505 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/999e0454-e871-4274-b6e6-f83b7d4b9734-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"999e0454-e871-4274-b6e6-f83b7d4b9734\") " pod="openstack/nova-api-0" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.557599 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2ldmq\" (UniqueName: \"kubernetes.io/projected/999e0454-e871-4274-b6e6-f83b7d4b9734-kube-api-access-2ldmq\") pod \"nova-api-0\" (UID: \"999e0454-e871-4274-b6e6-f83b7d4b9734\") " pod="openstack/nova-api-0" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.559047 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/999e0454-e871-4274-b6e6-f83b7d4b9734-config-data\") pod \"nova-api-0\" (UID: \"999e0454-e871-4274-b6e6-f83b7d4b9734\") " pod="openstack/nova-api-0" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.613819 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.634040 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j4wwq\" (UniqueName: \"kubernetes.io/projected/e553a055-30a6-4e9c-b424-66deb8dfabbb-kube-api-access-j4wwq\") pod \"nova-cell1-cell-mapping-v7dl8\" (UID: \"e553a055-30a6-4e9c-b424-66deb8dfabbb\") " pod="openstack/nova-cell1-cell-mapping-v7dl8" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.634109 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e553a055-30a6-4e9c-b424-66deb8dfabbb-scripts\") pod \"nova-cell1-cell-mapping-v7dl8\" (UID: \"e553a055-30a6-4e9c-b424-66deb8dfabbb\") " pod="openstack/nova-cell1-cell-mapping-v7dl8" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.634134 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e553a055-30a6-4e9c-b424-66deb8dfabbb-config-data\") pod \"nova-cell1-cell-mapping-v7dl8\" (UID: \"e553a055-30a6-4e9c-b424-66deb8dfabbb\") " pod="openstack/nova-cell1-cell-mapping-v7dl8" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.634175 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e553a055-30a6-4e9c-b424-66deb8dfabbb-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-v7dl8\" (UID: \"e553a055-30a6-4e9c-b424-66deb8dfabbb\") " pod="openstack/nova-cell1-cell-mapping-v7dl8" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.735617 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j4wwq\" (UniqueName: \"kubernetes.io/projected/e553a055-30a6-4e9c-b424-66deb8dfabbb-kube-api-access-j4wwq\") pod \"nova-cell1-cell-mapping-v7dl8\" (UID: \"e553a055-30a6-4e9c-b424-66deb8dfabbb\") " pod="openstack/nova-cell1-cell-mapping-v7dl8" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.735904 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e553a055-30a6-4e9c-b424-66deb8dfabbb-scripts\") pod \"nova-cell1-cell-mapping-v7dl8\" (UID: \"e553a055-30a6-4e9c-b424-66deb8dfabbb\") " pod="openstack/nova-cell1-cell-mapping-v7dl8" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.735950 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e553a055-30a6-4e9c-b424-66deb8dfabbb-config-data\") pod \"nova-cell1-cell-mapping-v7dl8\" (UID: \"e553a055-30a6-4e9c-b424-66deb8dfabbb\") " pod="openstack/nova-cell1-cell-mapping-v7dl8" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.735984 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e553a055-30a6-4e9c-b424-66deb8dfabbb-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-v7dl8\" (UID: \"e553a055-30a6-4e9c-b424-66deb8dfabbb\") " pod="openstack/nova-cell1-cell-mapping-v7dl8" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.743787 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e553a055-30a6-4e9c-b424-66deb8dfabbb-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-v7dl8\" (UID: 
\"e553a055-30a6-4e9c-b424-66deb8dfabbb\") " pod="openstack/nova-cell1-cell-mapping-v7dl8" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.743834 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e553a055-30a6-4e9c-b424-66deb8dfabbb-config-data\") pod \"nova-cell1-cell-mapping-v7dl8\" (UID: \"e553a055-30a6-4e9c-b424-66deb8dfabbb\") " pod="openstack/nova-cell1-cell-mapping-v7dl8" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.746729 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e553a055-30a6-4e9c-b424-66deb8dfabbb-scripts\") pod \"nova-cell1-cell-mapping-v7dl8\" (UID: \"e553a055-30a6-4e9c-b424-66deb8dfabbb\") " pod="openstack/nova-cell1-cell-mapping-v7dl8" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.758047 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j4wwq\" (UniqueName: \"kubernetes.io/projected/e553a055-30a6-4e9c-b424-66deb8dfabbb-kube-api-access-j4wwq\") pod \"nova-cell1-cell-mapping-v7dl8\" (UID: \"e553a055-30a6-4e9c-b424-66deb8dfabbb\") " pod="openstack/nova-cell1-cell-mapping-v7dl8" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.838547 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-v7dl8" Jan 22 06:08:06 crc kubenswrapper[4933]: I0122 06:08:06.988809 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 22 06:08:07 crc kubenswrapper[4933]: I0122 06:08:07.201987 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c5d9249f-f81c-4306-9bfe-3cdff0db79ed","Type":"ContainerStarted","Data":"dd62b5de102c7e48e4dfffab4dd9cef5921c95afbebb476737f544b2b7899f96"} Jan 22 06:08:07 crc kubenswrapper[4933]: I0122 06:08:07.223720 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"999e0454-e871-4274-b6e6-f83b7d4b9734","Type":"ContainerStarted","Data":"e5af426600a808462b63eff8401c4594a331586fc2ed5e4a0a5c0ee0fdb6f676"} Jan 22 06:08:07 crc kubenswrapper[4933]: I0122 06:08:07.223770 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"999e0454-e871-4274-b6e6-f83b7d4b9734","Type":"ContainerStarted","Data":"a6919aba7e026f0853ec40436371066927eb0289a602ee78264c6d28f4cf74c6"} Jan 22 06:08:07 crc kubenswrapper[4933]: W0122 06:08:07.434364 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode553a055_30a6_4e9c_b424_66deb8dfabbb.slice/crio-f093240ae3d16bb0fed525f5a6f831825a11d56fd568dc09dc193a5103fe7653 WatchSource:0}: Error finding container f093240ae3d16bb0fed525f5a6f831825a11d56fd568dc09dc193a5103fe7653: Status 404 returned error can't find the container with id f093240ae3d16bb0fed525f5a6f831825a11d56fd568dc09dc193a5103fe7653 Jan 22 06:08:07 crc kubenswrapper[4933]: I0122 06:08:07.434863 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-v7dl8"] Jan 22 06:08:08 crc kubenswrapper[4933]: I0122 06:08:08.244441 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"999e0454-e871-4274-b6e6-f83b7d4b9734","Type":"ContainerStarted","Data":"4197c09868eb250c59ece15ec1d24e662987d0a37f0eb57f584f1032fa85aca1"} Jan 22 06:08:08 crc kubenswrapper[4933]: I0122 06:08:08.247498 4933 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-v7dl8" event={"ID":"e553a055-30a6-4e9c-b424-66deb8dfabbb","Type":"ContainerStarted","Data":"0fdc5152f900a8420f5cb44deece538cdbfc86883d7690cc9abc8456c127e4ab"} Jan 22 06:08:08 crc kubenswrapper[4933]: I0122 06:08:08.247560 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-v7dl8" event={"ID":"e553a055-30a6-4e9c-b424-66deb8dfabbb","Type":"ContainerStarted","Data":"f093240ae3d16bb0fed525f5a6f831825a11d56fd568dc09dc193a5103fe7653"} Jan 22 06:08:08 crc kubenswrapper[4933]: I0122 06:08:08.250625 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c5d9249f-f81c-4306-9bfe-3cdff0db79ed","Type":"ContainerStarted","Data":"47bd8ead87ea5aa0e8525cd7df366309c214c1f2fdce743e67affdc4033bf615"} Jan 22 06:08:08 crc kubenswrapper[4933]: I0122 06:08:08.250777 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c5d9249f-f81c-4306-9bfe-3cdff0db79ed" containerName="ceilometer-central-agent" containerID="cri-o://578aa44aa3132a9357b820e94381532ddb8bff44dbc1364537365707d07bcff4" gracePeriod=30 Jan 22 06:08:08 crc kubenswrapper[4933]: I0122 06:08:08.250813 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c5d9249f-f81c-4306-9bfe-3cdff0db79ed" containerName="proxy-httpd" containerID="cri-o://47bd8ead87ea5aa0e8525cd7df366309c214c1f2fdce743e67affdc4033bf615" gracePeriod=30 Jan 22 06:08:08 crc kubenswrapper[4933]: I0122 06:08:08.250843 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c5d9249f-f81c-4306-9bfe-3cdff0db79ed" containerName="ceilometer-notification-agent" containerID="cri-o://5c6fd9b0792931299b86c0ea51952034b99e604f97726f9b576398d78d93a906" gracePeriod=30 Jan 22 06:08:08 crc kubenswrapper[4933]: I0122 06:08:08.250816 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 22 06:08:08 crc kubenswrapper[4933]: I0122 06:08:08.250790 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c5d9249f-f81c-4306-9bfe-3cdff0db79ed" containerName="sg-core" containerID="cri-o://dd62b5de102c7e48e4dfffab4dd9cef5921c95afbebb476737f544b2b7899f96" gracePeriod=30 Jan 22 06:08:08 crc kubenswrapper[4933]: I0122 06:08:08.279008 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.278986555 podStartE2EDuration="2.278986555s" podCreationTimestamp="2026-01-22 06:08:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:08:08.265312854 +0000 UTC m=+1336.102438217" watchObservedRunningTime="2026-01-22 06:08:08.278986555 +0000 UTC m=+1336.116111908" Jan 22 06:08:08 crc kubenswrapper[4933]: I0122 06:08:08.286265 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-v7dl8" podStartSLOduration=2.286245254 podStartE2EDuration="2.286245254s" podCreationTimestamp="2026-01-22 06:08:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:08:08.280359764 +0000 UTC m=+1336.117485117" watchObservedRunningTime="2026-01-22 06:08:08.286245254 +0000 UTC m=+1336.123370607" Jan 22 06:08:08 crc 
kubenswrapper[4933]: I0122 06:08:08.307240 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.194829005 podStartE2EDuration="5.307223326s" podCreationTimestamp="2026-01-22 06:08:03 +0000 UTC" firstStartedPulling="2026-01-22 06:08:04.327334751 +0000 UTC m=+1332.164460104" lastFinishedPulling="2026-01-22 06:08:07.439729062 +0000 UTC m=+1335.276854425" observedRunningTime="2026-01-22 06:08:08.305492101 +0000 UTC m=+1336.142617484" watchObservedRunningTime="2026-01-22 06:08:08.307223326 +0000 UTC m=+1336.144348669" Jan 22 06:08:08 crc kubenswrapper[4933]: I0122 06:08:08.606288 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5ddd577785-b24fs" Jan 22 06:08:08 crc kubenswrapper[4933]: I0122 06:08:08.713565 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-557bbc7df7-xxdtg"] Jan 22 06:08:08 crc kubenswrapper[4933]: I0122 06:08:08.714032 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-557bbc7df7-xxdtg" podUID="0206df56-62b3-4d6a-87d3-2819fec42c00" containerName="dnsmasq-dns" containerID="cri-o://2e704c0292519dfd8a922c623d94f8f05284f973119ecb1d6554c36fb2c2f49f" gracePeriod=10 Jan 22 06:08:09 crc kubenswrapper[4933]: I0122 06:08:09.231883 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-557bbc7df7-xxdtg" Jan 22 06:08:09 crc kubenswrapper[4933]: I0122 06:08:09.263493 4933 generic.go:334] "Generic (PLEG): container finished" podID="0206df56-62b3-4d6a-87d3-2819fec42c00" containerID="2e704c0292519dfd8a922c623d94f8f05284f973119ecb1d6554c36fb2c2f49f" exitCode=0 Jan 22 06:08:09 crc kubenswrapper[4933]: I0122 06:08:09.264613 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-557bbc7df7-xxdtg" event={"ID":"0206df56-62b3-4d6a-87d3-2819fec42c00","Type":"ContainerDied","Data":"2e704c0292519dfd8a922c623d94f8f05284f973119ecb1d6554c36fb2c2f49f"} Jan 22 06:08:09 crc kubenswrapper[4933]: I0122 06:08:09.264652 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-557bbc7df7-xxdtg" event={"ID":"0206df56-62b3-4d6a-87d3-2819fec42c00","Type":"ContainerDied","Data":"b0e34ad26ad2323dab47af2977a844caa7ef407dc67659a452f2b11588c07d5e"} Jan 22 06:08:09 crc kubenswrapper[4933]: I0122 06:08:09.264672 4933 scope.go:117] "RemoveContainer" containerID="2e704c0292519dfd8a922c623d94f8f05284f973119ecb1d6554c36fb2c2f49f" Jan 22 06:08:09 crc kubenswrapper[4933]: I0122 06:08:09.264818 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-557bbc7df7-xxdtg" Jan 22 06:08:09 crc kubenswrapper[4933]: I0122 06:08:09.275780 4933 generic.go:334] "Generic (PLEG): container finished" podID="c5d9249f-f81c-4306-9bfe-3cdff0db79ed" containerID="47bd8ead87ea5aa0e8525cd7df366309c214c1f2fdce743e67affdc4033bf615" exitCode=0 Jan 22 06:08:09 crc kubenswrapper[4933]: I0122 06:08:09.275819 4933 generic.go:334] "Generic (PLEG): container finished" podID="c5d9249f-f81c-4306-9bfe-3cdff0db79ed" containerID="dd62b5de102c7e48e4dfffab4dd9cef5921c95afbebb476737f544b2b7899f96" exitCode=2 Jan 22 06:08:09 crc kubenswrapper[4933]: I0122 06:08:09.275828 4933 generic.go:334] "Generic (PLEG): container finished" podID="c5d9249f-f81c-4306-9bfe-3cdff0db79ed" containerID="5c6fd9b0792931299b86c0ea51952034b99e604f97726f9b576398d78d93a906" exitCode=0 Jan 22 06:08:09 crc kubenswrapper[4933]: I0122 06:08:09.275887 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c5d9249f-f81c-4306-9bfe-3cdff0db79ed","Type":"ContainerDied","Data":"47bd8ead87ea5aa0e8525cd7df366309c214c1f2fdce743e67affdc4033bf615"} Jan 22 06:08:09 crc kubenswrapper[4933]: I0122 06:08:09.275939 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c5d9249f-f81c-4306-9bfe-3cdff0db79ed","Type":"ContainerDied","Data":"dd62b5de102c7e48e4dfffab4dd9cef5921c95afbebb476737f544b2b7899f96"} Jan 22 06:08:09 crc kubenswrapper[4933]: I0122 06:08:09.275957 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c5d9249f-f81c-4306-9bfe-3cdff0db79ed","Type":"ContainerDied","Data":"5c6fd9b0792931299b86c0ea51952034b99e604f97726f9b576398d78d93a906"} Jan 22 06:08:09 crc kubenswrapper[4933]: I0122 06:08:09.286694 4933 scope.go:117] "RemoveContainer" containerID="7be37068f9187d5ae31e28aa151800275f8ca0245ea157c1c68ec74d1cd203a1" Jan 22 06:08:09 crc kubenswrapper[4933]: I0122 06:08:09.328702 4933 scope.go:117] "RemoveContainer" containerID="2e704c0292519dfd8a922c623d94f8f05284f973119ecb1d6554c36fb2c2f49f" Jan 22 06:08:09 crc kubenswrapper[4933]: E0122 06:08:09.331583 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2e704c0292519dfd8a922c623d94f8f05284f973119ecb1d6554c36fb2c2f49f\": container with ID starting with 2e704c0292519dfd8a922c623d94f8f05284f973119ecb1d6554c36fb2c2f49f not found: ID does not exist" containerID="2e704c0292519dfd8a922c623d94f8f05284f973119ecb1d6554c36fb2c2f49f" Jan 22 06:08:09 crc kubenswrapper[4933]: I0122 06:08:09.331643 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2e704c0292519dfd8a922c623d94f8f05284f973119ecb1d6554c36fb2c2f49f"} err="failed to get container status \"2e704c0292519dfd8a922c623d94f8f05284f973119ecb1d6554c36fb2c2f49f\": rpc error: code = NotFound desc = could not find container \"2e704c0292519dfd8a922c623d94f8f05284f973119ecb1d6554c36fb2c2f49f\": container with ID starting with 2e704c0292519dfd8a922c623d94f8f05284f973119ecb1d6554c36fb2c2f49f not found: ID does not exist" Jan 22 06:08:09 crc kubenswrapper[4933]: I0122 06:08:09.331687 4933 scope.go:117] "RemoveContainer" containerID="7be37068f9187d5ae31e28aa151800275f8ca0245ea157c1c68ec74d1cd203a1" Jan 22 06:08:09 crc kubenswrapper[4933]: E0122 06:08:09.332299 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"7be37068f9187d5ae31e28aa151800275f8ca0245ea157c1c68ec74d1cd203a1\": container with ID starting with 7be37068f9187d5ae31e28aa151800275f8ca0245ea157c1c68ec74d1cd203a1 not found: ID does not exist" containerID="7be37068f9187d5ae31e28aa151800275f8ca0245ea157c1c68ec74d1cd203a1" Jan 22 06:08:09 crc kubenswrapper[4933]: I0122 06:08:09.332332 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7be37068f9187d5ae31e28aa151800275f8ca0245ea157c1c68ec74d1cd203a1"} err="failed to get container status \"7be37068f9187d5ae31e28aa151800275f8ca0245ea157c1c68ec74d1cd203a1\": rpc error: code = NotFound desc = could not find container \"7be37068f9187d5ae31e28aa151800275f8ca0245ea157c1c68ec74d1cd203a1\": container with ID starting with 7be37068f9187d5ae31e28aa151800275f8ca0245ea157c1c68ec74d1cd203a1 not found: ID does not exist" Jan 22 06:08:09 crc kubenswrapper[4933]: I0122 06:08:09.382686 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0206df56-62b3-4d6a-87d3-2819fec42c00-ovsdbserver-nb\") pod \"0206df56-62b3-4d6a-87d3-2819fec42c00\" (UID: \"0206df56-62b3-4d6a-87d3-2819fec42c00\") " Jan 22 06:08:09 crc kubenswrapper[4933]: I0122 06:08:09.382747 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0206df56-62b3-4d6a-87d3-2819fec42c00-dns-swift-storage-0\") pod \"0206df56-62b3-4d6a-87d3-2819fec42c00\" (UID: \"0206df56-62b3-4d6a-87d3-2819fec42c00\") " Jan 22 06:08:09 crc kubenswrapper[4933]: I0122 06:08:09.382813 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0206df56-62b3-4d6a-87d3-2819fec42c00-config\") pod \"0206df56-62b3-4d6a-87d3-2819fec42c00\" (UID: \"0206df56-62b3-4d6a-87d3-2819fec42c00\") " Jan 22 06:08:09 crc kubenswrapper[4933]: I0122 06:08:09.382893 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ct5k4\" (UniqueName: \"kubernetes.io/projected/0206df56-62b3-4d6a-87d3-2819fec42c00-kube-api-access-ct5k4\") pod \"0206df56-62b3-4d6a-87d3-2819fec42c00\" (UID: \"0206df56-62b3-4d6a-87d3-2819fec42c00\") " Jan 22 06:08:09 crc kubenswrapper[4933]: I0122 06:08:09.382962 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0206df56-62b3-4d6a-87d3-2819fec42c00-dns-svc\") pod \"0206df56-62b3-4d6a-87d3-2819fec42c00\" (UID: \"0206df56-62b3-4d6a-87d3-2819fec42c00\") " Jan 22 06:08:09 crc kubenswrapper[4933]: I0122 06:08:09.383602 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0206df56-62b3-4d6a-87d3-2819fec42c00-ovsdbserver-sb\") pod \"0206df56-62b3-4d6a-87d3-2819fec42c00\" (UID: \"0206df56-62b3-4d6a-87d3-2819fec42c00\") " Jan 22 06:08:09 crc kubenswrapper[4933]: I0122 06:08:09.392584 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0206df56-62b3-4d6a-87d3-2819fec42c00-kube-api-access-ct5k4" (OuterVolumeSpecName: "kube-api-access-ct5k4") pod "0206df56-62b3-4d6a-87d3-2819fec42c00" (UID: "0206df56-62b3-4d6a-87d3-2819fec42c00"). InnerVolumeSpecName "kube-api-access-ct5k4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:08:09 crc kubenswrapper[4933]: I0122 06:08:09.436008 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0206df56-62b3-4d6a-87d3-2819fec42c00-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0206df56-62b3-4d6a-87d3-2819fec42c00" (UID: "0206df56-62b3-4d6a-87d3-2819fec42c00"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:08:09 crc kubenswrapper[4933]: I0122 06:08:09.436590 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0206df56-62b3-4d6a-87d3-2819fec42c00-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "0206df56-62b3-4d6a-87d3-2819fec42c00" (UID: "0206df56-62b3-4d6a-87d3-2819fec42c00"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:08:09 crc kubenswrapper[4933]: I0122 06:08:09.439503 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0206df56-62b3-4d6a-87d3-2819fec42c00-config" (OuterVolumeSpecName: "config") pod "0206df56-62b3-4d6a-87d3-2819fec42c00" (UID: "0206df56-62b3-4d6a-87d3-2819fec42c00"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:08:09 crc kubenswrapper[4933]: I0122 06:08:09.447680 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0206df56-62b3-4d6a-87d3-2819fec42c00-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "0206df56-62b3-4d6a-87d3-2819fec42c00" (UID: "0206df56-62b3-4d6a-87d3-2819fec42c00"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:08:09 crc kubenswrapper[4933]: I0122 06:08:09.451881 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0206df56-62b3-4d6a-87d3-2819fec42c00-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "0206df56-62b3-4d6a-87d3-2819fec42c00" (UID: "0206df56-62b3-4d6a-87d3-2819fec42c00"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:08:09 crc kubenswrapper[4933]: I0122 06:08:09.485358 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0206df56-62b3-4d6a-87d3-2819fec42c00-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 06:08:09 crc kubenswrapper[4933]: I0122 06:08:09.485392 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0206df56-62b3-4d6a-87d3-2819fec42c00-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 06:08:09 crc kubenswrapper[4933]: I0122 06:08:09.485402 4933 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0206df56-62b3-4d6a-87d3-2819fec42c00-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 22 06:08:09 crc kubenswrapper[4933]: I0122 06:08:09.485412 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0206df56-62b3-4d6a-87d3-2819fec42c00-config\") on node \"crc\" DevicePath \"\"" Jan 22 06:08:09 crc kubenswrapper[4933]: I0122 06:08:09.485421 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ct5k4\" (UniqueName: \"kubernetes.io/projected/0206df56-62b3-4d6a-87d3-2819fec42c00-kube-api-access-ct5k4\") on node \"crc\" DevicePath \"\"" Jan 22 06:08:09 crc kubenswrapper[4933]: I0122 06:08:09.485449 4933 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0206df56-62b3-4d6a-87d3-2819fec42c00-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 06:08:09 crc kubenswrapper[4933]: I0122 06:08:09.606306 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-557bbc7df7-xxdtg"] Jan 22 06:08:09 crc kubenswrapper[4933]: I0122 06:08:09.617458 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-557bbc7df7-xxdtg"] Jan 22 06:08:10 crc kubenswrapper[4933]: I0122 06:08:10.502399 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0206df56-62b3-4d6a-87d3-2819fec42c00" path="/var/lib/kubelet/pods/0206df56-62b3-4d6a-87d3-2819fec42c00/volumes" Jan 22 06:08:12 crc kubenswrapper[4933]: I0122 06:08:12.778632 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 06:08:12 crc kubenswrapper[4933]: I0122 06:08:12.860633 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-sg-core-conf-yaml\") pod \"c5d9249f-f81c-4306-9bfe-3cdff0db79ed\" (UID: \"c5d9249f-f81c-4306-9bfe-3cdff0db79ed\") " Jan 22 06:08:12 crc kubenswrapper[4933]: I0122 06:08:12.860711 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-run-httpd\") pod \"c5d9249f-f81c-4306-9bfe-3cdff0db79ed\" (UID: \"c5d9249f-f81c-4306-9bfe-3cdff0db79ed\") " Jan 22 06:08:12 crc kubenswrapper[4933]: I0122 06:08:12.860752 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-config-data\") pod \"c5d9249f-f81c-4306-9bfe-3cdff0db79ed\" (UID: \"c5d9249f-f81c-4306-9bfe-3cdff0db79ed\") " Jan 22 06:08:12 crc kubenswrapper[4933]: I0122 06:08:12.860775 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-log-httpd\") pod \"c5d9249f-f81c-4306-9bfe-3cdff0db79ed\" (UID: \"c5d9249f-f81c-4306-9bfe-3cdff0db79ed\") " Jan 22 06:08:12 crc kubenswrapper[4933]: I0122 06:08:12.860816 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-combined-ca-bundle\") pod \"c5d9249f-f81c-4306-9bfe-3cdff0db79ed\" (UID: \"c5d9249f-f81c-4306-9bfe-3cdff0db79ed\") " Jan 22 06:08:12 crc kubenswrapper[4933]: I0122 06:08:12.860922 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fdx4x\" (UniqueName: \"kubernetes.io/projected/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-kube-api-access-fdx4x\") pod \"c5d9249f-f81c-4306-9bfe-3cdff0db79ed\" (UID: \"c5d9249f-f81c-4306-9bfe-3cdff0db79ed\") " Jan 22 06:08:12 crc kubenswrapper[4933]: I0122 06:08:12.861055 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-scripts\") pod \"c5d9249f-f81c-4306-9bfe-3cdff0db79ed\" (UID: \"c5d9249f-f81c-4306-9bfe-3cdff0db79ed\") " Jan 22 06:08:12 crc kubenswrapper[4933]: I0122 06:08:12.861126 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-ceilometer-tls-certs\") pod \"c5d9249f-f81c-4306-9bfe-3cdff0db79ed\" (UID: \"c5d9249f-f81c-4306-9bfe-3cdff0db79ed\") " Jan 22 06:08:12 crc kubenswrapper[4933]: I0122 06:08:12.861516 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "c5d9249f-f81c-4306-9bfe-3cdff0db79ed" (UID: "c5d9249f-f81c-4306-9bfe-3cdff0db79ed"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:08:12 crc kubenswrapper[4933]: I0122 06:08:12.861622 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "c5d9249f-f81c-4306-9bfe-3cdff0db79ed" (UID: "c5d9249f-f81c-4306-9bfe-3cdff0db79ed"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:08:12 crc kubenswrapper[4933]: I0122 06:08:12.867140 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-kube-api-access-fdx4x" (OuterVolumeSpecName: "kube-api-access-fdx4x") pod "c5d9249f-f81c-4306-9bfe-3cdff0db79ed" (UID: "c5d9249f-f81c-4306-9bfe-3cdff0db79ed"). InnerVolumeSpecName "kube-api-access-fdx4x". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:08:12 crc kubenswrapper[4933]: I0122 06:08:12.870277 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-scripts" (OuterVolumeSpecName: "scripts") pod "c5d9249f-f81c-4306-9bfe-3cdff0db79ed" (UID: "c5d9249f-f81c-4306-9bfe-3cdff0db79ed"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:08:12 crc kubenswrapper[4933]: I0122 06:08:12.906632 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "c5d9249f-f81c-4306-9bfe-3cdff0db79ed" (UID: "c5d9249f-f81c-4306-9bfe-3cdff0db79ed"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:08:12 crc kubenswrapper[4933]: I0122 06:08:12.926067 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "c5d9249f-f81c-4306-9bfe-3cdff0db79ed" (UID: "c5d9249f-f81c-4306-9bfe-3cdff0db79ed"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:08:12 crc kubenswrapper[4933]: I0122 06:08:12.940395 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c5d9249f-f81c-4306-9bfe-3cdff0db79ed" (UID: "c5d9249f-f81c-4306-9bfe-3cdff0db79ed"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:08:12 crc kubenswrapper[4933]: I0122 06:08:12.961097 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-config-data" (OuterVolumeSpecName: "config-data") pod "c5d9249f-f81c-4306-9bfe-3cdff0db79ed" (UID: "c5d9249f-f81c-4306-9bfe-3cdff0db79ed"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:08:12 crc kubenswrapper[4933]: I0122 06:08:12.963206 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:08:12 crc kubenswrapper[4933]: I0122 06:08:12.963225 4933 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:08:12 crc kubenswrapper[4933]: I0122 06:08:12.963237 4933 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 22 06:08:12 crc kubenswrapper[4933]: I0122 06:08:12.963246 4933 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 06:08:12 crc kubenswrapper[4933]: I0122 06:08:12.963255 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:08:12 crc kubenswrapper[4933]: I0122 06:08:12.963264 4933 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 06:08:12 crc kubenswrapper[4933]: I0122 06:08:12.963272 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:08:12 crc kubenswrapper[4933]: I0122 06:08:12.963280 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fdx4x\" (UniqueName: \"kubernetes.io/projected/c5d9249f-f81c-4306-9bfe-3cdff0db79ed-kube-api-access-fdx4x\") on node \"crc\" DevicePath \"\"" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.349560 4933 generic.go:334] "Generic (PLEG): container finished" podID="e553a055-30a6-4e9c-b424-66deb8dfabbb" containerID="0fdc5152f900a8420f5cb44deece538cdbfc86883d7690cc9abc8456c127e4ab" exitCode=0 Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.349674 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-v7dl8" event={"ID":"e553a055-30a6-4e9c-b424-66deb8dfabbb","Type":"ContainerDied","Data":"0fdc5152f900a8420f5cb44deece538cdbfc86883d7690cc9abc8456c127e4ab"} Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.353128 4933 generic.go:334] "Generic (PLEG): container finished" podID="c5d9249f-f81c-4306-9bfe-3cdff0db79ed" containerID="578aa44aa3132a9357b820e94381532ddb8bff44dbc1364537365707d07bcff4" exitCode=0 Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.353267 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c5d9249f-f81c-4306-9bfe-3cdff0db79ed","Type":"ContainerDied","Data":"578aa44aa3132a9357b820e94381532ddb8bff44dbc1364537365707d07bcff4"} Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.353223 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.353327 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c5d9249f-f81c-4306-9bfe-3cdff0db79ed","Type":"ContainerDied","Data":"9a0adea55dd03ff2d6346191e0241bed6eb10cdf63e91f64eebb1454bfcf71d0"} Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.353346 4933 scope.go:117] "RemoveContainer" containerID="47bd8ead87ea5aa0e8525cd7df366309c214c1f2fdce743e67affdc4033bf615" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.390249 4933 scope.go:117] "RemoveContainer" containerID="dd62b5de102c7e48e4dfffab4dd9cef5921c95afbebb476737f544b2b7899f96" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.455363 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.455668 4933 scope.go:117] "RemoveContainer" containerID="5c6fd9b0792931299b86c0ea51952034b99e604f97726f9b576398d78d93a906" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.479964 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.488164 4933 scope.go:117] "RemoveContainer" containerID="578aa44aa3132a9357b820e94381532ddb8bff44dbc1364537365707d07bcff4" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.492143 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:08:13 crc kubenswrapper[4933]: E0122 06:08:13.492539 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5d9249f-f81c-4306-9bfe-3cdff0db79ed" containerName="ceilometer-notification-agent" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.492555 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5d9249f-f81c-4306-9bfe-3cdff0db79ed" containerName="ceilometer-notification-agent" Jan 22 06:08:13 crc kubenswrapper[4933]: E0122 06:08:13.492565 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0206df56-62b3-4d6a-87d3-2819fec42c00" containerName="init" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.492572 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="0206df56-62b3-4d6a-87d3-2819fec42c00" containerName="init" Jan 22 06:08:13 crc kubenswrapper[4933]: E0122 06:08:13.492585 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0206df56-62b3-4d6a-87d3-2819fec42c00" containerName="dnsmasq-dns" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.492591 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="0206df56-62b3-4d6a-87d3-2819fec42c00" containerName="dnsmasq-dns" Jan 22 06:08:13 crc kubenswrapper[4933]: E0122 06:08:13.492620 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5d9249f-f81c-4306-9bfe-3cdff0db79ed" containerName="ceilometer-central-agent" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.492626 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5d9249f-f81c-4306-9bfe-3cdff0db79ed" containerName="ceilometer-central-agent" Jan 22 06:08:13 crc kubenswrapper[4933]: E0122 06:08:13.492638 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5d9249f-f81c-4306-9bfe-3cdff0db79ed" containerName="proxy-httpd" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.492645 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5d9249f-f81c-4306-9bfe-3cdff0db79ed" containerName="proxy-httpd" Jan 22 06:08:13 crc kubenswrapper[4933]: E0122 
06:08:13.492679 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5d9249f-f81c-4306-9bfe-3cdff0db79ed" containerName="sg-core" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.492686 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5d9249f-f81c-4306-9bfe-3cdff0db79ed" containerName="sg-core" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.492918 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5d9249f-f81c-4306-9bfe-3cdff0db79ed" containerName="sg-core" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.492933 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5d9249f-f81c-4306-9bfe-3cdff0db79ed" containerName="proxy-httpd" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.492945 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5d9249f-f81c-4306-9bfe-3cdff0db79ed" containerName="ceilometer-notification-agent" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.492957 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5d9249f-f81c-4306-9bfe-3cdff0db79ed" containerName="ceilometer-central-agent" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.492982 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="0206df56-62b3-4d6a-87d3-2819fec42c00" containerName="dnsmasq-dns" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.496606 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.501455 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.501721 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.501935 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.535106 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.547017 4933 scope.go:117] "RemoveContainer" containerID="47bd8ead87ea5aa0e8525cd7df366309c214c1f2fdce743e67affdc4033bf615" Jan 22 06:08:13 crc kubenswrapper[4933]: E0122 06:08:13.547574 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"47bd8ead87ea5aa0e8525cd7df366309c214c1f2fdce743e67affdc4033bf615\": container with ID starting with 47bd8ead87ea5aa0e8525cd7df366309c214c1f2fdce743e67affdc4033bf615 not found: ID does not exist" containerID="47bd8ead87ea5aa0e8525cd7df366309c214c1f2fdce743e67affdc4033bf615" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.547603 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"47bd8ead87ea5aa0e8525cd7df366309c214c1f2fdce743e67affdc4033bf615"} err="failed to get container status \"47bd8ead87ea5aa0e8525cd7df366309c214c1f2fdce743e67affdc4033bf615\": rpc error: code = NotFound desc = could not find container \"47bd8ead87ea5aa0e8525cd7df366309c214c1f2fdce743e67affdc4033bf615\": container with ID starting with 47bd8ead87ea5aa0e8525cd7df366309c214c1f2fdce743e67affdc4033bf615 not found: ID does not exist" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.547623 4933 scope.go:117] "RemoveContainer" 
containerID="dd62b5de102c7e48e4dfffab4dd9cef5921c95afbebb476737f544b2b7899f96" Jan 22 06:08:13 crc kubenswrapper[4933]: E0122 06:08:13.547868 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dd62b5de102c7e48e4dfffab4dd9cef5921c95afbebb476737f544b2b7899f96\": container with ID starting with dd62b5de102c7e48e4dfffab4dd9cef5921c95afbebb476737f544b2b7899f96 not found: ID does not exist" containerID="dd62b5de102c7e48e4dfffab4dd9cef5921c95afbebb476737f544b2b7899f96" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.547891 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dd62b5de102c7e48e4dfffab4dd9cef5921c95afbebb476737f544b2b7899f96"} err="failed to get container status \"dd62b5de102c7e48e4dfffab4dd9cef5921c95afbebb476737f544b2b7899f96\": rpc error: code = NotFound desc = could not find container \"dd62b5de102c7e48e4dfffab4dd9cef5921c95afbebb476737f544b2b7899f96\": container with ID starting with dd62b5de102c7e48e4dfffab4dd9cef5921c95afbebb476737f544b2b7899f96 not found: ID does not exist" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.547906 4933 scope.go:117] "RemoveContainer" containerID="5c6fd9b0792931299b86c0ea51952034b99e604f97726f9b576398d78d93a906" Jan 22 06:08:13 crc kubenswrapper[4933]: E0122 06:08:13.548065 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5c6fd9b0792931299b86c0ea51952034b99e604f97726f9b576398d78d93a906\": container with ID starting with 5c6fd9b0792931299b86c0ea51952034b99e604f97726f9b576398d78d93a906 not found: ID does not exist" containerID="5c6fd9b0792931299b86c0ea51952034b99e604f97726f9b576398d78d93a906" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.548096 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5c6fd9b0792931299b86c0ea51952034b99e604f97726f9b576398d78d93a906"} err="failed to get container status \"5c6fd9b0792931299b86c0ea51952034b99e604f97726f9b576398d78d93a906\": rpc error: code = NotFound desc = could not find container \"5c6fd9b0792931299b86c0ea51952034b99e604f97726f9b576398d78d93a906\": container with ID starting with 5c6fd9b0792931299b86c0ea51952034b99e604f97726f9b576398d78d93a906 not found: ID does not exist" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.548109 4933 scope.go:117] "RemoveContainer" containerID="578aa44aa3132a9357b820e94381532ddb8bff44dbc1364537365707d07bcff4" Jan 22 06:08:13 crc kubenswrapper[4933]: E0122 06:08:13.548320 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"578aa44aa3132a9357b820e94381532ddb8bff44dbc1364537365707d07bcff4\": container with ID starting with 578aa44aa3132a9357b820e94381532ddb8bff44dbc1364537365707d07bcff4 not found: ID does not exist" containerID="578aa44aa3132a9357b820e94381532ddb8bff44dbc1364537365707d07bcff4" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.548345 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"578aa44aa3132a9357b820e94381532ddb8bff44dbc1364537365707d07bcff4"} err="failed to get container status \"578aa44aa3132a9357b820e94381532ddb8bff44dbc1364537365707d07bcff4\": rpc error: code = NotFound desc = could not find container \"578aa44aa3132a9357b820e94381532ddb8bff44dbc1364537365707d07bcff4\": container with ID starting with 
578aa44aa3132a9357b820e94381532ddb8bff44dbc1364537365707d07bcff4 not found: ID does not exist" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.572095 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8\") " pod="openstack/ceilometer-0" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.572151 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8\") " pod="openstack/ceilometer-0" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.572210 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-log-httpd\") pod \"ceilometer-0\" (UID: \"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8\") " pod="openstack/ceilometer-0" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.572243 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-run-httpd\") pod \"ceilometer-0\" (UID: \"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8\") " pod="openstack/ceilometer-0" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.572264 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8\") " pod="openstack/ceilometer-0" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.572339 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-grgk4\" (UniqueName: \"kubernetes.io/projected/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-kube-api-access-grgk4\") pod \"ceilometer-0\" (UID: \"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8\") " pod="openstack/ceilometer-0" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.572403 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-scripts\") pod \"ceilometer-0\" (UID: \"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8\") " pod="openstack/ceilometer-0" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.572442 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-config-data\") pod \"ceilometer-0\" (UID: \"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8\") " pod="openstack/ceilometer-0" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.674365 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-scripts\") pod \"ceilometer-0\" (UID: \"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8\") " pod="openstack/ceilometer-0" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.674443 4933 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-config-data\") pod \"ceilometer-0\" (UID: \"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8\") " pod="openstack/ceilometer-0" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.674493 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8\") " pod="openstack/ceilometer-0" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.674524 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8\") " pod="openstack/ceilometer-0" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.675586 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-log-httpd\") pod \"ceilometer-0\" (UID: \"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8\") " pod="openstack/ceilometer-0" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.675679 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-run-httpd\") pod \"ceilometer-0\" (UID: \"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8\") " pod="openstack/ceilometer-0" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.675724 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8\") " pod="openstack/ceilometer-0" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.675860 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-grgk4\" (UniqueName: \"kubernetes.io/projected/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-kube-api-access-grgk4\") pod \"ceilometer-0\" (UID: \"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8\") " pod="openstack/ceilometer-0" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.676067 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-log-httpd\") pod \"ceilometer-0\" (UID: \"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8\") " pod="openstack/ceilometer-0" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.676656 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-run-httpd\") pod \"ceilometer-0\" (UID: \"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8\") " pod="openstack/ceilometer-0" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.679043 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8\") " pod="openstack/ceilometer-0" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.679465 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-scripts\") pod \"ceilometer-0\" (UID: \"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8\") " pod="openstack/ceilometer-0" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.680346 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8\") " pod="openstack/ceilometer-0" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.686707 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8\") " pod="openstack/ceilometer-0" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.689815 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-config-data\") pod \"ceilometer-0\" (UID: \"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8\") " pod="openstack/ceilometer-0" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.692042 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-grgk4\" (UniqueName: \"kubernetes.io/projected/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-kube-api-access-grgk4\") pod \"ceilometer-0\" (UID: \"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8\") " pod="openstack/ceilometer-0" Jan 22 06:08:13 crc kubenswrapper[4933]: I0122 06:08:13.834837 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 06:08:14 crc kubenswrapper[4933]: I0122 06:08:14.314455 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:08:14 crc kubenswrapper[4933]: I0122 06:08:14.363175 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8","Type":"ContainerStarted","Data":"ab185a8031470817b118de7fce3c92732e6b839c83b25ee26538e0efb0265b02"} Jan 22 06:08:14 crc kubenswrapper[4933]: I0122 06:08:14.505711 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c5d9249f-f81c-4306-9bfe-3cdff0db79ed" path="/var/lib/kubelet/pods/c5d9249f-f81c-4306-9bfe-3cdff0db79ed/volumes" Jan 22 06:08:14 crc kubenswrapper[4933]: I0122 06:08:14.685772 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-v7dl8" Jan 22 06:08:14 crc kubenswrapper[4933]: I0122 06:08:14.795774 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e553a055-30a6-4e9c-b424-66deb8dfabbb-config-data\") pod \"e553a055-30a6-4e9c-b424-66deb8dfabbb\" (UID: \"e553a055-30a6-4e9c-b424-66deb8dfabbb\") " Jan 22 06:08:14 crc kubenswrapper[4933]: I0122 06:08:14.795835 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j4wwq\" (UniqueName: \"kubernetes.io/projected/e553a055-30a6-4e9c-b424-66deb8dfabbb-kube-api-access-j4wwq\") pod \"e553a055-30a6-4e9c-b424-66deb8dfabbb\" (UID: \"e553a055-30a6-4e9c-b424-66deb8dfabbb\") " Jan 22 06:08:14 crc kubenswrapper[4933]: I0122 06:08:14.795897 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e553a055-30a6-4e9c-b424-66deb8dfabbb-scripts\") pod \"e553a055-30a6-4e9c-b424-66deb8dfabbb\" (UID: \"e553a055-30a6-4e9c-b424-66deb8dfabbb\") " Jan 22 06:08:14 crc kubenswrapper[4933]: I0122 06:08:14.795917 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e553a055-30a6-4e9c-b424-66deb8dfabbb-combined-ca-bundle\") pod \"e553a055-30a6-4e9c-b424-66deb8dfabbb\" (UID: \"e553a055-30a6-4e9c-b424-66deb8dfabbb\") " Jan 22 06:08:14 crc kubenswrapper[4933]: I0122 06:08:14.801013 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e553a055-30a6-4e9c-b424-66deb8dfabbb-scripts" (OuterVolumeSpecName: "scripts") pod "e553a055-30a6-4e9c-b424-66deb8dfabbb" (UID: "e553a055-30a6-4e9c-b424-66deb8dfabbb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:08:14 crc kubenswrapper[4933]: I0122 06:08:14.804180 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e553a055-30a6-4e9c-b424-66deb8dfabbb-kube-api-access-j4wwq" (OuterVolumeSpecName: "kube-api-access-j4wwq") pod "e553a055-30a6-4e9c-b424-66deb8dfabbb" (UID: "e553a055-30a6-4e9c-b424-66deb8dfabbb"). InnerVolumeSpecName "kube-api-access-j4wwq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:08:14 crc kubenswrapper[4933]: I0122 06:08:14.822359 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e553a055-30a6-4e9c-b424-66deb8dfabbb-config-data" (OuterVolumeSpecName: "config-data") pod "e553a055-30a6-4e9c-b424-66deb8dfabbb" (UID: "e553a055-30a6-4e9c-b424-66deb8dfabbb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:08:14 crc kubenswrapper[4933]: I0122 06:08:14.846550 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e553a055-30a6-4e9c-b424-66deb8dfabbb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e553a055-30a6-4e9c-b424-66deb8dfabbb" (UID: "e553a055-30a6-4e9c-b424-66deb8dfabbb"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:08:14 crc kubenswrapper[4933]: I0122 06:08:14.899365 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e553a055-30a6-4e9c-b424-66deb8dfabbb-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:08:14 crc kubenswrapper[4933]: I0122 06:08:14.899658 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j4wwq\" (UniqueName: \"kubernetes.io/projected/e553a055-30a6-4e9c-b424-66deb8dfabbb-kube-api-access-j4wwq\") on node \"crc\" DevicePath \"\"" Jan 22 06:08:14 crc kubenswrapper[4933]: I0122 06:08:14.899678 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e553a055-30a6-4e9c-b424-66deb8dfabbb-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:08:14 crc kubenswrapper[4933]: I0122 06:08:14.899694 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e553a055-30a6-4e9c-b424-66deb8dfabbb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:08:15 crc kubenswrapper[4933]: I0122 06:08:15.374465 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8","Type":"ContainerStarted","Data":"baeb972a644576e51e82e73e63e77cc4fc3796f87a5f81af36bd84230ffbbb39"} Jan 22 06:08:15 crc kubenswrapper[4933]: I0122 06:08:15.376214 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-v7dl8" event={"ID":"e553a055-30a6-4e9c-b424-66deb8dfabbb","Type":"ContainerDied","Data":"f093240ae3d16bb0fed525f5a6f831825a11d56fd568dc09dc193a5103fe7653"} Jan 22 06:08:15 crc kubenswrapper[4933]: I0122 06:08:15.376281 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f093240ae3d16bb0fed525f5a6f831825a11d56fd568dc09dc193a5103fe7653" Jan 22 06:08:15 crc kubenswrapper[4933]: I0122 06:08:15.376242 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-v7dl8" Jan 22 06:08:15 crc kubenswrapper[4933]: I0122 06:08:15.533268 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 22 06:08:15 crc kubenswrapper[4933]: I0122 06:08:15.541139 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 22 06:08:15 crc kubenswrapper[4933]: I0122 06:08:15.548948 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 22 06:08:15 crc kubenswrapper[4933]: I0122 06:08:15.609540 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 22 06:08:15 crc kubenswrapper[4933]: I0122 06:08:15.609844 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="999e0454-e871-4274-b6e6-f83b7d4b9734" containerName="nova-api-log" containerID="cri-o://e5af426600a808462b63eff8401c4594a331586fc2ed5e4a0a5c0ee0fdb6f676" gracePeriod=30 Jan 22 06:08:15 crc kubenswrapper[4933]: I0122 06:08:15.610365 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="999e0454-e871-4274-b6e6-f83b7d4b9734" containerName="nova-api-api" containerID="cri-o://4197c09868eb250c59ece15ec1d24e662987d0a37f0eb57f584f1032fa85aca1" gracePeriod=30 Jan 22 06:08:15 crc kubenswrapper[4933]: I0122 06:08:15.623409 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 06:08:15 crc kubenswrapper[4933]: I0122 06:08:15.623604 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="9b3c1d90-1d9a-485f-a6b3-4c527cd173bb" containerName="nova-scheduler-scheduler" containerID="cri-o://4d64563ea4c05a55275d0208eb9b3d7076bbf3ac8685166071692de0f119dcbf" gracePeriod=30 Jan 22 06:08:15 crc kubenswrapper[4933]: I0122 06:08:15.642561 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 06:08:16 crc kubenswrapper[4933]: E0122 06:08:16.277375 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4d64563ea4c05a55275d0208eb9b3d7076bbf3ac8685166071692de0f119dcbf" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 22 06:08:16 crc kubenswrapper[4933]: E0122 06:08:16.281306 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4d64563ea4c05a55275d0208eb9b3d7076bbf3ac8685166071692de0f119dcbf" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 22 06:08:16 crc kubenswrapper[4933]: E0122 06:08:16.282853 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4d64563ea4c05a55275d0208eb9b3d7076bbf3ac8685166071692de0f119dcbf" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 22 06:08:16 crc kubenswrapper[4933]: E0122 06:08:16.282906 4933 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" 
podUID="9b3c1d90-1d9a-485f-a6b3-4c527cd173bb" containerName="nova-scheduler-scheduler" Jan 22 06:08:16 crc kubenswrapper[4933]: I0122 06:08:16.428466 4933 generic.go:334] "Generic (PLEG): container finished" podID="999e0454-e871-4274-b6e6-f83b7d4b9734" containerID="4197c09868eb250c59ece15ec1d24e662987d0a37f0eb57f584f1032fa85aca1" exitCode=0 Jan 22 06:08:16 crc kubenswrapper[4933]: I0122 06:08:16.428519 4933 generic.go:334] "Generic (PLEG): container finished" podID="999e0454-e871-4274-b6e6-f83b7d4b9734" containerID="e5af426600a808462b63eff8401c4594a331586fc2ed5e4a0a5c0ee0fdb6f676" exitCode=143 Jan 22 06:08:16 crc kubenswrapper[4933]: I0122 06:08:16.428635 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"999e0454-e871-4274-b6e6-f83b7d4b9734","Type":"ContainerDied","Data":"4197c09868eb250c59ece15ec1d24e662987d0a37f0eb57f584f1032fa85aca1"} Jan 22 06:08:16 crc kubenswrapper[4933]: I0122 06:08:16.428676 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"999e0454-e871-4274-b6e6-f83b7d4b9734","Type":"ContainerDied","Data":"e5af426600a808462b63eff8401c4594a331586fc2ed5e4a0a5c0ee0fdb6f676"} Jan 22 06:08:16 crc kubenswrapper[4933]: I0122 06:08:16.433396 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8","Type":"ContainerStarted","Data":"1efb8a0dd4a5e96297be6dd03c40644d0d87af777a8a5b3ce08724ee8ab4e337"} Jan 22 06:08:16 crc kubenswrapper[4933]: I0122 06:08:16.433441 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8","Type":"ContainerStarted","Data":"83f50dc0f110857dd2bcd69ddb9eb051bf30ec68e850a625d293c6f90c8de031"} Jan 22 06:08:16 crc kubenswrapper[4933]: I0122 06:08:16.440265 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 22 06:08:16 crc kubenswrapper[4933]: I0122 06:08:16.853515 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 22 06:08:16 crc kubenswrapper[4933]: I0122 06:08:16.948731 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/999e0454-e871-4274-b6e6-f83b7d4b9734-logs\") pod \"999e0454-e871-4274-b6e6-f83b7d4b9734\" (UID: \"999e0454-e871-4274-b6e6-f83b7d4b9734\") " Jan 22 06:08:16 crc kubenswrapper[4933]: I0122 06:08:16.948805 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/999e0454-e871-4274-b6e6-f83b7d4b9734-internal-tls-certs\") pod \"999e0454-e871-4274-b6e6-f83b7d4b9734\" (UID: \"999e0454-e871-4274-b6e6-f83b7d4b9734\") " Jan 22 06:08:16 crc kubenswrapper[4933]: I0122 06:08:16.948845 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2ldmq\" (UniqueName: \"kubernetes.io/projected/999e0454-e871-4274-b6e6-f83b7d4b9734-kube-api-access-2ldmq\") pod \"999e0454-e871-4274-b6e6-f83b7d4b9734\" (UID: \"999e0454-e871-4274-b6e6-f83b7d4b9734\") " Jan 22 06:08:16 crc kubenswrapper[4933]: I0122 06:08:16.948983 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/999e0454-e871-4274-b6e6-f83b7d4b9734-combined-ca-bundle\") pod \"999e0454-e871-4274-b6e6-f83b7d4b9734\" (UID: \"999e0454-e871-4274-b6e6-f83b7d4b9734\") " Jan 22 06:08:16 crc kubenswrapper[4933]: I0122 06:08:16.949047 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/999e0454-e871-4274-b6e6-f83b7d4b9734-config-data\") pod \"999e0454-e871-4274-b6e6-f83b7d4b9734\" (UID: \"999e0454-e871-4274-b6e6-f83b7d4b9734\") " Jan 22 06:08:16 crc kubenswrapper[4933]: I0122 06:08:16.949193 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/999e0454-e871-4274-b6e6-f83b7d4b9734-public-tls-certs\") pod \"999e0454-e871-4274-b6e6-f83b7d4b9734\" (UID: \"999e0454-e871-4274-b6e6-f83b7d4b9734\") " Jan 22 06:08:16 crc kubenswrapper[4933]: I0122 06:08:16.950650 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/999e0454-e871-4274-b6e6-f83b7d4b9734-logs" (OuterVolumeSpecName: "logs") pod "999e0454-e871-4274-b6e6-f83b7d4b9734" (UID: "999e0454-e871-4274-b6e6-f83b7d4b9734"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:08:16 crc kubenswrapper[4933]: I0122 06:08:16.971656 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/999e0454-e871-4274-b6e6-f83b7d4b9734-kube-api-access-2ldmq" (OuterVolumeSpecName: "kube-api-access-2ldmq") pod "999e0454-e871-4274-b6e6-f83b7d4b9734" (UID: "999e0454-e871-4274-b6e6-f83b7d4b9734"). InnerVolumeSpecName "kube-api-access-2ldmq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:08:16 crc kubenswrapper[4933]: I0122 06:08:16.976285 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/999e0454-e871-4274-b6e6-f83b7d4b9734-config-data" (OuterVolumeSpecName: "config-data") pod "999e0454-e871-4274-b6e6-f83b7d4b9734" (UID: "999e0454-e871-4274-b6e6-f83b7d4b9734"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:08:16 crc kubenswrapper[4933]: I0122 06:08:16.976879 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/999e0454-e871-4274-b6e6-f83b7d4b9734-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "999e0454-e871-4274-b6e6-f83b7d4b9734" (UID: "999e0454-e871-4274-b6e6-f83b7d4b9734"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.008308 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/999e0454-e871-4274-b6e6-f83b7d4b9734-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "999e0454-e871-4274-b6e6-f83b7d4b9734" (UID: "999e0454-e871-4274-b6e6-f83b7d4b9734"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.011096 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/999e0454-e871-4274-b6e6-f83b7d4b9734-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "999e0454-e871-4274-b6e6-f83b7d4b9734" (UID: "999e0454-e871-4274-b6e6-f83b7d4b9734"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.050733 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/999e0454-e871-4274-b6e6-f83b7d4b9734-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.050765 4933 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/999e0454-e871-4274-b6e6-f83b7d4b9734-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.050777 4933 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/999e0454-e871-4274-b6e6-f83b7d4b9734-logs\") on node \"crc\" DevicePath \"\"" Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.050784 4933 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/999e0454-e871-4274-b6e6-f83b7d4b9734-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.050793 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2ldmq\" (UniqueName: \"kubernetes.io/projected/999e0454-e871-4274-b6e6-f83b7d4b9734-kube-api-access-2ldmq\") on node \"crc\" DevicePath \"\"" Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.050802 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/999e0454-e871-4274-b6e6-f83b7d4b9734-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.453641 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"999e0454-e871-4274-b6e6-f83b7d4b9734","Type":"ContainerDied","Data":"a6919aba7e026f0853ec40436371066927eb0289a602ee78264c6d28f4cf74c6"} Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.453667 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.453707 4933 scope.go:117] "RemoveContainer" containerID="4197c09868eb250c59ece15ec1d24e662987d0a37f0eb57f584f1032fa85aca1" Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.453806 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="16fb5bb3-209b-4796-9dec-493ff7db88c5" containerName="nova-metadata-log" containerID="cri-o://0e444f954278dd5be49a970362dc635837a430bf7bfaec305702acf410b09db5" gracePeriod=30 Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.453888 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="16fb5bb3-209b-4796-9dec-493ff7db88c5" containerName="nova-metadata-metadata" containerID="cri-o://e6b160829dfa4020312bc79db77e4d8bdf72c9ef8472b02748a3dc1d4c48fcc6" gracePeriod=30 Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.490231 4933 scope.go:117] "RemoveContainer" containerID="e5af426600a808462b63eff8401c4594a331586fc2ed5e4a0a5c0ee0fdb6f676" Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.498761 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.514049 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.524875 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 22 06:08:17 crc kubenswrapper[4933]: E0122 06:08:17.525269 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="999e0454-e871-4274-b6e6-f83b7d4b9734" containerName="nova-api-api" Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.525287 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="999e0454-e871-4274-b6e6-f83b7d4b9734" containerName="nova-api-api" Jan 22 06:08:17 crc kubenswrapper[4933]: E0122 06:08:17.525297 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="999e0454-e871-4274-b6e6-f83b7d4b9734" containerName="nova-api-log" Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.525303 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="999e0454-e871-4274-b6e6-f83b7d4b9734" containerName="nova-api-log" Jan 22 06:08:17 crc kubenswrapper[4933]: E0122 06:08:17.525317 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e553a055-30a6-4e9c-b424-66deb8dfabbb" containerName="nova-manage" Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.525325 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="e553a055-30a6-4e9c-b424-66deb8dfabbb" containerName="nova-manage" Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.525502 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="999e0454-e871-4274-b6e6-f83b7d4b9734" containerName="nova-api-log" Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.525513 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="999e0454-e871-4274-b6e6-f83b7d4b9734" containerName="nova-api-api" Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.525538 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="e553a055-30a6-4e9c-b424-66deb8dfabbb" containerName="nova-manage" Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.526415 4933 util.go:30] "No sandbox for pod can be found. 
Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.526415 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.535901 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.535914 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc"
Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.536054 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc"
Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.546099 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.661043 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/56625c99-64dc-4742-9927-0210d8fe8d9d-internal-tls-certs\") pod \"nova-api-0\" (UID: \"56625c99-64dc-4742-9927-0210d8fe8d9d\") " pod="openstack/nova-api-0"
Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.661105 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/56625c99-64dc-4742-9927-0210d8fe8d9d-logs\") pod \"nova-api-0\" (UID: \"56625c99-64dc-4742-9927-0210d8fe8d9d\") " pod="openstack/nova-api-0"
Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.661149 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56625c99-64dc-4742-9927-0210d8fe8d9d-config-data\") pod \"nova-api-0\" (UID: \"56625c99-64dc-4742-9927-0210d8fe8d9d\") " pod="openstack/nova-api-0"
Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.661264 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56625c99-64dc-4742-9927-0210d8fe8d9d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"56625c99-64dc-4742-9927-0210d8fe8d9d\") " pod="openstack/nova-api-0"
Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.661288 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8pdjn\" (UniqueName: \"kubernetes.io/projected/56625c99-64dc-4742-9927-0210d8fe8d9d-kube-api-access-8pdjn\") pod \"nova-api-0\" (UID: \"56625c99-64dc-4742-9927-0210d8fe8d9d\") " pod="openstack/nova-api-0"
Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.661324 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/56625c99-64dc-4742-9927-0210d8fe8d9d-public-tls-certs\") pod \"nova-api-0\" (UID: \"56625c99-64dc-4742-9927-0210d8fe8d9d\") " pod="openstack/nova-api-0"
Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.763166 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/56625c99-64dc-4742-9927-0210d8fe8d9d-internal-tls-certs\") pod \"nova-api-0\" (UID: \"56625c99-64dc-4742-9927-0210d8fe8d9d\") " pod="openstack/nova-api-0"
Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.763223 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/56625c99-64dc-4742-9927-0210d8fe8d9d-logs\") pod \"nova-api-0\" (UID: \"56625c99-64dc-4742-9927-0210d8fe8d9d\") " pod="openstack/nova-api-0"
Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.763272 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56625c99-64dc-4742-9927-0210d8fe8d9d-config-data\") pod \"nova-api-0\" (UID: \"56625c99-64dc-4742-9927-0210d8fe8d9d\") " pod="openstack/nova-api-0"
Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.763393 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56625c99-64dc-4742-9927-0210d8fe8d9d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"56625c99-64dc-4742-9927-0210d8fe8d9d\") " pod="openstack/nova-api-0"
Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.763419 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8pdjn\" (UniqueName: \"kubernetes.io/projected/56625c99-64dc-4742-9927-0210d8fe8d9d-kube-api-access-8pdjn\") pod \"nova-api-0\" (UID: \"56625c99-64dc-4742-9927-0210d8fe8d9d\") " pod="openstack/nova-api-0"
Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.763452 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/56625c99-64dc-4742-9927-0210d8fe8d9d-public-tls-certs\") pod \"nova-api-0\" (UID: \"56625c99-64dc-4742-9927-0210d8fe8d9d\") " pod="openstack/nova-api-0"
Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.764148 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/56625c99-64dc-4742-9927-0210d8fe8d9d-logs\") pod \"nova-api-0\" (UID: \"56625c99-64dc-4742-9927-0210d8fe8d9d\") " pod="openstack/nova-api-0"
Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.769890 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56625c99-64dc-4742-9927-0210d8fe8d9d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"56625c99-64dc-4742-9927-0210d8fe8d9d\") " pod="openstack/nova-api-0"
Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.770694 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/56625c99-64dc-4742-9927-0210d8fe8d9d-internal-tls-certs\") pod \"nova-api-0\" (UID: \"56625c99-64dc-4742-9927-0210d8fe8d9d\") " pod="openstack/nova-api-0"
Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.789537 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8pdjn\" (UniqueName: \"kubernetes.io/projected/56625c99-64dc-4742-9927-0210d8fe8d9d-kube-api-access-8pdjn\") pod \"nova-api-0\" (UID: \"56625c99-64dc-4742-9927-0210d8fe8d9d\") " pod="openstack/nova-api-0"
Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.816881 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56625c99-64dc-4742-9927-0210d8fe8d9d-config-data\") pod \"nova-api-0\" (UID: \"56625c99-64dc-4742-9927-0210d8fe8d9d\") " pod="openstack/nova-api-0"
pod="openstack/nova-api-0" Jan 22 06:08:17 crc kubenswrapper[4933]: I0122 06:08:17.849839 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 22 06:08:18 crc kubenswrapper[4933]: I0122 06:08:18.306039 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 22 06:08:18 crc kubenswrapper[4933]: W0122 06:08:18.308654 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod56625c99_64dc_4742_9927_0210d8fe8d9d.slice/crio-170c3882fe58d599e3fb221c49bb5456562e7747aa88579e72a325d48eaef23a WatchSource:0}: Error finding container 170c3882fe58d599e3fb221c49bb5456562e7747aa88579e72a325d48eaef23a: Status 404 returned error can't find the container with id 170c3882fe58d599e3fb221c49bb5456562e7747aa88579e72a325d48eaef23a Jan 22 06:08:18 crc kubenswrapper[4933]: I0122 06:08:18.463403 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8","Type":"ContainerStarted","Data":"7759c053d18a0cfde4caa05a9fcd933f06f5e1f05af3297873fc5f9ea3a8ae61"} Jan 22 06:08:18 crc kubenswrapper[4933]: I0122 06:08:18.463565 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 22 06:08:18 crc kubenswrapper[4933]: I0122 06:08:18.464337 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"56625c99-64dc-4742-9927-0210d8fe8d9d","Type":"ContainerStarted","Data":"170c3882fe58d599e3fb221c49bb5456562e7747aa88579e72a325d48eaef23a"} Jan 22 06:08:18 crc kubenswrapper[4933]: I0122 06:08:18.466193 4933 generic.go:334] "Generic (PLEG): container finished" podID="16fb5bb3-209b-4796-9dec-493ff7db88c5" containerID="0e444f954278dd5be49a970362dc635837a430bf7bfaec305702acf410b09db5" exitCode=143 Jan 22 06:08:18 crc kubenswrapper[4933]: I0122 06:08:18.466257 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"16fb5bb3-209b-4796-9dec-493ff7db88c5","Type":"ContainerDied","Data":"0e444f954278dd5be49a970362dc635837a430bf7bfaec305702acf410b09db5"} Jan 22 06:08:18 crc kubenswrapper[4933]: I0122 06:08:18.487926 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.230529182 podStartE2EDuration="5.487906037s" podCreationTimestamp="2026-01-22 06:08:13 +0000 UTC" firstStartedPulling="2026-01-22 06:08:14.343834447 +0000 UTC m=+1342.180959830" lastFinishedPulling="2026-01-22 06:08:17.601211342 +0000 UTC m=+1345.438336685" observedRunningTime="2026-01-22 06:08:18.481616242 +0000 UTC m=+1346.318741595" watchObservedRunningTime="2026-01-22 06:08:18.487906037 +0000 UTC m=+1346.325031390" Jan 22 06:08:18 crc kubenswrapper[4933]: I0122 06:08:18.501780 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="999e0454-e871-4274-b6e6-f83b7d4b9734" path="/var/lib/kubelet/pods/999e0454-e871-4274-b6e6-f83b7d4b9734/volumes" Jan 22 06:08:19 crc kubenswrapper[4933]: I0122 06:08:19.484046 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"56625c99-64dc-4742-9927-0210d8fe8d9d","Type":"ContainerStarted","Data":"b6f0f60869caf1ac1eeabf3cf5ea90ed5b94691c40db36a9d775cc676248cd2f"} Jan 22 06:08:19 crc kubenswrapper[4933]: I0122 06:08:19.484156 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" 
event={"ID":"56625c99-64dc-4742-9927-0210d8fe8d9d","Type":"ContainerStarted","Data":"a37bad5d083026a83896f1bbe2715cb126a4a56637c064f44bca0ac67965f6f0"} Jan 22 06:08:19 crc kubenswrapper[4933]: I0122 06:08:19.516352 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.516225645 podStartE2EDuration="2.516225645s" podCreationTimestamp="2026-01-22 06:08:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:08:19.507338698 +0000 UTC m=+1347.344464041" watchObservedRunningTime="2026-01-22 06:08:19.516225645 +0000 UTC m=+1347.353351018" Jan 22 06:08:20 crc kubenswrapper[4933]: I0122 06:08:20.492278 4933 generic.go:334] "Generic (PLEG): container finished" podID="9b3c1d90-1d9a-485f-a6b3-4c527cd173bb" containerID="4d64563ea4c05a55275d0208eb9b3d7076bbf3ac8685166071692de0f119dcbf" exitCode=0 Jan 22 06:08:20 crc kubenswrapper[4933]: I0122 06:08:20.499312 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9b3c1d90-1d9a-485f-a6b3-4c527cd173bb","Type":"ContainerDied","Data":"4d64563ea4c05a55275d0208eb9b3d7076bbf3ac8685166071692de0f119dcbf"} Jan 22 06:08:20 crc kubenswrapper[4933]: I0122 06:08:20.630710 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="16fb5bb3-209b-4796-9dec-493ff7db88c5" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.197:8775/\": read tcp 10.217.0.2:39986->10.217.0.197:8775: read: connection reset by peer" Jan 22 06:08:20 crc kubenswrapper[4933]: I0122 06:08:20.630710 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="16fb5bb3-209b-4796-9dec-493ff7db88c5" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.197:8775/\": read tcp 10.217.0.2:40002->10.217.0.197:8775: read: connection reset by peer" Jan 22 06:08:20 crc kubenswrapper[4933]: I0122 06:08:20.792393 4933 util.go:48] "No ready sandbox for pod can be found. 
Jan 22 06:08:20 crc kubenswrapper[4933]: I0122 06:08:20.792393 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Jan 22 06:08:20 crc kubenswrapper[4933]: I0122 06:08:20.934374 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6pnb9\" (UniqueName: \"kubernetes.io/projected/9b3c1d90-1d9a-485f-a6b3-4c527cd173bb-kube-api-access-6pnb9\") pod \"9b3c1d90-1d9a-485f-a6b3-4c527cd173bb\" (UID: \"9b3c1d90-1d9a-485f-a6b3-4c527cd173bb\") "
Jan 22 06:08:20 crc kubenswrapper[4933]: I0122 06:08:20.934520 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b3c1d90-1d9a-485f-a6b3-4c527cd173bb-combined-ca-bundle\") pod \"9b3c1d90-1d9a-485f-a6b3-4c527cd173bb\" (UID: \"9b3c1d90-1d9a-485f-a6b3-4c527cd173bb\") "
Jan 22 06:08:20 crc kubenswrapper[4933]: I0122 06:08:20.934603 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b3c1d90-1d9a-485f-a6b3-4c527cd173bb-config-data\") pod \"9b3c1d90-1d9a-485f-a6b3-4c527cd173bb\" (UID: \"9b3c1d90-1d9a-485f-a6b3-4c527cd173bb\") "
Jan 22 06:08:20 crc kubenswrapper[4933]: I0122 06:08:20.941375 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b3c1d90-1d9a-485f-a6b3-4c527cd173bb-kube-api-access-6pnb9" (OuterVolumeSpecName: "kube-api-access-6pnb9") pod "9b3c1d90-1d9a-485f-a6b3-4c527cd173bb" (UID: "9b3c1d90-1d9a-485f-a6b3-4c527cd173bb"). InnerVolumeSpecName "kube-api-access-6pnb9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 06:08:20 crc kubenswrapper[4933]: I0122 06:08:20.981389 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b3c1d90-1d9a-485f-a6b3-4c527cd173bb-config-data" (OuterVolumeSpecName: "config-data") pod "9b3c1d90-1d9a-485f-a6b3-4c527cd173bb" (UID: "9b3c1d90-1d9a-485f-a6b3-4c527cd173bb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:08:20 crc kubenswrapper[4933]: I0122 06:08:20.992858 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b3c1d90-1d9a-485f-a6b3-4c527cd173bb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9b3c1d90-1d9a-485f-a6b3-4c527cd173bb" (UID: "9b3c1d90-1d9a-485f-a6b3-4c527cd173bb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.037058 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6pnb9\" (UniqueName: \"kubernetes.io/projected/9b3c1d90-1d9a-485f-a6b3-4c527cd173bb-kube-api-access-6pnb9\") on node \"crc\" DevicePath \"\""
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.037103 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b3c1d90-1d9a-485f-a6b3-4c527cd173bb-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.037114 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b3c1d90-1d9a-485f-a6b3-4c527cd173bb-config-data\") on node \"crc\" DevicePath \"\""
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.052926 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.138068 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16fb5bb3-209b-4796-9dec-493ff7db88c5-combined-ca-bundle\") pod \"16fb5bb3-209b-4796-9dec-493ff7db88c5\" (UID: \"16fb5bb3-209b-4796-9dec-493ff7db88c5\") "
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.138262 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/16fb5bb3-209b-4796-9dec-493ff7db88c5-logs\") pod \"16fb5bb3-209b-4796-9dec-493ff7db88c5\" (UID: \"16fb5bb3-209b-4796-9dec-493ff7db88c5\") "
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.138382 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16fb5bb3-209b-4796-9dec-493ff7db88c5-config-data\") pod \"16fb5bb3-209b-4796-9dec-493ff7db88c5\" (UID: \"16fb5bb3-209b-4796-9dec-493ff7db88c5\") "
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.138416 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/16fb5bb3-209b-4796-9dec-493ff7db88c5-nova-metadata-tls-certs\") pod \"16fb5bb3-209b-4796-9dec-493ff7db88c5\" (UID: \"16fb5bb3-209b-4796-9dec-493ff7db88c5\") "
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.138491 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m84mj\" (UniqueName: \"kubernetes.io/projected/16fb5bb3-209b-4796-9dec-493ff7db88c5-kube-api-access-m84mj\") pod \"16fb5bb3-209b-4796-9dec-493ff7db88c5\" (UID: \"16fb5bb3-209b-4796-9dec-493ff7db88c5\") "
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.139936 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/16fb5bb3-209b-4796-9dec-493ff7db88c5-logs" (OuterVolumeSpecName: "logs") pod "16fb5bb3-209b-4796-9dec-493ff7db88c5" (UID: "16fb5bb3-209b-4796-9dec-493ff7db88c5"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.142598 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/16fb5bb3-209b-4796-9dec-493ff7db88c5-kube-api-access-m84mj" (OuterVolumeSpecName: "kube-api-access-m84mj") pod "16fb5bb3-209b-4796-9dec-493ff7db88c5" (UID: "16fb5bb3-209b-4796-9dec-493ff7db88c5"). InnerVolumeSpecName "kube-api-access-m84mj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.162386 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16fb5bb3-209b-4796-9dec-493ff7db88c5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "16fb5bb3-209b-4796-9dec-493ff7db88c5" (UID: "16fb5bb3-209b-4796-9dec-493ff7db88c5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.168241 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16fb5bb3-209b-4796-9dec-493ff7db88c5-config-data" (OuterVolumeSpecName: "config-data") pod "16fb5bb3-209b-4796-9dec-493ff7db88c5" (UID: "16fb5bb3-209b-4796-9dec-493ff7db88c5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.198538 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16fb5bb3-209b-4796-9dec-493ff7db88c5-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "16fb5bb3-209b-4796-9dec-493ff7db88c5" (UID: "16fb5bb3-209b-4796-9dec-493ff7db88c5"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.240392 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16fb5bb3-209b-4796-9dec-493ff7db88c5-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.240420 4933 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/16fb5bb3-209b-4796-9dec-493ff7db88c5-logs\") on node \"crc\" DevicePath \"\""
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.240430 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16fb5bb3-209b-4796-9dec-493ff7db88c5-config-data\") on node \"crc\" DevicePath \"\""
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.240439 4933 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/16fb5bb3-209b-4796-9dec-493ff7db88c5-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.240449 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m84mj\" (UniqueName: \"kubernetes.io/projected/16fb5bb3-209b-4796-9dec-493ff7db88c5-kube-api-access-m84mj\") on node \"crc\" DevicePath \"\""
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.510181 4933 generic.go:334] "Generic (PLEG): container finished" podID="16fb5bb3-209b-4796-9dec-493ff7db88c5" containerID="e6b160829dfa4020312bc79db77e4d8bdf72c9ef8472b02748a3dc1d4c48fcc6" exitCode=0
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.511175 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"16fb5bb3-209b-4796-9dec-493ff7db88c5","Type":"ContainerDied","Data":"e6b160829dfa4020312bc79db77e4d8bdf72c9ef8472b02748a3dc1d4c48fcc6"}
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.511314 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"16fb5bb3-209b-4796-9dec-493ff7db88c5","Type":"ContainerDied","Data":"20cdf7b7e747fbfe47522131c2c4bb45c6cf78d62e6d1b18bcd891c4ed76100b"}
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.511410 4933 scope.go:117] "RemoveContainer" containerID="e6b160829dfa4020312bc79db77e4d8bdf72c9ef8472b02748a3dc1d4c48fcc6"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.511643 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.532121 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9b3c1d90-1d9a-485f-a6b3-4c527cd173bb","Type":"ContainerDied","Data":"ba2809474f3529e5eecfbed195dfd81581924c20108b4ba107f210aaef379c89"}
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.532215 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.567511 4933 scope.go:117] "RemoveContainer" containerID="0e444f954278dd5be49a970362dc635837a430bf7bfaec305702acf410b09db5"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.570140 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.582702 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"]
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.597925 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.608539 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.610659 4933 scope.go:117] "RemoveContainer" containerID="e6b160829dfa4020312bc79db77e4d8bdf72c9ef8472b02748a3dc1d4c48fcc6"
Jan 22 06:08:21 crc kubenswrapper[4933]: E0122 06:08:21.616269 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e6b160829dfa4020312bc79db77e4d8bdf72c9ef8472b02748a3dc1d4c48fcc6\": container with ID starting with e6b160829dfa4020312bc79db77e4d8bdf72c9ef8472b02748a3dc1d4c48fcc6 not found: ID does not exist" containerID="e6b160829dfa4020312bc79db77e4d8bdf72c9ef8472b02748a3dc1d4c48fcc6"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.616324 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e6b160829dfa4020312bc79db77e4d8bdf72c9ef8472b02748a3dc1d4c48fcc6"} err="failed to get container status \"e6b160829dfa4020312bc79db77e4d8bdf72c9ef8472b02748a3dc1d4c48fcc6\": rpc error: code = NotFound desc = could not find container \"e6b160829dfa4020312bc79db77e4d8bdf72c9ef8472b02748a3dc1d4c48fcc6\": container with ID starting with e6b160829dfa4020312bc79db77e4d8bdf72c9ef8472b02748a3dc1d4c48fcc6 not found: ID does not exist"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.616366 4933 scope.go:117] "RemoveContainer" containerID="0e444f954278dd5be49a970362dc635837a430bf7bfaec305702acf410b09db5"
Jan 22 06:08:21 crc kubenswrapper[4933]: E0122 06:08:21.618957 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0e444f954278dd5be49a970362dc635837a430bf7bfaec305702acf410b09db5\": container with ID starting with 0e444f954278dd5be49a970362dc635837a430bf7bfaec305702acf410b09db5 not found: ID does not exist" containerID="0e444f954278dd5be49a970362dc635837a430bf7bfaec305702acf410b09db5"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.618996 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0e444f954278dd5be49a970362dc635837a430bf7bfaec305702acf410b09db5"} err="failed to get container status \"0e444f954278dd5be49a970362dc635837a430bf7bfaec305702acf410b09db5\": rpc error: code = NotFound desc = could not find container \"0e444f954278dd5be49a970362dc635837a430bf7bfaec305702acf410b09db5\": container with ID starting with 0e444f954278dd5be49a970362dc635837a430bf7bfaec305702acf410b09db5 not found: ID does not exist"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.619019 4933 scope.go:117] "RemoveContainer" containerID="4d64563ea4c05a55275d0208eb9b3d7076bbf3ac8685166071692de0f119dcbf"
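The paired "ContainerStatus from runtime service failed" / "DeleteContainer returned error" entries above are benign: the containers were already removed, and the kubelet treats a gRPC NotFound from the runtime as deletion-already-done. A sketch of that tolerate-NotFound pattern; removeContainer is a hypothetical stand-in for the CRI runtime client call:

```go
// Sketch of idempotent container removal against a gRPC runtime,
// the pattern behind the benign "could not find container" errors above.
package main

import (
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// Hypothetical runtime call; here it always reports NotFound for the demo.
func removeContainer(id string) error {
	return status.Error(codes.NotFound, "could not find container "+id)
}

func removeIfPresent(id string) error {
	if err := removeContainer(id); err != nil {
		if status.Code(err) == codes.NotFound {
			return nil // already gone: nothing left to do
		}
		return err
	}
	return nil
}

func main() { _ = removeIfPresent("example-container-id") }
```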
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.629378 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Jan 22 06:08:21 crc kubenswrapper[4933]: E0122 06:08:21.629796 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16fb5bb3-209b-4796-9dec-493ff7db88c5" containerName="nova-metadata-log"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.629807 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="16fb5bb3-209b-4796-9dec-493ff7db88c5" containerName="nova-metadata-log"
Jan 22 06:08:21 crc kubenswrapper[4933]: E0122 06:08:21.629819 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16fb5bb3-209b-4796-9dec-493ff7db88c5" containerName="nova-metadata-metadata"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.629825 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="16fb5bb3-209b-4796-9dec-493ff7db88c5" containerName="nova-metadata-metadata"
Jan 22 06:08:21 crc kubenswrapper[4933]: E0122 06:08:21.629856 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b3c1d90-1d9a-485f-a6b3-4c527cd173bb" containerName="nova-scheduler-scheduler"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.629862 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b3c1d90-1d9a-485f-a6b3-4c527cd173bb" containerName="nova-scheduler-scheduler"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.630030 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="16fb5bb3-209b-4796-9dec-493ff7db88c5" containerName="nova-metadata-metadata"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.630051 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b3c1d90-1d9a-485f-a6b3-4c527cd173bb" containerName="nova-scheduler-scheduler"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.630060 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="16fb5bb3-209b-4796-9dec-493ff7db88c5" containerName="nova-metadata-log"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.631264 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.638659 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.638682 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.638828 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"]
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.640274 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.641514 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.644646 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.654422 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.760703 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6fae4840-8fac-4192-8358-cbcae518e70d-logs\") pod \"nova-metadata-0\" (UID: \"6fae4840-8fac-4192-8358-cbcae518e70d\") " pod="openstack/nova-metadata-0"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.760937 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/6fae4840-8fac-4192-8358-cbcae518e70d-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"6fae4840-8fac-4192-8358-cbcae518e70d\") " pod="openstack/nova-metadata-0"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.761049 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pfvqs\" (UniqueName: \"kubernetes.io/projected/6fae4840-8fac-4192-8358-cbcae518e70d-kube-api-access-pfvqs\") pod \"nova-metadata-0\" (UID: \"6fae4840-8fac-4192-8358-cbcae518e70d\") " pod="openstack/nova-metadata-0"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.761182 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fae4840-8fac-4192-8358-cbcae518e70d-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"6fae4840-8fac-4192-8358-cbcae518e70d\") " pod="openstack/nova-metadata-0"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.761329 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6fae4840-8fac-4192-8358-cbcae518e70d-config-data\") pod \"nova-metadata-0\" (UID: \"6fae4840-8fac-4192-8358-cbcae518e70d\") " pod="openstack/nova-metadata-0"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.761451 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf956626-51e3-4aff-b24b-4a553160327c-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"cf956626-51e3-4aff-b24b-4a553160327c\") " pod="openstack/nova-scheduler-0"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.761538 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vxg58\" (UniqueName: \"kubernetes.io/projected/cf956626-51e3-4aff-b24b-4a553160327c-kube-api-access-vxg58\") pod \"nova-scheduler-0\" (UID: \"cf956626-51e3-4aff-b24b-4a553160327c\") " pod="openstack/nova-scheduler-0"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.761627 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf956626-51e3-4aff-b24b-4a553160327c-config-data\") pod \"nova-scheduler-0\" (UID: \"cf956626-51e3-4aff-b24b-4a553160327c\") " pod="openstack/nova-scheduler-0"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.863570 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf956626-51e3-4aff-b24b-4a553160327c-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"cf956626-51e3-4aff-b24b-4a553160327c\") " pod="openstack/nova-scheduler-0"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.863635 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vxg58\" (UniqueName: \"kubernetes.io/projected/cf956626-51e3-4aff-b24b-4a553160327c-kube-api-access-vxg58\") pod \"nova-scheduler-0\" (UID: \"cf956626-51e3-4aff-b24b-4a553160327c\") " pod="openstack/nova-scheduler-0"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.863683 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf956626-51e3-4aff-b24b-4a553160327c-config-data\") pod \"nova-scheduler-0\" (UID: \"cf956626-51e3-4aff-b24b-4a553160327c\") " pod="openstack/nova-scheduler-0"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.863749 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6fae4840-8fac-4192-8358-cbcae518e70d-logs\") pod \"nova-metadata-0\" (UID: \"6fae4840-8fac-4192-8358-cbcae518e70d\") " pod="openstack/nova-metadata-0"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.863793 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/6fae4840-8fac-4192-8358-cbcae518e70d-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"6fae4840-8fac-4192-8358-cbcae518e70d\") " pod="openstack/nova-metadata-0"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.863845 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pfvqs\" (UniqueName: \"kubernetes.io/projected/6fae4840-8fac-4192-8358-cbcae518e70d-kube-api-access-pfvqs\") pod \"nova-metadata-0\" (UID: \"6fae4840-8fac-4192-8358-cbcae518e70d\") " pod="openstack/nova-metadata-0"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.863868 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fae4840-8fac-4192-8358-cbcae518e70d-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"6fae4840-8fac-4192-8358-cbcae518e70d\") " pod="openstack/nova-metadata-0"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.863905 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6fae4840-8fac-4192-8358-cbcae518e70d-config-data\") pod \"nova-metadata-0\" (UID: \"6fae4840-8fac-4192-8358-cbcae518e70d\") " pod="openstack/nova-metadata-0"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.864887 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6fae4840-8fac-4192-8358-cbcae518e70d-logs\") pod \"nova-metadata-0\" (UID: \"6fae4840-8fac-4192-8358-cbcae518e70d\") " pod="openstack/nova-metadata-0"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.869815 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6fae4840-8fac-4192-8358-cbcae518e70d-config-data\") pod \"nova-metadata-0\" (UID: \"6fae4840-8fac-4192-8358-cbcae518e70d\") " pod="openstack/nova-metadata-0"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.870645 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf956626-51e3-4aff-b24b-4a553160327c-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"cf956626-51e3-4aff-b24b-4a553160327c\") " pod="openstack/nova-scheduler-0"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.870917 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fae4840-8fac-4192-8358-cbcae518e70d-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"6fae4840-8fac-4192-8358-cbcae518e70d\") " pod="openstack/nova-metadata-0"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.871457 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf956626-51e3-4aff-b24b-4a553160327c-config-data\") pod \"nova-scheduler-0\" (UID: \"cf956626-51e3-4aff-b24b-4a553160327c\") " pod="openstack/nova-scheduler-0"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.876443 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/6fae4840-8fac-4192-8358-cbcae518e70d-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"6fae4840-8fac-4192-8358-cbcae518e70d\") " pod="openstack/nova-metadata-0"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.891203 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pfvqs\" (UniqueName: \"kubernetes.io/projected/6fae4840-8fac-4192-8358-cbcae518e70d-kube-api-access-pfvqs\") pod \"nova-metadata-0\" (UID: \"6fae4840-8fac-4192-8358-cbcae518e70d\") " pod="openstack/nova-metadata-0"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.893412 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vxg58\" (UniqueName: \"kubernetes.io/projected/cf956626-51e3-4aff-b24b-4a553160327c-kube-api-access-vxg58\") pod \"nova-scheduler-0\" (UID: \"cf956626-51e3-4aff-b24b-4a553160327c\") " pod="openstack/nova-scheduler-0"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.964766 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 22 06:08:21 crc kubenswrapper[4933]: I0122 06:08:21.977742 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Jan 22 06:08:22 crc kubenswrapper[4933]: I0122 06:08:22.449670 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Jan 22 06:08:22 crc kubenswrapper[4933]: W0122 06:08:22.451141 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6fae4840_8fac_4192_8358_cbcae518e70d.slice/crio-22019c00eacee40fbbe6d90e8f349b8275d96b73d0c0369b14596fa5817fd488 WatchSource:0}: Error finding container 22019c00eacee40fbbe6d90e8f349b8275d96b73d0c0369b14596fa5817fd488: Status 404 returned error can't find the container with id 22019c00eacee40fbbe6d90e8f349b8275d96b73d0c0369b14596fa5817fd488
Jan 22 06:08:22 crc kubenswrapper[4933]: I0122 06:08:22.503834 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="16fb5bb3-209b-4796-9dec-493ff7db88c5" path="/var/lib/kubelet/pods/16fb5bb3-209b-4796-9dec-493ff7db88c5/volumes"
Jan 22 06:08:22 crc kubenswrapper[4933]: I0122 06:08:22.504747 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b3c1d90-1d9a-485f-a6b3-4c527cd173bb" path="/var/lib/kubelet/pods/9b3c1d90-1d9a-485f-a6b3-4c527cd173bb/volumes"
Jan 22 06:08:22 crc kubenswrapper[4933]: I0122 06:08:22.558060 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6fae4840-8fac-4192-8358-cbcae518e70d","Type":"ContainerStarted","Data":"22019c00eacee40fbbe6d90e8f349b8275d96b73d0c0369b14596fa5817fd488"}
Jan 22 06:08:22 crc kubenswrapper[4933]: I0122 06:08:22.564031 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 22 06:08:22 crc kubenswrapper[4933]: W0122 06:08:22.578182 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcf956626_51e3_4aff_b24b_4a553160327c.slice/crio-518642500f820671c936da21dd017610349e0eef6ffdec8c64931e903ec802eb WatchSource:0}: Error finding container 518642500f820671c936da21dd017610349e0eef6ffdec8c64931e903ec802eb: Status 404 returned error can't find the container with id 518642500f820671c936da21dd017610349e0eef6ffdec8c64931e903ec802eb
Jan 22 06:08:23 crc kubenswrapper[4933]: I0122 06:08:23.573793 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6fae4840-8fac-4192-8358-cbcae518e70d","Type":"ContainerStarted","Data":"f46d4f43b41aa5fe98df750c180e6af7780c3a5b7b9665b0eea715c193df207c"}
Jan 22 06:08:23 crc kubenswrapper[4933]: I0122 06:08:23.574334 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6fae4840-8fac-4192-8358-cbcae518e70d","Type":"ContainerStarted","Data":"769102d68320fe984ffaababc2e759013ea7756eebffbcf2bed246a58368e7c3"}
Jan 22 06:08:23 crc kubenswrapper[4933]: I0122 06:08:23.577814 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"cf956626-51e3-4aff-b24b-4a553160327c","Type":"ContainerStarted","Data":"727aeb5a63faa31a31c03450f9d3d8823575f9ab0abd3ff2f9a00a0f91ec8597"}
Jan 22 06:08:23 crc kubenswrapper[4933]: I0122 06:08:23.577853 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"cf956626-51e3-4aff-b24b-4a553160327c","Type":"ContainerStarted","Data":"518642500f820671c936da21dd017610349e0eef6ffdec8c64931e903ec802eb"}
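The "Observed pod startup duration" entries that follow report podStartE2EDuration as, apparently, the gap between watchObservedRunningTime and podCreationTimestamp. A rough Go reconstruction of that arithmetic (an assumption about the tracker's semantics, not kubelet source), using the nova-metadata-0 values below:

```go
// podStartE2EDuration ~= watchObservedRunningTime - podCreationTimestamp.
// Timestamps copied from the nova-metadata-0 entry that follows.
package main

import (
	"fmt"
	"time"
)

func main() {
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	created, _ := time.Parse(layout, "2026-01-22 06:08:21 +0000 UTC")
	running, _ := time.Parse(layout, "2026-01-22 06:08:23.599761644 +0000 UTC")
	fmt.Println(running.Sub(created)) // 2.599761644s, matching the logged E2E duration
}
```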
Jan 22 06:08:23 crc kubenswrapper[4933]: I0122 06:08:23.599787 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.599761644 podStartE2EDuration="2.599761644s" podCreationTimestamp="2026-01-22 06:08:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:08:23.597340724 +0000 UTC m=+1351.434466087" watchObservedRunningTime="2026-01-22 06:08:23.599761644 +0000 UTC m=+1351.436887037"
Jan 22 06:08:23 crc kubenswrapper[4933]: I0122 06:08:23.627814 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.62778936 podStartE2EDuration="2.62778936s" podCreationTimestamp="2026-01-22 06:08:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:08:23.616599886 +0000 UTC m=+1351.453725269" watchObservedRunningTime="2026-01-22 06:08:23.62778936 +0000 UTC m=+1351.464914713"
Jan 22 06:08:26 crc kubenswrapper[4933]: I0122 06:08:26.964937 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Jan 22 06:08:26 crc kubenswrapper[4933]: I0122 06:08:26.965566 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Jan 22 06:08:26 crc kubenswrapper[4933]: I0122 06:08:26.978870 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Jan 22 06:08:27 crc kubenswrapper[4933]: I0122 06:08:27.850358 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Jan 22 06:08:27 crc kubenswrapper[4933]: I0122 06:08:27.850890 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Jan 22 06:08:28 crc kubenswrapper[4933]: I0122 06:08:28.875353 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="56625c99-64dc-4742-9927-0210d8fe8d9d" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.204:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 22 06:08:28 crc kubenswrapper[4933]: I0122 06:08:28.875352 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="56625c99-64dc-4742-9927-0210d8fe8d9d" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.204:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 22 06:08:31 crc kubenswrapper[4933]: I0122 06:08:31.965630 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Jan 22 06:08:31 crc kubenswrapper[4933]: I0122 06:08:31.966162 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Jan 22 06:08:31 crc kubenswrapper[4933]: I0122 06:08:31.978277 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Jan 22 06:08:32 crc kubenswrapper[4933]: I0122 06:08:32.009000 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Jan 22 06:08:32 crc kubenswrapper[4933]: I0122 06:08:32.692429 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Jan 22 06:08:32 crc kubenswrapper[4933]: I0122 06:08:32.981249 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="6fae4840-8fac-4192-8358-cbcae518e70d" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.205:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 22 06:08:32 crc kubenswrapper[4933]: I0122 06:08:32.981249 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="6fae4840-8fac-4192-8358-cbcae518e70d" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.205:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 22 06:08:37 crc kubenswrapper[4933]: I0122 06:08:37.858006 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Jan 22 06:08:37 crc kubenswrapper[4933]: I0122 06:08:37.859751 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Jan 22 06:08:37 crc kubenswrapper[4933]: I0122 06:08:37.867876 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Jan 22 06:08:37 crc kubenswrapper[4933]: I0122 06:08:37.870466 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Jan 22 06:08:38 crc kubenswrapper[4933]: I0122 06:08:38.728244 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Jan 22 06:08:38 crc kubenswrapper[4933]: I0122 06:08:38.736248 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Jan 22 06:08:41 crc kubenswrapper[4933]: I0122 06:08:41.972543 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Jan 22 06:08:41 crc kubenswrapper[4933]: I0122 06:08:41.976232 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Jan 22 06:08:41 crc kubenswrapper[4933]: I0122 06:08:41.978897 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Jan 22 06:08:42 crc kubenswrapper[4933]: I0122 06:08:42.782466 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Jan 22 06:08:43 crc kubenswrapper[4933]: I0122 06:08:43.843881 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0"
Jan 22 06:09:07 crc kubenswrapper[4933]: I0122 06:09:07.485421 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"]
Jan 22 06:09:07 crc kubenswrapper[4933]: I0122 06:09:07.486208 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstackclient" podUID="45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6" containerName="openstackclient" containerID="cri-o://8bf7a48a2b547ef8e38d6ce658ae3a575bfebda764938fc766292bd884b6d211" gracePeriod=2
Jan 22 06:09:07 crc kubenswrapper[4933]: I0122 06:09:07.508947 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"]
Jan 22 06:09:07 crc kubenswrapper[4933]: I0122 06:09:07.690790 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-633c-account-create-update-bklwf"]
Jan 22 06:09:07 crc kubenswrapper[4933]: E0122 06:09:07.691578 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6" containerName="openstackclient"
Jan 22 06:09:07 crc kubenswrapper[4933]: I0122 06:09:07.691598 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6" containerName="openstackclient"
Jan 22 06:09:07 crc kubenswrapper[4933]: I0122 06:09:07.691924 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6" containerName="openstackclient"
Jan 22 06:09:07 crc kubenswrapper[4933]: I0122 06:09:07.692617 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-633c-account-create-update-bklwf"
Jan 22 06:09:07 crc kubenswrapper[4933]: I0122 06:09:07.706420 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret"
Jan 22 06:09:07 crc kubenswrapper[4933]: I0122 06:09:07.709356 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xmvs6\" (UniqueName: \"kubernetes.io/projected/729db25e-5864-4305-99dd-24ce61f45029-kube-api-access-xmvs6\") pod \"cinder-633c-account-create-update-bklwf\" (UID: \"729db25e-5864-4305-99dd-24ce61f45029\") " pod="openstack/cinder-633c-account-create-update-bklwf"
Jan 22 06:09:07 crc kubenswrapper[4933]: I0122 06:09:07.709419 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/729db25e-5864-4305-99dd-24ce61f45029-operator-scripts\") pod \"cinder-633c-account-create-update-bklwf\" (UID: \"729db25e-5864-4305-99dd-24ce61f45029\") " pod="openstack/cinder-633c-account-create-update-bklwf"
Jan 22 06:09:07 crc kubenswrapper[4933]: I0122 06:09:07.715774 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-b23f-account-create-update-hfxzc"]
Jan 22 06:09:07 crc kubenswrapper[4933]: I0122 06:09:07.716952 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-b23f-account-create-update-hfxzc"
Jan 22 06:09:07 crc kubenswrapper[4933]: I0122 06:09:07.722188 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret"
Jan 22 06:09:07 crc kubenswrapper[4933]: I0122 06:09:07.725147 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-b23f-account-create-update-sfkx5"]
Jan 22 06:09:07 crc kubenswrapper[4933]: I0122 06:09:07.737394 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-b23f-account-create-update-sfkx5"]
Jan 22 06:09:07 crc kubenswrapper[4933]: I0122 06:09:07.750444 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-633c-account-create-update-bklwf"]
Jan 22 06:09:07 crc kubenswrapper[4933]: I0122 06:09:07.787767 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-e75b-account-create-update-tmgdt"]
Jan 22 06:09:07 crc kubenswrapper[4933]: I0122 06:09:07.789896 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-e75b-account-create-update-tmgdt"
Jan 22 06:09:07 crc kubenswrapper[4933]: I0122 06:09:07.807486 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret"
Jan 22 06:09:07 crc kubenswrapper[4933]: I0122 06:09:07.823005 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xmvs6\" (UniqueName: \"kubernetes.io/projected/729db25e-5864-4305-99dd-24ce61f45029-kube-api-access-xmvs6\") pod \"cinder-633c-account-create-update-bklwf\" (UID: \"729db25e-5864-4305-99dd-24ce61f45029\") " pod="openstack/cinder-633c-account-create-update-bklwf"
Jan 22 06:09:07 crc kubenswrapper[4933]: I0122 06:09:07.823250 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/729db25e-5864-4305-99dd-24ce61f45029-operator-scripts\") pod \"cinder-633c-account-create-update-bklwf\" (UID: \"729db25e-5864-4305-99dd-24ce61f45029\") " pod="openstack/cinder-633c-account-create-update-bklwf"
Jan 22 06:09:07 crc kubenswrapper[4933]: I0122 06:09:07.824182 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/729db25e-5864-4305-99dd-24ce61f45029-operator-scripts\") pod \"cinder-633c-account-create-update-bklwf\" (UID: \"729db25e-5864-4305-99dd-24ce61f45029\") " pod="openstack/cinder-633c-account-create-update-bklwf"
Jan 22 06:09:07 crc kubenswrapper[4933]: I0122 06:09:07.876465 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xmvs6\" (UniqueName: \"kubernetes.io/projected/729db25e-5864-4305-99dd-24ce61f45029-kube-api-access-xmvs6\") pod \"cinder-633c-account-create-update-bklwf\" (UID: \"729db25e-5864-4305-99dd-24ce61f45029\") " pod="openstack/cinder-633c-account-create-update-bklwf"
Jan 22 06:09:07 crc kubenswrapper[4933]: I0122 06:09:07.892698 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-b23f-account-create-update-hfxzc"]
Jan 22 06:09:07 crc kubenswrapper[4933]: I0122 06:09:07.911220 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-kp2cn"]
Jan 22 06:09:07 crc kubenswrapper[4933]: I0122 06:09:07.925866 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-kp2cn"
Jan 22 06:09:07 crc kubenswrapper[4933]: I0122 06:09:07.927597 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2c6cfc4a-371c-496c-93dc-cdd3ea57e2d7-operator-scripts\") pod \"nova-api-b23f-account-create-update-hfxzc\" (UID: \"2c6cfc4a-371c-496c-93dc-cdd3ea57e2d7\") " pod="openstack/nova-api-b23f-account-create-update-hfxzc"
Jan 22 06:09:07 crc kubenswrapper[4933]: I0122 06:09:07.927777 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xsb6j\" (UniqueName: \"kubernetes.io/projected/2c6cfc4a-371c-496c-93dc-cdd3ea57e2d7-kube-api-access-xsb6j\") pod \"nova-api-b23f-account-create-update-hfxzc\" (UID: \"2c6cfc4a-371c-496c-93dc-cdd3ea57e2d7\") " pod="openstack/nova-api-b23f-account-create-update-hfxzc"
Jan 22 06:09:07 crc kubenswrapper[4933]: I0122 06:09:07.927804 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/841bd4c5-516a-406d-af7f-8d551b970cab-operator-scripts\") pod \"barbican-e75b-account-create-update-tmgdt\" (UID: \"841bd4c5-516a-406d-af7f-8d551b970cab\") " pod="openstack/barbican-e75b-account-create-update-tmgdt"
Jan 22 06:09:07 crc kubenswrapper[4933]: I0122 06:09:07.927861 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6dpkl\" (UniqueName: \"kubernetes.io/projected/841bd4c5-516a-406d-af7f-8d551b970cab-kube-api-access-6dpkl\") pod \"barbican-e75b-account-create-update-tmgdt\" (UID: \"841bd4c5-516a-406d-af7f-8d551b970cab\") " pod="openstack/barbican-e75b-account-create-update-tmgdt"
Jan 22 06:09:07 crc kubenswrapper[4933]: I0122 06:09:07.934780 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-mariadb-root-db-secret"
Jan 22 06:09:07 crc kubenswrapper[4933]: I0122 06:09:07.937715 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-e75b-account-create-update-tmgdt"]
Jan 22 06:09:07 crc kubenswrapper[4933]: I0122 06:09:07.948971 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-kp2cn"]
Jan 22 06:09:07 crc kubenswrapper[4933]: I0122 06:09:07.963602 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-f8f6-account-create-update-n4zdn"]
Jan 22 06:09:07 crc kubenswrapper[4933]: I0122 06:09:07.964735 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-f8f6-account-create-update-n4zdn"
Jan 22 06:09:07 crc kubenswrapper[4933]: I0122 06:09:07.967334 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-633c-account-create-update-xl2w2"]
Jan 22 06:09:07 crc kubenswrapper[4933]: I0122 06:09:07.971355 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret"
Jan 22 06:09:07 crc kubenswrapper[4933]: I0122 06:09:07.978213 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-633c-account-create-update-xl2w2"]
Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.013617 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-f8f6-account-create-update-n4zdn"]
Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.029907 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1d8e9d8c-961f-4dc5-84b8-51c486220cdc-operator-scripts\") pod \"root-account-create-update-kp2cn\" (UID: \"1d8e9d8c-961f-4dc5-84b8-51c486220cdc\") " pod="openstack/root-account-create-update-kp2cn"
Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.029968 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xsb6j\" (UniqueName: \"kubernetes.io/projected/2c6cfc4a-371c-496c-93dc-cdd3ea57e2d7-kube-api-access-xsb6j\") pod \"nova-api-b23f-account-create-update-hfxzc\" (UID: \"2c6cfc4a-371c-496c-93dc-cdd3ea57e2d7\") " pod="openstack/nova-api-b23f-account-create-update-hfxzc"
Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.029988 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/841bd4c5-516a-406d-af7f-8d551b970cab-operator-scripts\") pod \"barbican-e75b-account-create-update-tmgdt\" (UID: \"841bd4c5-516a-406d-af7f-8d551b970cab\") " pod="openstack/barbican-e75b-account-create-update-tmgdt"
Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.030020 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-twf88\" (UniqueName: \"kubernetes.io/projected/1d8e9d8c-961f-4dc5-84b8-51c486220cdc-kube-api-access-twf88\") pod \"root-account-create-update-kp2cn\" (UID: \"1d8e9d8c-961f-4dc5-84b8-51c486220cdc\") " pod="openstack/root-account-create-update-kp2cn"
Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.030048 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6dpkl\" (UniqueName: \"kubernetes.io/projected/841bd4c5-516a-406d-af7f-8d551b970cab-kube-api-access-6dpkl\") pod \"barbican-e75b-account-create-update-tmgdt\" (UID: \"841bd4c5-516a-406d-af7f-8d551b970cab\") " pod="openstack/barbican-e75b-account-create-update-tmgdt"
Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.030130 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2c6cfc4a-371c-496c-93dc-cdd3ea57e2d7-operator-scripts\") pod \"nova-api-b23f-account-create-update-hfxzc\" (UID: \"2c6cfc4a-371c-496c-93dc-cdd3ea57e2d7\") " pod="openstack/nova-api-b23f-account-create-update-hfxzc"
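The account-create-update pods above (bklwf, hfxzc, tmgdt, kp2cn, n4zdn, ...) carry five-character API-generated suffixes, which is what metadata.generateName produces. A sketch of creating such a pod; the spec contents are placeholders, not the operator's actual definition:

```go
// Sketch of how the random pod-name suffixes above arise: creating a pod
// with metadata.generateName makes the API server append a random suffix.
package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func createAccountCreatePod(cs kubernetes.Interface) (*corev1.Pod, error) {
	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "cinder-633c-account-create-update-", // suffix added server-side
			Namespace:    "openstack",
		},
		Spec: corev1.PodSpec{
			RestartPolicy: corev1.RestartPolicyNever,
			Containers: []corev1.Container{{
				Name:  "account-create",      // placeholder
				Image: "example.invalid/img", // placeholder
			}},
		},
	}
	return cs.CoreV1().Pods("openstack").Create(context.TODO(), pod, metav1.CreateOptions{})
}

func main() {}
```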
\"kubernetes.io/configmap/2c6cfc4a-371c-496c-93dc-cdd3ea57e2d7-operator-scripts\") pod \"nova-api-b23f-account-create-update-hfxzc\" (UID: \"2c6cfc4a-371c-496c-93dc-cdd3ea57e2d7\") " pod="openstack/nova-api-b23f-account-create-update-hfxzc" Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.031777 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/841bd4c5-516a-406d-af7f-8d551b970cab-operator-scripts\") pod \"barbican-e75b-account-create-update-tmgdt\" (UID: \"841bd4c5-516a-406d-af7f-8d551b970cab\") " pod="openstack/barbican-e75b-account-create-update-tmgdt" Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.075761 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.077148 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-633c-account-create-update-bklwf" Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.091383 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-e75b-account-create-update-2nrxw"] Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.094431 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6dpkl\" (UniqueName: \"kubernetes.io/projected/841bd4c5-516a-406d-af7f-8d551b970cab-kube-api-access-6dpkl\") pod \"barbican-e75b-account-create-update-tmgdt\" (UID: \"841bd4c5-516a-406d-af7f-8d551b970cab\") " pod="openstack/barbican-e75b-account-create-update-tmgdt" Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.097492 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xsb6j\" (UniqueName: \"kubernetes.io/projected/2c6cfc4a-371c-496c-93dc-cdd3ea57e2d7-kube-api-access-xsb6j\") pod \"nova-api-b23f-account-create-update-hfxzc\" (UID: \"2c6cfc4a-371c-496c-93dc-cdd3ea57e2d7\") " pod="openstack/nova-api-b23f-account-create-update-hfxzc" Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.116829 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-e75b-account-create-update-2nrxw"] Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.136494 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1d8e9d8c-961f-4dc5-84b8-51c486220cdc-operator-scripts\") pod \"root-account-create-update-kp2cn\" (UID: \"1d8e9d8c-961f-4dc5-84b8-51c486220cdc\") " pod="openstack/root-account-create-update-kp2cn" Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.137054 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c2022299-4089-4740-ae8f-50ca5b4be2b5-operator-scripts\") pod \"nova-cell0-f8f6-account-create-update-n4zdn\" (UID: \"c2022299-4089-4740-ae8f-50ca5b4be2b5\") " pod="openstack/nova-cell0-f8f6-account-create-update-n4zdn" Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.137247 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-twf88\" (UniqueName: \"kubernetes.io/projected/1d8e9d8c-961f-4dc5-84b8-51c486220cdc-kube-api-access-twf88\") pod \"root-account-create-update-kp2cn\" (UID: \"1d8e9d8c-961f-4dc5-84b8-51c486220cdc\") " pod="openstack/root-account-create-update-kp2cn" Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.137374 4933 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rw2pc\" (UniqueName: \"kubernetes.io/projected/c2022299-4089-4740-ae8f-50ca5b4be2b5-kube-api-access-rw2pc\") pod \"nova-cell0-f8f6-account-create-update-n4zdn\" (UID: \"c2022299-4089-4740-ae8f-50ca5b4be2b5\") " pod="openstack/nova-cell0-f8f6-account-create-update-n4zdn" Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.137975 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1d8e9d8c-961f-4dc5-84b8-51c486220cdc-operator-scripts\") pod \"root-account-create-update-kp2cn\" (UID: \"1d8e9d8c-961f-4dc5-84b8-51c486220cdc\") " pod="openstack/root-account-create-update-kp2cn" Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.160425 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-b23f-account-create-update-hfxzc" Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.163741 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-548e-account-create-update-c8h9b"] Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.168139 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-548e-account-create-update-c8h9b" Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.168636 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-twf88\" (UniqueName: \"kubernetes.io/projected/1d8e9d8c-961f-4dc5-84b8-51c486220cdc-kube-api-access-twf88\") pod \"root-account-create-update-kp2cn\" (UID: \"1d8e9d8c-961f-4dc5-84b8-51c486220cdc\") " pod="openstack/root-account-create-update-kp2cn" Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.200034 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.225731 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-548e-account-create-update-c8h9b"] Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.233411 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-e75b-account-create-update-tmgdt" Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.242397 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c2022299-4089-4740-ae8f-50ca5b4be2b5-operator-scripts\") pod \"nova-cell0-f8f6-account-create-update-n4zdn\" (UID: \"c2022299-4089-4740-ae8f-50ca5b4be2b5\") " pod="openstack/nova-cell0-f8f6-account-create-update-n4zdn" Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.242498 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rw2pc\" (UniqueName: \"kubernetes.io/projected/c2022299-4089-4740-ae8f-50ca5b4be2b5-kube-api-access-rw2pc\") pod \"nova-cell0-f8f6-account-create-update-n4zdn\" (UID: \"c2022299-4089-4740-ae8f-50ca5b4be2b5\") " pod="openstack/nova-cell0-f8f6-account-create-update-n4zdn" Jan 22 06:09:08 crc kubenswrapper[4933]: E0122 06:09:08.242970 4933 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Jan 22 06:09:08 crc kubenswrapper[4933]: E0122 06:09:08.243008 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4d712958-1ece-47de-9798-6e852b03c565-config-data podName:4d712958-1ece-47de-9798-6e852b03c565 nodeName:}" failed. No retries permitted until 2026-01-22 06:09:08.742994236 +0000 UTC m=+1396.580119589 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/4d712958-1ece-47de-9798-6e852b03c565-config-data") pod "rabbitmq-cell1-server-0" (UID: "4d712958-1ece-47de-9798-6e852b03c565") : configmap "rabbitmq-cell1-config-data" not found Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.243858 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c2022299-4089-4740-ae8f-50ca5b4be2b5-operator-scripts\") pod \"nova-cell0-f8f6-account-create-update-n4zdn\" (UID: \"c2022299-4089-4740-ae8f-50ca5b4be2b5\") " pod="openstack/nova-cell0-f8f6-account-create-update-n4zdn" Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.289926 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-f8f6-account-create-update-4r24j"] Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.299451 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rw2pc\" (UniqueName: \"kubernetes.io/projected/c2022299-4089-4740-ae8f-50ca5b4be2b5-kube-api-access-rw2pc\") pod \"nova-cell0-f8f6-account-create-update-n4zdn\" (UID: \"c2022299-4089-4740-ae8f-50ca5b4be2b5\") " pod="openstack/nova-cell0-f8f6-account-create-update-n4zdn" Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.332528 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-kp2cn" Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.344130 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9cb8166a-621e-4e96-b4e7-4d4fca32a727-operator-scripts\") pod \"nova-cell1-548e-account-create-update-c8h9b\" (UID: \"9cb8166a-621e-4e96-b4e7-4d4fca32a727\") " pod="openstack/nova-cell1-548e-account-create-update-c8h9b" Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.344268 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ft7gx\" (UniqueName: \"kubernetes.io/projected/9cb8166a-621e-4e96-b4e7-4d4fca32a727-kube-api-access-ft7gx\") pod \"nova-cell1-548e-account-create-update-c8h9b\" (UID: \"9cb8166a-621e-4e96-b4e7-4d4fca32a727\") " pod="openstack/nova-cell1-548e-account-create-update-c8h9b" Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.368480 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-f8f6-account-create-update-n4zdn" Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.398426 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-f8f6-account-create-update-4r24j"] Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.445509 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9cb8166a-621e-4e96-b4e7-4d4fca32a727-operator-scripts\") pod \"nova-cell1-548e-account-create-update-c8h9b\" (UID: \"9cb8166a-621e-4e96-b4e7-4d4fca32a727\") " pod="openstack/nova-cell1-548e-account-create-update-c8h9b" Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.445662 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ft7gx\" (UniqueName: \"kubernetes.io/projected/9cb8166a-621e-4e96-b4e7-4d4fca32a727-kube-api-access-ft7gx\") pod \"nova-cell1-548e-account-create-update-c8h9b\" (UID: \"9cb8166a-621e-4e96-b4e7-4d4fca32a727\") " pod="openstack/nova-cell1-548e-account-create-update-c8h9b" Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.446990 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9cb8166a-621e-4e96-b4e7-4d4fca32a727-operator-scripts\") pod \"nova-cell1-548e-account-create-update-c8h9b\" (UID: \"9cb8166a-621e-4e96-b4e7-4d4fca32a727\") " pod="openstack/nova-cell1-548e-account-create-update-c8h9b" Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.447038 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-knb5h"] Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.488888 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-knb5h"] Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.503850 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ft7gx\" (UniqueName: \"kubernetes.io/projected/9cb8166a-621e-4e96-b4e7-4d4fca32a727-kube-api-access-ft7gx\") pod \"nova-cell1-548e-account-create-update-c8h9b\" (UID: \"9cb8166a-621e-4e96-b4e7-4d4fca32a727\") " pod="openstack/nova-cell1-548e-account-create-update-c8h9b" Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.508508 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6b3a9bf8-9baa-4b5a-a321-1bff747279fd" 
path="/var/lib/kubelet/pods/6b3a9bf8-9baa-4b5a-a321-1bff747279fd/volumes" Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.510412 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8b8bd705-8ba6-471f-8ed4-dbbd18816c6e" path="/var/lib/kubelet/pods/8b8bd705-8ba6-471f-8ed4-dbbd18816c6e/volumes" Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.511216 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9f84f03f-67e9-41d2-8e74-98fd0ce61cac" path="/var/lib/kubelet/pods/9f84f03f-67e9-41d2-8e74-98fd0ce61cac/volumes" Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.511846 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b2d004aa-1c9d-428c-b3f3-851b50d53cc1" path="/var/lib/kubelet/pods/b2d004aa-1c9d-428c-b3f3-851b50d53cc1/volumes" Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.513925 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c15a0f2e-dcbe-4197-bdcd-f50425d09e80" path="/var/lib/kubelet/pods/c15a0f2e-dcbe-4197-bdcd-f50425d09e80/volumes" Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.517999 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"] Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.518275 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="9445b2f3-83ea-4e79-8312-ceffa2208f77" containerName="ovn-northd" containerID="cri-o://480d1b66c4dcca2c067fc5041e62b5b6640e85f510d6d67079fc834d3bbb885a" gracePeriod=30 Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.518561 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="9445b2f3-83ea-4e79-8312-ceffa2208f77" containerName="openstack-network-exporter" containerID="cri-o://bb6a7eef37a247e6e0eaaa17e2b10e4afc7b1b1032f493709b0fed75abf884df" gracePeriod=30 Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.545533 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-548e-account-create-update-k4rxd"] Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.555496 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-548e-account-create-update-k4rxd"] Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.612157 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-zhkks"] Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.638181 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-zhkks"] Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.638912 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-548e-account-create-update-c8h9b" Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.650301 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-nkj4c"] Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.664873 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-nkj4c"] Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.687627 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-92ghq"] Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.712876 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-92ghq"] Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.718164 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-szr8s"] Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.718407 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-metrics-szr8s" podUID="eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a" containerName="openstack-network-exporter" containerID="cri-o://49be386c17bf17cbf852a7ced425f8a40d45b629b580c910b7c1ed1881aef46f" gracePeriod=30 Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.741186 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-rwb6s"] Jan 22 06:09:08 crc kubenswrapper[4933]: E0122 06:09:08.753462 4933 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Jan 22 06:09:08 crc kubenswrapper[4933]: E0122 06:09:08.753525 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4d712958-1ece-47de-9798-6e852b03c565-config-data podName:4d712958-1ece-47de-9798-6e852b03c565 nodeName:}" failed. No retries permitted until 2026-01-22 06:09:09.753509509 +0000 UTC m=+1397.590634852 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/4d712958-1ece-47de-9798-6e852b03c565-config-data") pod "rabbitmq-cell1-server-0" (UID: "4d712958-1ece-47de-9798-6e852b03c565") : configmap "rabbitmq-cell1-config-data" not found Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.783135 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-98e3-account-create-update-hg9mt"] Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.792817 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-98e3-account-create-update-hg9mt" Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.824796 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.846336 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-phtjz"] Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.881680 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e581b2e9-57f2-452a-956c-4e8c5d75b3fb-operator-scripts\") pod \"neutron-98e3-account-create-update-hg9mt\" (UID: \"e581b2e9-57f2-452a-956c-4e8c5d75b3fb\") " pod="openstack/neutron-98e3-account-create-update-hg9mt" Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.881856 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xb86w\" (UniqueName: \"kubernetes.io/projected/e581b2e9-57f2-452a-956c-4e8c5d75b3fb-kube-api-access-xb86w\") pod \"neutron-98e3-account-create-update-hg9mt\" (UID: \"e581b2e9-57f2-452a-956c-4e8c5d75b3fb\") " pod="openstack/neutron-98e3-account-create-update-hg9mt" Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.983414 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-98e3-account-create-update-hg9mt"] Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.984928 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xb86w\" (UniqueName: \"kubernetes.io/projected/e581b2e9-57f2-452a-956c-4e8c5d75b3fb-kube-api-access-xb86w\") pod \"neutron-98e3-account-create-update-hg9mt\" (UID: \"e581b2e9-57f2-452a-956c-4e8c5d75b3fb\") " pod="openstack/neutron-98e3-account-create-update-hg9mt" Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.985583 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e581b2e9-57f2-452a-956c-4e8c5d75b3fb-operator-scripts\") pod \"neutron-98e3-account-create-update-hg9mt\" (UID: \"e581b2e9-57f2-452a-956c-4e8c5d75b3fb\") " pod="openstack/neutron-98e3-account-create-update-hg9mt" Jan 22 06:09:08 crc kubenswrapper[4933]: I0122 06:09:08.988740 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e581b2e9-57f2-452a-956c-4e8c5d75b3fb-operator-scripts\") pod \"neutron-98e3-account-create-update-hg9mt\" (UID: \"e581b2e9-57f2-452a-956c-4e8c5d75b3fb\") " pod="openstack/neutron-98e3-account-create-update-hg9mt" Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.030226 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xb86w\" (UniqueName: \"kubernetes.io/projected/e581b2e9-57f2-452a-956c-4e8c5d75b3fb-kube-api-access-xb86w\") pod \"neutron-98e3-account-create-update-hg9mt\" (UID: \"e581b2e9-57f2-452a-956c-4e8c5d75b3fb\") " pod="openstack/neutron-98e3-account-create-update-hg9mt" Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.035163 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-2cdpf"] Jan 22 06:09:09 crc kubenswrapper[4933]: E0122 06:09:09.038050 4933 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 22 06:09:09 crc kubenswrapper[4933]: container 
&Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 22 06:09:09 crc kubenswrapper[4933]: Jan 22 06:09:09 crc kubenswrapper[4933]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 22 06:09:09 crc kubenswrapper[4933]: Jan 22 06:09:09 crc kubenswrapper[4933]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 22 06:09:09 crc kubenswrapper[4933]: Jan 22 06:09:09 crc kubenswrapper[4933]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 22 06:09:09 crc kubenswrapper[4933]: Jan 22 06:09:09 crc kubenswrapper[4933]: if [ -n "cinder" ]; then Jan 22 06:09:09 crc kubenswrapper[4933]: GRANT_DATABASE="cinder" Jan 22 06:09:09 crc kubenswrapper[4933]: else Jan 22 06:09:09 crc kubenswrapper[4933]: GRANT_DATABASE="*" Jan 22 06:09:09 crc kubenswrapper[4933]: fi Jan 22 06:09:09 crc kubenswrapper[4933]: Jan 22 06:09:09 crc kubenswrapper[4933]: # going for maximum compatibility here: Jan 22 06:09:09 crc kubenswrapper[4933]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 22 06:09:09 crc kubenswrapper[4933]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 22 06:09:09 crc kubenswrapper[4933]: # 3. create user with CREATE but then do all password and TLS with ALTER to Jan 22 06:09:09 crc kubenswrapper[4933]: # support updates Jan 22 06:09:09 crc kubenswrapper[4933]: Jan 22 06:09:09 crc kubenswrapper[4933]: $MYSQL_CMD < logger="UnhandledError" Jan 22 06:09:09 crc kubenswrapper[4933]: E0122 06:09:09.039173 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"cinder-db-secret\\\" not found\"" pod="openstack/cinder-633c-account-create-update-bklwf" podUID="729db25e-5864-4305-99dd-24ce61f45029" Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.079012 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-2cdpf"] Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.086792 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-98e3-account-create-update-hg9mt" Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.103239 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-v7dl8"] Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.148527 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-v7dl8"] Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.179320 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-gwqkb"] Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.196297 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-633c-account-create-update-bklwf" event={"ID":"729db25e-5864-4305-99dd-24ce61f45029","Type":"ContainerStarted","Data":"c233fabf2515748b2408ec869d70b5f62bc26742f8dddc1fd6fd491d63916ae3"} Jan 22 06:09:09 crc kubenswrapper[4933]: E0122 06:09:09.200427 4933 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 22 06:09:09 crc kubenswrapper[4933]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 22 06:09:09 crc kubenswrapper[4933]: Jan 22 06:09:09 crc kubenswrapper[4933]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 22 06:09:09 crc kubenswrapper[4933]: Jan 22 06:09:09 crc kubenswrapper[4933]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 22 06:09:09 crc kubenswrapper[4933]: Jan 22 06:09:09 crc kubenswrapper[4933]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 22 06:09:09 crc kubenswrapper[4933]: Jan 22 06:09:09 crc kubenswrapper[4933]: if [ -n "cinder" ]; then Jan 22 06:09:09 crc kubenswrapper[4933]: GRANT_DATABASE="cinder" Jan 22 06:09:09 crc kubenswrapper[4933]: else Jan 22 06:09:09 crc kubenswrapper[4933]: GRANT_DATABASE="*" Jan 22 06:09:09 crc kubenswrapper[4933]: fi Jan 22 06:09:09 crc kubenswrapper[4933]: Jan 22 06:09:09 crc kubenswrapper[4933]: # going for maximum compatibility here: Jan 22 06:09:09 crc kubenswrapper[4933]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 22 06:09:09 crc kubenswrapper[4933]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 22 06:09:09 crc kubenswrapper[4933]: # 3. 
create user with CREATE but then do all password and TLS with ALTER to Jan 22 06:09:09 crc kubenswrapper[4933]: # support updates Jan 22 06:09:09 crc kubenswrapper[4933]: Jan 22 06:09:09 crc kubenswrapper[4933]: $MYSQL_CMD < logger="UnhandledError" Jan 22 06:09:09 crc kubenswrapper[4933]: E0122 06:09:09.201899 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"cinder-db-secret\\\" not found\"" pod="openstack/cinder-633c-account-create-update-bklwf" podUID="729db25e-5864-4305-99dd-24ce61f45029" Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.205450 4933 generic.go:334] "Generic (PLEG): container finished" podID="9445b2f3-83ea-4e79-8312-ceffa2208f77" containerID="bb6a7eef37a247e6e0eaaa17e2b10e4afc7b1b1032f493709b0fed75abf884df" exitCode=2 Jan 22 06:09:09 crc kubenswrapper[4933]: E0122 06:09:09.205529 4933 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err="command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: " execCommand=["/usr/share/ovn/scripts/ovn-ctl","stop_controller"] containerName="ovn-controller" pod="openstack/ovn-controller-phtjz" message=< Jan 22 06:09:09 crc kubenswrapper[4933]: Exiting ovn-controller (1) [ OK ] Jan 22 06:09:09 crc kubenswrapper[4933]: > Jan 22 06:09:09 crc kubenswrapper[4933]: E0122 06:09:09.205631 4933 kuberuntime_container.go:691] "PreStop hook failed" err="command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: " pod="openstack/ovn-controller-phtjz" podUID="2160e11a-468c-4bf7-9fdc-e579f3ecf896" containerName="ovn-controller" containerID="cri-o://cffd7868d1d8343c9ebe115ec9c5afd392b4e675691b973c2e8edd358ac529ec" Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.205683 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-phtjz" podUID="2160e11a-468c-4bf7-9fdc-e579f3ecf896" containerName="ovn-controller" containerID="cri-o://cffd7868d1d8343c9ebe115ec9c5afd392b4e675691b973c2e8edd358ac529ec" gracePeriod=30 Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.205537 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"9445b2f3-83ea-4e79-8312-ceffa2208f77","Type":"ContainerDied","Data":"bb6a7eef37a247e6e0eaaa17e2b10e4afc7b1b1032f493709b0fed75abf884df"} Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.215653 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-szr8s_eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a/openstack-network-exporter/0.log" Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.215703 4933 generic.go:334] "Generic (PLEG): container finished" podID="eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a" containerID="49be386c17bf17cbf852a7ced425f8a40d45b629b580c910b7c1ed1881aef46f" exitCode=2 Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.215739 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-szr8s" event={"ID":"eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a","Type":"ContainerDied","Data":"49be386c17bf17cbf852a7ced425f8a40d45b629b580c910b7c1ed1881aef46f"} Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.250386 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-gwqkb"] Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.311571 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 22 06:09:09 crc 
kubenswrapper[4933]: I0122 06:09:09.319129 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-98e3-account-create-update-7zvxp"] Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.334046 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-98e3-account-create-update-7zvxp"] Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.342503 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5ddd577785-b24fs"] Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.342843 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5ddd577785-b24fs" podUID="810720aa-b861-48e5-bd66-b1544f4f683a" containerName="dnsmasq-dns" containerID="cri-o://be973f0c711a601539c68dbefa84d0f75359d8153ac8c7d3bb7dc0a944ae2556" gracePeriod=10 Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.353050 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-566788757d-gkrdt"] Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.353306 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-566788757d-gkrdt" podUID="d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194" containerName="placement-log" containerID="cri-o://9ef7106268a25e14e2c1b4b8ae2a3405385542e15442cd78aec71a27dca326ba" gracePeriod=30 Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.353414 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-566788757d-gkrdt" podUID="d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194" containerName="placement-api" containerID="cri-o://9656bc111246901626a3ae6303bb9f3055280024dabd06af3b590f5dad87fafe" gracePeriod=30 Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.368887 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.372706 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="f4b41ac3-d05d-4bec-952f-c362cb5aad64" containerName="openstack-network-exporter" containerID="cri-o://b8fa72a1d737aaba2c6ef382aac2a61139f81989233737acfd4fd0708eec386a" gracePeriod=300 Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.392762 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-633c-account-create-update-bklwf"] Jan 22 06:09:09 crc kubenswrapper[4933]: E0122 06:09:09.419407 4933 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Jan 22 06:09:09 crc kubenswrapper[4933]: E0122 06:09:09.419468 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/47299478-bcfd-4f21-a56c-efcf7b167999-config-data podName:47299478-bcfd-4f21-a56c-efcf7b167999 nodeName:}" failed. No retries permitted until 2026-01-22 06:09:09.919449796 +0000 UTC m=+1397.756575149 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/47299478-bcfd-4f21-a56c-efcf7b167999-config-data") pod "rabbitmq-server-0" (UID: "47299478-bcfd-4f21-a56c-efcf7b167999") : configmap "rabbitmq-config-data" not found Jan 22 06:09:09 crc kubenswrapper[4933]: E0122 06:09:09.426326 4933 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 22 06:09:09 crc kubenswrapper[4933]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 22 06:09:09 crc kubenswrapper[4933]: Jan 22 06:09:09 crc kubenswrapper[4933]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 22 06:09:09 crc kubenswrapper[4933]: Jan 22 06:09:09 crc kubenswrapper[4933]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 22 06:09:09 crc kubenswrapper[4933]: Jan 22 06:09:09 crc kubenswrapper[4933]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 22 06:09:09 crc kubenswrapper[4933]: Jan 22 06:09:09 crc kubenswrapper[4933]: if [ -n "nova_api" ]; then Jan 22 06:09:09 crc kubenswrapper[4933]: GRANT_DATABASE="nova_api" Jan 22 06:09:09 crc kubenswrapper[4933]: else Jan 22 06:09:09 crc kubenswrapper[4933]: GRANT_DATABASE="*" Jan 22 06:09:09 crc kubenswrapper[4933]: fi Jan 22 06:09:09 crc kubenswrapper[4933]: Jan 22 06:09:09 crc kubenswrapper[4933]: # going for maximum compatibility here: Jan 22 06:09:09 crc kubenswrapper[4933]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 22 06:09:09 crc kubenswrapper[4933]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 22 06:09:09 crc kubenswrapper[4933]: # 3. 
create user with CREATE but then do all password and TLS with ALTER to Jan 22 06:09:09 crc kubenswrapper[4933]: # support updates Jan 22 06:09:09 crc kubenswrapper[4933]: Jan 22 06:09:09 crc kubenswrapper[4933]: $MYSQL_CMD < logger="UnhandledError" Jan 22 06:09:09 crc kubenswrapper[4933]: E0122 06:09:09.429094 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"nova-api-db-secret\\\" not found\"" pod="openstack/nova-api-b23f-account-create-update-hfxzc" podUID="2c6cfc4a-371c-496c-93dc-cdd3ea57e2d7" Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.432977 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.433666 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="a4b88c60-2edd-436c-996f-b8f07311f5ef" containerName="openstack-network-exporter" containerID="cri-o://7561e4048595cee77f6047174945f8a81c575b242d3b3be183508c84bf12d15d" gracePeriod=300 Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.464610 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.464860 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="a2bcbc4b-30c4-4ec8-81bf-6cba18171506" containerName="glance-log" containerID="cri-o://ec11285d9cdded033c8043b14c5171616e5845163e0327ea1a11b2d67c958235" gracePeriod=30 Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.465525 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="a2bcbc4b-30c4-4ec8-81bf-6cba18171506" containerName="glance-httpd" containerID="cri-o://7e0168931d199a81b487119803e1944a792da4293cd792448c0c7c6cb8c0b855" gracePeriod=30 Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.476104 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-2tsdt"] Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.493344 4933 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/cinder-scheduler-0" secret="" err="secret \"cinder-cinder-dockercfg-m2xwl\" not found" Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.495428 4933 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." 
pod="openstack/nova-cell1-novncproxy-0" secret="" err="secret \"nova-nova-dockercfg-65dnh\" not found" Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.498985 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="f4b41ac3-d05d-4bec-952f-c362cb5aad64" containerName="ovsdbserver-sb" containerID="cri-o://1e48373d2f9642eae495cf2b2ade3933c39166aaf3df09df17b1f26537b81222" gracePeriod=300 Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.511921 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-2tsdt"] Jan 22 06:09:09 crc kubenswrapper[4933]: E0122 06:09:09.524980 4933 secret.go:188] Couldn't get secret openstack/cinder-scheduler-config-data: secret "cinder-scheduler-config-data" not found Jan 22 06:09:09 crc kubenswrapper[4933]: E0122 06:09:09.525034 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-config-data-custom podName:0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9 nodeName:}" failed. No retries permitted until 2026-01-22 06:09:10.025019083 +0000 UTC m=+1397.862144436 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data-custom" (UniqueName: "kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-config-data-custom") pod "cinder-scheduler-0" (UID: "0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9") : secret "cinder-scheduler-config-data" not found Jan 22 06:09:09 crc kubenswrapper[4933]: E0122 06:09:09.526892 4933 secret.go:188] Couldn't get secret openstack/nova-cell1-novncproxy-config-data: secret "nova-cell1-novncproxy-config-data" not found Jan 22 06:09:09 crc kubenswrapper[4933]: E0122 06:09:09.526940 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/eb84c6f9-457d-46df-a4de-b5bfe612e945-config-data podName:eb84c6f9-457d-46df-a4de-b5bfe612e945 nodeName:}" failed. No retries permitted until 2026-01-22 06:09:10.026925209 +0000 UTC m=+1397.864050562 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/eb84c6f9-457d-46df-a4de-b5bfe612e945-config-data") pod "nova-cell1-novncproxy-0" (UID: "eb84c6f9-457d-46df-a4de-b5bfe612e945") : secret "nova-cell1-novncproxy-config-data" not found Jan 22 06:09:09 crc kubenswrapper[4933]: E0122 06:09:09.526988 4933 secret.go:188] Couldn't get secret openstack/cinder-scripts: secret "cinder-scripts" not found Jan 22 06:09:09 crc kubenswrapper[4933]: E0122 06:09:09.527008 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-scripts podName:0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9 nodeName:}" failed. No retries permitted until 2026-01-22 06:09:10.027002291 +0000 UTC m=+1397.864127644 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "scripts" (UniqueName: "kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-scripts") pod "cinder-scheduler-0" (UID: "0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9") : secret "cinder-scripts" not found Jan 22 06:09:09 crc kubenswrapper[4933]: E0122 06:09:09.528938 4933 secret.go:188] Couldn't get secret openstack/cinder-config-data: secret "cinder-config-data" not found Jan 22 06:09:09 crc kubenswrapper[4933]: E0122 06:09:09.528983 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-config-data podName:0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9 nodeName:}" failed. 
No retries permitted until 2026-01-22 06:09:10.0289706 +0000 UTC m=+1397.866096023 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-config-data") pod "cinder-scheduler-0" (UID: "0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9") : secret "cinder-config-data" not found Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.544746 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="a4b88c60-2edd-436c-996f-b8f07311f5ef" containerName="ovsdbserver-nb" containerID="cri-o://61aa1634a7490f2c19d0a61583367b3b595c012d49db045a0f91f868015c9e92" gracePeriod=300 Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.547134 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.547438 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="b4c8b893-2e30-4273-bbec-7ff7efee686e" containerName="glance-log" containerID="cri-o://99672693509f4dfe6f517b698528ee0345ec420a893d4e6ab3cda20b8fb397ea" gracePeriod=30 Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.547948 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="b4c8b893-2e30-4273-bbec-7ff7efee686e" containerName="glance-httpd" containerID="cri-o://80ec94b4b71bb2bb6fa66b1913564ba57af12174724640c7e8888c5d410e87a8" gracePeriod=30 Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.576113 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.618435 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.618699 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="603c9f42-93c4-4268-b513-d2309571ac20" containerName="cinder-api-log" containerID="cri-o://2f820ff473f51e41089b39b460bd1b0e17ef3dee077f8634fc530dbd7212251b" gracePeriod=30 Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.619196 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="603c9f42-93c4-4268-b513-d2309571ac20" containerName="cinder-api" containerID="cri-o://a5442678c4d8db8a7d32c5e8c0b8ef799742f21cd0c0918ab7cc4e0588f933fc" gracePeriod=30 Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.648183 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-f2b9-account-create-update-rhljf"] Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.681155 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-f2b9-account-create-update-rhljf"] Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.705147 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-r4zfq"] Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.717857 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-ring-rebalance-r4zfq"] Jan 22 06:09:09 crc kubenswrapper[4933]: E0122 06:09:09.757604 4933 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Jan 22 06:09:09 crc kubenswrapper[4933]: E0122 06:09:09.757661 4933 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4d712958-1ece-47de-9798-6e852b03c565-config-data podName:4d712958-1ece-47de-9798-6e852b03c565 nodeName:}" failed. No retries permitted until 2026-01-22 06:09:11.757648592 +0000 UTC m=+1399.594773945 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/4d712958-1ece-47de-9798-6e852b03c565-config-data") pod "rabbitmq-cell1-server-0" (UID: "4d712958-1ece-47de-9798-6e852b03c565") : configmap "rabbitmq-cell1-config-data" not found Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.771125 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-sr5dw"] Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.777616 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-rwb6s" podUID="12629e2f-7d6e-417c-a8df-c15b7a3e794e" containerName="ovsdb-server" containerID="cri-o://4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe" gracePeriod=29 Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.785921 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-sr5dw"] Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.804485 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-szr8s_eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a/openstack-network-exporter/0.log" Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.804741 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-szr8s" Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.821936 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-zczgn"] Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.830283 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-zczgn"] Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.859737 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a-combined-ca-bundle\") pod \"eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a\" (UID: \"eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a\") " Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.859913 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a-ovn-rundir\") pod \"eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a\" (UID: \"eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a\") " Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.860035 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2xr9j\" (UniqueName: \"kubernetes.io/projected/eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a-kube-api-access-2xr9j\") pod \"eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a\" (UID: \"eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a\") " Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.860176 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a-metrics-certs-tls-certs\") pod \"eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a\" (UID: \"eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a\") " Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.860338 4933 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a-ovs-rundir\") pod \"eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a\" (UID: \"eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a\") " Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.860666 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a-config\") pod \"eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a\" (UID: \"eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a\") " Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.868018 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a-config" (OuterVolumeSpecName: "config") pod "eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a" (UID: "eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.872123 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a-ovs-rundir" (OuterVolumeSpecName: "ovs-rundir") pod "eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a" (UID: "eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a"). InnerVolumeSpecName "ovs-rundir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.872176 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a" (UID: "eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a"). InnerVolumeSpecName "ovn-rundir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.874662 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-rwb6s" podUID="12629e2f-7d6e-417c-a8df-c15b7a3e794e" containerName="ovs-vswitchd" containerID="cri-o://fdbc1e77ca1a1d82a896e3c18520e64cbf781bfdafe1433a48e8546525073c90" gracePeriod=29 Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.876330 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a-config\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.939479 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a-kube-api-access-2xr9j" (OuterVolumeSpecName: "kube-api-access-2xr9j") pod "eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a" (UID: "eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a"). InnerVolumeSpecName "kube-api-access-2xr9j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.947660 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-b23f-account-create-update-hfxzc"] Jan 22 06:09:09 crc kubenswrapper[4933]: W0122 06:09:09.965277 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod841bd4c5_516a_406d_af7f_8d551b970cab.slice/crio-8d6ce78b44adbc00bbbea797d8a4719944afbdc8d13de056a505efe339ac7184 WatchSource:0}: Error finding container 8d6ce78b44adbc00bbbea797d8a4719944afbdc8d13de056a505efe339ac7184: Status 404 returned error can't find the container with id 8d6ce78b44adbc00bbbea797d8a4719944afbdc8d13de056a505efe339ac7184 Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.970234 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-633c-account-create-update-bklwf"] Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.974420 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a" (UID: "eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.979337 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.979368 4933 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a-ovn-rundir\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.979377 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2xr9j\" (UniqueName: \"kubernetes.io/projected/eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a-kube-api-access-2xr9j\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.979386 4933 reconciler_common.go:293] "Volume detached for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a-ovs-rundir\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:09 crc kubenswrapper[4933]: E0122 06:09:09.979443 4933 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Jan 22 06:09:09 crc kubenswrapper[4933]: E0122 06:09:09.979485 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/47299478-bcfd-4f21-a56c-efcf7b167999-config-data podName:47299478-bcfd-4f21-a56c-efcf7b167999 nodeName:}" failed. No retries permitted until 2026-01-22 06:09:10.979471277 +0000 UTC m=+1398.816596630 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/47299478-bcfd-4f21-a56c-efcf7b167999-config-data") pod "rabbitmq-server-0" (UID: "47299478-bcfd-4f21-a56c-efcf7b167999") : configmap "rabbitmq-config-data" not found Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.985024 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"] Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.985654 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="account-server" containerID="cri-o://88bc3429a376b19172757bdf15fd8015c87d29a4672fc50f7cd63426a4a15deb" gracePeriod=30 Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.986491 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="container-updater" containerID="cri-o://02aabe8bc9d6a787100f261aab25ec19fece062ced3a51ee5af7db32e0476c01" gracePeriod=30 Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.986822 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="container-auditor" containerID="cri-o://5c84341f9cb1713a1792b0f79a08b86f98220a86f1ae11140038b774810dc9bf" gracePeriod=30 Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.987385 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="swift-recon-cron" containerID="cri-o://3dbccd349100017de57314d2ef2e4235aa70b98e80d36f8e602e30cd6b29a896" gracePeriod=30 Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.987468 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="rsync" containerID="cri-o://4bf3bc4884d64bf94b227e5a3f89d2cd681e2010861ba6ad807f97e6ed46fa36" gracePeriod=30 Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.987510 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="object-expirer" containerID="cri-o://9a3a457b6ab2d11ee8b59c4ef3cb0fb0706bc20b89418ab13b9ceffb95fea763" gracePeriod=30 Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.987562 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="object-updater" containerID="cri-o://7cfffd64b9e03c3d5063b865c0c0af9e8e61d754936c8d6e9bc69e678886a8de" gracePeriod=30 Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.987593 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="object-auditor" containerID="cri-o://0a2c7ba35b45c00194109715a53245977cc22628a5deb202f9c6835fd7a8b075" gracePeriod=30 Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.987647 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="object-replicator" containerID="cri-o://8a27921119da49050071af7c42b3954b7dd3fbf2145808d90887c6de819bffec" gracePeriod=30 
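The records above show the kubelet honoring per-workload termination grace periods: every container in openstack/swift-storage-0 is killed with gracePeriod=30, while the OVN database servers were given gracePeriod=300 and the dnsmasq pod gracePeriod=10. Below is a minimal client-go sketch of requesting the same graceful deletion from the API side; the kubeconfig path and the use of panic for error handling are assumptions for illustration, not anything taken from this log.

    package main

    import (
    	"context"
    	"fmt"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/tools/clientcmd"
    )

    func main() {
    	// Build a client from a kubeconfig; the path is an assumption for
    	// illustration (any reachable kubeconfig works).
    	cfg, err := clientcmd.BuildConfigFromFlags("", "/home/core/.kube/config")
    	if err != nil {
    		panic(err)
    	}
    	cs, err := kubernetes.NewForConfig(cfg)
    	if err != nil {
    		panic(err)
    	}

    	// Ask the API server to delete the pod with an explicit 30s grace
    	// period, mirroring the gracePeriod=30 "Killing container" records.
    	grace := int64(30)
    	if err := cs.CoreV1().Pods("openstack").Delete(context.TODO(),
    		"swift-storage-0",
    		metav1.DeleteOptions{GracePeriodSeconds: &grace}); err != nil {
    		panic(err)
    	}
    	fmt.Println("graceful delete requested: SIGTERM now, SIGKILL at the deadline")
    }

When the grace period expires the kubelet escalates to SIGKILL, which matches the PreStop hook failure recorded earlier: "/usr/share/ovn/scripts/ovn-ctl stop_controller" exited with 137 (128+SIGKILL) because it was still running at the deadline.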
Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.987679 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="object-server" containerID="cri-o://805b814e2cbc13d8230bd687a77c696f506fa359d8f4364fabf274beca8c9fbe" gracePeriod=30 Jan 22 06:09:09 crc kubenswrapper[4933]: E0122 06:09:09.988467 4933 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 22 06:09:09 crc kubenswrapper[4933]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 22 06:09:09 crc kubenswrapper[4933]: Jan 22 06:09:09 crc kubenswrapper[4933]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 22 06:09:09 crc kubenswrapper[4933]: Jan 22 06:09:09 crc kubenswrapper[4933]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 22 06:09:09 crc kubenswrapper[4933]: Jan 22 06:09:09 crc kubenswrapper[4933]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 22 06:09:09 crc kubenswrapper[4933]: Jan 22 06:09:09 crc kubenswrapper[4933]: if [ -n "barbican" ]; then Jan 22 06:09:09 crc kubenswrapper[4933]: GRANT_DATABASE="barbican" Jan 22 06:09:09 crc kubenswrapper[4933]: else Jan 22 06:09:09 crc kubenswrapper[4933]: GRANT_DATABASE="*" Jan 22 06:09:09 crc kubenswrapper[4933]: fi Jan 22 06:09:09 crc kubenswrapper[4933]: Jan 22 06:09:09 crc kubenswrapper[4933]: # going for maximum compatibility here: Jan 22 06:09:09 crc kubenswrapper[4933]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 22 06:09:09 crc kubenswrapper[4933]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 22 06:09:09 crc kubenswrapper[4933]: # 3. 
create user with CREATE but then do all password and TLS with ALTER to Jan 22 06:09:09 crc kubenswrapper[4933]: # support updates Jan 22 06:09:09 crc kubenswrapper[4933]: Jan 22 06:09:09 crc kubenswrapper[4933]: $MYSQL_CMD < logger="UnhandledError" Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.990099 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="account-reaper" containerID="cri-o://b474cfa5d681b7ffb201c8ace8fb7d3efb77a53712a69fa5646c7089e6d05e5a" gracePeriod=30 Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.990293 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="container-replicator" containerID="cri-o://ce89b1febf7814e26dbdcab688f4151d8251ed7ce3d27c8d2405f7735ce3e4ad" gracePeriod=30 Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.990367 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="container-server" containerID="cri-o://263bbaf72a78f3a591d84bbd2a5fddf505db79d66e26fc745570da4a483e5714" gracePeriod=30 Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.990422 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="account-replicator" containerID="cri-o://07d6aad661ae1121fa77133d2f0b4b28385e2d29ec41899d592ae1ee48161fdd" gracePeriod=30 Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.990475 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="account-auditor" containerID="cri-o://89e9ea346551a5c5894ae7469a69c2ea0a9fc34a0adf372fc6b1fea201f66654" gracePeriod=30 Jan 22 06:09:09 crc kubenswrapper[4933]: E0122 06:09:09.990484 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"barbican-db-secret\\\" not found\"" pod="openstack/barbican-e75b-account-create-update-tmgdt" podUID="841bd4c5-516a-406d-af7f-8d551b970cab" Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.994703 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.995057 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="56625c99-64dc-4742-9927-0210d8fe8d9d" containerName="nova-api-log" containerID="cri-o://a37bad5d083026a83896f1bbe2715cb126a4a56637c064f44bca0ac67965f6f0" gracePeriod=30 Jan 22 06:09:09 crc kubenswrapper[4933]: I0122 06:09:09.995477 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="56625c99-64dc-4742-9927-0210d8fe8d9d" containerName="nova-api-api" containerID="cri-o://b6f0f60869caf1ac1eeabf3cf5ea90ed5b94691c40db36a9d775cc676248cd2f" gracePeriod=30 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.009439 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.060182 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-brkgn"] Jan 22 06:09:10 crc 
kubenswrapper[4933]: E0122 06:09:10.064776 4933 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 22 06:09:10 crc kubenswrapper[4933]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 22 06:09:10 crc kubenswrapper[4933]: Jan 22 06:09:10 crc kubenswrapper[4933]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 22 06:09:10 crc kubenswrapper[4933]: Jan 22 06:09:10 crc kubenswrapper[4933]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 22 06:09:10 crc kubenswrapper[4933]: Jan 22 06:09:10 crc kubenswrapper[4933]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 22 06:09:10 crc kubenswrapper[4933]: Jan 22 06:09:10 crc kubenswrapper[4933]: if [ -n "" ]; then Jan 22 06:09:10 crc kubenswrapper[4933]: GRANT_DATABASE="" Jan 22 06:09:10 crc kubenswrapper[4933]: else Jan 22 06:09:10 crc kubenswrapper[4933]: GRANT_DATABASE="*" Jan 22 06:09:10 crc kubenswrapper[4933]: fi Jan 22 06:09:10 crc kubenswrapper[4933]: Jan 22 06:09:10 crc kubenswrapper[4933]: # going for maximum compatibility here: Jan 22 06:09:10 crc kubenswrapper[4933]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 22 06:09:10 crc kubenswrapper[4933]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 22 06:09:10 crc kubenswrapper[4933]: # 3. create user with CREATE but then do all password and TLS with ALTER to Jan 22 06:09:10 crc kubenswrapper[4933]: # support updates Jan 22 06:09:10 crc kubenswrapper[4933]: Jan 22 06:09:10 crc kubenswrapper[4933]: $MYSQL_CMD < logger="UnhandledError" Jan 22 06:09:10 crc kubenswrapper[4933]: E0122 06:09:10.066286 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"openstack-cell1-mariadb-root-db-secret\\\" not found\"" pod="openstack/root-account-create-update-kp2cn" podUID="1d8e9d8c-961f-4dc5-84b8-51c486220cdc" Jan 22 06:09:10 crc kubenswrapper[4933]: E0122 06:09:10.081869 4933 secret.go:188] Couldn't get secret openstack/cinder-scripts: secret "cinder-scripts" not found Jan 22 06:09:10 crc kubenswrapper[4933]: E0122 06:09:10.081928 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-scripts podName:0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9 nodeName:}" failed. No retries permitted until 2026-01-22 06:09:11.081914458 +0000 UTC m=+1398.919039811 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "scripts" (UniqueName: "kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-scripts") pod "cinder-scheduler-0" (UID: "0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9") : secret "cinder-scripts" not found Jan 22 06:09:10 crc kubenswrapper[4933]: E0122 06:09:10.081873 4933 secret.go:188] Couldn't get secret openstack/nova-cell1-novncproxy-config-data: secret "nova-cell1-novncproxy-config-data" not found Jan 22 06:09:10 crc kubenswrapper[4933]: E0122 06:09:10.082410 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/eb84c6f9-457d-46df-a4de-b5bfe612e945-config-data podName:eb84c6f9-457d-46df-a4de-b5bfe612e945 nodeName:}" failed. No retries permitted until 2026-01-22 06:09:11.082388799 +0000 UTC m=+1398.919514152 (durationBeforeRetry 1s). 
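
[editor's note] The "Unhandled Error" dumps above embed the mariadb-account-create-update shell script, but the capture truncates everything between `$MYSQL_CMD <` and the trailing `logger="UnhandledError"`, so the heredoc body is elided. Judging from the comments that survive (create the user with CREATE, apply password and TLS via ALTER so reruns work, then GRANT on the selected database), the elided SQL plausibly resembles the sketch below; this is a hedged reconstruction for readability, not the operator's verbatim script. Note also that `MYSQL_CMD="mysql -h -u root -P 3306"` as rendered has no host after `-h`, so `mysql` would likely consume `-u` as the host argument — consistent with MYSQL_REMOTE_HOST expanding empty in these dumps.

    # hypothetical reconstruction of the elided heredoc body (assumption; see note above)
    # ${DatabaseUser} is invented for illustration -- the real account name is not in the log
    $MYSQL_CMD <<EOF
    CREATE USER IF NOT EXISTS '${DatabaseUser}'@'%';
    ALTER USER '${DatabaseUser}'@'%' IDENTIFIED BY '${DatabasePassword}';
    GRANT ALL PRIVILEGES ON ${GRANT_DATABASE}.* TO '${DatabaseUser}'@'%';
    EOF
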
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/eb84c6f9-457d-46df-a4de-b5bfe612e945-config-data") pod "nova-cell1-novncproxy-0" (UID: "eb84c6f9-457d-46df-a4de-b5bfe612e945") : secret "nova-cell1-novncproxy-config-data" not found Jan 22 06:09:10 crc kubenswrapper[4933]: E0122 06:09:10.083591 4933 secret.go:188] Couldn't get secret openstack/cinder-config-data: secret "cinder-config-data" not found Jan 22 06:09:10 crc kubenswrapper[4933]: E0122 06:09:10.083632 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-config-data podName:0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9 nodeName:}" failed. No retries permitted until 2026-01-22 06:09:11.083620919 +0000 UTC m=+1398.920746362 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-config-data") pod "cinder-scheduler-0" (UID: "0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9") : secret "cinder-config-data" not found Jan 22 06:09:10 crc kubenswrapper[4933]: E0122 06:09:10.088086 4933 secret.go:188] Couldn't get secret openstack/cinder-scheduler-config-data: secret "cinder-scheduler-config-data" not found Jan 22 06:09:10 crc kubenswrapper[4933]: E0122 06:09:10.088427 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-config-data-custom podName:0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9 nodeName:}" failed. No retries permitted until 2026-01-22 06:09:11.088408606 +0000 UTC m=+1398.925533959 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data-custom" (UniqueName: "kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-config-data-custom") pod "cinder-scheduler-0" (UID: "0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9") : secret "cinder-scheduler-config-data" not found Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.114812 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-brkgn"] Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.139864 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a" (UID: "eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.176421 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-9002-account-create-update-l62qv"] Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.200963 4933 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.208898 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-phtjz" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.209340 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-b23f-account-create-update-hfxzc"] Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.220117 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-8sz22"] Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.232196 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-8sz22"] Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.241007 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-9002-account-create-update-l62qv"] Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.248438 4933 generic.go:334] "Generic (PLEG): container finished" podID="a2bcbc4b-30c4-4ec8-81bf-6cba18171506" containerID="ec11285d9cdded033c8043b14c5171616e5845163e0327ea1a11b2d67c958235" exitCode=143 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.248499 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"a2bcbc4b-30c4-4ec8-81bf-6cba18171506","Type":"ContainerDied","Data":"ec11285d9cdded033c8043b14c5171616e5845163e0327ea1a11b2d67c958235"} Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.256978 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.257400 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="6fae4840-8fac-4192-8358-cbcae518e70d" containerName="nova-metadata-metadata" containerID="cri-o://f46d4f43b41aa5fe98df750c180e6af7780c3a5b7b9665b0eea715c193df207c" gracePeriod=30 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.257330 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="6fae4840-8fac-4192-8358-cbcae518e70d" containerName="nova-metadata-log" containerID="cri-o://769102d68320fe984ffaababc2e759013ea7756eebffbcf2bed246a58368e7c3" gracePeriod=30 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.265061 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-9cz7d"] Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.265144 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-e75b-account-create-update-tmgdt" event={"ID":"841bd4c5-516a-406d-af7f-8d551b970cab","Type":"ContainerStarted","Data":"8d6ce78b44adbc00bbbea797d8a4719944afbdc8d13de056a505efe339ac7184"} Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.271762 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-9cz7d"] Jan 22 06:09:10 crc kubenswrapper[4933]: E0122 06:09:10.275151 4933 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 22 06:09:10 crc kubenswrapper[4933]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 22 06:09:10 crc kubenswrapper[4933]: Jan 22 06:09:10 crc kubenswrapper[4933]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 22 06:09:10 crc kubenswrapper[4933]: Jan 22 06:09:10 crc kubenswrapper[4933]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 22 06:09:10 crc 
kubenswrapper[4933]: Jan 22 06:09:10 crc kubenswrapper[4933]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 22 06:09:10 crc kubenswrapper[4933]: Jan 22 06:09:10 crc kubenswrapper[4933]: if [ -n "barbican" ]; then Jan 22 06:09:10 crc kubenswrapper[4933]: GRANT_DATABASE="barbican" Jan 22 06:09:10 crc kubenswrapper[4933]: else Jan 22 06:09:10 crc kubenswrapper[4933]: GRANT_DATABASE="*" Jan 22 06:09:10 crc kubenswrapper[4933]: fi Jan 22 06:09:10 crc kubenswrapper[4933]: Jan 22 06:09:10 crc kubenswrapper[4933]: # going for maximum compatibility here: Jan 22 06:09:10 crc kubenswrapper[4933]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 22 06:09:10 crc kubenswrapper[4933]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 22 06:09:10 crc kubenswrapper[4933]: # 3. create user with CREATE but then do all password and TLS with ALTER to Jan 22 06:09:10 crc kubenswrapper[4933]: # support updates Jan 22 06:09:10 crc kubenswrapper[4933]: Jan 22 06:09:10 crc kubenswrapper[4933]: $MYSQL_CMD < logger="UnhandledError" Jan 22 06:09:10 crc kubenswrapper[4933]: E0122 06:09:10.276664 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"barbican-db-secret\\\" not found\"" pod="openstack/barbican-e75b-account-create-update-tmgdt" podUID="841bd4c5-516a-406d-af7f-8d551b970cab" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.281981 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-2fmwb"] Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.290014 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-2fmwb"] Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.297282 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-f8f6-account-create-update-n4zdn"] Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.301715 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/2160e11a-468c-4bf7-9fdc-e579f3ecf896-var-run-ovn\") pod \"2160e11a-468c-4bf7-9fdc-e579f3ecf896\" (UID: \"2160e11a-468c-4bf7-9fdc-e579f3ecf896\") " Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.301754 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2160e11a-468c-4bf7-9fdc-e579f3ecf896-var-run\") pod \"2160e11a-468c-4bf7-9fdc-e579f3ecf896\" (UID: \"2160e11a-468c-4bf7-9fdc-e579f3ecf896\") " Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.301780 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/2160e11a-468c-4bf7-9fdc-e579f3ecf896-ovn-controller-tls-certs\") pod \"2160e11a-468c-4bf7-9fdc-e579f3ecf896\" (UID: \"2160e11a-468c-4bf7-9fdc-e579f3ecf896\") " Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.301876 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b5g5d\" (UniqueName: \"kubernetes.io/projected/2160e11a-468c-4bf7-9fdc-e579f3ecf896-kube-api-access-b5g5d\") pod \"2160e11a-468c-4bf7-9fdc-e579f3ecf896\" (UID: \"2160e11a-468c-4bf7-9fdc-e579f3ecf896\") " Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.301921 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/2160e11a-468c-4bf7-9fdc-e579f3ecf896-combined-ca-bundle\") pod \"2160e11a-468c-4bf7-9fdc-e579f3ecf896\" (UID: \"2160e11a-468c-4bf7-9fdc-e579f3ecf896\") " Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.301944 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/2160e11a-468c-4bf7-9fdc-e579f3ecf896-var-log-ovn\") pod \"2160e11a-468c-4bf7-9fdc-e579f3ecf896\" (UID: \"2160e11a-468c-4bf7-9fdc-e579f3ecf896\") " Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.302023 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2160e11a-468c-4bf7-9fdc-e579f3ecf896-scripts\") pod \"2160e11a-468c-4bf7-9fdc-e579f3ecf896\" (UID: \"2160e11a-468c-4bf7-9fdc-e579f3ecf896\") " Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.302977 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2160e11a-468c-4bf7-9fdc-e579f3ecf896-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "2160e11a-468c-4bf7-9fdc-e579f3ecf896" (UID: "2160e11a-468c-4bf7-9fdc-e579f3ecf896"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.303184 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2160e11a-468c-4bf7-9fdc-e579f3ecf896-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "2160e11a-468c-4bf7-9fdc-e579f3ecf896" (UID: "2160e11a-468c-4bf7-9fdc-e579f3ecf896"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.303262 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2160e11a-468c-4bf7-9fdc-e579f3ecf896-var-run" (OuterVolumeSpecName: "var-run") pod "2160e11a-468c-4bf7-9fdc-e579f3ecf896" (UID: "2160e11a-468c-4bf7-9fdc-e579f3ecf896"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.304369 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2160e11a-468c-4bf7-9fdc-e579f3ecf896-scripts" (OuterVolumeSpecName: "scripts") pod "2160e11a-468c-4bf7-9fdc-e579f3ecf896" (UID: "2160e11a-468c-4bf7-9fdc-e579f3ecf896"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.307768 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-e75b-account-create-update-tmgdt"] Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.316251 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.318012 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2160e11a-468c-4bf7-9fdc-e579f3ecf896-kube-api-access-b5g5d" (OuterVolumeSpecName: "kube-api-access-b5g5d") pod "2160e11a-468c-4bf7-9fdc-e579f3ecf896" (UID: "2160e11a-468c-4bf7-9fdc-e579f3ecf896"). InnerVolumeSpecName "kube-api-access-b5g5d". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.318263 4933 generic.go:334] "Generic (PLEG): container finished" podID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerID="9a3a457b6ab2d11ee8b59c4ef3cb0fb0706bc20b89418ab13b9ceffb95fea763" exitCode=0 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.318297 4933 generic.go:334] "Generic (PLEG): container finished" podID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerID="7cfffd64b9e03c3d5063b865c0c0af9e8e61d754936c8d6e9bc69e678886a8de" exitCode=0 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.318312 4933 generic.go:334] "Generic (PLEG): container finished" podID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerID="0a2c7ba35b45c00194109715a53245977cc22628a5deb202f9c6835fd7a8b075" exitCode=0 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.318367 4933 generic.go:334] "Generic (PLEG): container finished" podID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerID="8a27921119da49050071af7c42b3954b7dd3fbf2145808d90887c6de819bffec" exitCode=0 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.318374 4933 generic.go:334] "Generic (PLEG): container finished" podID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerID="02aabe8bc9d6a787100f261aab25ec19fece062ced3a51ee5af7db32e0476c01" exitCode=0 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.318381 4933 generic.go:334] "Generic (PLEG): container finished" podID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerID="5c84341f9cb1713a1792b0f79a08b86f98220a86f1ae11140038b774810dc9bf" exitCode=0 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.318387 4933 generic.go:334] "Generic (PLEG): container finished" podID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerID="ce89b1febf7814e26dbdcab688f4151d8251ed7ce3d27c8d2405f7735ce3e4ad" exitCode=0 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.318393 4933 generic.go:334] "Generic (PLEG): container finished" podID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerID="b474cfa5d681b7ffb201c8ace8fb7d3efb77a53712a69fa5646c7089e6d05e5a" exitCode=0 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.318400 4933 generic.go:334] "Generic (PLEG): container finished" podID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerID="89e9ea346551a5c5894ae7469a69c2ea0a9fc34a0adf372fc6b1fea201f66654" exitCode=0 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.318405 4933 generic.go:334] "Generic (PLEG): container finished" podID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerID="07d6aad661ae1121fa77133d2f0b4b28385e2d29ec41899d592ae1ee48161fdd" exitCode=0 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.318414 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016","Type":"ContainerDied","Data":"9a3a457b6ab2d11ee8b59c4ef3cb0fb0706bc20b89418ab13b9ceffb95fea763"} Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.318489 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016","Type":"ContainerDied","Data":"7cfffd64b9e03c3d5063b865c0c0af9e8e61d754936c8d6e9bc69e678886a8de"} Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.318502 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016","Type":"ContainerDied","Data":"0a2c7ba35b45c00194109715a53245977cc22628a5deb202f9c6835fd7a8b075"} Jan 22 06:09:10 crc kubenswrapper[4933]: 
I0122 06:09:10.318515 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016","Type":"ContainerDied","Data":"8a27921119da49050071af7c42b3954b7dd3fbf2145808d90887c6de819bffec"} Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.318525 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016","Type":"ContainerDied","Data":"02aabe8bc9d6a787100f261aab25ec19fece062ced3a51ee5af7db32e0476c01"} Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.318539 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016","Type":"ContainerDied","Data":"5c84341f9cb1713a1792b0f79a08b86f98220a86f1ae11140038b774810dc9bf"} Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.318549 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016","Type":"ContainerDied","Data":"ce89b1febf7814e26dbdcab688f4151d8251ed7ce3d27c8d2405f7735ce3e4ad"} Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.318560 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016","Type":"ContainerDied","Data":"b474cfa5d681b7ffb201c8ace8fb7d3efb77a53712a69fa5646c7089e6d05e5a"} Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.318571 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016","Type":"ContainerDied","Data":"89e9ea346551a5c5894ae7469a69c2ea0a9fc34a0adf372fc6b1fea201f66654"} Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.318585 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016","Type":"ContainerDied","Data":"07d6aad661ae1121fa77133d2f0b4b28385e2d29ec41899d592ae1ee48161fdd"} Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.326594 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-dg7dg"] Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.331907 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-dg7dg"] Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.335995 4933 generic.go:334] "Generic (PLEG): container finished" podID="d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194" containerID="9ef7106268a25e14e2c1b4b8ae2a3405385542e15442cd78aec71a27dca326ba" exitCode=143 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.336123 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-566788757d-gkrdt" event={"ID":"d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194","Type":"ContainerDied","Data":"9ef7106268a25e14e2c1b4b8ae2a3405385542e15442cd78aec71a27dca326ba"} Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.338920 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-67fd8f79cc-pb6vw"] Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.339208 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-67fd8f79cc-pb6vw" podUID="62dded9b-a123-4bd0-ab4c-8de7680be023" containerName="proxy-httpd" containerID="cri-o://23a7c678de86633dca6c9c40d455ce3ac68ebfa5501131949cd43ab694fb34bd" gracePeriod=30 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.339579 4933 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-67fd8f79cc-pb6vw" podUID="62dded9b-a123-4bd0-ab4c-8de7680be023" containerName="proxy-server" containerID="cri-o://88766015ee280911eb0e2545ab5037d409220a2d72a53bab39fa2849ee4efc90" gracePeriod=30 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.340051 4933 generic.go:334] "Generic (PLEG): container finished" podID="603c9f42-93c4-4268-b513-d2309571ac20" containerID="2f820ff473f51e41089b39b460bd1b0e17ef3dee077f8634fc530dbd7212251b" exitCode=143 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.340110 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"603c9f42-93c4-4268-b513-d2309571ac20","Type":"ContainerDied","Data":"2f820ff473f51e41089b39b460bd1b0e17ef3dee077f8634fc530dbd7212251b"} Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.347384 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-548e-account-create-update-c8h9b"] Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.348499 4933 generic.go:334] "Generic (PLEG): container finished" podID="45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6" containerID="8bf7a48a2b547ef8e38d6ce658ae3a575bfebda764938fc766292bd884b6d211" exitCode=137 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.348559 4933 scope.go:117] "RemoveContainer" containerID="8bf7a48a2b547ef8e38d6ce658ae3a575bfebda764938fc766292bd884b6d211" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.348704 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.351349 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-b23f-account-create-update-hfxzc" event={"ID":"2c6cfc4a-371c-496c-93dc-cdd3ea57e2d7","Type":"ContainerStarted","Data":"91b93531d4715c42b17a167c922c48c0d916bfc35003d71b5f81a9401598714a"} Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.354858 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5f996bcdbf-kwx6s"] Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.355229 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5f996bcdbf-kwx6s" podUID="fe53ac25-75b3-42c3-802f-5359023b26e7" containerName="neutron-api" containerID="cri-o://2ae19b8623001ea92970743871f64a042fee6abb1332e83f11c894e81eff91b0" gracePeriod=30 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.355469 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5f996bcdbf-kwx6s" podUID="fe53ac25-75b3-42c3-802f-5359023b26e7" containerName="neutron-httpd" containerID="cri-o://e6d22e9623f38cd68cde6e98c7b0ee2b102f8edb35af1c6667a3464fee551f58" gracePeriod=30 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.362941 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2160e11a-468c-4bf7-9fdc-e579f3ecf896-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2160e11a-468c-4bf7-9fdc-e579f3ecf896" (UID: "2160e11a-468c-4bf7-9fdc-e579f3ecf896"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:10 crc kubenswrapper[4933]: E0122 06:09:10.364630 4933 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 22 06:09:10 crc kubenswrapper[4933]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 22 06:09:10 crc kubenswrapper[4933]: Jan 22 06:09:10 crc kubenswrapper[4933]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 22 06:09:10 crc kubenswrapper[4933]: Jan 22 06:09:10 crc kubenswrapper[4933]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 22 06:09:10 crc kubenswrapper[4933]: Jan 22 06:09:10 crc kubenswrapper[4933]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 22 06:09:10 crc kubenswrapper[4933]: Jan 22 06:09:10 crc kubenswrapper[4933]: if [ -n "nova_api" ]; then Jan 22 06:09:10 crc kubenswrapper[4933]: GRANT_DATABASE="nova_api" Jan 22 06:09:10 crc kubenswrapper[4933]: else Jan 22 06:09:10 crc kubenswrapper[4933]: GRANT_DATABASE="*" Jan 22 06:09:10 crc kubenswrapper[4933]: fi Jan 22 06:09:10 crc kubenswrapper[4933]: Jan 22 06:09:10 crc kubenswrapper[4933]: # going for maximum compatibility here: Jan 22 06:09:10 crc kubenswrapper[4933]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 22 06:09:10 crc kubenswrapper[4933]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 22 06:09:10 crc kubenswrapper[4933]: # 3. create user with CREATE but then do all password and TLS with ALTER to Jan 22 06:09:10 crc kubenswrapper[4933]: # support updates Jan 22 06:09:10 crc kubenswrapper[4933]: Jan 22 06:09:10 crc kubenswrapper[4933]: $MYSQL_CMD < logger="UnhandledError" Jan 22 06:09:10 crc kubenswrapper[4933]: E0122 06:09:10.375281 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"nova-api-db-secret\\\" not found\"" pod="openstack/nova-api-b23f-account-create-update-hfxzc" podUID="2c6cfc4a-371c-496c-93dc-cdd3ea57e2d7" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.375530 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-9hsxt"] Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.390941 4933 generic.go:334] "Generic (PLEG): container finished" podID="12629e2f-7d6e-417c-a8df-c15b7a3e794e" containerID="4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe" exitCode=0 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.391181 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-rwb6s" event={"ID":"12629e2f-7d6e-417c-a8df-c15b7a3e794e","Type":"ContainerDied","Data":"4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe"} Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.394945 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-kp2cn" event={"ID":"1d8e9d8c-961f-4dc5-84b8-51c486220cdc","Type":"ContainerStarted","Data":"e9346d264c86bdb6698627f8ff5484c15a226ea21f8a99a1bd92d89a5e2b3110"} Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.397756 4933 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." 
pod="openstack/root-account-create-update-kp2cn" secret="" err="secret \"galera-openstack-cell1-dockercfg-7z6wt\" not found" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.401630 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-9hsxt"] Jan 22 06:09:10 crc kubenswrapper[4933]: E0122 06:09:10.401797 4933 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 22 06:09:10 crc kubenswrapper[4933]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 22 06:09:10 crc kubenswrapper[4933]: Jan 22 06:09:10 crc kubenswrapper[4933]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 22 06:09:10 crc kubenswrapper[4933]: Jan 22 06:09:10 crc kubenswrapper[4933]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 22 06:09:10 crc kubenswrapper[4933]: Jan 22 06:09:10 crc kubenswrapper[4933]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 22 06:09:10 crc kubenswrapper[4933]: Jan 22 06:09:10 crc kubenswrapper[4933]: if [ -n "" ]; then Jan 22 06:09:10 crc kubenswrapper[4933]: GRANT_DATABASE="" Jan 22 06:09:10 crc kubenswrapper[4933]: else Jan 22 06:09:10 crc kubenswrapper[4933]: GRANT_DATABASE="*" Jan 22 06:09:10 crc kubenswrapper[4933]: fi Jan 22 06:09:10 crc kubenswrapper[4933]: Jan 22 06:09:10 crc kubenswrapper[4933]: # going for maximum compatibility here: Jan 22 06:09:10 crc kubenswrapper[4933]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 22 06:09:10 crc kubenswrapper[4933]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 22 06:09:10 crc kubenswrapper[4933]: # 3. 
create user with CREATE but then do all password and TLS with ALTER to Jan 22 06:09:10 crc kubenswrapper[4933]: # support updates Jan 22 06:09:10 crc kubenswrapper[4933]: Jan 22 06:09:10 crc kubenswrapper[4933]: $MYSQL_CMD < logger="UnhandledError" Jan 22 06:09:10 crc kubenswrapper[4933]: E0122 06:09:10.405859 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"openstack-cell1-mariadb-root-db-secret\\\" not found\"" pod="openstack/root-account-create-update-kp2cn" podUID="1d8e9d8c-961f-4dc5-84b8-51c486220cdc" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.406102 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_f4b41ac3-d05d-4bec-952f-c362cb5aad64/ovsdbserver-sb/0.log" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.406151 4933 generic.go:334] "Generic (PLEG): container finished" podID="f4b41ac3-d05d-4bec-952f-c362cb5aad64" containerID="b8fa72a1d737aaba2c6ef382aac2a61139f81989233737acfd4fd0708eec386a" exitCode=2 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.406166 4933 generic.go:334] "Generic (PLEG): container finished" podID="f4b41ac3-d05d-4bec-952f-c362cb5aad64" containerID="1e48373d2f9642eae495cf2b2ade3933c39166aaf3df09df17b1f26537b81222" exitCode=143 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.406253 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"f4b41ac3-d05d-4bec-952f-c362cb5aad64","Type":"ContainerDied","Data":"b8fa72a1d737aaba2c6ef382aac2a61139f81989233737acfd4fd0708eec386a"} Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.406278 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"f4b41ac3-d05d-4bec-952f-c362cb5aad64","Type":"ContainerDied","Data":"1e48373d2f9642eae495cf2b2ade3933c39166aaf3df09df17b1f26537b81222"} Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.407248 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6-openstack-config-secret\") pod \"45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6\" (UID: \"45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6\") " Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.407390 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6-combined-ca-bundle\") pod \"45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6\" (UID: \"45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6\") " Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.407547 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6-openstack-config\") pod \"45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6\" (UID: \"45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6\") " Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.407703 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8m7pt\" (UniqueName: \"kubernetes.io/projected/45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6-kube-api-access-8m7pt\") pod \"45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6\" (UID: \"45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6\") " Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.408463 4933 reconciler_common.go:293] "Volume detached for volume 
\"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/2160e11a-468c-4bf7-9fdc-e579f3ecf896-var-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.408555 4933 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2160e11a-468c-4bf7-9fdc-e579f3ecf896-var-run\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.408644 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b5g5d\" (UniqueName: \"kubernetes.io/projected/2160e11a-468c-4bf7-9fdc-e579f3ecf896-kube-api-access-b5g5d\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.408761 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2160e11a-468c-4bf7-9fdc-e579f3ecf896-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.408832 4933 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/2160e11a-468c-4bf7-9fdc-e579f3ecf896-var-log-ovn\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.408909 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2160e11a-468c-4bf7-9fdc-e579f3ecf896-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.426413 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-98e3-account-create-update-hg9mt"] Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.429364 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_a4b88c60-2edd-436c-996f-b8f07311f5ef/ovsdbserver-nb/0.log" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.429426 4933 generic.go:334] "Generic (PLEG): container finished" podID="a4b88c60-2edd-436c-996f-b8f07311f5ef" containerID="7561e4048595cee77f6047174945f8a81c575b242d3b3be183508c84bf12d15d" exitCode=2 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.429448 4933 generic.go:334] "Generic (PLEG): container finished" podID="a4b88c60-2edd-436c-996f-b8f07311f5ef" containerID="61aa1634a7490f2c19d0a61583367b3b595c012d49db045a0f91f868015c9e92" exitCode=143 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.429534 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"a4b88c60-2edd-436c-996f-b8f07311f5ef","Type":"ContainerDied","Data":"7561e4048595cee77f6047174945f8a81c575b242d3b3be183508c84bf12d15d"} Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.429562 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"a4b88c60-2edd-436c-996f-b8f07311f5ef","Type":"ContainerDied","Data":"61aa1634a7490f2c19d0a61583367b3b595c012d49db045a0f91f868015c9e92"} Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.434880 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.441057 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6-kube-api-access-8m7pt" (OuterVolumeSpecName: "kube-api-access-8m7pt") pod "45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6" (UID: "45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6"). InnerVolumeSpecName "kube-api-access-8m7pt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.443054 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-7c6944456-lk7l7"] Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.443362 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-7c6944456-lk7l7" podUID="0fecb571-89ee-4d10-a1e3-e3755946df2b" containerName="barbican-keystone-listener-log" containerID="cri-o://11cdb1302043612d4966e8227c66cd55138cd37ef40b24e2659a9776bb49e386" gracePeriod=30 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.443859 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-7c6944456-lk7l7" podUID="0fecb571-89ee-4d10-a1e3-e3755946df2b" containerName="barbican-keystone-listener" containerID="cri-o://19349f4fb699d96d982ef68a33cea6a25a5b8d3f3671b4a1b34a92f90876b922" gracePeriod=30 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.445658 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6" (UID: "45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.446057 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6" (UID: "45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.453180 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-cell1-galera-0" podUID="943da5ba-d325-4686-871d-802b7730d02a" containerName="galera" containerID="cri-o://4205f5d3155c766e46db2874af6cdb743c6e1ecc121652ab6d216950365cf512" gracePeriod=30 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.453589 4933 generic.go:334] "Generic (PLEG): container finished" podID="b4c8b893-2e30-4273-bbec-7ff7efee686e" containerID="99672693509f4dfe6f517b698528ee0345ec420a893d4e6ab3cda20b8fb397ea" exitCode=143 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.453697 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b4c8b893-2e30-4273-bbec-7ff7efee686e","Type":"ContainerDied","Data":"99672693509f4dfe6f517b698528ee0345ec420a893d4e6ab3cda20b8fb397ea"} Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.459833 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2160e11a-468c-4bf7-9fdc-e579f3ecf896-ovn-controller-tls-certs" (OuterVolumeSpecName: "ovn-controller-tls-certs") pod "2160e11a-468c-4bf7-9fdc-e579f3ecf896" (UID: "2160e11a-468c-4bf7-9fdc-e579f3ecf896"). InnerVolumeSpecName "ovn-controller-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:10 crc kubenswrapper[4933]: W0122 06:09:10.465730 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9cb8166a_621e_4e96_b4e7_4d4fca32a727.slice/crio-ff823bdb91836445cb31164596d700ceae160b48349d6e6920accf1be2569f8b WatchSource:0}: Error finding container ff823bdb91836445cb31164596d700ceae160b48349d6e6920accf1be2569f8b: Status 404 returned error can't find the container with id ff823bdb91836445cb31164596d700ceae160b48349d6e6920accf1be2569f8b Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.466012 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-szr8s_eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a/openstack-network-exporter/0.log" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.466136 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-szr8s" event={"ID":"eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a","Type":"ContainerDied","Data":"914be5bfee9e203b71c6416243d7c2b9d913edeb5d4b3d64cb1dcad13384d3be"} Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.466178 4933 scope.go:117] "RemoveContainer" containerID="49be386c17bf17cbf852a7ced425f8a40d45b629b580c910b7c1ed1881aef46f" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.466203 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-szr8s" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.472094 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-6448b46975-jx7gp"] Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.472334 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-6448b46975-jx7gp" podUID="5505bed5-dba3-4067-b94c-acd00b7c37c7" containerName="barbican-worker-log" containerID="cri-o://ae64a47d0a256e71036f25f3770e6938d214f52aa76de1db83e4a3d607be7dbc" gracePeriod=30 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.472629 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-6448b46975-jx7gp" podUID="5505bed5-dba3-4067-b94c-acd00b7c37c7" containerName="barbican-worker" containerID="cri-o://45526307e9624de914d45bbd929b47ffd667a4044ab9422715456c34fe59622d" gracePeriod=30 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.476411 4933 generic.go:334] "Generic (PLEG): container finished" podID="2160e11a-468c-4bf7-9fdc-e579f3ecf896" containerID="cffd7868d1d8343c9ebe115ec9c5afd392b4e675691b973c2e8edd358ac529ec" exitCode=0 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.476482 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-phtjz" event={"ID":"2160e11a-468c-4bf7-9fdc-e579f3ecf896","Type":"ContainerDied","Data":"cffd7868d1d8343c9ebe115ec9c5afd392b4e675691b973c2e8edd358ac529ec"} Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.476509 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-phtjz" event={"ID":"2160e11a-468c-4bf7-9fdc-e579f3ecf896","Type":"ContainerDied","Data":"a2d9ba2da68dc8e442d3212284c30975b51e5c7cbdc4c47cf97d090f71657af7"} Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.476721 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-phtjz" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.494125 4933 generic.go:334] "Generic (PLEG): container finished" podID="810720aa-b861-48e5-bd66-b1544f4f683a" containerID="be973f0c711a601539c68dbefa84d0f75359d8153ac8c7d3bb7dc0a944ae2556" exitCode=0 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.502602 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6" (UID: "45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.504702 4933 generic.go:334] "Generic (PLEG): container finished" podID="56625c99-64dc-4742-9927-0210d8fe8d9d" containerID="a37bad5d083026a83896f1bbe2715cb126a4a56637c064f44bca0ac67965f6f0" exitCode=143 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.504990 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9" containerName="cinder-scheduler" containerID="cri-o://e136629b351ca188667a0d656d1460f094277accee28f74429184e904516ae0c" gracePeriod=30 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.505324 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9" containerName="probe" containerID="cri-o://47d5a47f9c85c2d9dc14b7850a92bcef2cae8e70655cb013dd920c1d39eb6587" gracePeriod=30 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.506245 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="eb84c6f9-457d-46df-a4de-b5bfe612e945" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://e4c2907944b8cddfe0450299fb215823e03077dd25b111c02c49498256c142f1" gracePeriod=30 Jan 22 06:09:10 crc kubenswrapper[4933]: E0122 06:09:10.507766 4933 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 22 06:09:10 crc kubenswrapper[4933]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 22 06:09:10 crc kubenswrapper[4933]: Jan 22 06:09:10 crc kubenswrapper[4933]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 22 06:09:10 crc kubenswrapper[4933]: Jan 22 06:09:10 crc kubenswrapper[4933]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 22 06:09:10 crc kubenswrapper[4933]: Jan 22 06:09:10 crc kubenswrapper[4933]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 22 06:09:10 crc kubenswrapper[4933]: Jan 22 06:09:10 crc kubenswrapper[4933]: if [ -n "nova_cell1" ]; then Jan 22 06:09:10 crc kubenswrapper[4933]: GRANT_DATABASE="nova_cell1" Jan 22 06:09:10 crc kubenswrapper[4933]: else Jan 22 06:09:10 crc kubenswrapper[4933]: GRANT_DATABASE="*" Jan 22 06:09:10 crc kubenswrapper[4933]: fi Jan 22 06:09:10 crc kubenswrapper[4933]: Jan 22 06:09:10 crc kubenswrapper[4933]: # going for maximum compatibility here: Jan 22 06:09:10 crc kubenswrapper[4933]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 22 06:09:10 crc kubenswrapper[4933]: # 2. 
MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 22 06:09:10 crc kubenswrapper[4933]: # 3. create user with CREATE but then do all password and TLS with ALTER to Jan 22 06:09:10 crc kubenswrapper[4933]: # support updates Jan 22 06:09:10 crc kubenswrapper[4933]: Jan 22 06:09:10 crc kubenswrapper[4933]: $MYSQL_CMD < logger="UnhandledError" Jan 22 06:09:10 crc kubenswrapper[4933]: E0122 06:09:10.510326 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"nova-cell1-db-secret\\\" not found\"" pod="openstack/nova-cell1-548e-account-create-update-c8h9b" podUID="9cb8166a-621e-4e96-b4e7-4d4fca32a727" Jan 22 06:09:10 crc kubenswrapper[4933]: E0122 06:09:10.515341 4933 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 22 06:09:10 crc kubenswrapper[4933]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 22 06:09:10 crc kubenswrapper[4933]: Jan 22 06:09:10 crc kubenswrapper[4933]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 22 06:09:10 crc kubenswrapper[4933]: Jan 22 06:09:10 crc kubenswrapper[4933]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 22 06:09:10 crc kubenswrapper[4933]: Jan 22 06:09:10 crc kubenswrapper[4933]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 22 06:09:10 crc kubenswrapper[4933]: Jan 22 06:09:10 crc kubenswrapper[4933]: if [ -n "cinder" ]; then Jan 22 06:09:10 crc kubenswrapper[4933]: GRANT_DATABASE="cinder" Jan 22 06:09:10 crc kubenswrapper[4933]: else Jan 22 06:09:10 crc kubenswrapper[4933]: GRANT_DATABASE="*" Jan 22 06:09:10 crc kubenswrapper[4933]: fi Jan 22 06:09:10 crc kubenswrapper[4933]: Jan 22 06:09:10 crc kubenswrapper[4933]: # going for maximum compatibility here: Jan 22 06:09:10 crc kubenswrapper[4933]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 22 06:09:10 crc kubenswrapper[4933]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 22 06:09:10 crc kubenswrapper[4933]: # 3. 
create user with CREATE but then do all password and TLS with ALTER to Jan 22 06:09:10 crc kubenswrapper[4933]: # support updates Jan 22 06:09:10 crc kubenswrapper[4933]: Jan 22 06:09:10 crc kubenswrapper[4933]: $MYSQL_CMD < logger="UnhandledError" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.516328 4933 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.516344 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.516353 4933 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6-openstack-config\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.516362 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8m7pt\" (UniqueName: \"kubernetes.io/projected/45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6-kube-api-access-8m7pt\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.516370 4933 reconciler_common.go:293] "Volume detached for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/2160e11a-468c-4bf7-9fdc-e579f3ecf896-ovn-controller-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:10 crc kubenswrapper[4933]: E0122 06:09:10.516393 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"cinder-db-secret\\\" not found\"" pod="openstack/cinder-633c-account-create-update-bklwf" podUID="729db25e-5864-4305-99dd-24ce61f45029" Jan 22 06:09:10 crc kubenswrapper[4933]: E0122 06:09:10.516463 4933 configmap.go:193] Couldn't get configMap openstack/openstack-cell1-scripts: configmap "openstack-cell1-scripts" not found Jan 22 06:09:10 crc kubenswrapper[4933]: E0122 06:09:10.516501 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1d8e9d8c-961f-4dc5-84b8-51c486220cdc-operator-scripts podName:1d8e9d8c-961f-4dc5-84b8-51c486220cdc nodeName:}" failed. No retries permitted until 2026-01-22 06:09:11.016486666 +0000 UTC m=+1398.853612029 (durationBeforeRetry 500ms). 
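
[editor's note] The differing durationBeforeRetry values (500ms here, 1s in the earlier cinder and nova entries) reflect the volume manager's per-operation exponential backoff: nestedpendingoperations roughly doubles the delay after each failed MountVolume.SetUp, starting around 500ms and capped in the low minutes, and the error clears on the first retry after the missing ConfigMap or Secret is recreated. Until then the same failure keeps surfacing as FailedMount events (illustrative, assuming `oc` access):

    # watch the mount errors recur at the backoff interval
    oc -n openstack get events --field-selector reason=FailedMount -w
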
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.516328 4933 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6-openstack-config-secret\") on node \"crc\" DevicePath \"\""
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.516344 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.516353 4933 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6-openstack-config\") on node \"crc\" DevicePath \"\""
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.516362 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8m7pt\" (UniqueName: \"kubernetes.io/projected/45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6-kube-api-access-8m7pt\") on node \"crc\" DevicePath \"\""
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.516370 4933 reconciler_common.go:293] "Volume detached for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/2160e11a-468c-4bf7-9fdc-e579f3ecf896-ovn-controller-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 22 06:09:10 crc kubenswrapper[4933]: E0122 06:09:10.516393 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"cinder-db-secret\\\" not found\"" pod="openstack/cinder-633c-account-create-update-bklwf" podUID="729db25e-5864-4305-99dd-24ce61f45029"
Jan 22 06:09:10 crc kubenswrapper[4933]: E0122 06:09:10.516463 4933 configmap.go:193] Couldn't get configMap openstack/openstack-cell1-scripts: configmap "openstack-cell1-scripts" not found
Jan 22 06:09:10 crc kubenswrapper[4933]: E0122 06:09:10.516501 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1d8e9d8c-961f-4dc5-84b8-51c486220cdc-operator-scripts podName:1d8e9d8c-961f-4dc5-84b8-51c486220cdc nodeName:}" failed. No retries permitted until 2026-01-22 06:09:11.016486666 +0000 UTC m=+1398.853612029 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/1d8e9d8c-961f-4dc5-84b8-51c486220cdc-operator-scripts") pod "root-account-create-update-kp2cn" (UID: "1d8e9d8c-961f-4dc5-84b8-51c486220cdc") : configmap "openstack-cell1-scripts" not found
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.817323 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0235a4fc-a22c-493c-95a8-7b90423eab40" path="/var/lib/kubelet/pods/0235a4fc-a22c-493c-95a8-7b90423eab40/volumes"
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.818364 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="10081d7b-4a85-4d4a-8b06-d12d59eac231" path="/var/lib/kubelet/pods/10081d7b-4a85-4d4a-8b06-d12d59eac231/volumes"
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.819357 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1086bd39-4637-4123-a7b2-d85d3a603dd5" path="/var/lib/kubelet/pods/1086bd39-4637-4123-a7b2-d85d3a603dd5/volumes"
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.820308 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="112a4ef5-b86f-4258-84db-bef5e66f9674" path="/var/lib/kubelet/pods/112a4ef5-b86f-4258-84db-bef5e66f9674/volumes"
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.827893 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ddd577785-b24fs"
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.828722 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="16cc03f7-df79-437e-903f-c0c6e5ba1cf0" path="/var/lib/kubelet/pods/16cc03f7-df79-437e-903f-c0c6e5ba1cf0/volumes"
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.829713 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="18be1708-2025-4b39-a74c-fe83cf4744ad" path="/var/lib/kubelet/pods/18be1708-2025-4b39-a74c-fe83cf4744ad/volumes"
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.830568 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="38d9042f-25f0-439a-9911-944297684f27" path="/var/lib/kubelet/pods/38d9042f-25f0-439a-9911-944297684f27/volumes"
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.834182 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6" path="/var/lib/kubelet/pods/45dfd4b5-6a72-42f7-b34d-d2cbb08ee3e6/volumes"
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.835482 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="47f2883c-cf0b-4774-96ea-6a6e6ec8f335" path="/var/lib/kubelet/pods/47f2883c-cf0b-4774-96ea-6a6e6ec8f335/volumes"
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.836024 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49b3b5e7-c5e1-4a79-90a2-339b4f07f585" path="/var/lib/kubelet/pods/49b3b5e7-c5e1-4a79-90a2-339b4f07f585/volumes"
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.839942 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6242cc75-a86f-488b-bf60-47cc855a330c" path="/var/lib/kubelet/pods/6242cc75-a86f-488b-bf60-47cc855a330c/volumes"
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.841493 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c1ba58d-aa1c-49e8-9975-319ff8cbdec5" path="/var/lib/kubelet/pods/6c1ba58d-aa1c-49e8-9975-319ff8cbdec5/volumes"
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.845039 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="76f30c05-b9b8-4439-aab6-b2c7e948a75f" path="/var/lib/kubelet/pods/76f30c05-b9b8-4439-aab6-b2c7e948a75f/volumes"
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.853184 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="878b8e95-58ae-4e22-82c2-14e2d85a230c" path="/var/lib/kubelet/pods/878b8e95-58ae-4e22-82c2-14e2d85a230c/volumes"
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.855026 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="913fa4df-79e3-40d8-8218-a869383e2a25" path="/var/lib/kubelet/pods/913fa4df-79e3-40d8-8218-a869383e2a25/volumes"
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.856250 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_a4b88c60-2edd-436c-996f-b8f07311f5ef/ovsdbserver-nb/0.log"
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.856309 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0"
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.856468 4933 scope.go:117] "RemoveContainer" containerID="cffd7868d1d8343c9ebe115ec9c5afd392b4e675691b973c2e8edd358ac529ec"
Jan 22 06:09:10 crc kubenswrapper[4933]: E0122 06:09:10.856920 4933 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Jan 22 06:09:10 crc kubenswrapper[4933]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash
Jan 22 06:09:10 crc kubenswrapper[4933]:
Jan 22 06:09:10 crc kubenswrapper[4933]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh
Jan 22 06:09:10 crc kubenswrapper[4933]:
Jan 22 06:09:10 crc kubenswrapper[4933]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."}
Jan 22 06:09:10 crc kubenswrapper[4933]:
Jan 22 06:09:10 crc kubenswrapper[4933]: MYSQL_CMD="mysql -h -u root -P 3306"
Jan 22 06:09:10 crc kubenswrapper[4933]:
Jan 22 06:09:10 crc kubenswrapper[4933]: if [ -n "nova_cell0" ]; then
Jan 22 06:09:10 crc kubenswrapper[4933]: GRANT_DATABASE="nova_cell0"
Jan 22 06:09:10 crc kubenswrapper[4933]: else
Jan 22 06:09:10 crc kubenswrapper[4933]: GRANT_DATABASE="*"
Jan 22 06:09:10 crc kubenswrapper[4933]: fi
Jan 22 06:09:10 crc kubenswrapper[4933]:
Jan 22 06:09:10 crc kubenswrapper[4933]: # going for maximum compatibility here:
Jan 22 06:09:10 crc kubenswrapper[4933]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used
Jan 22 06:09:10 crc kubenswrapper[4933]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not
Jan 22 06:09:10 crc kubenswrapper[4933]: # 3. create user with CREATE but then do all password and TLS with ALTER to
Jan 22 06:09:10 crc kubenswrapper[4933]: # support updates
Jan 22 06:09:10 crc kubenswrapper[4933]:
Jan 22 06:09:10 crc kubenswrapper[4933]: $MYSQL_CMD < logger="UnhandledError"
Jan 22 06:09:10 crc kubenswrapper[4933]: E0122 06:09:10.858054 4933 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Jan 22 06:09:10 crc kubenswrapper[4933]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash
Jan 22 06:09:10 crc kubenswrapper[4933]:
Jan 22 06:09:10 crc kubenswrapper[4933]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh
Jan 22 06:09:10 crc kubenswrapper[4933]:
Jan 22 06:09:10 crc kubenswrapper[4933]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."}
Jan 22 06:09:10 crc kubenswrapper[4933]:
Jan 22 06:09:10 crc kubenswrapper[4933]: MYSQL_CMD="mysql -h -u root -P 3306"
Jan 22 06:09:10 crc kubenswrapper[4933]:
Jan 22 06:09:10 crc kubenswrapper[4933]: if [ -n "neutron" ]; then
Jan 22 06:09:10 crc kubenswrapper[4933]: GRANT_DATABASE="neutron"
Jan 22 06:09:10 crc kubenswrapper[4933]: else
Jan 22 06:09:10 crc kubenswrapper[4933]: GRANT_DATABASE="*"
Jan 22 06:09:10 crc kubenswrapper[4933]: fi
Jan 22 06:09:10 crc kubenswrapper[4933]:
Jan 22 06:09:10 crc kubenswrapper[4933]: # going for maximum compatibility here:
Jan 22 06:09:10 crc kubenswrapper[4933]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used
Jan 22 06:09:10 crc kubenswrapper[4933]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not
Jan 22 06:09:10 crc kubenswrapper[4933]: # 3. create user with CREATE but then do all password and TLS with ALTER to
Jan 22 06:09:10 crc kubenswrapper[4933]: # support updates
Jan 22 06:09:10 crc kubenswrapper[4933]:
Jan 22 06:09:10 crc kubenswrapper[4933]: $MYSQL_CMD < logger="UnhandledError"
Jan 22 06:09:10 crc kubenswrapper[4933]: E0122 06:09:10.858122 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"nova-cell0-db-secret\\\" not found\"" pod="openstack/nova-cell0-f8f6-account-create-update-n4zdn" podUID="c2022299-4089-4740-ae8f-50ca5b4be2b5"
Jan 22 06:09:10 crc kubenswrapper[4933]: E0122 06:09:10.860666 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"neutron-db-secret\\\" not found\"" pod="openstack/neutron-98e3-account-create-update-hg9mt" podUID="e581b2e9-57f2-452a-956c-4e8c5d75b3fb"
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.865615 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a4587953-b9d4-470c-aaca-3cf9f80c8961" path="/var/lib/kubelet/pods/a4587953-b9d4-470c-aaca-3cf9f80c8961/volumes"
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.868993 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b01a1347-77f8-4f4a-b98b-862d96c7c55c" path="/var/lib/kubelet/pods/b01a1347-77f8-4f4a-b98b-862d96c7c55c/volumes"
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.870614 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b9516c75-f573-46d8-b9b1-036db5eee52f" path="/var/lib/kubelet/pods/b9516c75-f573-46d8-b9b1-036db5eee52f/volumes"
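Each CreateContainerConfigError above means the kubelet could not render the container's environment because a referenced Secret does not exist yet: the account-create jobs are racing the operator that publishes nova-cell1-db-secret, cinder-db-secret, nova-cell0-db-secret and neutron-db-secret, and the kubelet keeps retrying on its own. A hedged way to watch the race from outside, assuming kubectl access to the same cluster (namespace and names are taken from the log):

    # Which of the DB secrets exist yet? Missing ones explain the errors above.
    kubectl -n openstack get secrets | grep -- -db-secret
    # The pod's event stream repeats the same message until the secret appears:
    kubectl -n openstack describe pod nova-cell1-548e-account-create-update-c8h9b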
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.871975 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c6ae84cf-ec9a-42e4-9c55-035d9accb4b2" path="/var/lib/kubelet/pods/c6ae84cf-ec9a-42e4-9c55-035d9accb4b2/volumes"
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.872476 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e553a055-30a6-4e9c-b424-66deb8dfabbb" path="/var/lib/kubelet/pods/e553a055-30a6-4e9c-b424-66deb8dfabbb/volumes"
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.874980 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e591255a-6edc-40de-a50f-2f39d3e9bb59" path="/var/lib/kubelet/pods/e591255a-6edc-40de-a50f-2f39d3e9bb59/volumes"
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.878306 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ddd577785-b24fs" event={"ID":"810720aa-b861-48e5-bd66-b1544f4f683a","Type":"ContainerDied","Data":"be973f0c711a601539c68dbefa84d0f75359d8153ac8c7d3bb7dc0a944ae2556"}
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.878431 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-575b89575b-kkrzb"]
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.878450 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"56625c99-64dc-4742-9927-0210d8fe8d9d","Type":"ContainerDied","Data":"a37bad5d083026a83896f1bbe2715cb126a4a56637c064f44bca0ac67965f6f0"}
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.878461 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.879769 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-kp2cn"]
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.879782 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.879793 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-e75b-account-create-update-tmgdt"]
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.879896 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-kp2cn"]
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.879906 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.879916 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-6mkbd"]
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.879927 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-6mkbd"]
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.879939 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.879949 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-x8sfh"]
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.880049 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-x8sfh"]
Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.880060 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"]
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-548e-account-create-update-c8h9b"] Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.880220 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-f8f6-account-create-update-n4zdn"] Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.880536 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-conductor-0" podUID="2a218455-793d-4ccf-880a-d89b28e98b2d" containerName="nova-cell1-conductor-conductor" containerID="cri-o://38208bba151b5aef67b3039fba534cb60f5a8a95e9c63a84f708a2f3f493fde6" gracePeriod=30 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.879662 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-575b89575b-kkrzb" podUID="91864da0-319b-46e9-b4ef-8ccee4c52d37" containerName="barbican-api-log" containerID="cri-o://919264dcf0b82bef1d83226e9c6472a32457bb0515e5a68ff0d276535cb3dcff" gracePeriod=30 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.879740 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-575b89575b-kkrzb" podUID="91864da0-319b-46e9-b4ef-8ccee4c52d37" containerName="barbican-api" containerID="cri-o://c6536d22d6c93b55c99436f1124a25adb4cae4775413fcbf11c9e30b27a18603" gracePeriod=30 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.881387 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="a944aa66-1c67-4661-968f-e976494cf1eb" containerName="nova-cell0-conductor-conductor" containerID="cri-o://3e78db5f3e39d6ac0502c10f14b200e06daa035604cc2c40311b7a3661cf9a95" gracePeriod=30 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.881505 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="cf956626-51e3-4aff-b24b-4a553160327c" containerName="nova-scheduler-scheduler" containerID="cri-o://727aeb5a63faa31a31c03450f9d3d8823575f9ab0abd3ff2f9a00a0f91ec8597" gracePeriod=30 Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.888971 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-98e3-account-create-update-hg9mt"] Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.919456 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-szr8s"] Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.920087 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_f4b41ac3-d05d-4bec-952f-c362cb5aad64/ovsdbserver-sb/0.log" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.920149 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.931953 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-metrics-szr8s"] Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.938036 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-phtjz"] Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.939476 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hqhks\" (UniqueName: \"kubernetes.io/projected/a4b88c60-2edd-436c-996f-b8f07311f5ef-kube-api-access-hqhks\") pod \"a4b88c60-2edd-436c-996f-b8f07311f5ef\" (UID: \"a4b88c60-2edd-436c-996f-b8f07311f5ef\") " Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.939565 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4b88c60-2edd-436c-996f-b8f07311f5ef-config\") pod \"a4b88c60-2edd-436c-996f-b8f07311f5ef\" (UID: \"a4b88c60-2edd-436c-996f-b8f07311f5ef\") " Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.939639 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/a4b88c60-2edd-436c-996f-b8f07311f5ef-metrics-certs-tls-certs\") pod \"a4b88c60-2edd-436c-996f-b8f07311f5ef\" (UID: \"a4b88c60-2edd-436c-996f-b8f07311f5ef\") " Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.939663 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/810720aa-b861-48e5-bd66-b1544f4f683a-config\") pod \"810720aa-b861-48e5-bd66-b1544f4f683a\" (UID: \"810720aa-b861-48e5-bd66-b1544f4f683a\") " Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.939743 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4b88c60-2edd-436c-996f-b8f07311f5ef-combined-ca-bundle\") pod \"a4b88c60-2edd-436c-996f-b8f07311f5ef\" (UID: \"a4b88c60-2edd-436c-996f-b8f07311f5ef\") " Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.941369 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a4b88c60-2edd-436c-996f-b8f07311f5ef-ovsdbserver-nb-tls-certs\") pod \"a4b88c60-2edd-436c-996f-b8f07311f5ef\" (UID: \"a4b88c60-2edd-436c-996f-b8f07311f5ef\") " Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.941386 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a4b88c60-2edd-436c-996f-b8f07311f5ef-scripts\") pod \"a4b88c60-2edd-436c-996f-b8f07311f5ef\" (UID: \"a4b88c60-2edd-436c-996f-b8f07311f5ef\") " Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.941413 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/810720aa-b861-48e5-bd66-b1544f4f683a-dns-swift-storage-0\") pod \"810720aa-b861-48e5-bd66-b1544f4f683a\" (UID: \"810720aa-b861-48e5-bd66-b1544f4f683a\") " Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.941430 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/a4b88c60-2edd-436c-996f-b8f07311f5ef-ovsdb-rundir\") pod \"a4b88c60-2edd-436c-996f-b8f07311f5ef\" (UID: 
\"a4b88c60-2edd-436c-996f-b8f07311f5ef\") " Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.942254 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndbcluster-nb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"a4b88c60-2edd-436c-996f-b8f07311f5ef\" (UID: \"a4b88c60-2edd-436c-996f-b8f07311f5ef\") " Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.942299 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/810720aa-b861-48e5-bd66-b1544f4f683a-dns-svc\") pod \"810720aa-b861-48e5-bd66-b1544f4f683a\" (UID: \"810720aa-b861-48e5-bd66-b1544f4f683a\") " Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.942318 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/810720aa-b861-48e5-bd66-b1544f4f683a-ovsdbserver-nb\") pod \"810720aa-b861-48e5-bd66-b1544f4f683a\" (UID: \"810720aa-b861-48e5-bd66-b1544f4f683a\") " Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.942367 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/810720aa-b861-48e5-bd66-b1544f4f683a-ovsdbserver-sb\") pod \"810720aa-b861-48e5-bd66-b1544f4f683a\" (UID: \"810720aa-b861-48e5-bd66-b1544f4f683a\") " Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.942393 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q5t62\" (UniqueName: \"kubernetes.io/projected/810720aa-b861-48e5-bd66-b1544f4f683a-kube-api-access-q5t62\") pod \"810720aa-b861-48e5-bd66-b1544f4f683a\" (UID: \"810720aa-b861-48e5-bd66-b1544f4f683a\") " Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.940790 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a4b88c60-2edd-436c-996f-b8f07311f5ef-config" (OuterVolumeSpecName: "config") pod "a4b88c60-2edd-436c-996f-b8f07311f5ef" (UID: "a4b88c60-2edd-436c-996f-b8f07311f5ef"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.946508 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-phtjz"] Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.947263 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a4b88c60-2edd-436c-996f-b8f07311f5ef-scripts" (OuterVolumeSpecName: "scripts") pod "a4b88c60-2edd-436c-996f-b8f07311f5ef" (UID: "a4b88c60-2edd-436c-996f-b8f07311f5ef"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.947853 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a4b88c60-2edd-436c-996f-b8f07311f5ef-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "a4b88c60-2edd-436c-996f-b8f07311f5ef" (UID: "a4b88c60-2edd-436c-996f-b8f07311f5ef"). InnerVolumeSpecName "ovsdb-rundir". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.949765 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4b88c60-2edd-436c-996f-b8f07311f5ef-kube-api-access-hqhks" (OuterVolumeSpecName: "kube-api-access-hqhks") pod "a4b88c60-2edd-436c-996f-b8f07311f5ef" (UID: "a4b88c60-2edd-436c-996f-b8f07311f5ef"). InnerVolumeSpecName "kube-api-access-hqhks". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.952142 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "ovndbcluster-nb-etc-ovn") pod "a4b88c60-2edd-436c-996f-b8f07311f5ef" (UID: "a4b88c60-2edd-436c-996f-b8f07311f5ef"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 22 06:09:10 crc kubenswrapper[4933]: I0122 06:09:10.972378 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="4d712958-1ece-47de-9798-6e852b03c565" containerName="rabbitmq" containerID="cri-o://c1710389b2dae67dfbd6fae597c0b78a024bc303c5cb265a90023ca99e2818b9" gracePeriod=604800 Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.020718 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/810720aa-b861-48e5-bd66-b1544f4f683a-kube-api-access-q5t62" (OuterVolumeSpecName: "kube-api-access-q5t62") pod "810720aa-b861-48e5-bd66-b1544f4f683a" (UID: "810720aa-b861-48e5-bd66-b1544f4f683a"). InnerVolumeSpecName "kube-api-access-q5t62". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.021534 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="47299478-bcfd-4f21-a56c-efcf7b167999" containerName="rabbitmq" containerID="cri-o://9c99762ed66dc820d592fe5b2a44c175901d1c948185099ec445d18b9d3c9e4e" gracePeriod=604800 Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.044230 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f4b41ac3-d05d-4bec-952f-c362cb5aad64-scripts\") pod \"f4b41ac3-d05d-4bec-952f-c362cb5aad64\" (UID: \"f4b41ac3-d05d-4bec-952f-c362cb5aad64\") " Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.044306 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndbcluster-sb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"f4b41ac3-d05d-4bec-952f-c362cb5aad64\" (UID: \"f4b41ac3-d05d-4bec-952f-c362cb5aad64\") " Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.044361 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/f4b41ac3-d05d-4bec-952f-c362cb5aad64-ovsdb-rundir\") pod \"f4b41ac3-d05d-4bec-952f-c362cb5aad64\" (UID: \"f4b41ac3-d05d-4bec-952f-c362cb5aad64\") " Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.044505 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4b41ac3-d05d-4bec-952f-c362cb5aad64-ovsdbserver-sb-tls-certs\") pod \"f4b41ac3-d05d-4bec-952f-c362cb5aad64\" (UID: \"f4b41ac3-d05d-4bec-952f-c362cb5aad64\") " Jan 22 06:09:11 crc kubenswrapper[4933]: 
I0122 06:09:11.044567 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4b41ac3-d05d-4bec-952f-c362cb5aad64-metrics-certs-tls-certs\") pod \"f4b41ac3-d05d-4bec-952f-c362cb5aad64\" (UID: \"f4b41ac3-d05d-4bec-952f-c362cb5aad64\") " Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.044622 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4b41ac3-d05d-4bec-952f-c362cb5aad64-config\") pod \"f4b41ac3-d05d-4bec-952f-c362cb5aad64\" (UID: \"f4b41ac3-d05d-4bec-952f-c362cb5aad64\") " Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.044645 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4b41ac3-d05d-4bec-952f-c362cb5aad64-combined-ca-bundle\") pod \"f4b41ac3-d05d-4bec-952f-c362cb5aad64\" (UID: \"f4b41ac3-d05d-4bec-952f-c362cb5aad64\") " Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.044661 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7l8nh\" (UniqueName: \"kubernetes.io/projected/f4b41ac3-d05d-4bec-952f-c362cb5aad64-kube-api-access-7l8nh\") pod \"f4b41ac3-d05d-4bec-952f-c362cb5aad64\" (UID: \"f4b41ac3-d05d-4bec-952f-c362cb5aad64\") " Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.045161 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a4b88c60-2edd-436c-996f-b8f07311f5ef-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.045172 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/a4b88c60-2edd-436c-996f-b8f07311f5ef-ovsdb-rundir\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.045208 4933 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" " Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.045217 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q5t62\" (UniqueName: \"kubernetes.io/projected/810720aa-b861-48e5-bd66-b1544f4f683a-kube-api-access-q5t62\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.045228 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hqhks\" (UniqueName: \"kubernetes.io/projected/a4b88c60-2edd-436c-996f-b8f07311f5ef-kube-api-access-hqhks\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.045236 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4b88c60-2edd-436c-996f-b8f07311f5ef-config\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.046881 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f4b41ac3-d05d-4bec-952f-c362cb5aad64-scripts" (OuterVolumeSpecName: "scripts") pod "f4b41ac3-d05d-4bec-952f-c362cb5aad64" (UID: "f4b41ac3-d05d-4bec-952f-c362cb5aad64"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.047231 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f4b41ac3-d05d-4bec-952f-c362cb5aad64-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "f4b41ac3-d05d-4bec-952f-c362cb5aad64" (UID: "f4b41ac3-d05d-4bec-952f-c362cb5aad64"). InnerVolumeSpecName "ovsdb-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:09:11 crc kubenswrapper[4933]: E0122 06:09:11.047290 4933 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Jan 22 06:09:11 crc kubenswrapper[4933]: E0122 06:09:11.047358 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/47299478-bcfd-4f21-a56c-efcf7b167999-config-data podName:47299478-bcfd-4f21-a56c-efcf7b167999 nodeName:}" failed. No retries permitted until 2026-01-22 06:09:13.047341435 +0000 UTC m=+1400.884466788 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/47299478-bcfd-4f21-a56c-efcf7b167999-config-data") pod "rabbitmq-server-0" (UID: "47299478-bcfd-4f21-a56c-efcf7b167999") : configmap "rabbitmq-config-data" not found Jan 22 06:09:11 crc kubenswrapper[4933]: E0122 06:09:11.049393 4933 configmap.go:193] Couldn't get configMap openstack/openstack-cell1-scripts: configmap "openstack-cell1-scripts" not found Jan 22 06:09:11 crc kubenswrapper[4933]: E0122 06:09:11.049465 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1d8e9d8c-961f-4dc5-84b8-51c486220cdc-operator-scripts podName:1d8e9d8c-961f-4dc5-84b8-51c486220cdc nodeName:}" failed. No retries permitted until 2026-01-22 06:09:12.049446217 +0000 UTC m=+1399.886571610 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/1d8e9d8c-961f-4dc5-84b8-51c486220cdc-operator-scripts") pod "root-account-create-update-kp2cn" (UID: "1d8e9d8c-961f-4dc5-84b8-51c486220cdc") : configmap "openstack-cell1-scripts" not found Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.049901 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f4b41ac3-d05d-4bec-952f-c362cb5aad64-config" (OuterVolumeSpecName: "config") pod "f4b41ac3-d05d-4bec-952f-c362cb5aad64" (UID: "f4b41ac3-d05d-4bec-952f-c362cb5aad64"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.062441 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f4b41ac3-d05d-4bec-952f-c362cb5aad64-kube-api-access-7l8nh" (OuterVolumeSpecName: "kube-api-access-7l8nh") pod "f4b41ac3-d05d-4bec-952f-c362cb5aad64" (UID: "f4b41ac3-d05d-4bec-952f-c362cb5aad64"). InnerVolumeSpecName "kube-api-access-7l8nh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.067253 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/810720aa-b861-48e5-bd66-b1544f4f683a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "810720aa-b861-48e5-bd66-b1544f4f683a" (UID: "810720aa-b861-48e5-bd66-b1544f4f683a"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.069207 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "ovndbcluster-sb-etc-ovn") pod "f4b41ac3-d05d-4bec-952f-c362cb5aad64" (UID: "f4b41ac3-d05d-4bec-952f-c362cb5aad64"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.106791 4933 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc" Jan 22 06:09:11 crc kubenswrapper[4933]: E0122 06:09:11.149364 4933 secret.go:188] Couldn't get secret openstack/cinder-config-data: secret "cinder-config-data" not found Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.149404 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4b41ac3-d05d-4bec-952f-c362cb5aad64-config\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.149426 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7l8nh\" (UniqueName: \"kubernetes.io/projected/f4b41ac3-d05d-4bec-952f-c362cb5aad64-kube-api-access-7l8nh\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:11 crc kubenswrapper[4933]: E0122 06:09:11.149440 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-config-data podName:0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9 nodeName:}" failed. No retries permitted until 2026-01-22 06:09:13.149422437 +0000 UTC m=+1400.986547790 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-config-data") pod "cinder-scheduler-0" (UID: "0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9") : secret "cinder-config-data" not found Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.149476 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f4b41ac3-d05d-4bec-952f-c362cb5aad64-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:11 crc kubenswrapper[4933]: E0122 06:09:11.149498 4933 secret.go:188] Couldn't get secret openstack/cinder-scripts: secret "cinder-scripts" not found Jan 22 06:09:11 crc kubenswrapper[4933]: E0122 06:09:11.149542 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-scripts podName:0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9 nodeName:}" failed. No retries permitted until 2026-01-22 06:09:13.14952901 +0000 UTC m=+1400.986654363 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "scripts" (UniqueName: "kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-scripts") pod "cinder-scheduler-0" (UID: "0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9") : secret "cinder-scripts" not found Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.149501 4933 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.149557 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/f4b41ac3-d05d-4bec-952f-c362cb5aad64-ovsdb-rundir\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:11 crc kubenswrapper[4933]: E0122 06:09:11.149917 4933 secret.go:188] Couldn't get secret openstack/cinder-scheduler-config-data: secret "cinder-scheduler-config-data" not found Jan 22 06:09:11 crc kubenswrapper[4933]: E0122 06:09:11.149968 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-config-data-custom podName:0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9 nodeName:}" failed. No retries permitted until 2026-01-22 06:09:13.149956191 +0000 UTC m=+1400.987081594 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data-custom" (UniqueName: "kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-config-data-custom") pod "cinder-scheduler-0" (UID: "0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9") : secret "cinder-scheduler-config-data" not found Jan 22 06:09:11 crc kubenswrapper[4933]: E0122 06:09:11.150021 4933 secret.go:188] Couldn't get secret openstack/nova-cell1-novncproxy-config-data: secret "nova-cell1-novncproxy-config-data" not found Jan 22 06:09:11 crc kubenswrapper[4933]: E0122 06:09:11.150049 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/eb84c6f9-457d-46df-a4de-b5bfe612e945-config-data podName:eb84c6f9-457d-46df-a4de-b5bfe612e945 nodeName:}" failed. No retries permitted until 2026-01-22 06:09:13.150040043 +0000 UTC m=+1400.987165476 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/eb84c6f9-457d-46df-a4de-b5bfe612e945-config-data") pod "nova-cell1-novncproxy-0" (UID: "eb84c6f9-457d-46df-a4de-b5bfe612e945") : secret "nova-cell1-novncproxy-config-data" not found Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.150093 4933 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.150109 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/810720aa-b861-48e5-bd66-b1544f4f683a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.161799 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/810720aa-b861-48e5-bd66-b1544f4f683a-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "810720aa-b861-48e5-bd66-b1544f4f683a" (UID: "810720aa-b861-48e5-bd66-b1544f4f683a"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.173015 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/810720aa-b861-48e5-bd66-b1544f4f683a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "810720aa-b861-48e5-bd66-b1544f4f683a" (UID: "810720aa-b861-48e5-bd66-b1544f4f683a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.178827 4933 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.242874 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4b88c60-2edd-436c-996f-b8f07311f5ef-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a4b88c60-2edd-436c-996f-b8f07311f5ef" (UID: "a4b88c60-2edd-436c-996f-b8f07311f5ef"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.243885 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/810720aa-b861-48e5-bd66-b1544f4f683a-config" (OuterVolumeSpecName: "config") pod "810720aa-b861-48e5-bd66-b1544f4f683a" (UID: "810720aa-b861-48e5-bd66-b1544f4f683a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.246219 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f4b41ac3-d05d-4bec-952f-c362cb5aad64-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f4b41ac3-d05d-4bec-952f-c362cb5aad64" (UID: "f4b41ac3-d05d-4bec-952f-c362cb5aad64"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.252213 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4b41ac3-d05d-4bec-952f-c362cb5aad64-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.252244 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/810720aa-b861-48e5-bd66-b1544f4f683a-config\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.252254 4933 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.252262 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4b88c60-2edd-436c-996f-b8f07311f5ef-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.252272 4933 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/810720aa-b861-48e5-bd66-b1544f4f683a-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.252281 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/810720aa-b861-48e5-bd66-b1544f4f683a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.286876 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f4b41ac3-d05d-4bec-952f-c362cb5aad64-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "f4b41ac3-d05d-4bec-952f-c362cb5aad64" (UID: "f4b41ac3-d05d-4bec-952f-c362cb5aad64"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.306037 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4b88c60-2edd-436c-996f-b8f07311f5ef-ovsdbserver-nb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-nb-tls-certs") pod "a4b88c60-2edd-436c-996f-b8f07311f5ef" (UID: "a4b88c60-2edd-436c-996f-b8f07311f5ef"). InnerVolumeSpecName "ovsdbserver-nb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.309920 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/810720aa-b861-48e5-bd66-b1544f4f683a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "810720aa-b861-48e5-bd66-b1544f4f683a" (UID: "810720aa-b861-48e5-bd66-b1544f4f683a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.319607 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4b88c60-2edd-436c-996f-b8f07311f5ef-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "a4b88c60-2edd-436c-996f-b8f07311f5ef" (UID: "a4b88c60-2edd-436c-996f-b8f07311f5ef"). InnerVolumeSpecName "metrics-certs-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.349586 4933 scope.go:117] "RemoveContainer" containerID="cffd7868d1d8343c9ebe115ec9c5afd392b4e675691b973c2e8edd358ac529ec" Jan 22 06:09:11 crc kubenswrapper[4933]: E0122 06:09:11.350037 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cffd7868d1d8343c9ebe115ec9c5afd392b4e675691b973c2e8edd358ac529ec\": container with ID starting with cffd7868d1d8343c9ebe115ec9c5afd392b4e675691b973c2e8edd358ac529ec not found: ID does not exist" containerID="cffd7868d1d8343c9ebe115ec9c5afd392b4e675691b973c2e8edd358ac529ec" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.350088 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cffd7868d1d8343c9ebe115ec9c5afd392b4e675691b973c2e8edd358ac529ec"} err="failed to get container status \"cffd7868d1d8343c9ebe115ec9c5afd392b4e675691b973c2e8edd358ac529ec\": rpc error: code = NotFound desc = could not find container \"cffd7868d1d8343c9ebe115ec9c5afd392b4e675691b973c2e8edd358ac529ec\": container with ID starting with cffd7868d1d8343c9ebe115ec9c5afd392b4e675691b973c2e8edd358ac529ec not found: ID does not exist" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.350107 4933 scope.go:117] "RemoveContainer" containerID="be973f0c711a601539c68dbefa84d0f75359d8153ac8c7d3bb7dc0a944ae2556" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.358249 4933 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/a4b88c60-2edd-436c-996f-b8f07311f5ef-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.358282 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a4b88c60-2edd-436c-996f-b8f07311f5ef-ovsdbserver-nb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.358292 4933 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/810720aa-b861-48e5-bd66-b1544f4f683a-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.358300 4933 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4b41ac3-d05d-4bec-952f-c362cb5aad64-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.380804 4933 scope.go:117] "RemoveContainer" containerID="1d8140b090d28240c97f862586cc231f7cc391744e9a708646c8fbb1d6dc838a" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.387047 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f4b41ac3-d05d-4bec-952f-c362cb5aad64-ovsdbserver-sb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-sb-tls-certs") pod "f4b41ac3-d05d-4bec-952f-c362cb5aad64" (UID: "f4b41ac3-d05d-4bec-952f-c362cb5aad64"). InnerVolumeSpecName "ovsdbserver-sb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.460574 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4b41ac3-d05d-4bec-952f-c362cb5aad64-ovsdbserver-sb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:11 crc kubenswrapper[4933]: E0122 06:09:11.499923 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="480d1b66c4dcca2c067fc5041e62b5b6640e85f510d6d67079fc834d3bbb885a" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Jan 22 06:09:11 crc kubenswrapper[4933]: E0122 06:09:11.504167 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="480d1b66c4dcca2c067fc5041e62b5b6640e85f510d6d67079fc834d3bbb885a" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Jan 22 06:09:11 crc kubenswrapper[4933]: E0122 06:09:11.517775 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="480d1b66c4dcca2c067fc5041e62b5b6640e85f510d6d67079fc834d3bbb885a" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Jan 22 06:09:11 crc kubenswrapper[4933]: E0122 06:09:11.517812 4933 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-northd-0" podUID="9445b2f3-83ea-4e79-8312-ceffa2208f77" containerName="ovn-northd" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.549731 4933 generic.go:334] "Generic (PLEG): container finished" podID="fe53ac25-75b3-42c3-802f-5359023b26e7" containerID="e6d22e9623f38cd68cde6e98c7b0ee2b102f8edb35af1c6667a3464fee551f58" exitCode=0 Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.549904 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5f996bcdbf-kwx6s" event={"ID":"fe53ac25-75b3-42c3-802f-5359023b26e7","Type":"ContainerDied","Data":"e6d22e9623f38cd68cde6e98c7b0ee2b102f8edb35af1c6667a3464fee551f58"} Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.556010 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_f4b41ac3-d05d-4bec-952f-c362cb5aad64/ovsdbserver-sb/0.log" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.556105 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"f4b41ac3-d05d-4bec-952f-c362cb5aad64","Type":"ContainerDied","Data":"46368c2f9984c912eaf46a5911397d7d9f98e2ecfcea66ba3b5a05fdeed1b4e8"} Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.556148 4933 scope.go:117] "RemoveContainer" containerID="b8fa72a1d737aaba2c6ef382aac2a61139f81989233737acfd4fd0708eec386a" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.556320 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.562217 4933 generic.go:334] "Generic (PLEG): container finished" podID="0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9" containerID="47d5a47f9c85c2d9dc14b7850a92bcef2cae8e70655cb013dd920c1d39eb6587" exitCode=0 Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.562270 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9","Type":"ContainerDied","Data":"47d5a47f9c85c2d9dc14b7850a92bcef2cae8e70655cb013dd920c1d39eb6587"} Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.565538 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.568023 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-548e-account-create-update-c8h9b" event={"ID":"9cb8166a-621e-4e96-b4e7-4d4fca32a727","Type":"ContainerStarted","Data":"ff823bdb91836445cb31164596d700ceae160b48349d6e6920accf1be2569f8b"} Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.574712 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ddd577785-b24fs" event={"ID":"810720aa-b861-48e5-bd66-b1544f4f683a","Type":"ContainerDied","Data":"3c3bbc0de597556753dea50b495143395c55d843ef905670d4ed10e80d26ec19"} Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.574862 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ddd577785-b24fs" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.596221 4933 scope.go:117] "RemoveContainer" containerID="1e48373d2f9642eae495cf2b2ade3933c39166aaf3df09df17b1f26537b81222" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.665364 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb84c6f9-457d-46df-a4de-b5bfe612e945-config-data\") pod \"eb84c6f9-457d-46df-a4de-b5bfe612e945\" (UID: \"eb84c6f9-457d-46df-a4de-b5bfe612e945\") " Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.665632 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/eb84c6f9-457d-46df-a4de-b5bfe612e945-nova-novncproxy-tls-certs\") pod \"eb84c6f9-457d-46df-a4de-b5bfe612e945\" (UID: \"eb84c6f9-457d-46df-a4de-b5bfe612e945\") " Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.665843 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb84c6f9-457d-46df-a4de-b5bfe612e945-combined-ca-bundle\") pod \"eb84c6f9-457d-46df-a4de-b5bfe612e945\" (UID: \"eb84c6f9-457d-46df-a4de-b5bfe612e945\") " Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.665946 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/eb84c6f9-457d-46df-a4de-b5bfe612e945-vencrypt-tls-certs\") pod \"eb84c6f9-457d-46df-a4de-b5bfe612e945\" (UID: \"eb84c6f9-457d-46df-a4de-b5bfe612e945\") " Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.666056 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-njkfk\" (UniqueName: \"kubernetes.io/projected/eb84c6f9-457d-46df-a4de-b5bfe612e945-kube-api-access-njkfk\") pod 
\"eb84c6f9-457d-46df-a4de-b5bfe612e945\" (UID: \"eb84c6f9-457d-46df-a4de-b5bfe612e945\") " Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.681043 4933 generic.go:334] "Generic (PLEG): container finished" podID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerID="4bf3bc4884d64bf94b227e5a3f89d2cd681e2010861ba6ad807f97e6ed46fa36" exitCode=0 Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.681104 4933 generic.go:334] "Generic (PLEG): container finished" podID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerID="805b814e2cbc13d8230bd687a77c696f506fa359d8f4364fabf274beca8c9fbe" exitCode=0 Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.681121 4933 generic.go:334] "Generic (PLEG): container finished" podID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerID="263bbaf72a78f3a591d84bbd2a5fddf505db79d66e26fc745570da4a483e5714" exitCode=0 Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.681133 4933 generic.go:334] "Generic (PLEG): container finished" podID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerID="88bc3429a376b19172757bdf15fd8015c87d29a4672fc50f7cd63426a4a15deb" exitCode=0 Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.681211 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016","Type":"ContainerDied","Data":"4bf3bc4884d64bf94b227e5a3f89d2cd681e2010861ba6ad807f97e6ed46fa36"} Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.681263 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016","Type":"ContainerDied","Data":"805b814e2cbc13d8230bd687a77c696f506fa359d8f4364fabf274beca8c9fbe"} Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.681280 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016","Type":"ContainerDied","Data":"263bbaf72a78f3a591d84bbd2a5fddf505db79d66e26fc745570da4a483e5714"} Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.681350 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016","Type":"ContainerDied","Data":"88bc3429a376b19172757bdf15fd8015c87d29a4672fc50f7cd63426a4a15deb"} Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.690540 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eb84c6f9-457d-46df-a4de-b5bfe612e945-kube-api-access-njkfk" (OuterVolumeSpecName: "kube-api-access-njkfk") pod "eb84c6f9-457d-46df-a4de-b5bfe612e945" (UID: "eb84c6f9-457d-46df-a4de-b5bfe612e945"). InnerVolumeSpecName "kube-api-access-njkfk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.695648 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb84c6f9-457d-46df-a4de-b5bfe612e945-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "eb84c6f9-457d-46df-a4de-b5bfe612e945" (UID: "eb84c6f9-457d-46df-a4de-b5bfe612e945"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.700511 4933 generic.go:334] "Generic (PLEG): container finished" podID="91864da0-319b-46e9-b4ef-8ccee4c52d37" containerID="919264dcf0b82bef1d83226e9c6472a32457bb0515e5a68ff0d276535cb3dcff" exitCode=143 Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.700575 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-575b89575b-kkrzb" event={"ID":"91864da0-319b-46e9-b4ef-8ccee4c52d37","Type":"ContainerDied","Data":"919264dcf0b82bef1d83226e9c6472a32457bb0515e5a68ff0d276535cb3dcff"} Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.709413 4933 generic.go:334] "Generic (PLEG): container finished" podID="6fae4840-8fac-4192-8358-cbcae518e70d" containerID="769102d68320fe984ffaababc2e759013ea7756eebffbcf2bed246a58368e7c3" exitCode=143 Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.709479 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6fae4840-8fac-4192-8358-cbcae518e70d","Type":"ContainerDied","Data":"769102d68320fe984ffaababc2e759013ea7756eebffbcf2bed246a58368e7c3"} Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.711581 4933 generic.go:334] "Generic (PLEG): container finished" podID="0fecb571-89ee-4d10-a1e3-e3755946df2b" containerID="11cdb1302043612d4966e8227c66cd55138cd37ef40b24e2659a9776bb49e386" exitCode=143 Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.711616 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7c6944456-lk7l7" event={"ID":"0fecb571-89ee-4d10-a1e3-e3755946df2b","Type":"ContainerDied","Data":"11cdb1302043612d4966e8227c66cd55138cd37ef40b24e2659a9776bb49e386"} Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.713360 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-f8f6-account-create-update-n4zdn" event={"ID":"c2022299-4089-4740-ae8f-50ca5b4be2b5","Type":"ContainerStarted","Data":"754300a7fd947cddfed9509af477dc7a3145c7fd9dcaaa5740f2c768f33efa5c"} Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.744480 4933 generic.go:334] "Generic (PLEG): container finished" podID="eb84c6f9-457d-46df-a4de-b5bfe612e945" containerID="e4c2907944b8cddfe0450299fb215823e03077dd25b111c02c49498256c142f1" exitCode=0 Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.744582 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"eb84c6f9-457d-46df-a4de-b5bfe612e945","Type":"ContainerDied","Data":"e4c2907944b8cddfe0450299fb215823e03077dd25b111c02c49498256c142f1"} Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.744620 4933 scope.go:117] "RemoveContainer" containerID="e4c2907944b8cddfe0450299fb215823e03077dd25b111c02c49498256c142f1" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.744750 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.762846 4933 generic.go:334] "Generic (PLEG): container finished" podID="62dded9b-a123-4bd0-ab4c-8de7680be023" containerID="88766015ee280911eb0e2545ab5037d409220a2d72a53bab39fa2849ee4efc90" exitCode=0 Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.762909 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-67fd8f79cc-pb6vw" event={"ID":"62dded9b-a123-4bd0-ab4c-8de7680be023","Type":"ContainerDied","Data":"88766015ee280911eb0e2545ab5037d409220a2d72a53bab39fa2849ee4efc90"} Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.762960 4933 generic.go:334] "Generic (PLEG): container finished" podID="62dded9b-a123-4bd0-ab4c-8de7680be023" containerID="23a7c678de86633dca6c9c40d455ce3ac68ebfa5501131949cd43ab694fb34bd" exitCode=0 Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.762980 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-67fd8f79cc-pb6vw" event={"ID":"62dded9b-a123-4bd0-ab4c-8de7680be023","Type":"ContainerDied","Data":"23a7c678de86633dca6c9c40d455ce3ac68ebfa5501131949cd43ab694fb34bd"} Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.767506 4933 generic.go:334] "Generic (PLEG): container finished" podID="5505bed5-dba3-4067-b94c-acd00b7c37c7" containerID="ae64a47d0a256e71036f25f3770e6938d214f52aa76de1db83e4a3d607be7dbc" exitCode=143 Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.767642 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6448b46975-jx7gp" event={"ID":"5505bed5-dba3-4067-b94c-acd00b7c37c7","Type":"ContainerDied","Data":"ae64a47d0a256e71036f25f3770e6938d214f52aa76de1db83e4a3d607be7dbc"} Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.772974 4933 generic.go:334] "Generic (PLEG): container finished" podID="943da5ba-d325-4686-871d-802b7730d02a" containerID="4205f5d3155c766e46db2874af6cdb743c6e1ecc121652ab6d216950365cf512" exitCode=0 Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.773041 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"943da5ba-d325-4686-871d-802b7730d02a","Type":"ContainerDied","Data":"4205f5d3155c766e46db2874af6cdb743c6e1ecc121652ab6d216950365cf512"} Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.775584 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-98e3-account-create-update-hg9mt" event={"ID":"e581b2e9-57f2-452a-956c-4e8c5d75b3fb","Type":"ContainerStarted","Data":"8c01f9d2210219028262dcb419924ef903622151f70fcc4e673236b39ae25272"} Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.775721 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb84c6f9-457d-46df-a4de-b5bfe612e945-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.775788 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-njkfk\" (UniqueName: \"kubernetes.io/projected/eb84c6f9-457d-46df-a4de-b5bfe612e945-kube-api-access-njkfk\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:11 crc kubenswrapper[4933]: E0122 06:09:11.775900 4933 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Jan 22 06:09:11 crc kubenswrapper[4933]: E0122 06:09:11.775986 4933 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/configmap/4d712958-1ece-47de-9798-6e852b03c565-config-data podName:4d712958-1ece-47de-9798-6e852b03c565 nodeName:}" failed. No retries permitted until 2026-01-22 06:09:15.775974143 +0000 UTC m=+1403.613099496 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/4d712958-1ece-47de-9798-6e852b03c565-config-data") pod "rabbitmq-cell1-server-0" (UID: "4d712958-1ece-47de-9798-6e852b03c565") : configmap "rabbitmq-cell1-config-data" not found Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.781260 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_a4b88c60-2edd-436c-996f-b8f07311f5ef/ovsdbserver-nb/0.log" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.781576 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.782161 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"a4b88c60-2edd-436c-996f-b8f07311f5ef","Type":"ContainerDied","Data":"8338bb35015dc88d234fc367a4444750fee638880a211c6b3c47d3ec80313356"} Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.791951 4933 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/root-account-create-update-kp2cn" secret="" err="secret \"galera-openstack-cell1-dockercfg-7z6wt\" not found" Jan 22 06:09:11 crc kubenswrapper[4933]: E0122 06:09:11.800398 4933 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 22 06:09:11 crc kubenswrapper[4933]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 22 06:09:11 crc kubenswrapper[4933]: Jan 22 06:09:11 crc kubenswrapper[4933]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 22 06:09:11 crc kubenswrapper[4933]: Jan 22 06:09:11 crc kubenswrapper[4933]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 22 06:09:11 crc kubenswrapper[4933]: Jan 22 06:09:11 crc kubenswrapper[4933]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 22 06:09:11 crc kubenswrapper[4933]: Jan 22 06:09:11 crc kubenswrapper[4933]: if [ -n "" ]; then Jan 22 06:09:11 crc kubenswrapper[4933]: GRANT_DATABASE="" Jan 22 06:09:11 crc kubenswrapper[4933]: else Jan 22 06:09:11 crc kubenswrapper[4933]: GRANT_DATABASE="*" Jan 22 06:09:11 crc kubenswrapper[4933]: fi Jan 22 06:09:11 crc kubenswrapper[4933]: Jan 22 06:09:11 crc kubenswrapper[4933]: # going for maximum compatibility here: Jan 22 06:09:11 crc kubenswrapper[4933]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 22 06:09:11 crc kubenswrapper[4933]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 22 06:09:11 crc kubenswrapper[4933]: # 3. 
create user with CREATE but then do all password and TLS with ALTER to Jan 22 06:09:11 crc kubenswrapper[4933]: # support updates Jan 22 06:09:11 crc kubenswrapper[4933]: Jan 22 06:09:11 crc kubenswrapper[4933]: $MYSQL_CMD < logger="UnhandledError" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.800892 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb84c6f9-457d-46df-a4de-b5bfe612e945-config-data" (OuterVolumeSpecName: "config-data") pod "eb84c6f9-457d-46df-a4de-b5bfe612e945" (UID: "eb84c6f9-457d-46df-a4de-b5bfe612e945"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:11 crc kubenswrapper[4933]: E0122 06:09:11.808698 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"openstack-cell1-mariadb-root-db-secret\\\" not found\"" pod="openstack/root-account-create-update-kp2cn" podUID="1d8e9d8c-961f-4dc5-84b8-51c486220cdc" Jan 22 06:09:11 crc kubenswrapper[4933]: E0122 06:09:11.810196 4933 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 22 06:09:11 crc kubenswrapper[4933]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 22 06:09:11 crc kubenswrapper[4933]: Jan 22 06:09:11 crc kubenswrapper[4933]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 22 06:09:11 crc kubenswrapper[4933]: Jan 22 06:09:11 crc kubenswrapper[4933]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 22 06:09:11 crc kubenswrapper[4933]: Jan 22 06:09:11 crc kubenswrapper[4933]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 22 06:09:11 crc kubenswrapper[4933]: Jan 22 06:09:11 crc kubenswrapper[4933]: if [ -n "barbican" ]; then Jan 22 06:09:11 crc kubenswrapper[4933]: GRANT_DATABASE="barbican" Jan 22 06:09:11 crc kubenswrapper[4933]: else Jan 22 06:09:11 crc kubenswrapper[4933]: GRANT_DATABASE="*" Jan 22 06:09:11 crc kubenswrapper[4933]: fi Jan 22 06:09:11 crc kubenswrapper[4933]: Jan 22 06:09:11 crc kubenswrapper[4933]: # going for maximum compatibility here: Jan 22 06:09:11 crc kubenswrapper[4933]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 22 06:09:11 crc kubenswrapper[4933]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 22 06:09:11 crc kubenswrapper[4933]: # 3. 
create user with CREATE but then do all password and TLS with ALTER to Jan 22 06:09:11 crc kubenswrapper[4933]: # support updates Jan 22 06:09:11 crc kubenswrapper[4933]: Jan 22 06:09:11 crc kubenswrapper[4933]: $MYSQL_CMD < logger="UnhandledError" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.811770 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5ddd577785-b24fs"] Jan 22 06:09:11 crc kubenswrapper[4933]: E0122 06:09:11.811822 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"barbican-db-secret\\\" not found\"" pod="openstack/barbican-e75b-account-create-update-tmgdt" podUID="841bd4c5-516a-406d-af7f-8d551b970cab" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.841722 4933 scope.go:117] "RemoveContainer" containerID="7561e4048595cee77f6047174945f8a81c575b242d3b3be183508c84bf12d15d" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.852550 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5ddd577785-b24fs"] Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.859565 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb84c6f9-457d-46df-a4de-b5bfe612e945-nova-novncproxy-tls-certs" (OuterVolumeSpecName: "nova-novncproxy-tls-certs") pod "eb84c6f9-457d-46df-a4de-b5bfe612e945" (UID: "eb84c6f9-457d-46df-a4de-b5bfe612e945"). InnerVolumeSpecName "nova-novncproxy-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.862157 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.886311 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb84c6f9-457d-46df-a4de-b5bfe612e945-vencrypt-tls-certs" (OuterVolumeSpecName: "vencrypt-tls-certs") pod "eb84c6f9-457d-46df-a4de-b5bfe612e945" (UID: "eb84c6f9-457d-46df-a4de-b5bfe612e945"). InnerVolumeSpecName "vencrypt-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.889713 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb84c6f9-457d-46df-a4de-b5bfe612e945-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.889752 4933 reconciler_common.go:293] "Volume detached for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/eb84c6f9-457d-46df-a4de-b5bfe612e945-nova-novncproxy-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.889761 4933 reconciler_common.go:293] "Volume detached for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/eb84c6f9-457d-46df-a4de-b5bfe612e945-vencrypt-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.897293 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.914920 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-67fd8f79cc-pb6vw" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.931055 4933 scope.go:117] "RemoveContainer" containerID="61aa1634a7490f2c19d0a61583367b3b595c012d49db045a0f91f868015c9e92" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.957316 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.969664 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 22 06:09:11 crc kubenswrapper[4933]: E0122 06:09:11.980297 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="727aeb5a63faa31a31c03450f9d3d8823575f9ab0abd3ff2f9a00a0f91ec8597" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 22 06:09:11 crc kubenswrapper[4933]: E0122 06:09:11.985096 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="727aeb5a63faa31a31c03450f9d3d8823575f9ab0abd3ff2f9a00a0f91ec8597" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 22 06:09:11 crc kubenswrapper[4933]: E0122 06:09:11.987631 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="727aeb5a63faa31a31c03450f9d3d8823575f9ab0abd3ff2f9a00a0f91ec8597" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 22 06:09:11 crc kubenswrapper[4933]: E0122 06:09:11.987700 4933 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="cf956626-51e3-4aff-b24b-4a553160327c" containerName="nova-scheduler-scheduler" Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.991810 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62dded9b-a123-4bd0-ab4c-8de7680be023-combined-ca-bundle\") pod \"62dded9b-a123-4bd0-ab4c-8de7680be023\" (UID: \"62dded9b-a123-4bd0-ab4c-8de7680be023\") " Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.991984 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zr542\" (UniqueName: \"kubernetes.io/projected/62dded9b-a123-4bd0-ab4c-8de7680be023-kube-api-access-zr542\") pod \"62dded9b-a123-4bd0-ab4c-8de7680be023\" (UID: \"62dded9b-a123-4bd0-ab4c-8de7680be023\") " Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.992498 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/62dded9b-a123-4bd0-ab4c-8de7680be023-etc-swift\") pod \"62dded9b-a123-4bd0-ab4c-8de7680be023\" (UID: \"62dded9b-a123-4bd0-ab4c-8de7680be023\") " Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.992545 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/62dded9b-a123-4bd0-ab4c-8de7680be023-internal-tls-certs\") pod \"62dded9b-a123-4bd0-ab4c-8de7680be023\" (UID: \"62dded9b-a123-4bd0-ab4c-8de7680be023\") " Jan 22 06:09:11 
crc kubenswrapper[4933]: I0122 06:09:11.993420 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/62dded9b-a123-4bd0-ab4c-8de7680be023-config-data\") pod \"62dded9b-a123-4bd0-ab4c-8de7680be023\" (UID: \"62dded9b-a123-4bd0-ab4c-8de7680be023\") " Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.994460 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/62dded9b-a123-4bd0-ab4c-8de7680be023-log-httpd\") pod \"62dded9b-a123-4bd0-ab4c-8de7680be023\" (UID: \"62dded9b-a123-4bd0-ab4c-8de7680be023\") " Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.994499 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/62dded9b-a123-4bd0-ab4c-8de7680be023-run-httpd\") pod \"62dded9b-a123-4bd0-ab4c-8de7680be023\" (UID: \"62dded9b-a123-4bd0-ab4c-8de7680be023\") " Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.994542 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/62dded9b-a123-4bd0-ab4c-8de7680be023-public-tls-certs\") pod \"62dded9b-a123-4bd0-ab4c-8de7680be023\" (UID: \"62dded9b-a123-4bd0-ab4c-8de7680be023\") " Jan 22 06:09:11 crc kubenswrapper[4933]: I0122 06:09:11.995042 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/62dded9b-a123-4bd0-ab4c-8de7680be023-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "62dded9b-a123-4bd0-ab4c-8de7680be023" (UID: "62dded9b-a123-4bd0-ab4c-8de7680be023"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:09:12 crc kubenswrapper[4933]: I0122 06:09:11.995555 4933 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/62dded9b-a123-4bd0-ab4c-8de7680be023-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:12 crc kubenswrapper[4933]: I0122 06:09:11.995575 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/62dded9b-a123-4bd0-ab4c-8de7680be023-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "62dded9b-a123-4bd0-ab4c-8de7680be023" (UID: "62dded9b-a123-4bd0-ab4c-8de7680be023"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:09:12 crc kubenswrapper[4933]: I0122 06:09:11.996800 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62dded9b-a123-4bd0-ab4c-8de7680be023-kube-api-access-zr542" (OuterVolumeSpecName: "kube-api-access-zr542") pod "62dded9b-a123-4bd0-ab4c-8de7680be023" (UID: "62dded9b-a123-4bd0-ab4c-8de7680be023"). InnerVolumeSpecName "kube-api-access-zr542". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:12 crc kubenswrapper[4933]: I0122 06:09:12.024850 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62dded9b-a123-4bd0-ab4c-8de7680be023-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "62dded9b-a123-4bd0-ab4c-8de7680be023" (UID: "62dded9b-a123-4bd0-ab4c-8de7680be023"). InnerVolumeSpecName "etc-swift". 
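[annotation] The "ExecSync cmd from runtime service failed ... container is stopping" errors earlier in this window are readiness probes still firing against a container that is already being torn down. The probed command, ["/usr/bin/pgrep","-r","DRST","nova-scheduler"], corresponds to an exec readiness probe roughly like the sketch below (only the command is taken from the log; the timing fields are assumptions, and the embedded field is named ProbeHandler in recent k8s.io/api releases, Handler in older ones).

```go
// Sketch of the exec readiness probe implied by the pgrep command in the log.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	probe := &corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{
			Exec: &corev1.ExecAction{
				// Exits 0 (ready) while a nova-scheduler process exists in
				// one of the run states D, R, S, or T.
				Command: []string{"/usr/bin/pgrep", "-r", "DRST", "nova-scheduler"},
			},
		},
		PeriodSeconds:    30, // assumption
		TimeoutSeconds:   5,  // assumption
		FailureThreshold: 3,  // assumption
	}
	fmt.Printf("readinessProbe exec: %v\n", probe.Exec.Command)
}
```

Once the runtime refuses new exec PIDs for a stopping container, the probe can only error, hence the repeated "Probe errored" for nova-scheduler-0.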
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:12 crc kubenswrapper[4933]: I0122 06:09:12.037011 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62dded9b-a123-4bd0-ab4c-8de7680be023-config-data" (OuterVolumeSpecName: "config-data") pod "62dded9b-a123-4bd0-ab4c-8de7680be023" (UID: "62dded9b-a123-4bd0-ab4c-8de7680be023"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:12 crc kubenswrapper[4933]: I0122 06:09:12.047209 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62dded9b-a123-4bd0-ab4c-8de7680be023-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "62dded9b-a123-4bd0-ab4c-8de7680be023" (UID: "62dded9b-a123-4bd0-ab4c-8de7680be023"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:12 crc kubenswrapper[4933]: I0122 06:09:12.063418 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62dded9b-a123-4bd0-ab4c-8de7680be023-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "62dded9b-a123-4bd0-ab4c-8de7680be023" (UID: "62dded9b-a123-4bd0-ab4c-8de7680be023"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:12 crc kubenswrapper[4933]: I0122 06:09:12.080723 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62dded9b-a123-4bd0-ab4c-8de7680be023-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "62dded9b-a123-4bd0-ab4c-8de7680be023" (UID: "62dded9b-a123-4bd0-ab4c-8de7680be023"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:12 crc kubenswrapper[4933]: I0122 06:09:12.097542 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zr542\" (UniqueName: \"kubernetes.io/projected/62dded9b-a123-4bd0-ab4c-8de7680be023-kube-api-access-zr542\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:12 crc kubenswrapper[4933]: I0122 06:09:12.097584 4933 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/62dded9b-a123-4bd0-ab4c-8de7680be023-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:12 crc kubenswrapper[4933]: I0122 06:09:12.097594 4933 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/62dded9b-a123-4bd0-ab4c-8de7680be023-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:12 crc kubenswrapper[4933]: I0122 06:09:12.097603 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/62dded9b-a123-4bd0-ab4c-8de7680be023-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:12 crc kubenswrapper[4933]: I0122 06:09:12.097612 4933 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/62dded9b-a123-4bd0-ab4c-8de7680be023-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:12 crc kubenswrapper[4933]: I0122 06:09:12.097620 4933 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/62dded9b-a123-4bd0-ab4c-8de7680be023-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:12 crc kubenswrapper[4933]: E0122 06:09:12.097616 4933 configmap.go:193] Couldn't get configMap 
openstack/openstack-cell1-scripts: configmap "openstack-cell1-scripts" not found Jan 22 06:09:12 crc kubenswrapper[4933]: E0122 06:09:12.097690 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1d8e9d8c-961f-4dc5-84b8-51c486220cdc-operator-scripts podName:1d8e9d8c-961f-4dc5-84b8-51c486220cdc nodeName:}" failed. No retries permitted until 2026-01-22 06:09:14.097669955 +0000 UTC m=+1401.934795388 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/1d8e9d8c-961f-4dc5-84b8-51c486220cdc-operator-scripts") pod "root-account-create-update-kp2cn" (UID: "1d8e9d8c-961f-4dc5-84b8-51c486220cdc") : configmap "openstack-cell1-scripts" not found Jan 22 06:09:12 crc kubenswrapper[4933]: I0122 06:09:12.097629 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62dded9b-a123-4bd0-ab4c-8de7680be023-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:12 crc kubenswrapper[4933]: I0122 06:09:12.131730 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 22 06:09:12 crc kubenswrapper[4933]: I0122 06:09:12.147430 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 22 06:09:12 crc kubenswrapper[4933]: I0122 06:09:12.159737 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.198981 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/943da5ba-d325-4686-871d-802b7730d02a-galera-tls-certs\") pod \"943da5ba-d325-4686-871d-802b7730d02a\" (UID: \"943da5ba-d325-4686-871d-802b7730d02a\") " Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.199232 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/943da5ba-d325-4686-871d-802b7730d02a-kolla-config\") pod \"943da5ba-d325-4686-871d-802b7730d02a\" (UID: \"943da5ba-d325-4686-871d-802b7730d02a\") " Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.199266 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"943da5ba-d325-4686-871d-802b7730d02a\" (UID: \"943da5ba-d325-4686-871d-802b7730d02a\") " Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.199299 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/943da5ba-d325-4686-871d-802b7730d02a-config-data-default\") pod \"943da5ba-d325-4686-871d-802b7730d02a\" (UID: \"943da5ba-d325-4686-871d-802b7730d02a\") " Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.199324 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/943da5ba-d325-4686-871d-802b7730d02a-combined-ca-bundle\") pod \"943da5ba-d325-4686-871d-802b7730d02a\" (UID: \"943da5ba-d325-4686-871d-802b7730d02a\") " Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.199351 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wprwh\" (UniqueName: \"kubernetes.io/projected/943da5ba-d325-4686-871d-802b7730d02a-kube-api-access-wprwh\") pod 
\"943da5ba-d325-4686-871d-802b7730d02a\" (UID: \"943da5ba-d325-4686-871d-802b7730d02a\") " Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.199381 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/943da5ba-d325-4686-871d-802b7730d02a-operator-scripts\") pod \"943da5ba-d325-4686-871d-802b7730d02a\" (UID: \"943da5ba-d325-4686-871d-802b7730d02a\") " Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.199451 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/943da5ba-d325-4686-871d-802b7730d02a-config-data-generated\") pod \"943da5ba-d325-4686-871d-802b7730d02a\" (UID: \"943da5ba-d325-4686-871d-802b7730d02a\") " Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.201803 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/943da5ba-d325-4686-871d-802b7730d02a-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "943da5ba-d325-4686-871d-802b7730d02a" (UID: "943da5ba-d325-4686-871d-802b7730d02a"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.202925 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/943da5ba-d325-4686-871d-802b7730d02a-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "943da5ba-d325-4686-871d-802b7730d02a" (UID: "943da5ba-d325-4686-871d-802b7730d02a"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.203628 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/943da5ba-d325-4686-871d-802b7730d02a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "943da5ba-d325-4686-871d-802b7730d02a" (UID: "943da5ba-d325-4686-871d-802b7730d02a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.203750 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/943da5ba-d325-4686-871d-802b7730d02a-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "943da5ba-d325-4686-871d-802b7730d02a" (UID: "943da5ba-d325-4686-871d-802b7730d02a"). InnerVolumeSpecName "config-data-generated". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.210362 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/943da5ba-d325-4686-871d-802b7730d02a-kube-api-access-wprwh" (OuterVolumeSpecName: "kube-api-access-wprwh") pod "943da5ba-d325-4686-871d-802b7730d02a" (UID: "943da5ba-d325-4686-871d-802b7730d02a"). InnerVolumeSpecName "kube-api-access-wprwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.212128 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-548e-account-create-update-c8h9b" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.217471 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "mysql-db") pod "943da5ba-d325-4686-871d-802b7730d02a" (UID: "943da5ba-d325-4686-871d-802b7730d02a"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.238683 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/943da5ba-d325-4686-871d-802b7730d02a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "943da5ba-d325-4686-871d-802b7730d02a" (UID: "943da5ba-d325-4686-871d-802b7730d02a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.266936 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/943da5ba-d325-4686-871d-802b7730d02a-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "943da5ba-d325-4686-871d-802b7730d02a" (UID: "943da5ba-d325-4686-871d-802b7730d02a"). InnerVolumeSpecName "galera-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.301500 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9cb8166a-621e-4e96-b4e7-4d4fca32a727-operator-scripts\") pod \"9cb8166a-621e-4e96-b4e7-4d4fca32a727\" (UID: \"9cb8166a-621e-4e96-b4e7-4d4fca32a727\") " Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.301734 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ft7gx\" (UniqueName: \"kubernetes.io/projected/9cb8166a-621e-4e96-b4e7-4d4fca32a727-kube-api-access-ft7gx\") pod \"9cb8166a-621e-4e96-b4e7-4d4fca32a727\" (UID: \"9cb8166a-621e-4e96-b4e7-4d4fca32a727\") " Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.302035 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9cb8166a-621e-4e96-b4e7-4d4fca32a727-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9cb8166a-621e-4e96-b4e7-4d4fca32a727" (UID: "9cb8166a-621e-4e96-b4e7-4d4fca32a727"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.302159 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9cb8166a-621e-4e96-b4e7-4d4fca32a727-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.302171 4933 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/943da5ba-d325-4686-871d-802b7730d02a-kolla-config\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.302188 4933 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.302198 4933 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/943da5ba-d325-4686-871d-802b7730d02a-config-data-default\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.302209 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/943da5ba-d325-4686-871d-802b7730d02a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.302217 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wprwh\" (UniqueName: \"kubernetes.io/projected/943da5ba-d325-4686-871d-802b7730d02a-kube-api-access-wprwh\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.302226 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/943da5ba-d325-4686-871d-802b7730d02a-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.302234 4933 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/943da5ba-d325-4686-871d-802b7730d02a-config-data-generated\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.302243 4933 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/943da5ba-d325-4686-871d-802b7730d02a-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.309339 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9cb8166a-621e-4e96-b4e7-4d4fca32a727-kube-api-access-ft7gx" (OuterVolumeSpecName: "kube-api-access-ft7gx") pod "9cb8166a-621e-4e96-b4e7-4d4fca32a727" (UID: "9cb8166a-621e-4e96-b4e7-4d4fca32a727"). InnerVolumeSpecName "kube-api-access-ft7gx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.322048 4933 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.404019 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ft7gx\" (UniqueName: \"kubernetes.io/projected/9cb8166a-621e-4e96-b4e7-4d4fca32a727-kube-api-access-ft7gx\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.404045 4933 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.483273 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-633c-account-create-update-bklwf" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.499980 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-b23f-account-create-update-hfxzc" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.510101 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2160e11a-468c-4bf7-9fdc-e579f3ecf896" path="/var/lib/kubelet/pods/2160e11a-468c-4bf7-9fdc-e579f3ecf896/volumes" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.510854 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="810720aa-b861-48e5-bd66-b1544f4f683a" path="/var/lib/kubelet/pods/810720aa-b861-48e5-bd66-b1544f4f683a/volumes" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.511476 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9266f02b-3fef-4566-a9df-9b570f24d845" path="/var/lib/kubelet/pods/9266f02b-3fef-4566-a9df-9b570f24d845/volumes" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.513562 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-98e3-account-create-update-hg9mt" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.514159 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a4b88c60-2edd-436c-996f-b8f07311f5ef" path="/var/lib/kubelet/pods/a4b88c60-2edd-436c-996f-b8f07311f5ef/volumes" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.514881 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e" path="/var/lib/kubelet/pods/a4f22e27-b3bb-4eb2-b037-d7acab0f7c4e/volumes" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.515473 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eb84c6f9-457d-46df-a4de-b5bfe612e945" path="/var/lib/kubelet/pods/eb84c6f9-457d-46df-a4de-b5bfe612e945/volumes" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.516909 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a" path="/var/lib/kubelet/pods/eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a/volumes" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.517804 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b41ac3-d05d-4bec-952f-c362cb5aad64" path="/var/lib/kubelet/pods/f4b41ac3-d05d-4bec-952f-c362cb5aad64/volumes" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.520093 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-f8f6-account-create-update-n4zdn" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.598034 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.598415 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="578144dd-08a3-4c4b-8dd3-38ebe2d4dde8" containerName="proxy-httpd" containerID="cri-o://7759c053d18a0cfde4caa05a9fcd933f06f5e1f05af3297873fc5f9ea3a8ae61" gracePeriod=30 Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.598440 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="578144dd-08a3-4c4b-8dd3-38ebe2d4dde8" containerName="ceilometer-notification-agent" containerID="cri-o://83f50dc0f110857dd2bcd69ddb9eb051bf30ec68e850a625d293c6f90c8de031" gracePeriod=30 Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.598441 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="578144dd-08a3-4c4b-8dd3-38ebe2d4dde8" containerName="sg-core" containerID="cri-o://1efb8a0dd4a5e96297be6dd03c40644d0d87af777a8a5b3ce08724ee8ab4e337" gracePeriod=30 Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.598552 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="578144dd-08a3-4c4b-8dd3-38ebe2d4dde8" containerName="ceilometer-central-agent" containerID="cri-o://baeb972a644576e51e82e73e63e77cc4fc3796f87a5f81af36bd84230ffbbb39" gracePeriod=30 Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.604657 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.604886 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="4886348b-6078-41dc-8fab-a8e2e1c4898d" containerName="kube-state-metrics" containerID="cri-o://35c37c5d066e31beb5f68171ba88451347a51fbe1f8550fd13c3cdca949ee224" gracePeriod=30 Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.628918 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.632781 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c2022299-4089-4740-ae8f-50ca5b4be2b5-operator-scripts\") pod \"c2022299-4089-4740-ae8f-50ca5b4be2b5\" (UID: \"c2022299-4089-4740-ae8f-50ca5b4be2b5\") " Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.632823 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/729db25e-5864-4305-99dd-24ce61f45029-operator-scripts\") pod \"729db25e-5864-4305-99dd-24ce61f45029\" (UID: \"729db25e-5864-4305-99dd-24ce61f45029\") " Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.632855 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xmvs6\" (UniqueName: \"kubernetes.io/projected/729db25e-5864-4305-99dd-24ce61f45029-kube-api-access-xmvs6\") pod \"729db25e-5864-4305-99dd-24ce61f45029\" (UID: \"729db25e-5864-4305-99dd-24ce61f45029\") " Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.632942 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e581b2e9-57f2-452a-956c-4e8c5d75b3fb-operator-scripts\") pod \"e581b2e9-57f2-452a-956c-4e8c5d75b3fb\" (UID: \"e581b2e9-57f2-452a-956c-4e8c5d75b3fb\") " Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.633150 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xb86w\" (UniqueName: \"kubernetes.io/projected/e581b2e9-57f2-452a-956c-4e8c5d75b3fb-kube-api-access-xb86w\") pod \"e581b2e9-57f2-452a-956c-4e8c5d75b3fb\" (UID: \"e581b2e9-57f2-452a-956c-4e8c5d75b3fb\") " Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.633178 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rw2pc\" (UniqueName: \"kubernetes.io/projected/c2022299-4089-4740-ae8f-50ca5b4be2b5-kube-api-access-rw2pc\") pod \"c2022299-4089-4740-ae8f-50ca5b4be2b5\" (UID: \"c2022299-4089-4740-ae8f-50ca5b4be2b5\") " Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.633213 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xsb6j\" (UniqueName: \"kubernetes.io/projected/2c6cfc4a-371c-496c-93dc-cdd3ea57e2d7-kube-api-access-xsb6j\") pod \"2c6cfc4a-371c-496c-93dc-cdd3ea57e2d7\" (UID: \"2c6cfc4a-371c-496c-93dc-cdd3ea57e2d7\") " Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.633454 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2c6cfc4a-371c-496c-93dc-cdd3ea57e2d7-operator-scripts\") pod \"2c6cfc4a-371c-496c-93dc-cdd3ea57e2d7\" (UID: \"2c6cfc4a-371c-496c-93dc-cdd3ea57e2d7\") " Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.636882 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c2022299-4089-4740-ae8f-50ca5b4be2b5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c2022299-4089-4740-ae8f-50ca5b4be2b5" (UID: "c2022299-4089-4740-ae8f-50ca5b4be2b5"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.636618 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c6cfc4a-371c-496c-93dc-cdd3ea57e2d7-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2c6cfc4a-371c-496c-93dc-cdd3ea57e2d7" (UID: "2c6cfc4a-371c-496c-93dc-cdd3ea57e2d7"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.637323 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/729db25e-5864-4305-99dd-24ce61f45029-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "729db25e-5864-4305-99dd-24ce61f45029" (UID: "729db25e-5864-4305-99dd-24ce61f45029"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.637385 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e581b2e9-57f2-452a-956c-4e8c5d75b3fb-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e581b2e9-57f2-452a-956c-4e8c5d75b3fb" (UID: "e581b2e9-57f2-452a-956c-4e8c5d75b3fb"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.647599 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/729db25e-5864-4305-99dd-24ce61f45029-kube-api-access-xmvs6" (OuterVolumeSpecName: "kube-api-access-xmvs6") pod "729db25e-5864-4305-99dd-24ce61f45029" (UID: "729db25e-5864-4305-99dd-24ce61f45029"). InnerVolumeSpecName "kube-api-access-xmvs6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.647650 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c2022299-4089-4740-ae8f-50ca5b4be2b5-kube-api-access-rw2pc" (OuterVolumeSpecName: "kube-api-access-rw2pc") pod "c2022299-4089-4740-ae8f-50ca5b4be2b5" (UID: "c2022299-4089-4740-ae8f-50ca5b4be2b5"). InnerVolumeSpecName "kube-api-access-rw2pc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.656935 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c6cfc4a-371c-496c-93dc-cdd3ea57e2d7-kube-api-access-xsb6j" (OuterVolumeSpecName: "kube-api-access-xsb6j") pod "2c6cfc4a-371c-496c-93dc-cdd3ea57e2d7" (UID: "2c6cfc4a-371c-496c-93dc-cdd3ea57e2d7"). InnerVolumeSpecName "kube-api-access-xsb6j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.670770 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-zqzbb"] Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:12.671177 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a" containerName="openstack-network-exporter" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.671189 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a" containerName="openstack-network-exporter" Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:12.671205 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="810720aa-b861-48e5-bd66-b1544f4f683a" containerName="init" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.671214 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="810720aa-b861-48e5-bd66-b1544f4f683a" containerName="init" Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:12.671224 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2160e11a-468c-4bf7-9fdc-e579f3ecf896" containerName="ovn-controller" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.671231 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="2160e11a-468c-4bf7-9fdc-e579f3ecf896" containerName="ovn-controller" Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:12.671246 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62dded9b-a123-4bd0-ab4c-8de7680be023" containerName="proxy-server" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.671251 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="62dded9b-a123-4bd0-ab4c-8de7680be023" containerName="proxy-server" Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:12.671263 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="943da5ba-d325-4686-871d-802b7730d02a" containerName="mysql-bootstrap" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.671268 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="943da5ba-d325-4686-871d-802b7730d02a" containerName="mysql-bootstrap" Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:12.671283 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb84c6f9-457d-46df-a4de-b5bfe612e945" containerName="nova-cell1-novncproxy-novncproxy" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.671289 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb84c6f9-457d-46df-a4de-b5bfe612e945" containerName="nova-cell1-novncproxy-novncproxy" Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:12.671298 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4b88c60-2edd-436c-996f-b8f07311f5ef" containerName="ovsdbserver-nb" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.671304 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4b88c60-2edd-436c-996f-b8f07311f5ef" containerName="ovsdbserver-nb" Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:12.671330 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b41ac3-d05d-4bec-952f-c362cb5aad64" containerName="openstack-network-exporter" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.671342 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b41ac3-d05d-4bec-952f-c362cb5aad64" containerName="openstack-network-exporter" Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:12.671359 4933 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="f4b41ac3-d05d-4bec-952f-c362cb5aad64" containerName="ovsdbserver-sb" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.671367 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b41ac3-d05d-4bec-952f-c362cb5aad64" containerName="ovsdbserver-sb" Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:12.671385 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62dded9b-a123-4bd0-ab4c-8de7680be023" containerName="proxy-httpd" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.671392 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="62dded9b-a123-4bd0-ab4c-8de7680be023" containerName="proxy-httpd" Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:12.671401 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a944aa66-1c67-4661-968f-e976494cf1eb" containerName="nova-cell0-conductor-conductor" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.671407 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="a944aa66-1c67-4661-968f-e976494cf1eb" containerName="nova-cell0-conductor-conductor" Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:12.671418 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="810720aa-b861-48e5-bd66-b1544f4f683a" containerName="dnsmasq-dns" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.671424 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="810720aa-b861-48e5-bd66-b1544f4f683a" containerName="dnsmasq-dns" Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:12.671436 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="943da5ba-d325-4686-871d-802b7730d02a" containerName="galera" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.671441 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="943da5ba-d325-4686-871d-802b7730d02a" containerName="galera" Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:12.671453 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4b88c60-2edd-436c-996f-b8f07311f5ef" containerName="openstack-network-exporter" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.671459 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4b88c60-2edd-436c-996f-b8f07311f5ef" containerName="openstack-network-exporter" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.671607 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="810720aa-b861-48e5-bd66-b1544f4f683a" containerName="dnsmasq-dns" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.671619 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb84c6f9-457d-46df-a4de-b5bfe612e945" containerName="nova-cell1-novncproxy-novncproxy" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.671629 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b41ac3-d05d-4bec-952f-c362cb5aad64" containerName="ovsdbserver-sb" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.671638 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="2160e11a-468c-4bf7-9fdc-e579f3ecf896" containerName="ovn-controller" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.671646 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4b88c60-2edd-436c-996f-b8f07311f5ef" containerName="ovsdbserver-nb" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.671658 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="a944aa66-1c67-4661-968f-e976494cf1eb" containerName="nova-cell0-conductor-conductor" Jan 22 06:09:13 crc 
Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.671678 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4b88c60-2edd-436c-996f-b8f07311f5ef" containerName="openstack-network-exporter"
Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.671688 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb8b9caf-9b8a-4fc5-b9cd-6704c9fb2e8a" containerName="openstack-network-exporter"
Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.671695 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="62dded9b-a123-4bd0-ab4c-8de7680be023" containerName="proxy-httpd"
Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.671702 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="62dded9b-a123-4bd0-ab4c-8de7680be023" containerName="proxy-server"
Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.671709 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b41ac3-d05d-4bec-952f-c362cb5aad64" containerName="openstack-network-exporter"
Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.672243 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-zqzbb"
Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.672699 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e581b2e9-57f2-452a-956c-4e8c5d75b3fb-kube-api-access-xb86w" (OuterVolumeSpecName: "kube-api-access-xb86w") pod "e581b2e9-57f2-452a-956c-4e8c5d75b3fb" (UID: "e581b2e9-57f2-452a-956c-4e8c5d75b3fb"). InnerVolumeSpecName "kube-api-access-xb86w". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.674186 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.680624 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-zqzbb"] Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.757456 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a944aa66-1c67-4661-968f-e976494cf1eb-config-data\") pod \"a944aa66-1c67-4661-968f-e976494cf1eb\" (UID: \"a944aa66-1c67-4661-968f-e976494cf1eb\") " Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.757497 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z2n8x\" (UniqueName: \"kubernetes.io/projected/a944aa66-1c67-4661-968f-e976494cf1eb-kube-api-access-z2n8x\") pod \"a944aa66-1c67-4661-968f-e976494cf1eb\" (UID: \"a944aa66-1c67-4661-968f-e976494cf1eb\") " Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.757585 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a944aa66-1c67-4661-968f-e976494cf1eb-combined-ca-bundle\") pod \"a944aa66-1c67-4661-968f-e976494cf1eb\" (UID: \"a944aa66-1c67-4661-968f-e976494cf1eb\") " Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.757861 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/67bcc01e-7a5e-4f76-889e-d6ed60745cf4-operator-scripts\") pod \"root-account-create-update-zqzbb\" (UID: \"67bcc01e-7a5e-4f76-889e-d6ed60745cf4\") " pod="openstack/root-account-create-update-zqzbb" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.757929 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vrm5j\" (UniqueName: \"kubernetes.io/projected/67bcc01e-7a5e-4f76-889e-d6ed60745cf4-kube-api-access-vrm5j\") pod \"root-account-create-update-zqzbb\" (UID: \"67bcc01e-7a5e-4f76-889e-d6ed60745cf4\") " pod="openstack/root-account-create-update-zqzbb" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.758014 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xb86w\" (UniqueName: \"kubernetes.io/projected/e581b2e9-57f2-452a-956c-4e8c5d75b3fb-kube-api-access-xb86w\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.758026 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rw2pc\" (UniqueName: \"kubernetes.io/projected/c2022299-4089-4740-ae8f-50ca5b4be2b5-kube-api-access-rw2pc\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.758037 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xsb6j\" (UniqueName: \"kubernetes.io/projected/2c6cfc4a-371c-496c-93dc-cdd3ea57e2d7-kube-api-access-xsb6j\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.758046 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2c6cfc4a-371c-496c-93dc-cdd3ea57e2d7-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.758054 4933 reconciler_common.go:293] "Volume detached 
for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c2022299-4089-4740-ae8f-50ca5b4be2b5-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.758062 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/729db25e-5864-4305-99dd-24ce61f45029-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.758091 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xmvs6\" (UniqueName: \"kubernetes.io/projected/729db25e-5864-4305-99dd-24ce61f45029-kube-api-access-xmvs6\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.758101 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e581b2e9-57f2-452a-956c-4e8c5d75b3fb-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.771765 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-dc32-account-create-update-nhclt"] Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.789290 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a944aa66-1c67-4661-968f-e976494cf1eb-kube-api-access-z2n8x" (OuterVolumeSpecName: "kube-api-access-z2n8x") pod "a944aa66-1c67-4661-968f-e976494cf1eb" (UID: "a944aa66-1c67-4661-968f-e976494cf1eb"). InnerVolumeSpecName "kube-api-access-z2n8x". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.831135 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"] Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.831370 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/memcached-0" podUID="e0d0b42d-5a68-46ba-a0de-f26c8dab1af8" containerName="memcached" containerID="cri-o://c8b3c82c4b888183709e4c809da692746a4eb74efe1e21ce5e8fbd16fb29ade5" gracePeriod=30 Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.859677 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vrm5j\" (UniqueName: \"kubernetes.io/projected/67bcc01e-7a5e-4f76-889e-d6ed60745cf4-kube-api-access-vrm5j\") pod \"root-account-create-update-zqzbb\" (UID: \"67bcc01e-7a5e-4f76-889e-d6ed60745cf4\") " pod="openstack/root-account-create-update-zqzbb" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.859837 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/67bcc01e-7a5e-4f76-889e-d6ed60745cf4-operator-scripts\") pod \"root-account-create-update-zqzbb\" (UID: \"67bcc01e-7a5e-4f76-889e-d6ed60745cf4\") " pod="openstack/root-account-create-update-zqzbb" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.859895 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z2n8x\" (UniqueName: \"kubernetes.io/projected/a944aa66-1c67-4661-968f-e976494cf1eb-kube-api-access-z2n8x\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.875040 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/67bcc01e-7a5e-4f76-889e-d6ed60745cf4-operator-scripts\") pod \"root-account-create-update-zqzbb\" (UID: \"67bcc01e-7a5e-4f76-889e-d6ed60745cf4\") " 
pod="openstack/root-account-create-update-zqzbb" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.890217 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-dc32-account-create-update-nhclt"] Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.893209 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a944aa66-1c67-4661-968f-e976494cf1eb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a944aa66-1c67-4661-968f-e976494cf1eb" (UID: "a944aa66-1c67-4661-968f-e976494cf1eb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.895289 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-633c-account-create-update-bklwf" event={"ID":"729db25e-5864-4305-99dd-24ce61f45029","Type":"ContainerDied","Data":"c233fabf2515748b2408ec869d70b5f62bc26742f8dddc1fd6fd491d63916ae3"} Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.895368 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-633c-account-create-update-bklwf" Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:12.895375 4933 projected.go:194] Error preparing data for projected volume kube-api-access-vrm5j for pod openstack/root-account-create-update-zqzbb: failed to fetch token: serviceaccounts "galera-openstack" not found Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:12.895443 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/67bcc01e-7a5e-4f76-889e-d6ed60745cf4-kube-api-access-vrm5j podName:67bcc01e-7a5e-4f76-889e-d6ed60745cf4 nodeName:}" failed. No retries permitted until 2026-01-22 06:09:13.39542584 +0000 UTC m=+1401.232551193 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-vrm5j" (UniqueName: "kubernetes.io/projected/67bcc01e-7a5e-4f76-889e-d6ed60745cf4-kube-api-access-vrm5j") pod "root-account-create-update-zqzbb" (UID: "67bcc01e-7a5e-4f76-889e-d6ed60745cf4") : failed to fetch token: serviceaccounts "galera-openstack" not found Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.909161 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-dc32-account-create-update-26chh"] Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.910612 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-dc32-account-create-update-26chh" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.921634 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.922338 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-zstt6"] Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.923133 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-67fd8f79cc-pb6vw" event={"ID":"62dded9b-a123-4bd0-ab4c-8de7680be023","Type":"ContainerDied","Data":"0f49032ef93cc426d09840c8c0816a8c8e3a6a7ec82dcc55f9af579a1026986a"} Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.923160 4933 scope.go:117] "RemoveContainer" containerID="88766015ee280911eb0e2545ab5037d409220a2d72a53bab39fa2849ee4efc90" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.923280 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-67fd8f79cc-pb6vw" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.923372 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a944aa66-1c67-4661-968f-e976494cf1eb-config-data" (OuterVolumeSpecName: "config-data") pod "a944aa66-1c67-4661-968f-e976494cf1eb" (UID: "a944aa66-1c67-4661-968f-e976494cf1eb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.945199 4933 generic.go:334] "Generic (PLEG): container finished" podID="4886348b-6078-41dc-8fab-a8e2e1c4898d" containerID="35c37c5d066e31beb5f68171ba88451347a51fbe1f8550fd13c3cdca949ee224" exitCode=2 Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.945250 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"4886348b-6078-41dc-8fab-a8e2e1c4898d","Type":"ContainerDied","Data":"35c37c5d066e31beb5f68171ba88451347a51fbe1f8550fd13c3cdca949ee224"} Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.961067 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a944aa66-1c67-4661-968f-e976494cf1eb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.961098 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a944aa66-1c67-4661-968f-e976494cf1eb-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.961170 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-98e3-account-create-update-hg9mt" event={"ID":"e581b2e9-57f2-452a-956c-4e8c5d75b3fb","Type":"ContainerDied","Data":"8c01f9d2210219028262dcb419924ef903622151f70fcc4e673236b39ae25272"} Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.961227 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-98e3-account-create-update-hg9mt" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.997296 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-b23f-account-create-update-hfxzc" event={"ID":"2c6cfc4a-371c-496c-93dc-cdd3ea57e2d7","Type":"ContainerDied","Data":"91b93531d4715c42b17a167c922c48c0d916bfc35003d71b5f81a9401598714a"} Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:12.997388 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-b23f-account-create-update-hfxzc" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.020627 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-zstt6"] Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.056214 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-dc32-account-create-update-26chh"] Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:13.060195 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe is running failed: container process not found" containerID="4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:13.060701 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe is running failed: container process not found" containerID="4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:13.061067 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe is running failed: container process not found" containerID="4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:13.061208 4933 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-rwb6s" podUID="12629e2f-7d6e-417c-a8df-c15b7a3e794e" containerName="ovsdb-server" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.062264 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jtbjc\" (UniqueName: \"kubernetes.io/projected/833ac15e-6498-4beb-a0da-b8e600653e3e-kube-api-access-jtbjc\") pod \"keystone-dc32-account-create-update-26chh\" (UID: \"833ac15e-6498-4beb-a0da-b8e600653e3e\") " pod="openstack/keystone-dc32-account-create-update-26chh" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.062363 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/833ac15e-6498-4beb-a0da-b8e600653e3e-operator-scripts\") pod \"keystone-dc32-account-create-update-26chh\" (UID: \"833ac15e-6498-4beb-a0da-b8e600653e3e\") " pod="openstack/keystone-dc32-account-create-update-26chh" Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:13.062516 4933 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:13.062546 4933 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/configmap/47299478-bcfd-4f21-a56c-efcf7b167999-config-data podName:47299478-bcfd-4f21-a56c-efcf7b167999 nodeName:}" failed. No retries permitted until 2026-01-22 06:09:17.06253567 +0000 UTC m=+1404.899661013 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/47299478-bcfd-4f21-a56c-efcf7b167999-config-data") pod "rabbitmq-server-0" (UID: "47299478-bcfd-4f21-a56c-efcf7b167999") : configmap "rabbitmq-config-data" not found Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.065986 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-bvb58"] Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.071641 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-bvb58"] Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.079590 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-7d459c58f9-bc2hf"] Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.079880 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/keystone-7d459c58f9-bc2hf" podUID="c16f2ab8-68b3-43fa-a862-c182aaa3dc23" containerName="keystone-api" containerID="cri-o://abbc72b7cf0aa36b4904038dc83404e06c706971f42e299cdcaa546f6ea3f0e7" gracePeriod=30 Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:13.089431 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="fdbc1e77ca1a1d82a896e3c18520e64cbf781bfdafe1433a48e8546525073c90" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.089744 4933 generic.go:334] "Generic (PLEG): container finished" podID="0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9" containerID="e136629b351ca188667a0d656d1460f094277accee28f74429184e904516ae0c" exitCode=0 Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.089816 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9","Type":"ContainerDied","Data":"e136629b351ca188667a0d656d1460f094277accee28f74429184e904516ae0c"} Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:13.095884 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="fdbc1e77ca1a1d82a896e3c18520e64cbf781bfdafe1433a48e8546525073c90" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.104230 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-548e-account-create-update-c8h9b" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.104237 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"] Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.104836 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-548e-account-create-update-c8h9b" event={"ID":"9cb8166a-621e-4e96-b4e7-4d4fca32a727","Type":"ContainerDied","Data":"ff823bdb91836445cb31164596d700ceae160b48349d6e6920accf1be2569f8b"} Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.113487 4933 scope.go:117] "RemoveContainer" containerID="23a7c678de86633dca6c9c40d455ce3ac68ebfa5501131949cd43ab694fb34bd" Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:13.122048 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="fdbc1e77ca1a1d82a896e3c18520e64cbf781bfdafe1433a48e8546525073c90" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:13.122131 4933 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-rwb6s" podUID="12629e2f-7d6e-417c-a8df-c15b7a3e794e" containerName="ovs-vswitchd" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.136026 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-internal-api-0" podUID="a2bcbc4b-30c4-4ec8-81bf-6cba18171506" containerName="glance-httpd" probeResult="failure" output="Get \"https://10.217.0.179:9292/healthcheck\": read tcp 10.217.0.2:42450->10.217.0.179:9292: read: connection reset by peer" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.136207 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-internal-api-0" podUID="a2bcbc4b-30c4-4ec8-81bf-6cba18171506" containerName="glance-log" probeResult="failure" output="Get \"https://10.217.0.179:9292/healthcheck\": read tcp 10.217.0.2:42462->10.217.0.179:9292: read: connection reset by peer" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.145108 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.145153 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"943da5ba-d325-4686-871d-802b7730d02a","Type":"ContainerDied","Data":"2a7006cf082e7bc070441078492a82c87a212d57dd7e3883a7c78d26a958b0a5"} Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.161411 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-f8f6-account-create-update-n4zdn" event={"ID":"c2022299-4089-4740-ae8f-50ca5b4be2b5","Type":"ContainerDied","Data":"754300a7fd947cddfed9509af477dc7a3145c7fd9dcaaa5740f2c768f33efa5c"} Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.161508 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-f8f6-account-create-update-n4zdn" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.165451 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/833ac15e-6498-4beb-a0da-b8e600653e3e-operator-scripts\") pod \"keystone-dc32-account-create-update-26chh\" (UID: \"833ac15e-6498-4beb-a0da-b8e600653e3e\") " pod="openstack/keystone-dc32-account-create-update-26chh" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.166745 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jtbjc\" (UniqueName: \"kubernetes.io/projected/833ac15e-6498-4beb-a0da-b8e600653e3e-kube-api-access-jtbjc\") pod \"keystone-dc32-account-create-update-26chh\" (UID: \"833ac15e-6498-4beb-a0da-b8e600653e3e\") " pod="openstack/keystone-dc32-account-create-update-26chh" Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:13.167349 4933 secret.go:188] Couldn't get secret openstack/cinder-config-data: secret "cinder-config-data" not found Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:13.167392 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-config-data podName:0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9 nodeName:}" failed. No retries permitted until 2026-01-22 06:09:17.167379359 +0000 UTC m=+1405.004504712 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-config-data") pod "cinder-scheduler-0" (UID: "0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9") : secret "cinder-config-data" not found Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:13.167747 4933 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:13.167779 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/833ac15e-6498-4beb-a0da-b8e600653e3e-operator-scripts podName:833ac15e-6498-4beb-a0da-b8e600653e3e nodeName:}" failed. No retries permitted until 2026-01-22 06:09:13.667771399 +0000 UTC m=+1401.504896752 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/833ac15e-6498-4beb-a0da-b8e600653e3e-operator-scripts") pod "keystone-dc32-account-create-update-26chh" (UID: "833ac15e-6498-4beb-a0da-b8e600653e3e") : configmap "openstack-scripts" not found Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:13.167840 4933 secret.go:188] Couldn't get secret openstack/cinder-scheduler-config-data: secret "cinder-scheduler-config-data" not found Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:13.167863 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-config-data-custom podName:0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9 nodeName:}" failed. No retries permitted until 2026-01-22 06:09:17.167857661 +0000 UTC m=+1405.004983014 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "config-data-custom" (UniqueName: "kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-config-data-custom") pod "cinder-scheduler-0" (UID: "0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9") : secret "cinder-scheduler-config-data" not found Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:13.169636 4933 projected.go:194] Error preparing data for projected volume kube-api-access-jtbjc for pod openstack/keystone-dc32-account-create-update-26chh: failed to fetch token: serviceaccounts "galera-openstack" not found Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:13.169681 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/833ac15e-6498-4beb-a0da-b8e600653e3e-kube-api-access-jtbjc podName:833ac15e-6498-4beb-a0da-b8e600653e3e nodeName:}" failed. No retries permitted until 2026-01-22 06:09:13.669671965 +0000 UTC m=+1401.506797318 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-jtbjc" (UniqueName: "kubernetes.io/projected/833ac15e-6498-4beb-a0da-b8e600653e3e-kube-api-access-jtbjc") pod "keystone-dc32-account-create-update-26chh" (UID: "833ac15e-6498-4beb-a0da-b8e600653e3e") : failed to fetch token: serviceaccounts "galera-openstack" not found Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:13.169923 4933 secret.go:188] Couldn't get secret openstack/cinder-scripts: secret "cinder-scripts" not found Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.170732 4933 generic.go:334] "Generic (PLEG): container finished" podID="a944aa66-1c67-4661-968f-e976494cf1eb" containerID="3e78db5f3e39d6ac0502c10f14b200e06daa035604cc2c40311b7a3661cf9a95" exitCode=0 Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.172108 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:13.172217 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-scripts podName:0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9 nodeName:}" failed. No retries permitted until 2026-01-22 06:09:17.169981792 +0000 UTC m=+1405.007107145 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "scripts" (UniqueName: "kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-scripts") pod "cinder-scheduler-0" (UID: "0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9") : secret "cinder-scripts" not found Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.172664 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"a944aa66-1c67-4661-968f-e976494cf1eb","Type":"ContainerDied","Data":"3e78db5f3e39d6ac0502c10f14b200e06daa035604cc2c40311b7a3661cf9a95"} Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.172723 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"a944aa66-1c67-4661-968f-e976494cf1eb","Type":"ContainerDied","Data":"84c996ed7bae8a32c9c37d0037c5088f03ab24320305ef6bd0084a699814fbc9"} Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.188241 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-7wf66"] Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.197145 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-7wf66"] Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.253161 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-dc32-account-create-update-26chh"] Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.266334 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-zqzbb"] Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:13.266928 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-vrm5j], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/root-account-create-update-zqzbb" podUID="67bcc01e-7a5e-4f76-889e-d6ed60745cf4" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.294518 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-67fd8f79cc-pb6vw"] Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.312231 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-proxy-67fd8f79cc-pb6vw"] Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.350927 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-633c-account-create-update-bklwf"] Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.371379 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-galera-0" podUID="1270da6b-1c9e-41b6-b628-c2eaef5d9daf" containerName="galera" containerID="cri-o://728628962bfae73d3592f2a149b8ee7e86c1b9649701dc3ff1d779b1a0028722" gracePeriod=30 Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.378209 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-633c-account-create-update-bklwf"] Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.431000 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="6fae4840-8fac-4192-8358-cbcae518e70d" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.205:8775/\": read tcp 10.217.0.2:34606->10.217.0.205:8775: read: connection reset by peer" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.431297 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="6fae4840-8fac-4192-8358-cbcae518e70d" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.205:8775/\": 
Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.472094 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vrm5j\" (UniqueName: \"kubernetes.io/projected/67bcc01e-7a5e-4f76-889e-d6ed60745cf4-kube-api-access-vrm5j\") pod \"root-account-create-update-zqzbb\" (UID: \"67bcc01e-7a5e-4f76-889e-d6ed60745cf4\") " pod="openstack/root-account-create-update-zqzbb"
Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:13.474999 4933 projected.go:194] Error preparing data for projected volume kube-api-access-vrm5j for pod openstack/root-account-create-update-zqzbb: failed to fetch token: serviceaccounts "galera-openstack" not found
Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:13.475108 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/67bcc01e-7a5e-4f76-889e-d6ed60745cf4-kube-api-access-vrm5j podName:67bcc01e-7a5e-4f76-889e-d6ed60745cf4 nodeName:}" failed. No retries permitted until 2026-01-22 06:09:14.47505555 +0000 UTC m=+1402.312180933 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-vrm5j" (UniqueName: "kubernetes.io/projected/67bcc01e-7a5e-4f76-889e-d6ed60745cf4-kube-api-access-vrm5j") pod "root-account-create-update-zqzbb" (UID: "67bcc01e-7a5e-4f76-889e-d6ed60745cf4") : failed to fetch token: serviceaccounts "galera-openstack" not found
Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:13.568380 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-jtbjc operator-scripts], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/keystone-dc32-account-create-update-26chh" podUID="833ac15e-6498-4beb-a0da-b8e600653e3e"
Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.572648 4933 scope.go:117] "RemoveContainer" containerID="4205f5d3155c766e46db2874af6cdb743c6e1ecc121652ab6d216950365cf512"
Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.607416 4933 scope.go:117] "RemoveContainer" containerID="b956f6b2dbe48cf74ff7e20f1605a13bc4acfeb9f49ccdf24ca9c5172e1373b4"
Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.608427 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-98e3-account-create-update-hg9mt"]
Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.629564 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-98e3-account-create-update-hg9mt"]
Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.661287 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-548e-account-create-update-c8h9b"]
Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.676665 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-e75b-account-create-update-tmgdt"
Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.678254 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-kp2cn"
Need to start a new one" pod="openstack/root-account-create-update-kp2cn" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.683425 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/833ac15e-6498-4beb-a0da-b8e600653e3e-operator-scripts\") pod \"keystone-dc32-account-create-update-26chh\" (UID: \"833ac15e-6498-4beb-a0da-b8e600653e3e\") " pod="openstack/keystone-dc32-account-create-update-26chh" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.683690 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jtbjc\" (UniqueName: \"kubernetes.io/projected/833ac15e-6498-4beb-a0da-b8e600653e3e-kube-api-access-jtbjc\") pod \"keystone-dc32-account-create-update-26chh\" (UID: \"833ac15e-6498-4beb-a0da-b8e600653e3e\") " pod="openstack/keystone-dc32-account-create-update-26chh" Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:13.684194 4933 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:13.684246 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/833ac15e-6498-4beb-a0da-b8e600653e3e-operator-scripts podName:833ac15e-6498-4beb-a0da-b8e600653e3e nodeName:}" failed. No retries permitted until 2026-01-22 06:09:14.684230417 +0000 UTC m=+1402.521355770 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/833ac15e-6498-4beb-a0da-b8e600653e3e-operator-scripts") pod "keystone-dc32-account-create-update-26chh" (UID: "833ac15e-6498-4beb-a0da-b8e600653e3e") : configmap "openstack-scripts" not found Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:13.688948 4933 projected.go:194] Error preparing data for projected volume kube-api-access-jtbjc for pod openstack/keystone-dc32-account-create-update-26chh: failed to fetch token: serviceaccounts "galera-openstack" not found Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:13.689022 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/833ac15e-6498-4beb-a0da-b8e600653e3e-kube-api-access-jtbjc podName:833ac15e-6498-4beb-a0da-b8e600653e3e nodeName:}" failed. No retries permitted until 2026-01-22 06:09:14.689002123 +0000 UTC m=+1402.526127476 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-jtbjc" (UniqueName: "kubernetes.io/projected/833ac15e-6498-4beb-a0da-b8e600653e3e-kube-api-access-jtbjc") pod "keystone-dc32-account-create-update-26chh" (UID: "833ac15e-6498-4beb-a0da-b8e600653e3e") : failed to fetch token: serviceaccounts "galera-openstack" not found Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.691206 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-548e-account-create-update-c8h9b"] Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.717237 4933 scope.go:117] "RemoveContainer" containerID="3e78db5f3e39d6ac0502c10f14b200e06daa035604cc2c40311b7a3661cf9a95" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.718050 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.730127 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.737106 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-f8f6-account-create-update-n4zdn"] Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.737932 4933 scope.go:117] "RemoveContainer" containerID="3e78db5f3e39d6ac0502c10f14b200e06daa035604cc2c40311b7a3661cf9a95" Jan 22 06:09:13 crc kubenswrapper[4933]: E0122 06:09:13.738364 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3e78db5f3e39d6ac0502c10f14b200e06daa035604cc2c40311b7a3661cf9a95\": container with ID starting with 3e78db5f3e39d6ac0502c10f14b200e06daa035604cc2c40311b7a3661cf9a95 not found: ID does not exist" containerID="3e78db5f3e39d6ac0502c10f14b200e06daa035604cc2c40311b7a3661cf9a95" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.738391 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3e78db5f3e39d6ac0502c10f14b200e06daa035604cc2c40311b7a3661cf9a95"} err="failed to get container status \"3e78db5f3e39d6ac0502c10f14b200e06daa035604cc2c40311b7a3661cf9a95\": rpc error: code = NotFound desc = could not find container \"3e78db5f3e39d6ac0502c10f14b200e06daa035604cc2c40311b7a3661cf9a95\": container with ID starting with 3e78db5f3e39d6ac0502c10f14b200e06daa035604cc2c40311b7a3661cf9a95 not found: ID does not exist" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.740973 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-f8f6-account-create-update-n4zdn"] Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.745472 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.750300 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.761777 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-b23f-account-create-update-hfxzc"] Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.767088 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-b23f-account-create-update-hfxzc"] Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.784732 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-twf88\" (UniqueName: \"kubernetes.io/projected/1d8e9d8c-961f-4dc5-84b8-51c486220cdc-kube-api-access-twf88\") pod 
\"1d8e9d8c-961f-4dc5-84b8-51c486220cdc\" (UID: \"1d8e9d8c-961f-4dc5-84b8-51c486220cdc\") " Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.784855 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/841bd4c5-516a-406d-af7f-8d551b970cab-operator-scripts\") pod \"841bd4c5-516a-406d-af7f-8d551b970cab\" (UID: \"841bd4c5-516a-406d-af7f-8d551b970cab\") " Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.784954 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1d8e9d8c-961f-4dc5-84b8-51c486220cdc-operator-scripts\") pod \"1d8e9d8c-961f-4dc5-84b8-51c486220cdc\" (UID: \"1d8e9d8c-961f-4dc5-84b8-51c486220cdc\") " Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.785176 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6dpkl\" (UniqueName: \"kubernetes.io/projected/841bd4c5-516a-406d-af7f-8d551b970cab-kube-api-access-6dpkl\") pod \"841bd4c5-516a-406d-af7f-8d551b970cab\" (UID: \"841bd4c5-516a-406d-af7f-8d551b970cab\") " Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.785402 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/841bd4c5-516a-406d-af7f-8d551b970cab-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "841bd4c5-516a-406d-af7f-8d551b970cab" (UID: "841bd4c5-516a-406d-af7f-8d551b970cab"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.785405 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1d8e9d8c-961f-4dc5-84b8-51c486220cdc-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1d8e9d8c-961f-4dc5-84b8-51c486220cdc" (UID: "1d8e9d8c-961f-4dc5-84b8-51c486220cdc"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.785776 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/841bd4c5-516a-406d-af7f-8d551b970cab-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.785926 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1d8e9d8c-961f-4dc5-84b8-51c486220cdc-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.793506 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d8e9d8c-961f-4dc5-84b8-51c486220cdc-kube-api-access-twf88" (OuterVolumeSpecName: "kube-api-access-twf88") pod "1d8e9d8c-961f-4dc5-84b8-51c486220cdc" (UID: "1d8e9d8c-961f-4dc5-84b8-51c486220cdc"). InnerVolumeSpecName "kube-api-access-twf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.793617 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/841bd4c5-516a-406d-af7f-8d551b970cab-kube-api-access-6dpkl" (OuterVolumeSpecName: "kube-api-access-6dpkl") pod "841bd4c5-516a-406d-af7f-8d551b970cab" (UID: "841bd4c5-516a-406d-af7f-8d551b970cab"). InnerVolumeSpecName "kube-api-access-6dpkl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.839260 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="578144dd-08a3-4c4b-8dd3-38ebe2d4dde8" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.203:3000/\": dial tcp 10.217.0.203:3000: connect: connection refused" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.896141 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6dpkl\" (UniqueName: \"kubernetes.io/projected/841bd4c5-516a-406d-af7f-8d551b970cab-kube-api-access-6dpkl\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:13 crc kubenswrapper[4933]: I0122 06:09:13.896402 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-twf88\" (UniqueName: \"kubernetes.io/projected/1d8e9d8c-961f-4dc5-84b8-51c486220cdc-kube-api-access-twf88\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.111945 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.119278 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.120518 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.128485 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-566788757d-gkrdt" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.154616 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.188688 4933 generic.go:334] "Generic (PLEG): container finished" podID="56625c99-64dc-4742-9927-0210d8fe8d9d" containerID="b6f0f60869caf1ac1eeabf3cf5ea90ed5b94691c40db36a9d775cc676248cd2f" exitCode=0 Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.188754 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"56625c99-64dc-4742-9927-0210d8fe8d9d","Type":"ContainerDied","Data":"b6f0f60869caf1ac1eeabf3cf5ea90ed5b94691c40db36a9d775cc676248cd2f"} Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.188780 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"56625c99-64dc-4742-9927-0210d8fe8d9d","Type":"ContainerDied","Data":"170c3882fe58d599e3fb221c49bb5456562e7747aa88579e72a325d48eaef23a"} Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.188800 4933 scope.go:117] "RemoveContainer" containerID="b6f0f60869caf1ac1eeabf3cf5ea90ed5b94691c40db36a9d775cc676248cd2f" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.188913 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.193688 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-e75b-account-create-update-tmgdt" event={"ID":"841bd4c5-516a-406d-af7f-8d551b970cab","Type":"ContainerDied","Data":"8d6ce78b44adbc00bbbea797d8a4719944afbdc8d13de056a505efe339ac7184"} Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.193782 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-e75b-account-create-update-tmgdt" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.201827 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/603c9f42-93c4-4268-b513-d2309571ac20-etc-machine-id\") pod \"603c9f42-93c4-4268-b513-d2309571ac20\" (UID: \"603c9f42-93c4-4268-b513-d2309571ac20\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.201871 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/603c9f42-93c4-4268-b513-d2309571ac20-scripts\") pod \"603c9f42-93c4-4268-b513-d2309571ac20\" (UID: \"603c9f42-93c4-4268-b513-d2309571ac20\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.201974 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/603c9f42-93c4-4268-b513-d2309571ac20-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "603c9f42-93c4-4268-b513-d2309571ac20" (UID: "603c9f42-93c4-4268-b513-d2309571ac20"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.204409 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4c8b893-2e30-4273-bbec-7ff7efee686e-config-data\") pod \"b4c8b893-2e30-4273-bbec-7ff7efee686e\" (UID: \"b4c8b893-2e30-4273-bbec-7ff7efee686e\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.204483 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/603c9f42-93c4-4268-b513-d2309571ac20-combined-ca-bundle\") pod \"603c9f42-93c4-4268-b513-d2309571ac20\" (UID: \"603c9f42-93c4-4268-b513-d2309571ac20\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.204525 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/56625c99-64dc-4742-9927-0210d8fe8d9d-logs\") pod \"56625c99-64dc-4742-9927-0210d8fe8d9d\" (UID: \"56625c99-64dc-4742-9927-0210d8fe8d9d\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.204549 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/603c9f42-93c4-4268-b513-d2309571ac20-config-data-custom\") pod \"603c9f42-93c4-4268-b513-d2309571ac20\" (UID: \"603c9f42-93c4-4268-b513-d2309571ac20\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.204570 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b4c8b893-2e30-4273-bbec-7ff7efee686e-logs\") pod \"b4c8b893-2e30-4273-bbec-7ff7efee686e\" (UID: \"b4c8b893-2e30-4273-bbec-7ff7efee686e\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.204638 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56625c99-64dc-4742-9927-0210d8fe8d9d-config-data\") pod \"56625c99-64dc-4742-9927-0210d8fe8d9d\" (UID: \"56625c99-64dc-4742-9927-0210d8fe8d9d\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.204664 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod 
\"b4c8b893-2e30-4273-bbec-7ff7efee686e\" (UID: \"b4c8b893-2e30-4273-bbec-7ff7efee686e\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.204743 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4c8b893-2e30-4273-bbec-7ff7efee686e-combined-ca-bundle\") pod \"b4c8b893-2e30-4273-bbec-7ff7efee686e\" (UID: \"b4c8b893-2e30-4273-bbec-7ff7efee686e\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.204787 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/603c9f42-93c4-4268-b513-d2309571ac20-logs\") pod \"603c9f42-93c4-4268-b513-d2309571ac20\" (UID: \"603c9f42-93c4-4268-b513-d2309571ac20\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.204813 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/56625c99-64dc-4742-9927-0210d8fe8d9d-internal-tls-certs\") pod \"56625c99-64dc-4742-9927-0210d8fe8d9d\" (UID: \"56625c99-64dc-4742-9927-0210d8fe8d9d\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.204840 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56625c99-64dc-4742-9927-0210d8fe8d9d-combined-ca-bundle\") pod \"56625c99-64dc-4742-9927-0210d8fe8d9d\" (UID: \"56625c99-64dc-4742-9927-0210d8fe8d9d\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.204875 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/56625c99-64dc-4742-9927-0210d8fe8d9d-public-tls-certs\") pod \"56625c99-64dc-4742-9927-0210d8fe8d9d\" (UID: \"56625c99-64dc-4742-9927-0210d8fe8d9d\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.204904 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-clvhb\" (UniqueName: \"kubernetes.io/projected/b4c8b893-2e30-4273-bbec-7ff7efee686e-kube-api-access-clvhb\") pod \"b4c8b893-2e30-4273-bbec-7ff7efee686e\" (UID: \"b4c8b893-2e30-4273-bbec-7ff7efee686e\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.204944 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b4c8b893-2e30-4273-bbec-7ff7efee686e-httpd-run\") pod \"b4c8b893-2e30-4273-bbec-7ff7efee686e\" (UID: \"b4c8b893-2e30-4273-bbec-7ff7efee686e\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.204987 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/603c9f42-93c4-4268-b513-d2309571ac20-public-tls-certs\") pod \"603c9f42-93c4-4268-b513-d2309571ac20\" (UID: \"603c9f42-93c4-4268-b513-d2309571ac20\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.205012 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mjclj\" (UniqueName: \"kubernetes.io/projected/603c9f42-93c4-4268-b513-d2309571ac20-kube-api-access-mjclj\") pod \"603c9f42-93c4-4268-b513-d2309571ac20\" (UID: \"603c9f42-93c4-4268-b513-d2309571ac20\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.205043 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4c8b893-2e30-4273-bbec-7ff7efee686e-scripts\") pod 
\"b4c8b893-2e30-4273-bbec-7ff7efee686e\" (UID: \"b4c8b893-2e30-4273-bbec-7ff7efee686e\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.205157 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8pdjn\" (UniqueName: \"kubernetes.io/projected/56625c99-64dc-4742-9927-0210d8fe8d9d-kube-api-access-8pdjn\") pod \"56625c99-64dc-4742-9927-0210d8fe8d9d\" (UID: \"56625c99-64dc-4742-9927-0210d8fe8d9d\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.205184 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4c8b893-2e30-4273-bbec-7ff7efee686e-public-tls-certs\") pod \"b4c8b893-2e30-4273-bbec-7ff7efee686e\" (UID: \"b4c8b893-2e30-4273-bbec-7ff7efee686e\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.205212 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/603c9f42-93c4-4268-b513-d2309571ac20-internal-tls-certs\") pod \"603c9f42-93c4-4268-b513-d2309571ac20\" (UID: \"603c9f42-93c4-4268-b513-d2309571ac20\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.205255 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/603c9f42-93c4-4268-b513-d2309571ac20-config-data\") pod \"603c9f42-93c4-4268-b513-d2309571ac20\" (UID: \"603c9f42-93c4-4268-b513-d2309571ac20\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.206114 4933 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/603c9f42-93c4-4268-b513-d2309571ac20-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.206988 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/56625c99-64dc-4742-9927-0210d8fe8d9d-logs" (OuterVolumeSpecName: "logs") pod "56625c99-64dc-4742-9927-0210d8fe8d9d" (UID: "56625c99-64dc-4742-9927-0210d8fe8d9d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.207696 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b4c8b893-2e30-4273-bbec-7ff7efee686e-logs" (OuterVolumeSpecName: "logs") pod "b4c8b893-2e30-4273-bbec-7ff7efee686e" (UID: "b4c8b893-2e30-4273-bbec-7ff7efee686e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.215177 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/603c9f42-93c4-4268-b513-d2309571ac20-logs" (OuterVolumeSpecName: "logs") pod "603c9f42-93c4-4268-b513-d2309571ac20" (UID: "603c9f42-93c4-4268-b513-d2309571ac20"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.216725 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b4c8b893-2e30-4273-bbec-7ff7efee686e-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "b4c8b893-2e30-4273-bbec-7ff7efee686e" (UID: "b4c8b893-2e30-4273-bbec-7ff7efee686e"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.251277 4933 generic.go:334] "Generic (PLEG): container finished" podID="6fae4840-8fac-4192-8358-cbcae518e70d" containerID="f46d4f43b41aa5fe98df750c180e6af7780c3a5b7b9665b0eea715c193df207c" exitCode=0 Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.251349 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6fae4840-8fac-4192-8358-cbcae518e70d","Type":"ContainerDied","Data":"f46d4f43b41aa5fe98df750c180e6af7780c3a5b7b9665b0eea715c193df207c"} Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.251463 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.259521 4933 generic.go:334] "Generic (PLEG): container finished" podID="a2bcbc4b-30c4-4ec8-81bf-6cba18171506" containerID="7e0168931d199a81b487119803e1944a792da4293cd792448c0c7c6cb8c0b855" exitCode=0 Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.259597 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"a2bcbc4b-30c4-4ec8-81bf-6cba18171506","Type":"ContainerDied","Data":"7e0168931d199a81b487119803e1944a792da4293cd792448c0c7c6cb8c0b855"} Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.263927 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/56625c99-64dc-4742-9927-0210d8fe8d9d-kube-api-access-8pdjn" (OuterVolumeSpecName: "kube-api-access-8pdjn") pod "56625c99-64dc-4742-9927-0210d8fe8d9d" (UID: "56625c99-64dc-4742-9927-0210d8fe8d9d"). InnerVolumeSpecName "kube-api-access-8pdjn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.264830 4933 scope.go:117] "RemoveContainer" containerID="a37bad5d083026a83896f1bbe2715cb126a4a56637c064f44bca0ac67965f6f0" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.267305 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-kp2cn" event={"ID":"1d8e9d8c-961f-4dc5-84b8-51c486220cdc","Type":"ContainerDied","Data":"e9346d264c86bdb6698627f8ff5484c15a226ea21f8a99a1bd92d89a5e2b3110"} Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.267372 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-kp2cn" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.275430 4933 generic.go:334] "Generic (PLEG): container finished" podID="d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194" containerID="9656bc111246901626a3ae6303bb9f3055280024dabd06af3b590f5dad87fafe" exitCode=0 Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.275500 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-566788757d-gkrdt" event={"ID":"d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194","Type":"ContainerDied","Data":"9656bc111246901626a3ae6303bb9f3055280024dabd06af3b590f5dad87fafe"} Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.275526 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-566788757d-gkrdt" event={"ID":"d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194","Type":"ContainerDied","Data":"0a10a03f273eed558bb474577d5acd3969def978c71514dbe156b0931a26f53b"} Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.275595 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-566788757d-gkrdt" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.308005 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-combined-ca-bundle\") pod \"d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194\" (UID: \"d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.308263 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tmp5k\" (UniqueName: \"kubernetes.io/projected/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-kube-api-access-tmp5k\") pod \"d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194\" (UID: \"d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.308292 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/4886348b-6078-41dc-8fab-a8e2e1c4898d-kube-state-metrics-tls-config\") pod \"4886348b-6078-41dc-8fab-a8e2e1c4898d\" (UID: \"4886348b-6078-41dc-8fab-a8e2e1c4898d\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.308322 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/4886348b-6078-41dc-8fab-a8e2e1c4898d-kube-state-metrics-tls-certs\") pod \"4886348b-6078-41dc-8fab-a8e2e1c4898d\" (UID: \"4886348b-6078-41dc-8fab-a8e2e1c4898d\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.308347 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-public-tls-certs\") pod \"d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194\" (UID: \"d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.308399 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-logs\") pod \"d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194\" (UID: \"d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.308433 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lffj7\" (UniqueName: \"kubernetes.io/projected/4886348b-6078-41dc-8fab-a8e2e1c4898d-kube-api-access-lffj7\") pod \"4886348b-6078-41dc-8fab-a8e2e1c4898d\" (UID: \"4886348b-6078-41dc-8fab-a8e2e1c4898d\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.308465 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-internal-tls-certs\") pod \"d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194\" (UID: \"d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.308573 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-config-data\") pod \"d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194\" (UID: \"d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.308595 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/4886348b-6078-41dc-8fab-a8e2e1c4898d-combined-ca-bundle\") pod \"4886348b-6078-41dc-8fab-a8e2e1c4898d\" (UID: \"4886348b-6078-41dc-8fab-a8e2e1c4898d\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.308613 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-scripts\") pod \"d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194\" (UID: \"d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.309003 4933 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b4c8b893-2e30-4273-bbec-7ff7efee686e-logs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.309019 4933 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/603c9f42-93c4-4268-b513-d2309571ac20-logs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.309027 4933 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b4c8b893-2e30-4273-bbec-7ff7efee686e-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.309035 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8pdjn\" (UniqueName: \"kubernetes.io/projected/56625c99-64dc-4742-9927-0210d8fe8d9d-kube-api-access-8pdjn\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.309045 4933 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/56625c99-64dc-4742-9927-0210d8fe8d9d-logs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.315138 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "glance") pod "b4c8b893-2e30-4273-bbec-7ff7efee686e" (UID: "b4c8b893-2e30-4273-bbec-7ff7efee686e"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.315693 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-logs" (OuterVolumeSpecName: "logs") pod "d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194" (UID: "d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.319358 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/603c9f42-93c4-4268-b513-d2309571ac20-kube-api-access-mjclj" (OuterVolumeSpecName: "kube-api-access-mjclj") pod "603c9f42-93c4-4268-b513-d2309571ac20" (UID: "603c9f42-93c4-4268-b513-d2309571ac20"). InnerVolumeSpecName "kube-api-access-mjclj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.320137 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"4886348b-6078-41dc-8fab-a8e2e1c4898d","Type":"ContainerDied","Data":"6b733f58820b28605f33186c29501971ea92d7a0b8029f812c88f4b5fc33485f"} Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.320262 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.329185 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4886348b-6078-41dc-8fab-a8e2e1c4898d-kube-api-access-lffj7" (OuterVolumeSpecName: "kube-api-access-lffj7") pod "4886348b-6078-41dc-8fab-a8e2e1c4898d" (UID: "4886348b-6078-41dc-8fab-a8e2e1c4898d"). InnerVolumeSpecName "kube-api-access-lffj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.337039 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-kube-api-access-tmp5k" (OuterVolumeSpecName: "kube-api-access-tmp5k") pod "d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194" (UID: "d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194"). InnerVolumeSpecName "kube-api-access-tmp5k". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.347885 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56625c99-64dc-4742-9927-0210d8fe8d9d-config-data" (OuterVolumeSpecName: "config-data") pod "56625c99-64dc-4742-9927-0210d8fe8d9d" (UID: "56625c99-64dc-4742-9927-0210d8fe8d9d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.348901 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-scripts" (OuterVolumeSpecName: "scripts") pod "d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194" (UID: "d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.351675 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/603c9f42-93c4-4268-b513-d2309571ac20-scripts" (OuterVolumeSpecName: "scripts") pod "603c9f42-93c4-4268-b513-d2309571ac20" (UID: "603c9f42-93c4-4268-b513-d2309571ac20"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.381938 4933 generic.go:334] "Generic (PLEG): container finished" podID="b4c8b893-2e30-4273-bbec-7ff7efee686e" containerID="80ec94b4b71bb2bb6fa66b1913564ba57af12174724640c7e8888c5d410e87a8" exitCode=0 Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.382084 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.382057 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b4c8b893-2e30-4273-bbec-7ff7efee686e","Type":"ContainerDied","Data":"80ec94b4b71bb2bb6fa66b1913564ba57af12174724640c7e8888c5d410e87a8"} Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.382134 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b4c8b893-2e30-4273-bbec-7ff7efee686e","Type":"ContainerDied","Data":"3daf3c9a01e2b97020adb3f2480965ebf20834cf860553b875b1f8a5e46cb9a5"} Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.406476 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/603c9f42-93c4-4268-b513-d2309571ac20-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "603c9f42-93c4-4268-b513-d2309571ac20" (UID: "603c9f42-93c4-4268-b513-d2309571ac20"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.406548 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4c8b893-2e30-4273-bbec-7ff7efee686e-kube-api-access-clvhb" (OuterVolumeSpecName: "kube-api-access-clvhb") pod "b4c8b893-2e30-4273-bbec-7ff7efee686e" (UID: "b4c8b893-2e30-4273-bbec-7ff7efee686e"). InnerVolumeSpecName "kube-api-access-clvhb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.411064 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-scripts\") pod \"0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9\" (UID: \"0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.411282 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-config-data\") pod \"0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9\" (UID: \"0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.411302 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-etc-machine-id\") pod \"0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9\" (UID: \"0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.412527 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wzz4j\" (UniqueName: \"kubernetes.io/projected/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-kube-api-access-wzz4j\") pod \"0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9\" (UID: \"0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.412604 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-combined-ca-bundle\") pod \"0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9\" (UID: \"0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.412631 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-config-data-custom\") pod \"0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9\" (UID: \"0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.413158 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lffj7\" (UniqueName: \"kubernetes.io/projected/4886348b-6078-41dc-8fab-a8e2e1c4898d-kube-api-access-lffj7\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.413174 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/603c9f42-93c4-4268-b513-d2309571ac20-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.413182 4933 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/603c9f42-93c4-4268-b513-d2309571ac20-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.413192 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.413201 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56625c99-64dc-4742-9927-0210d8fe8d9d-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.413219 4933 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.413371 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-clvhb\" (UniqueName: \"kubernetes.io/projected/b4c8b893-2e30-4273-bbec-7ff7efee686e-kube-api-access-clvhb\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.413385 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tmp5k\" (UniqueName: \"kubernetes.io/projected/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-kube-api-access-tmp5k\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.413393 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mjclj\" (UniqueName: \"kubernetes.io/projected/603c9f42-93c4-4268-b513-d2309571ac20-kube-api-access-mjclj\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.413402 4933 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-logs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.422766 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9" (UID: "0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.431062 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9","Type":"ContainerDied","Data":"2327536d5ea9d48fbf38777fee202924a5b1fae43f6ea007a86d7bd093d310c4"} Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.431206 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.434390 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-575b89575b-kkrzb" podUID="91864da0-319b-46e9-b4ef-8ccee4c52d37" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.165:9311/healthcheck\": read tcp 10.217.0.2:36158->10.217.0.165:9311: read: connection reset by peer" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.434513 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-575b89575b-kkrzb" podUID="91864da0-319b-46e9-b4ef-8ccee4c52d37" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.165:9311/healthcheck\": read tcp 10.217.0.2:36164->10.217.0.165:9311: read: connection reset by peer" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.447252 4933 scope.go:117] "RemoveContainer" containerID="b6f0f60869caf1ac1eeabf3cf5ea90ed5b94691c40db36a9d775cc676248cd2f" Jan 22 06:09:14 crc kubenswrapper[4933]: E0122 06:09:14.448178 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b6f0f60869caf1ac1eeabf3cf5ea90ed5b94691c40db36a9d775cc676248cd2f\": container with ID starting with b6f0f60869caf1ac1eeabf3cf5ea90ed5b94691c40db36a9d775cc676248cd2f not found: ID does not exist" containerID="b6f0f60869caf1ac1eeabf3cf5ea90ed5b94691c40db36a9d775cc676248cd2f" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.448207 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b6f0f60869caf1ac1eeabf3cf5ea90ed5b94691c40db36a9d775cc676248cd2f"} err="failed to get container status \"b6f0f60869caf1ac1eeabf3cf5ea90ed5b94691c40db36a9d775cc676248cd2f\": rpc error: code = NotFound desc = could not find container \"b6f0f60869caf1ac1eeabf3cf5ea90ed5b94691c40db36a9d775cc676248cd2f\": container with ID starting with b6f0f60869caf1ac1eeabf3cf5ea90ed5b94691c40db36a9d775cc676248cd2f not found: ID does not exist" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.448228 4933 scope.go:117] "RemoveContainer" containerID="a37bad5d083026a83896f1bbe2715cb126a4a56637c064f44bca0ac67965f6f0" Jan 22 06:09:14 crc kubenswrapper[4933]: E0122 06:09:14.450633 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a37bad5d083026a83896f1bbe2715cb126a4a56637c064f44bca0ac67965f6f0\": container with ID starting with a37bad5d083026a83896f1bbe2715cb126a4a56637c064f44bca0ac67965f6f0 not found: ID does not exist" containerID="a37bad5d083026a83896f1bbe2715cb126a4a56637c064f44bca0ac67965f6f0" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.450696 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a37bad5d083026a83896f1bbe2715cb126a4a56637c064f44bca0ac67965f6f0"} err="failed to get container status \"a37bad5d083026a83896f1bbe2715cb126a4a56637c064f44bca0ac67965f6f0\": 
rpc error: code = NotFound desc = could not find container \"a37bad5d083026a83896f1bbe2715cb126a4a56637c064f44bca0ac67965f6f0\": container with ID starting with a37bad5d083026a83896f1bbe2715cb126a4a56637c064f44bca0ac67965f6f0 not found: ID does not exist" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.450714 4933 scope.go:117] "RemoveContainer" containerID="9656bc111246901626a3ae6303bb9f3055280024dabd06af3b590f5dad87fafe" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.452342 4933 generic.go:334] "Generic (PLEG): container finished" podID="603c9f42-93c4-4268-b513-d2309571ac20" containerID="a5442678c4d8db8a7d32c5e8c0b8ef799742f21cd0c0918ab7cc4e0588f933fc" exitCode=0 Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.452403 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"603c9f42-93c4-4268-b513-d2309571ac20","Type":"ContainerDied","Data":"a5442678c4d8db8a7d32c5e8c0b8ef799742f21cd0c0918ab7cc4e0588f933fc"} Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.452429 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"603c9f42-93c4-4268-b513-d2309571ac20","Type":"ContainerDied","Data":"8aba54981644ff82dc6633776e219d26daca6577f1495bfce8dc2469b5517867"} Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.452480 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.456727 4933 generic.go:334] "Generic (PLEG): container finished" podID="e0d0b42d-5a68-46ba-a0de-f26c8dab1af8" containerID="c8b3c82c4b888183709e4c809da692746a4eb74efe1e21ce5e8fbd16fb29ade5" exitCode=0 Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.456794 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"e0d0b42d-5a68-46ba-a0de-f26c8dab1af8","Type":"ContainerDied","Data":"c8b3c82c4b888183709e4c809da692746a4eb74efe1e21ce5e8fbd16fb29ade5"} Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.464783 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4c8b893-2e30-4273-bbec-7ff7efee686e-scripts" (OuterVolumeSpecName: "scripts") pod "b4c8b893-2e30-4273-bbec-7ff7efee686e" (UID: "b4c8b893-2e30-4273-bbec-7ff7efee686e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.465153 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/603c9f42-93c4-4268-b513-d2309571ac20-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "603c9f42-93c4-4268-b513-d2309571ac20" (UID: "603c9f42-93c4-4268-b513-d2309571ac20"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.465444 4933 generic.go:334] "Generic (PLEG): container finished" podID="578144dd-08a3-4c4b-8dd3-38ebe2d4dde8" containerID="7759c053d18a0cfde4caa05a9fcd933f06f5e1f05af3297873fc5f9ea3a8ae61" exitCode=0 Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.465554 4933 generic.go:334] "Generic (PLEG): container finished" podID="578144dd-08a3-4c4b-8dd3-38ebe2d4dde8" containerID="1efb8a0dd4a5e96297be6dd03c40644d0d87af777a8a5b3ce08724ee8ab4e337" exitCode=2 Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.465628 4933 generic.go:334] "Generic (PLEG): container finished" podID="578144dd-08a3-4c4b-8dd3-38ebe2d4dde8" containerID="baeb972a644576e51e82e73e63e77cc4fc3796f87a5f81af36bd84230ffbbb39" exitCode=0 Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.465753 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-zqzbb" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.466721 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8","Type":"ContainerDied","Data":"7759c053d18a0cfde4caa05a9fcd933f06f5e1f05af3297873fc5f9ea3a8ae61"} Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.466832 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8","Type":"ContainerDied","Data":"1efb8a0dd4a5e96297be6dd03c40644d0d87af777a8a5b3ce08724ee8ab4e337"} Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.466916 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8","Type":"ContainerDied","Data":"baeb972a644576e51e82e73e63e77cc4fc3796f87a5f81af36bd84230ffbbb39"} Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.467024 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-dc32-account-create-update-26chh" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.478713 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-e75b-account-create-update-tmgdt"] Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.479435 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-zqzbb" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.489592 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-scripts" (OuterVolumeSpecName: "scripts") pod "0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9" (UID: "0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.489630 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-kube-api-access-wzz4j" (OuterVolumeSpecName: "kube-api-access-wzz4j") pod "0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9" (UID: "0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9"). InnerVolumeSpecName "kube-api-access-wzz4j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.492983 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9" (UID: "0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.510036 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="17180ad8-5646-46f7-ad5a-75608d596672" path="/var/lib/kubelet/pods/17180ad8-5646-46f7-ad5a-75608d596672/volumes" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.510720 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1b555a2a-0c59-4b4c-901e-6b160e5d57a1" path="/var/lib/kubelet/pods/1b555a2a-0c59-4b4c-901e-6b160e5d57a1/volumes" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.511486 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c6cfc4a-371c-496c-93dc-cdd3ea57e2d7" path="/var/lib/kubelet/pods/2c6cfc4a-371c-496c-93dc-cdd3ea57e2d7/volumes" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.512292 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="62dded9b-a123-4bd0-ab4c-8de7680be023" path="/var/lib/kubelet/pods/62dded9b-a123-4bd0-ab4c-8de7680be023/volumes" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.512817 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="729db25e-5864-4305-99dd-24ce61f45029" path="/var/lib/kubelet/pods/729db25e-5864-4305-99dd-24ce61f45029/volumes" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.513374 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="943da5ba-d325-4686-871d-802b7730d02a" path="/var/lib/kubelet/pods/943da5ba-d325-4686-871d-802b7730d02a/volumes" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.514420 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9cb8166a-621e-4e96-b4e7-4d4fca32a727" path="/var/lib/kubelet/pods/9cb8166a-621e-4e96-b4e7-4d4fca32a727/volumes" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.514865 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a944aa66-1c67-4661-968f-e976494cf1eb" path="/var/lib/kubelet/pods/a944aa66-1c67-4661-968f-e976494cf1eb/volumes" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.515503 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd2884cc-3d1a-4865-a232-16c459d0372d" path="/var/lib/kubelet/pods/bd2884cc-3d1a-4865-a232-16c459d0372d/volumes" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.516028 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c2022299-4089-4740-ae8f-50ca5b4be2b5" path="/var/lib/kubelet/pods/c2022299-4089-4740-ae8f-50ca5b4be2b5/volumes" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.516928 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e581b2e9-57f2-452a-956c-4e8c5d75b3fb" path="/var/lib/kubelet/pods/e581b2e9-57f2-452a-956c-4e8c5d75b3fb/volumes" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.517278 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vrm5j\" (UniqueName: \"kubernetes.io/projected/67bcc01e-7a5e-4f76-889e-d6ed60745cf4-kube-api-access-vrm5j\") pod 
\"root-account-create-update-zqzbb\" (UID: \"67bcc01e-7a5e-4f76-889e-d6ed60745cf4\") " pod="openstack/root-account-create-update-zqzbb" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.517354 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eb88ea31-d557-48b6-82b9-7e3843f9935c" path="/var/lib/kubelet/pods/eb88ea31-d557-48b6-82b9-7e3843f9935c/volumes" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.517443 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wzz4j\" (UniqueName: \"kubernetes.io/projected/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-kube-api-access-wzz4j\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.517456 4933 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.517464 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.517472 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4c8b893-2e30-4273-bbec-7ff7efee686e-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.517481 4933 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.517489 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/603c9f42-93c4-4268-b513-d2309571ac20-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: E0122 06:09:14.520534 4933 projected.go:194] Error preparing data for projected volume kube-api-access-vrm5j for pod openstack/root-account-create-update-zqzbb: failed to fetch token: serviceaccounts "galera-openstack" not found Jan 22 06:09:14 crc kubenswrapper[4933]: E0122 06:09:14.520655 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/67bcc01e-7a5e-4f76-889e-d6ed60745cf4-kube-api-access-vrm5j podName:67bcc01e-7a5e-4f76-889e-d6ed60745cf4 nodeName:}" failed. No retries permitted until 2026-01-22 06:09:16.520618144 +0000 UTC m=+1404.357743497 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-vrm5j" (UniqueName: "kubernetes.io/projected/67bcc01e-7a5e-4f76-889e-d6ed60745cf4-kube-api-access-vrm5j") pod "root-account-create-update-zqzbb" (UID: "67bcc01e-7a5e-4f76-889e-d6ed60745cf4") : failed to fetch token: serviceaccounts "galera-openstack" not found Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.536931 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4c8b893-2e30-4273-bbec-7ff7efee686e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b4c8b893-2e30-4273-bbec-7ff7efee686e" (UID: "b4c8b893-2e30-4273-bbec-7ff7efee686e"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.560779 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4886348b-6078-41dc-8fab-a8e2e1c4898d-kube-state-metrics-tls-config" (OuterVolumeSpecName: "kube-state-metrics-tls-config") pod "4886348b-6078-41dc-8fab-a8e2e1c4898d" (UID: "4886348b-6078-41dc-8fab-a8e2e1c4898d"). InnerVolumeSpecName "kube-state-metrics-tls-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.562120 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56625c99-64dc-4742-9927-0210d8fe8d9d-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "56625c99-64dc-4742-9927-0210d8fe8d9d" (UID: "56625c99-64dc-4742-9927-0210d8fe8d9d"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.570872 4933 scope.go:117] "RemoveContainer" containerID="9ef7106268a25e14e2c1b4b8ae2a3405385542e15442cd78aec71a27dca326ba" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.572175 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56625c99-64dc-4742-9927-0210d8fe8d9d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "56625c99-64dc-4742-9927-0210d8fe8d9d" (UID: "56625c99-64dc-4742-9927-0210d8fe8d9d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.578322 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4c8b893-2e30-4273-bbec-7ff7efee686e-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "b4c8b893-2e30-4273-bbec-7ff7efee686e" (UID: "b4c8b893-2e30-4273-bbec-7ff7efee686e"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.588776 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194" (UID: "d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.599744 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/603c9f42-93c4-4268-b513-d2309571ac20-config-data" (OuterVolumeSpecName: "config-data") pod "603c9f42-93c4-4268-b513-d2309571ac20" (UID: "603c9f42-93c4-4268-b513-d2309571ac20"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.601436 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-dc32-account-create-update-26chh" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.607827 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/603c9f42-93c4-4268-b513-d2309571ac20-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "603c9f42-93c4-4268-b513-d2309571ac20" (UID: "603c9f42-93c4-4268-b513-d2309571ac20"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.611764 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56625c99-64dc-4742-9927-0210d8fe8d9d-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "56625c99-64dc-4742-9927-0210d8fe8d9d" (UID: "56625c99-64dc-4742-9927-0210d8fe8d9d"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.614569 4933 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.618613 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/67bcc01e-7a5e-4f76-889e-d6ed60745cf4-operator-scripts\") pod \"67bcc01e-7a5e-4f76-889e-d6ed60745cf4\" (UID: \"67bcc01e-7a5e-4f76-889e-d6ed60745cf4\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.619616 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/67bcc01e-7a5e-4f76-889e-d6ed60745cf4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "67bcc01e-7a5e-4f76-889e-d6ed60745cf4" (UID: "67bcc01e-7a5e-4f76-889e-d6ed60745cf4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.620314 4933 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.620334 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4c8b893-2e30-4273-bbec-7ff7efee686e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.620347 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/67bcc01e-7a5e-4f76-889e-d6ed60745cf4-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.620359 4933 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/56625c99-64dc-4742-9927-0210d8fe8d9d-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.620370 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.620382 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56625c99-64dc-4742-9927-0210d8fe8d9d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.620392 4933 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/56625c99-64dc-4742-9927-0210d8fe8d9d-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.620402 4933 reconciler_common.go:293] "Volume detached for volume 
\"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/4886348b-6078-41dc-8fab-a8e2e1c4898d-kube-state-metrics-tls-config\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.620413 4933 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/603c9f42-93c4-4268-b513-d2309571ac20-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.620423 4933 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4c8b893-2e30-4273-bbec-7ff7efee686e-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.620435 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/603c9f42-93c4-4268-b513-d2309571ac20-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.628107 4933 scope.go:117] "RemoveContainer" containerID="9656bc111246901626a3ae6303bb9f3055280024dabd06af3b590f5dad87fafe" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.631766 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-e75b-account-create-update-tmgdt"] Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.631816 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-kp2cn"] Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.631827 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-kp2cn"] Jan 22 06:09:14 crc kubenswrapper[4933]: E0122 06:09:14.632605 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9656bc111246901626a3ae6303bb9f3055280024dabd06af3b590f5dad87fafe\": container with ID starting with 9656bc111246901626a3ae6303bb9f3055280024dabd06af3b590f5dad87fafe not found: ID does not exist" containerID="9656bc111246901626a3ae6303bb9f3055280024dabd06af3b590f5dad87fafe" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.632639 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9656bc111246901626a3ae6303bb9f3055280024dabd06af3b590f5dad87fafe"} err="failed to get container status \"9656bc111246901626a3ae6303bb9f3055280024dabd06af3b590f5dad87fafe\": rpc error: code = NotFound desc = could not find container \"9656bc111246901626a3ae6303bb9f3055280024dabd06af3b590f5dad87fafe\": container with ID starting with 9656bc111246901626a3ae6303bb9f3055280024dabd06af3b590f5dad87fafe not found: ID does not exist" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.632660 4933 scope.go:117] "RemoveContainer" containerID="9ef7106268a25e14e2c1b4b8ae2a3405385542e15442cd78aec71a27dca326ba" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.632798 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-config-data" (OuterVolumeSpecName: "config-data") pod "d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194" (UID: "d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: E0122 06:09:14.637393 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ef7106268a25e14e2c1b4b8ae2a3405385542e15442cd78aec71a27dca326ba\": container with ID starting with 9ef7106268a25e14e2c1b4b8ae2a3405385542e15442cd78aec71a27dca326ba not found: ID does not exist" containerID="9ef7106268a25e14e2c1b4b8ae2a3405385542e15442cd78aec71a27dca326ba" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.637424 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ef7106268a25e14e2c1b4b8ae2a3405385542e15442cd78aec71a27dca326ba"} err="failed to get container status \"9ef7106268a25e14e2c1b4b8ae2a3405385542e15442cd78aec71a27dca326ba\": rpc error: code = NotFound desc = could not find container \"9ef7106268a25e14e2c1b4b8ae2a3405385542e15442cd78aec71a27dca326ba\": container with ID starting with 9ef7106268a25e14e2c1b4b8ae2a3405385542e15442cd78aec71a27dca326ba not found: ID does not exist" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.637445 4933 scope.go:117] "RemoveContainer" containerID="35c37c5d066e31beb5f68171ba88451347a51fbe1f8550fd13c3cdca949ee224" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.638463 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.639089 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.639544 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.642404 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4886348b-6078-41dc-8fab-a8e2e1c4898d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4886348b-6078-41dc-8fab-a8e2e1c4898d" (UID: "4886348b-6078-41dc-8fab-a8e2e1c4898d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.643618 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9" (UID: "0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.672486 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/603c9f42-93c4-4268-b513-d2309571ac20-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "603c9f42-93c4-4268-b513-d2309571ac20" (UID: "603c9f42-93c4-4268-b513-d2309571ac20"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.673390 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4886348b-6078-41dc-8fab-a8e2e1c4898d-kube-state-metrics-tls-certs" (OuterVolumeSpecName: "kube-state-metrics-tls-certs") pod "4886348b-6078-41dc-8fab-a8e2e1c4898d" (UID: "4886348b-6078-41dc-8fab-a8e2e1c4898d"). 
InnerVolumeSpecName "kube-state-metrics-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.682734 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4c8b893-2e30-4273-bbec-7ff7efee686e-config-data" (OuterVolumeSpecName: "config-data") pod "b4c8b893-2e30-4273-bbec-7ff7efee686e" (UID: "b4c8b893-2e30-4273-bbec-7ff7efee686e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.683959 4933 scope.go:117] "RemoveContainer" containerID="80ec94b4b71bb2bb6fa66b1913564ba57af12174724640c7e8888c5d410e87a8" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.699707 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194" (UID: "d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.712326 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194" (UID: "d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.720992 4933 scope.go:117] "RemoveContainer" containerID="99672693509f4dfe6f517b698528ee0345ec420a893d4e6ab3cda20b8fb397ea" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.721849 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jtbjc\" (UniqueName: \"kubernetes.io/projected/833ac15e-6498-4beb-a0da-b8e600653e3e-kube-api-access-jtbjc\") pod \"keystone-dc32-account-create-update-26chh\" (UID: \"833ac15e-6498-4beb-a0da-b8e600653e3e\") " pod="openstack/keystone-dc32-account-create-update-26chh" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.721941 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/833ac15e-6498-4beb-a0da-b8e600653e3e-operator-scripts\") pod \"keystone-dc32-account-create-update-26chh\" (UID: \"833ac15e-6498-4beb-a0da-b8e600653e3e\") " pod="openstack/keystone-dc32-account-create-update-26chh" Jan 22 06:09:14 crc kubenswrapper[4933]: E0122 06:09:14.722015 4933 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Jan 22 06:09:14 crc kubenswrapper[4933]: E0122 06:09:14.722092 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/833ac15e-6498-4beb-a0da-b8e600653e3e-operator-scripts podName:833ac15e-6498-4beb-a0da-b8e600653e3e nodeName:}" failed. No retries permitted until 2026-01-22 06:09:16.722058901 +0000 UTC m=+1404.559184254 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/833ac15e-6498-4beb-a0da-b8e600653e3e-operator-scripts") pod "keystone-dc32-account-create-update-26chh" (UID: "833ac15e-6498-4beb-a0da-b8e600653e3e") : configmap "openstack-scripts" not found Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.722161 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.722175 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4886348b-6078-41dc-8fab-a8e2e1c4898d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.722187 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.722200 4933 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/4886348b-6078-41dc-8fab-a8e2e1c4898d-kube-state-metrics-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.722211 4933 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.722221 4933 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/603c9f42-93c4-4268-b513-d2309571ac20-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.722232 4933 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.722242 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4c8b893-2e30-4273-bbec-7ff7efee686e-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: E0122 06:09:14.725031 4933 projected.go:194] Error preparing data for projected volume kube-api-access-jtbjc for pod openstack/keystone-dc32-account-create-update-26chh: failed to fetch token: serviceaccounts "galera-openstack" not found Jan 22 06:09:14 crc kubenswrapper[4933]: E0122 06:09:14.725103 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/833ac15e-6498-4beb-a0da-b8e600653e3e-kube-api-access-jtbjc podName:833ac15e-6498-4beb-a0da-b8e600653e3e nodeName:}" failed. No retries permitted until 2026-01-22 06:09:16.725064095 +0000 UTC m=+1404.562189448 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-jtbjc" (UniqueName: "kubernetes.io/projected/833ac15e-6498-4beb-a0da-b8e600653e3e-kube-api-access-jtbjc") pod "keystone-dc32-account-create-update-26chh" (UID: "833ac15e-6498-4beb-a0da-b8e600653e3e") : failed to fetch token: serviceaccounts "galera-openstack" not found Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.741947 4933 scope.go:117] "RemoveContainer" containerID="80ec94b4b71bb2bb6fa66b1913564ba57af12174724640c7e8888c5d410e87a8" Jan 22 06:09:14 crc kubenswrapper[4933]: E0122 06:09:14.742436 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"80ec94b4b71bb2bb6fa66b1913564ba57af12174724640c7e8888c5d410e87a8\": container with ID starting with 80ec94b4b71bb2bb6fa66b1913564ba57af12174724640c7e8888c5d410e87a8 not found: ID does not exist" containerID="80ec94b4b71bb2bb6fa66b1913564ba57af12174724640c7e8888c5d410e87a8" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.742481 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"80ec94b4b71bb2bb6fa66b1913564ba57af12174724640c7e8888c5d410e87a8"} err="failed to get container status \"80ec94b4b71bb2bb6fa66b1913564ba57af12174724640c7e8888c5d410e87a8\": rpc error: code = NotFound desc = could not find container \"80ec94b4b71bb2bb6fa66b1913564ba57af12174724640c7e8888c5d410e87a8\": container with ID starting with 80ec94b4b71bb2bb6fa66b1913564ba57af12174724640c7e8888c5d410e87a8 not found: ID does not exist" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.742514 4933 scope.go:117] "RemoveContainer" containerID="99672693509f4dfe6f517b698528ee0345ec420a893d4e6ab3cda20b8fb397ea" Jan 22 06:09:14 crc kubenswrapper[4933]: E0122 06:09:14.742786 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"99672693509f4dfe6f517b698528ee0345ec420a893d4e6ab3cda20b8fb397ea\": container with ID starting with 99672693509f4dfe6f517b698528ee0345ec420a893d4e6ab3cda20b8fb397ea not found: ID does not exist" containerID="99672693509f4dfe6f517b698528ee0345ec420a893d4e6ab3cda20b8fb397ea" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.742841 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"99672693509f4dfe6f517b698528ee0345ec420a893d4e6ab3cda20b8fb397ea"} err="failed to get container status \"99672693509f4dfe6f517b698528ee0345ec420a893d4e6ab3cda20b8fb397ea\": rpc error: code = NotFound desc = could not find container \"99672693509f4dfe6f517b698528ee0345ec420a893d4e6ab3cda20b8fb397ea\": container with ID starting with 99672693509f4dfe6f517b698528ee0345ec420a893d4e6ab3cda20b8fb397ea not found: ID does not exist" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.742866 4933 scope.go:117] "RemoveContainer" containerID="47d5a47f9c85c2d9dc14b7850a92bcef2cae8e70655cb013dd920c1d39eb6587" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.762525 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_9445b2f3-83ea-4e79-8312-ceffa2208f77/ovn-northd/0.log" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.762598 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.766920 4933 scope.go:117] "RemoveContainer" containerID="e136629b351ca188667a0d656d1460f094277accee28f74429184e904516ae0c" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.772028 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-config-data" (OuterVolumeSpecName: "config-data") pod "0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9" (UID: "0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.792349 4933 scope.go:117] "RemoveContainer" containerID="a5442678c4d8db8a7d32c5e8c0b8ef799742f21cd0c0918ab7cc4e0588f933fc" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.811628 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.817742 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.823217 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l2h8w\" (UniqueName: \"kubernetes.io/projected/e0d0b42d-5a68-46ba-a0de-f26c8dab1af8-kube-api-access-l2h8w\") pod \"e0d0b42d-5a68-46ba-a0de-f26c8dab1af8\" (UID: \"e0d0b42d-5a68-46ba-a0de-f26c8dab1af8\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.823244 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6fae4840-8fac-4192-8358-cbcae518e70d-config-data\") pod \"6fae4840-8fac-4192-8358-cbcae518e70d\" (UID: \"6fae4840-8fac-4192-8358-cbcae518e70d\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.823271 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e0d0b42d-5a68-46ba-a0de-f26c8dab1af8-config-data\") pod \"e0d0b42d-5a68-46ba-a0de-f26c8dab1af8\" (UID: \"e0d0b42d-5a68-46ba-a0de-f26c8dab1af8\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.823308 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fae4840-8fac-4192-8358-cbcae518e70d-combined-ca-bundle\") pod \"6fae4840-8fac-4192-8358-cbcae518e70d\" (UID: \"6fae4840-8fac-4192-8358-cbcae518e70d\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.823332 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-logs\") pod \"a2bcbc4b-30c4-4ec8-81bf-6cba18171506\" (UID: \"a2bcbc4b-30c4-4ec8-81bf-6cba18171506\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.823346 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-config-data\") pod \"a2bcbc4b-30c4-4ec8-81bf-6cba18171506\" (UID: \"a2bcbc4b-30c4-4ec8-81bf-6cba18171506\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.823392 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6fae4840-8fac-4192-8358-cbcae518e70d-logs\") pod \"6fae4840-8fac-4192-8358-cbcae518e70d\" (UID: 
\"6fae4840-8fac-4192-8358-cbcae518e70d\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.823407 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-internal-tls-certs\") pod \"a2bcbc4b-30c4-4ec8-81bf-6cba18171506\" (UID: \"a2bcbc4b-30c4-4ec8-81bf-6cba18171506\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.823425 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"a2bcbc4b-30c4-4ec8-81bf-6cba18171506\" (UID: \"a2bcbc4b-30c4-4ec8-81bf-6cba18171506\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.823568 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x9z6f\" (UniqueName: \"kubernetes.io/projected/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-kube-api-access-x9z6f\") pod \"a2bcbc4b-30c4-4ec8-81bf-6cba18171506\" (UID: \"a2bcbc4b-30c4-4ec8-81bf-6cba18171506\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.823594 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0d0b42d-5a68-46ba-a0de-f26c8dab1af8-combined-ca-bundle\") pod \"e0d0b42d-5a68-46ba-a0de-f26c8dab1af8\" (UID: \"e0d0b42d-5a68-46ba-a0de-f26c8dab1af8\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.823614 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/e0d0b42d-5a68-46ba-a0de-f26c8dab1af8-memcached-tls-certs\") pod \"e0d0b42d-5a68-46ba-a0de-f26c8dab1af8\" (UID: \"e0d0b42d-5a68-46ba-a0de-f26c8dab1af8\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.823633 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-scripts\") pod \"a2bcbc4b-30c4-4ec8-81bf-6cba18171506\" (UID: \"a2bcbc4b-30c4-4ec8-81bf-6cba18171506\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.823655 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-httpd-run\") pod \"a2bcbc4b-30c4-4ec8-81bf-6cba18171506\" (UID: \"a2bcbc4b-30c4-4ec8-81bf-6cba18171506\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.823688 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/6fae4840-8fac-4192-8358-cbcae518e70d-nova-metadata-tls-certs\") pod \"6fae4840-8fac-4192-8358-cbcae518e70d\" (UID: \"6fae4840-8fac-4192-8358-cbcae518e70d\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.823714 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-combined-ca-bundle\") pod \"a2bcbc4b-30c4-4ec8-81bf-6cba18171506\" (UID: \"a2bcbc4b-30c4-4ec8-81bf-6cba18171506\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.823730 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pfvqs\" (UniqueName: \"kubernetes.io/projected/6fae4840-8fac-4192-8358-cbcae518e70d-kube-api-access-pfvqs\") pod \"6fae4840-8fac-4192-8358-cbcae518e70d\" (UID: 
\"6fae4840-8fac-4192-8358-cbcae518e70d\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.823748 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/e0d0b42d-5a68-46ba-a0de-f26c8dab1af8-kolla-config\") pod \"e0d0b42d-5a68-46ba-a0de-f26c8dab1af8\" (UID: \"e0d0b42d-5a68-46ba-a0de-f26c8dab1af8\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.824057 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.824585 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e0d0b42d-5a68-46ba-a0de-f26c8dab1af8-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "e0d0b42d-5a68-46ba-a0de-f26c8dab1af8" (UID: "e0d0b42d-5a68-46ba-a0de-f26c8dab1af8"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.825870 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6fae4840-8fac-4192-8358-cbcae518e70d-logs" (OuterVolumeSpecName: "logs") pod "6fae4840-8fac-4192-8358-cbcae518e70d" (UID: "6fae4840-8fac-4192-8358-cbcae518e70d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.825986 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-logs" (OuterVolumeSpecName: "logs") pod "a2bcbc4b-30c4-4ec8-81bf-6cba18171506" (UID: "a2bcbc4b-30c4-4ec8-81bf-6cba18171506"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.829201 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "a2bcbc4b-30c4-4ec8-81bf-6cba18171506" (UID: "a2bcbc4b-30c4-4ec8-81bf-6cba18171506"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.829466 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "glance") pod "a2bcbc4b-30c4-4ec8-81bf-6cba18171506" (UID: "a2bcbc4b-30c4-4ec8-81bf-6cba18171506"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.833837 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-scripts" (OuterVolumeSpecName: "scripts") pod "a2bcbc4b-30c4-4ec8-81bf-6cba18171506" (UID: "a2bcbc4b-30c4-4ec8-81bf-6cba18171506"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.835995 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e0d0b42d-5a68-46ba-a0de-f26c8dab1af8-kube-api-access-l2h8w" (OuterVolumeSpecName: "kube-api-access-l2h8w") pod "e0d0b42d-5a68-46ba-a0de-f26c8dab1af8" (UID: "e0d0b42d-5a68-46ba-a0de-f26c8dab1af8"). 
InnerVolumeSpecName "kube-api-access-l2h8w". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.840405 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e0d0b42d-5a68-46ba-a0de-f26c8dab1af8-config-data" (OuterVolumeSpecName: "config-data") pod "e0d0b42d-5a68-46ba-a0de-f26c8dab1af8" (UID: "e0d0b42d-5a68-46ba-a0de-f26c8dab1af8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.856283 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6fae4840-8fac-4192-8358-cbcae518e70d-kube-api-access-pfvqs" (OuterVolumeSpecName: "kube-api-access-pfvqs") pod "6fae4840-8fac-4192-8358-cbcae518e70d" (UID: "6fae4840-8fac-4192-8358-cbcae518e70d"). InnerVolumeSpecName "kube-api-access-pfvqs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.858486 4933 scope.go:117] "RemoveContainer" containerID="2f820ff473f51e41089b39b460bd1b0e17ef3dee077f8634fc530dbd7212251b" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.860632 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-kube-api-access-x9z6f" (OuterVolumeSpecName: "kube-api-access-x9z6f") pod "a2bcbc4b-30c4-4ec8-81bf-6cba18171506" (UID: "a2bcbc4b-30c4-4ec8-81bf-6cba18171506"). InnerVolumeSpecName "kube-api-access-x9z6f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.871715 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.895336 4933 scope.go:117] "RemoveContainer" containerID="a5442678c4d8db8a7d32c5e8c0b8ef799742f21cd0c0918ab7cc4e0588f933fc" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.896455 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 22 06:09:14 crc kubenswrapper[4933]: E0122 06:09:14.897663 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a5442678c4d8db8a7d32c5e8c0b8ef799742f21cd0c0918ab7cc4e0588f933fc\": container with ID starting with a5442678c4d8db8a7d32c5e8c0b8ef799742f21cd0c0918ab7cc4e0588f933fc not found: ID does not exist" containerID="a5442678c4d8db8a7d32c5e8c0b8ef799742f21cd0c0918ab7cc4e0588f933fc" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.897705 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a5442678c4d8db8a7d32c5e8c0b8ef799742f21cd0c0918ab7cc4e0588f933fc"} err="failed to get container status \"a5442678c4d8db8a7d32c5e8c0b8ef799742f21cd0c0918ab7cc4e0588f933fc\": rpc error: code = NotFound desc = could not find container \"a5442678c4d8db8a7d32c5e8c0b8ef799742f21cd0c0918ab7cc4e0588f933fc\": container with ID starting with a5442678c4d8db8a7d32c5e8c0b8ef799742f21cd0c0918ab7cc4e0588f933fc not found: ID does not exist" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.897735 4933 scope.go:117] "RemoveContainer" containerID="2f820ff473f51e41089b39b460bd1b0e17ef3dee077f8634fc530dbd7212251b" Jan 22 06:09:14 crc kubenswrapper[4933]: E0122 06:09:14.898845 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"2f820ff473f51e41089b39b460bd1b0e17ef3dee077f8634fc530dbd7212251b\": container with ID starting with 2f820ff473f51e41089b39b460bd1b0e17ef3dee077f8634fc530dbd7212251b not found: ID does not exist" containerID="2f820ff473f51e41089b39b460bd1b0e17ef3dee077f8634fc530dbd7212251b" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.898912 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f820ff473f51e41089b39b460bd1b0e17ef3dee077f8634fc530dbd7212251b"} err="failed to get container status \"2f820ff473f51e41089b39b460bd1b0e17ef3dee077f8634fc530dbd7212251b\": rpc error: code = NotFound desc = could not find container \"2f820ff473f51e41089b39b460bd1b0e17ef3dee077f8634fc530dbd7212251b\": container with ID starting with 2f820ff473f51e41089b39b460bd1b0e17ef3dee077f8634fc530dbd7212251b not found: ID does not exist" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.901202 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a2bcbc4b-30c4-4ec8-81bf-6cba18171506" (UID: "a2bcbc4b-30c4-4ec8-81bf-6cba18171506"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.927130 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-shsz5\" (UniqueName: \"kubernetes.io/projected/9445b2f3-83ea-4e79-8312-ceffa2208f77-kube-api-access-shsz5\") pod \"9445b2f3-83ea-4e79-8312-ceffa2208f77\" (UID: \"9445b2f3-83ea-4e79-8312-ceffa2208f77\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.927206 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9445b2f3-83ea-4e79-8312-ceffa2208f77-metrics-certs-tls-certs\") pod \"9445b2f3-83ea-4e79-8312-ceffa2208f77\" (UID: \"9445b2f3-83ea-4e79-8312-ceffa2208f77\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.927273 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/9445b2f3-83ea-4e79-8312-ceffa2208f77-ovn-northd-tls-certs\") pod \"9445b2f3-83ea-4e79-8312-ceffa2208f77\" (UID: \"9445b2f3-83ea-4e79-8312-ceffa2208f77\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.927350 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/9445b2f3-83ea-4e79-8312-ceffa2208f77-ovn-rundir\") pod \"9445b2f3-83ea-4e79-8312-ceffa2208f77\" (UID: \"9445b2f3-83ea-4e79-8312-ceffa2208f77\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.927422 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9445b2f3-83ea-4e79-8312-ceffa2208f77-config\") pod \"9445b2f3-83ea-4e79-8312-ceffa2208f77\" (UID: \"9445b2f3-83ea-4e79-8312-ceffa2208f77\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.927441 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9445b2f3-83ea-4e79-8312-ceffa2208f77-scripts\") pod \"9445b2f3-83ea-4e79-8312-ceffa2208f77\" (UID: \"9445b2f3-83ea-4e79-8312-ceffa2208f77\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.927464 4933 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9445b2f3-83ea-4e79-8312-ceffa2208f77-combined-ca-bundle\") pod \"9445b2f3-83ea-4e79-8312-ceffa2208f77\" (UID: \"9445b2f3-83ea-4e79-8312-ceffa2208f77\") " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.927844 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x9z6f\" (UniqueName: \"kubernetes.io/projected/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-kube-api-access-x9z6f\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.927857 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.927866 4933 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.927874 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.927882 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pfvqs\" (UniqueName: \"kubernetes.io/projected/6fae4840-8fac-4192-8358-cbcae518e70d-kube-api-access-pfvqs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.927892 4933 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/e0d0b42d-5a68-46ba-a0de-f26c8dab1af8-kolla-config\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.927900 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l2h8w\" (UniqueName: \"kubernetes.io/projected/e0d0b42d-5a68-46ba-a0de-f26c8dab1af8-kube-api-access-l2h8w\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.927908 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e0d0b42d-5a68-46ba-a0de-f26c8dab1af8-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.927915 4933 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-logs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.927923 4933 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6fae4840-8fac-4192-8358-cbcae518e70d-logs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.927941 4933 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.928495 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9445b2f3-83ea-4e79-8312-ceffa2208f77-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "9445b2f3-83ea-4e79-8312-ceffa2208f77" (UID: "9445b2f3-83ea-4e79-8312-ceffa2208f77"). InnerVolumeSpecName "ovn-rundir". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.929217 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9445b2f3-83ea-4e79-8312-ceffa2208f77-scripts" (OuterVolumeSpecName: "scripts") pod "9445b2f3-83ea-4e79-8312-ceffa2208f77" (UID: "9445b2f3-83ea-4e79-8312-ceffa2208f77"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.929340 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9445b2f3-83ea-4e79-8312-ceffa2208f77-config" (OuterVolumeSpecName: "config") pod "9445b2f3-83ea-4e79-8312-ceffa2208f77" (UID: "9445b2f3-83ea-4e79-8312-ceffa2208f77"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.956373 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9445b2f3-83ea-4e79-8312-ceffa2208f77-kube-api-access-shsz5" (OuterVolumeSpecName: "kube-api-access-shsz5") pod "9445b2f3-83ea-4e79-8312-ceffa2208f77" (UID: "9445b2f3-83ea-4e79-8312-ceffa2208f77"). InnerVolumeSpecName "kube-api-access-shsz5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.988539 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e0d0b42d-5a68-46ba-a0de-f26c8dab1af8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e0d0b42d-5a68-46ba-a0de-f26c8dab1af8" (UID: "e0d0b42d-5a68-46ba-a0de-f26c8dab1af8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.992533 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6fae4840-8fac-4192-8358-cbcae518e70d-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "6fae4840-8fac-4192-8358-cbcae518e70d" (UID: "6fae4840-8fac-4192-8358-cbcae518e70d"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:14 crc kubenswrapper[4933]: I0122 06:09:14.994242 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "a2bcbc4b-30c4-4ec8-81bf-6cba18171506" (UID: "a2bcbc4b-30c4-4ec8-81bf-6cba18171506"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.009191 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6fae4840-8fac-4192-8358-cbcae518e70d-config-data" (OuterVolumeSpecName: "config-data") pod "6fae4840-8fac-4192-8358-cbcae518e70d" (UID: "6fae4840-8fac-4192-8358-cbcae518e70d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.023653 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9445b2f3-83ea-4e79-8312-ceffa2208f77-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9445b2f3-83ea-4e79-8312-ceffa2208f77" (UID: "9445b2f3-83ea-4e79-8312-ceffa2208f77"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.027582 4933 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.029475 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0d0b42d-5a68-46ba-a0de-f26c8dab1af8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.029508 4933 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/9445b2f3-83ea-4e79-8312-ceffa2208f77-ovn-rundir\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.029520 4933 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/6fae4840-8fac-4192-8358-cbcae518e70d-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.029533 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9445b2f3-83ea-4e79-8312-ceffa2208f77-config\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.029545 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9445b2f3-83ea-4e79-8312-ceffa2208f77-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.029559 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9445b2f3-83ea-4e79-8312-ceffa2208f77-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.029571 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6fae4840-8fac-4192-8358-cbcae518e70d-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.029584 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-shsz5\" (UniqueName: \"kubernetes.io/projected/9445b2f3-83ea-4e79-8312-ceffa2208f77-kube-api-access-shsz5\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.029595 4933 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.029606 4933 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.032790 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6fae4840-8fac-4192-8358-cbcae518e70d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6fae4840-8fac-4192-8358-cbcae518e70d" (UID: "6fae4840-8fac-4192-8358-cbcae518e70d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.048774 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e0d0b42d-5a68-46ba-a0de-f26c8dab1af8-memcached-tls-certs" (OuterVolumeSpecName: "memcached-tls-certs") pod "e0d0b42d-5a68-46ba-a0de-f26c8dab1af8" (UID: "e0d0b42d-5a68-46ba-a0de-f26c8dab1af8"). InnerVolumeSpecName "memcached-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.052864 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-config-data" (OuterVolumeSpecName: "config-data") pod "a2bcbc4b-30c4-4ec8-81bf-6cba18171506" (UID: "a2bcbc4b-30c4-4ec8-81bf-6cba18171506"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.063325 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="47299478-bcfd-4f21-a56c-efcf7b167999" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.102:5671: connect: connection refused" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.094722 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9445b2f3-83ea-4e79-8312-ceffa2208f77-ovn-northd-tls-certs" (OuterVolumeSpecName: "ovn-northd-tls-certs") pod "9445b2f3-83ea-4e79-8312-ceffa2208f77" (UID: "9445b2f3-83ea-4e79-8312-ceffa2208f77"). InnerVolumeSpecName "ovn-northd-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.101230 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9445b2f3-83ea-4e79-8312-ceffa2208f77-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "9445b2f3-83ea-4e79-8312-ceffa2208f77" (UID: "9445b2f3-83ea-4e79-8312-ceffa2208f77"). InnerVolumeSpecName "metrics-certs-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.102760 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="4d712958-1ece-47de-9798-6e852b03c565" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.103:5671: connect: connection refused" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.131882 4933 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9445b2f3-83ea-4e79-8312-ceffa2208f77-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.131912 4933 reconciler_common.go:293] "Volume detached for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/9445b2f3-83ea-4e79-8312-ceffa2208f77-ovn-northd-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.131923 4933 reconciler_common.go:293] "Volume detached for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/e0d0b42d-5a68-46ba-a0de-f26c8dab1af8-memcached-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.131935 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fae4840-8fac-4192-8358-cbcae518e70d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.131945 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a2bcbc4b-30c4-4ec8-81bf-6cba18171506-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: E0122 06:09:15.173141 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 38208bba151b5aef67b3039fba534cb60f5a8a95e9c63a84f708a2f3f493fde6 is running failed: container process not found" containerID="38208bba151b5aef67b3039fba534cb60f5a8a95e9c63a84f708a2f3f493fde6" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 22 06:09:15 crc kubenswrapper[4933]: E0122 06:09:15.173373 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 38208bba151b5aef67b3039fba534cb60f5a8a95e9c63a84f708a2f3f493fde6 is running failed: container process not found" containerID="38208bba151b5aef67b3039fba534cb60f5a8a95e9c63a84f708a2f3f493fde6" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.173948 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-575b89575b-kkrzb" Jan 22 06:09:15 crc kubenswrapper[4933]: E0122 06:09:15.174349 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 38208bba151b5aef67b3039fba534cb60f5a8a95e9c63a84f708a2f3f493fde6 is running failed: container process not found" containerID="38208bba151b5aef67b3039fba534cb60f5a8a95e9c63a84f708a2f3f493fde6" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 22 06:09:15 crc kubenswrapper[4933]: E0122 06:09:15.174388 4933 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 38208bba151b5aef67b3039fba534cb60f5a8a95e9c63a84f708a2f3f493fde6 is running failed: container process not found" probeType="Readiness" pod="openstack/nova-cell1-conductor-0" podUID="2a218455-793d-4ccf-880a-d89b28e98b2d" containerName="nova-cell1-conductor-conductor" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.209041 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-566788757d-gkrdt"] Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.211793 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.218917 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-566788757d-gkrdt"] Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.231588 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.241765 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.253506 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.260758 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.273366 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.283029 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.290986 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.335409 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91864da0-319b-46e9-b4ef-8ccee4c52d37-combined-ca-bundle\") pod \"91864da0-319b-46e9-b4ef-8ccee4c52d37\" (UID: \"91864da0-319b-46e9-b4ef-8ccee4c52d37\") " Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.335476 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-29k5l\" (UniqueName: \"kubernetes.io/projected/91864da0-319b-46e9-b4ef-8ccee4c52d37-kube-api-access-29k5l\") pod \"91864da0-319b-46e9-b4ef-8ccee4c52d37\" (UID: \"91864da0-319b-46e9-b4ef-8ccee4c52d37\") " Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.335515 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/91864da0-319b-46e9-b4ef-8ccee4c52d37-config-data-custom\") pod \"91864da0-319b-46e9-b4ef-8ccee4c52d37\" (UID: \"91864da0-319b-46e9-b4ef-8ccee4c52d37\") " Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.335535 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91864da0-319b-46e9-b4ef-8ccee4c52d37-config-data\") pod \"91864da0-319b-46e9-b4ef-8ccee4c52d37\" (UID: \"91864da0-319b-46e9-b4ef-8ccee4c52d37\") " Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.335557 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/91864da0-319b-46e9-b4ef-8ccee4c52d37-internal-tls-certs\") pod \"91864da0-319b-46e9-b4ef-8ccee4c52d37\" (UID: \"91864da0-319b-46e9-b4ef-8ccee4c52d37\") " Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.335590 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-grgk4\" (UniqueName: \"kubernetes.io/projected/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-kube-api-access-grgk4\") pod \"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8\" (UID: \"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8\") " Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.335632 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-ceilometer-tls-certs\") pod \"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8\" (UID: \"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8\") " Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.335653 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-run-httpd\") pod \"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8\" (UID: \"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8\") " Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.335692 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-combined-ca-bundle\") pod \"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8\" (UID: \"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8\") " Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.335751 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/91864da0-319b-46e9-b4ef-8ccee4c52d37-logs\") pod 
\"91864da0-319b-46e9-b4ef-8ccee4c52d37\" (UID: \"91864da0-319b-46e9-b4ef-8ccee4c52d37\") " Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.335767 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-log-httpd\") pod \"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8\" (UID: \"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8\") " Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.335786 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/91864da0-319b-46e9-b4ef-8ccee4c52d37-public-tls-certs\") pod \"91864da0-319b-46e9-b4ef-8ccee4c52d37\" (UID: \"91864da0-319b-46e9-b4ef-8ccee4c52d37\") " Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.335810 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-sg-core-conf-yaml\") pod \"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8\" (UID: \"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8\") " Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.335839 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-scripts\") pod \"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8\" (UID: \"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8\") " Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.335858 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-config-data\") pod \"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8\" (UID: \"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8\") " Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.336798 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "578144dd-08a3-4c4b-8dd3-38ebe2d4dde8" (UID: "578144dd-08a3-4c4b-8dd3-38ebe2d4dde8"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.337055 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "578144dd-08a3-4c4b-8dd3-38ebe2d4dde8" (UID: "578144dd-08a3-4c4b-8dd3-38ebe2d4dde8"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.338754 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/91864da0-319b-46e9-b4ef-8ccee4c52d37-logs" (OuterVolumeSpecName: "logs") pod "91864da0-319b-46e9-b4ef-8ccee4c52d37" (UID: "91864da0-319b-46e9-b4ef-8ccee4c52d37"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.340258 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91864da0-319b-46e9-b4ef-8ccee4c52d37-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "91864da0-319b-46e9-b4ef-8ccee4c52d37" (UID: "91864da0-319b-46e9-b4ef-8ccee4c52d37"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.344672 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-scripts" (OuterVolumeSpecName: "scripts") pod "578144dd-08a3-4c4b-8dd3-38ebe2d4dde8" (UID: "578144dd-08a3-4c4b-8dd3-38ebe2d4dde8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.346320 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-kube-api-access-grgk4" (OuterVolumeSpecName: "kube-api-access-grgk4") pod "578144dd-08a3-4c4b-8dd3-38ebe2d4dde8" (UID: "578144dd-08a3-4c4b-8dd3-38ebe2d4dde8"). InnerVolumeSpecName "kube-api-access-grgk4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.352856 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/91864da0-319b-46e9-b4ef-8ccee4c52d37-kube-api-access-29k5l" (OuterVolumeSpecName: "kube-api-access-29k5l") pod "91864da0-319b-46e9-b4ef-8ccee4c52d37" (UID: "91864da0-319b-46e9-b4ef-8ccee4c52d37"). InnerVolumeSpecName "kube-api-access-29k5l". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.364327 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "578144dd-08a3-4c4b-8dd3-38ebe2d4dde8" (UID: "578144dd-08a3-4c4b-8dd3-38ebe2d4dde8"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.370173 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91864da0-319b-46e9-b4ef-8ccee4c52d37-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "91864da0-319b-46e9-b4ef-8ccee4c52d37" (UID: "91864da0-319b-46e9-b4ef-8ccee4c52d37"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.407085 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91864da0-319b-46e9-b4ef-8ccee4c52d37-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "91864da0-319b-46e9-b4ef-8ccee4c52d37" (UID: "91864da0-319b-46e9-b4ef-8ccee4c52d37"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.419148 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "578144dd-08a3-4c4b-8dd3-38ebe2d4dde8" (UID: "578144dd-08a3-4c4b-8dd3-38ebe2d4dde8"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.445699 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "578144dd-08a3-4c4b-8dd3-38ebe2d4dde8" (UID: "578144dd-08a3-4c4b-8dd3-38ebe2d4dde8"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.446310 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91864da0-319b-46e9-b4ef-8ccee4c52d37-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "91864da0-319b-46e9-b4ef-8ccee4c52d37" (UID: "91864da0-319b-46e9-b4ef-8ccee4c52d37"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.447126 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91864da0-319b-46e9-b4ef-8ccee4c52d37-config-data" (OuterVolumeSpecName: "config-data") pod "91864da0-319b-46e9-b4ef-8ccee4c52d37" (UID: "91864da0-319b-46e9-b4ef-8ccee4c52d37"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.447942 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a218455-793d-4ccf-880a-d89b28e98b2d-combined-ca-bundle\") pod \"2a218455-793d-4ccf-880a-d89b28e98b2d\" (UID: \"2a218455-793d-4ccf-880a-d89b28e98b2d\") " Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.447985 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kp2dj\" (UniqueName: \"kubernetes.io/projected/2a218455-793d-4ccf-880a-d89b28e98b2d-kube-api-access-kp2dj\") pod \"2a218455-793d-4ccf-880a-d89b28e98b2d\" (UID: \"2a218455-793d-4ccf-880a-d89b28e98b2d\") " Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.448037 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a218455-793d-4ccf-880a-d89b28e98b2d-config-data\") pod \"2a218455-793d-4ccf-880a-d89b28e98b2d\" (UID: \"2a218455-793d-4ccf-880a-d89b28e98b2d\") " Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.448880 4933 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/91864da0-319b-46e9-b4ef-8ccee4c52d37-logs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.448898 4933 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.448913 4933 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/91864da0-319b-46e9-b4ef-8ccee4c52d37-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.448924 4933 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.448932 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.448941 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/91864da0-319b-46e9-b4ef-8ccee4c52d37-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.448953 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-29k5l\" (UniqueName: \"kubernetes.io/projected/91864da0-319b-46e9-b4ef-8ccee4c52d37-kube-api-access-29k5l\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.448961 4933 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/91864da0-319b-46e9-b4ef-8ccee4c52d37-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.448970 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91864da0-319b-46e9-b4ef-8ccee4c52d37-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.448978 4933 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/91864da0-319b-46e9-b4ef-8ccee4c52d37-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.448989 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-grgk4\" (UniqueName: \"kubernetes.io/projected/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-kube-api-access-grgk4\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.449000 4933 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.449009 4933 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.449017 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.450521 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a218455-793d-4ccf-880a-d89b28e98b2d-kube-api-access-kp2dj" (OuterVolumeSpecName: "kube-api-access-kp2dj") pod "2a218455-793d-4ccf-880a-d89b28e98b2d" (UID: "2a218455-793d-4ccf-880a-d89b28e98b2d"). InnerVolumeSpecName "kube-api-access-kp2dj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.478897 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-config-data" (OuterVolumeSpecName: "config-data") pod "578144dd-08a3-4c4b-8dd3-38ebe2d4dde8" (UID: "578144dd-08a3-4c4b-8dd3-38ebe2d4dde8"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.489640 4933 generic.go:334] "Generic (PLEG): container finished" podID="578144dd-08a3-4c4b-8dd3-38ebe2d4dde8" containerID="83f50dc0f110857dd2bcd69ddb9eb051bf30ec68e850a625d293c6f90c8de031" exitCode=0 Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.489646 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8","Type":"ContainerDied","Data":"83f50dc0f110857dd2bcd69ddb9eb051bf30ec68e850a625d293c6f90c8de031"} Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.489750 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.489766 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"578144dd-08a3-4c4b-8dd3-38ebe2d4dde8","Type":"ContainerDied","Data":"ab185a8031470817b118de7fce3c92732e6b839c83b25ee26538e0efb0265b02"} Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.489789 4933 scope.go:117] "RemoveContainer" containerID="7759c053d18a0cfde4caa05a9fcd933f06f5e1f05af3297873fc5f9ea3a8ae61" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.493593 4933 generic.go:334] "Generic (PLEG): container finished" podID="91864da0-319b-46e9-b4ef-8ccee4c52d37" containerID="c6536d22d6c93b55c99436f1124a25adb4cae4775413fcbf11c9e30b27a18603" exitCode=0 Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.493684 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-575b89575b-kkrzb" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.493705 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-575b89575b-kkrzb" event={"ID":"91864da0-319b-46e9-b4ef-8ccee4c52d37","Type":"ContainerDied","Data":"c6536d22d6c93b55c99436f1124a25adb4cae4775413fcbf11c9e30b27a18603"} Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.493833 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-575b89575b-kkrzb" event={"ID":"91864da0-319b-46e9-b4ef-8ccee4c52d37","Type":"ContainerDied","Data":"b0eceadb1d255469a928409a36ac9832735a249064e555e1cf2847e6302b19bc"} Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.496926 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6fae4840-8fac-4192-8358-cbcae518e70d","Type":"ContainerDied","Data":"22019c00eacee40fbbe6d90e8f349b8275d96b73d0c0369b14596fa5817fd488"} Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.497029 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.505416 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"e0d0b42d-5a68-46ba-a0de-f26c8dab1af8","Type":"ContainerDied","Data":"ceaee3bb0ae50b783c3f6dc6b46ed72fd8c6ba129c276ba7e0aa397688638198"} Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.505469 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.507188 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a218455-793d-4ccf-880a-d89b28e98b2d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2a218455-793d-4ccf-880a-d89b28e98b2d" (UID: "2a218455-793d-4ccf-880a-d89b28e98b2d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.510043 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_9445b2f3-83ea-4e79-8312-ceffa2208f77/ovn-northd/0.log" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.510142 4933 generic.go:334] "Generic (PLEG): container finished" podID="9445b2f3-83ea-4e79-8312-ceffa2208f77" containerID="480d1b66c4dcca2c067fc5041e62b5b6640e85f510d6d67079fc834d3bbb885a" exitCode=139 Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.510195 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.510222 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"9445b2f3-83ea-4e79-8312-ceffa2208f77","Type":"ContainerDied","Data":"480d1b66c4dcca2c067fc5041e62b5b6640e85f510d6d67079fc834d3bbb885a"} Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.510251 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"9445b2f3-83ea-4e79-8312-ceffa2208f77","Type":"ContainerDied","Data":"a02dd2e2dbbd38834a52749a21bc07d14bccdd49bfd8071c336dac65fb9b12f4"} Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.513532 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a218455-793d-4ccf-880a-d89b28e98b2d-config-data" (OuterVolumeSpecName: "config-data") pod "2a218455-793d-4ccf-880a-d89b28e98b2d" (UID: "2a218455-793d-4ccf-880a-d89b28e98b2d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.522059 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"1270da6b-1c9e-41b6-b628-c2eaef5d9daf","Type":"ContainerDied","Data":"728628962bfae73d3592f2a149b8ee7e86c1b9649701dc3ff1d779b1a0028722"} Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.522117 4933 generic.go:334] "Generic (PLEG): container finished" podID="1270da6b-1c9e-41b6-b628-c2eaef5d9daf" containerID="728628962bfae73d3592f2a149b8ee7e86c1b9649701dc3ff1d779b1a0028722" exitCode=0 Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.526272 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"a2bcbc4b-30c4-4ec8-81bf-6cba18171506","Type":"ContainerDied","Data":"aee258747d2f542abd72648829ad18150e8f7017099d71ed9dd21d17eb70bd91"} Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.526312 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.536237 4933 generic.go:334] "Generic (PLEG): container finished" podID="2a218455-793d-4ccf-880a-d89b28e98b2d" containerID="38208bba151b5aef67b3039fba534cb60f5a8a95e9c63a84f708a2f3f493fde6" exitCode=0 Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.536325 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-dc32-account-create-update-26chh" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.536402 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.537095 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"2a218455-793d-4ccf-880a-d89b28e98b2d","Type":"ContainerDied","Data":"38208bba151b5aef67b3039fba534cb60f5a8a95e9c63a84f708a2f3f493fde6"} Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.537131 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"2a218455-793d-4ccf-880a-d89b28e98b2d","Type":"ContainerDied","Data":"e4722e0c49280f70153bac74392b77a6e18349fe1106d512c16e872d54701fc8"} Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.537170 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-zqzbb" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.551007 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a218455-793d-4ccf-880a-d89b28e98b2d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.551032 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kp2dj\" (UniqueName: \"kubernetes.io/projected/2a218455-793d-4ccf-880a-d89b28e98b2d-kube-api-access-kp2dj\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.551042 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a218455-793d-4ccf-880a-d89b28e98b2d-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.551052 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.579479 4933 scope.go:117] "RemoveContainer" containerID="1efb8a0dd4a5e96297be6dd03c40644d0d87af777a8a5b3ce08724ee8ab4e337" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.623614 4933 scope.go:117] "RemoveContainer" containerID="83f50dc0f110857dd2bcd69ddb9eb051bf30ec68e850a625d293c6f90c8de031" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.626124 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-zqzbb"] Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.631752 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-zqzbb"] Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.644906 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.652571 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/ceilometer-0"] Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.667366 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"] Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.676557 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-northd-0"] Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.682765 4933 scope.go:117] "RemoveContainer" containerID="baeb972a644576e51e82e73e63e77cc4fc3796f87a5f81af36bd84230ffbbb39" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.688045 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-575b89575b-kkrzb"] Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.706829 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-575b89575b-kkrzb"] Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.713440 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Jan 22 06:09:15 crc kubenswrapper[4933]: E0122 06:09:15.717308 4933 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1270da6b_1c9e_41b6_b628_c2eaef5d9daf.slice/crio-728628962bfae73d3592f2a149b8ee7e86c1b9649701dc3ff1d779b1a0028722.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1270da6b_1c9e_41b6_b628_c2eaef5d9daf.slice/crio-conmon-728628962bfae73d3592f2a149b8ee7e86c1b9649701dc3ff1d779b1a0028722.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod91864da0_319b_46e9_b4ef_8ccee4c52d37.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9445b2f3_83ea_4e79_8312_ceffa2208f77.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode0d0b42d_5a68_46ba_a0de_f26c8dab1af8.slice\": RecentStats: unable to find data in memory cache]" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.721731 4933 scope.go:117] "RemoveContainer" containerID="7759c053d18a0cfde4caa05a9fcd933f06f5e1f05af3297873fc5f9ea3a8ae61" Jan 22 06:09:15 crc kubenswrapper[4933]: E0122 06:09:15.723839 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7759c053d18a0cfde4caa05a9fcd933f06f5e1f05af3297873fc5f9ea3a8ae61\": container with ID starting with 7759c053d18a0cfde4caa05a9fcd933f06f5e1f05af3297873fc5f9ea3a8ae61 not found: ID does not exist" containerID="7759c053d18a0cfde4caa05a9fcd933f06f5e1f05af3297873fc5f9ea3a8ae61" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.723889 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7759c053d18a0cfde4caa05a9fcd933f06f5e1f05af3297873fc5f9ea3a8ae61"} err="failed to get container status \"7759c053d18a0cfde4caa05a9fcd933f06f5e1f05af3297873fc5f9ea3a8ae61\": rpc error: code = NotFound desc = could not find container \"7759c053d18a0cfde4caa05a9fcd933f06f5e1f05af3297873fc5f9ea3a8ae61\": container with ID starting with 7759c053d18a0cfde4caa05a9fcd933f06f5e1f05af3297873fc5f9ea3a8ae61 not found: ID does not exist" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.723915 4933 scope.go:117] "RemoveContainer" 
containerID="1efb8a0dd4a5e96297be6dd03c40644d0d87af777a8a5b3ce08724ee8ab4e337" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.726780 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-dc32-account-create-update-26chh"] Jan 22 06:09:15 crc kubenswrapper[4933]: E0122 06:09:15.727120 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1efb8a0dd4a5e96297be6dd03c40644d0d87af777a8a5b3ce08724ee8ab4e337\": container with ID starting with 1efb8a0dd4a5e96297be6dd03c40644d0d87af777a8a5b3ce08724ee8ab4e337 not found: ID does not exist" containerID="1efb8a0dd4a5e96297be6dd03c40644d0d87af777a8a5b3ce08724ee8ab4e337" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.727210 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1efb8a0dd4a5e96297be6dd03c40644d0d87af777a8a5b3ce08724ee8ab4e337"} err="failed to get container status \"1efb8a0dd4a5e96297be6dd03c40644d0d87af777a8a5b3ce08724ee8ab4e337\": rpc error: code = NotFound desc = could not find container \"1efb8a0dd4a5e96297be6dd03c40644d0d87af777a8a5b3ce08724ee8ab4e337\": container with ID starting with 1efb8a0dd4a5e96297be6dd03c40644d0d87af777a8a5b3ce08724ee8ab4e337 not found: ID does not exist" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.727316 4933 scope.go:117] "RemoveContainer" containerID="83f50dc0f110857dd2bcd69ddb9eb051bf30ec68e850a625d293c6f90c8de031" Jan 22 06:09:15 crc kubenswrapper[4933]: E0122 06:09:15.728234 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"83f50dc0f110857dd2bcd69ddb9eb051bf30ec68e850a625d293c6f90c8de031\": container with ID starting with 83f50dc0f110857dd2bcd69ddb9eb051bf30ec68e850a625d293c6f90c8de031 not found: ID does not exist" containerID="83f50dc0f110857dd2bcd69ddb9eb051bf30ec68e850a625d293c6f90c8de031" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.728260 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"83f50dc0f110857dd2bcd69ddb9eb051bf30ec68e850a625d293c6f90c8de031"} err="failed to get container status \"83f50dc0f110857dd2bcd69ddb9eb051bf30ec68e850a625d293c6f90c8de031\": rpc error: code = NotFound desc = could not find container \"83f50dc0f110857dd2bcd69ddb9eb051bf30ec68e850a625d293c6f90c8de031\": container with ID starting with 83f50dc0f110857dd2bcd69ddb9eb051bf30ec68e850a625d293c6f90c8de031 not found: ID does not exist" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.728274 4933 scope.go:117] "RemoveContainer" containerID="baeb972a644576e51e82e73e63e77cc4fc3796f87a5f81af36bd84230ffbbb39" Jan 22 06:09:15 crc kubenswrapper[4933]: E0122 06:09:15.728697 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"baeb972a644576e51e82e73e63e77cc4fc3796f87a5f81af36bd84230ffbbb39\": container with ID starting with baeb972a644576e51e82e73e63e77cc4fc3796f87a5f81af36bd84230ffbbb39 not found: ID does not exist" containerID="baeb972a644576e51e82e73e63e77cc4fc3796f87a5f81af36bd84230ffbbb39" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.728717 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"baeb972a644576e51e82e73e63e77cc4fc3796f87a5f81af36bd84230ffbbb39"} err="failed to get container status \"baeb972a644576e51e82e73e63e77cc4fc3796f87a5f81af36bd84230ffbbb39\": rpc error: code = 
NotFound desc = could not find container \"baeb972a644576e51e82e73e63e77cc4fc3796f87a5f81af36bd84230ffbbb39\": container with ID starting with baeb972a644576e51e82e73e63e77cc4fc3796f87a5f81af36bd84230ffbbb39 not found: ID does not exist" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.728729 4933 scope.go:117] "RemoveContainer" containerID="c6536d22d6c93b55c99436f1124a25adb4cae4775413fcbf11c9e30b27a18603" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.740870 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-dc32-account-create-update-26chh"] Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.749858 4933 scope.go:117] "RemoveContainer" containerID="919264dcf0b82bef1d83226e9c6472a32457bb0515e5a68ff0d276535cb3dcff" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.756861 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vrm5j\" (UniqueName: \"kubernetes.io/projected/67bcc01e-7a5e-4f76-889e-d6ed60745cf4-kube-api-access-vrm5j\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.761323 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.767948 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.774433 4933 scope.go:117] "RemoveContainer" containerID="c6536d22d6c93b55c99436f1124a25adb4cae4775413fcbf11c9e30b27a18603" Jan 22 06:09:15 crc kubenswrapper[4933]: E0122 06:09:15.776704 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c6536d22d6c93b55c99436f1124a25adb4cae4775413fcbf11c9e30b27a18603\": container with ID starting with c6536d22d6c93b55c99436f1124a25adb4cae4775413fcbf11c9e30b27a18603 not found: ID does not exist" containerID="c6536d22d6c93b55c99436f1124a25adb4cae4775413fcbf11c9e30b27a18603" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.776776 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c6536d22d6c93b55c99436f1124a25adb4cae4775413fcbf11c9e30b27a18603"} err="failed to get container status \"c6536d22d6c93b55c99436f1124a25adb4cae4775413fcbf11c9e30b27a18603\": rpc error: code = NotFound desc = could not find container \"c6536d22d6c93b55c99436f1124a25adb4cae4775413fcbf11c9e30b27a18603\": container with ID starting with c6536d22d6c93b55c99436f1124a25adb4cae4775413fcbf11c9e30b27a18603 not found: ID does not exist" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.776805 4933 scope.go:117] "RemoveContainer" containerID="919264dcf0b82bef1d83226e9c6472a32457bb0515e5a68ff0d276535cb3dcff" Jan 22 06:09:15 crc kubenswrapper[4933]: E0122 06:09:15.777990 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"919264dcf0b82bef1d83226e9c6472a32457bb0515e5a68ff0d276535cb3dcff\": container with ID starting with 919264dcf0b82bef1d83226e9c6472a32457bb0515e5a68ff0d276535cb3dcff not found: ID does not exist" containerID="919264dcf0b82bef1d83226e9c6472a32457bb0515e5a68ff0d276535cb3dcff" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.778046 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"919264dcf0b82bef1d83226e9c6472a32457bb0515e5a68ff0d276535cb3dcff"} err="failed to get container status 
\"919264dcf0b82bef1d83226e9c6472a32457bb0515e5a68ff0d276535cb3dcff\": rpc error: code = NotFound desc = could not find container \"919264dcf0b82bef1d83226e9c6472a32457bb0515e5a68ff0d276535cb3dcff\": container with ID starting with 919264dcf0b82bef1d83226e9c6472a32457bb0515e5a68ff0d276535cb3dcff not found: ID does not exist" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.778094 4933 scope.go:117] "RemoveContainer" containerID="f46d4f43b41aa5fe98df750c180e6af7780c3a5b7b9665b0eea715c193df207c" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.779558 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"] Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.787787 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/memcached-0"] Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.796172 4933 scope.go:117] "RemoveContainer" containerID="769102d68320fe984ffaababc2e759013ea7756eebffbcf2bed246a58368e7c3" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.797431 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.803727 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.808206 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.812421 4933 scope.go:117] "RemoveContainer" containerID="c8b3c82c4b888183709e4c809da692746a4eb74efe1e21ce5e8fbd16fb29ade5" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.815885 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.835485 4933 scope.go:117] "RemoveContainer" containerID="bb6a7eef37a247e6e0eaaa17e2b10e4afc7b1b1032f493709b0fed75abf884df" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.858123 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-galera-tls-certs\") pod \"1270da6b-1c9e-41b6-b628-c2eaef5d9daf\" (UID: \"1270da6b-1c9e-41b6-b628-c2eaef5d9daf\") " Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.858222 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-config-data-default\") pod \"1270da6b-1c9e-41b6-b628-c2eaef5d9daf\" (UID: \"1270da6b-1c9e-41b6-b628-c2eaef5d9daf\") " Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.858315 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-operator-scripts\") pod \"1270da6b-1c9e-41b6-b628-c2eaef5d9daf\" (UID: \"1270da6b-1c9e-41b6-b628-c2eaef5d9daf\") " Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.858421 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-config-data-generated\") pod \"1270da6b-1c9e-41b6-b628-c2eaef5d9daf\" (UID: \"1270da6b-1c9e-41b6-b628-c2eaef5d9daf\") " Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.858507 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"kube-api-access-9ttcs\" (UniqueName: \"kubernetes.io/projected/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-kube-api-access-9ttcs\") pod \"1270da6b-1c9e-41b6-b628-c2eaef5d9daf\" (UID: \"1270da6b-1c9e-41b6-b628-c2eaef5d9daf\") " Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.858545 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"1270da6b-1c9e-41b6-b628-c2eaef5d9daf\" (UID: \"1270da6b-1c9e-41b6-b628-c2eaef5d9daf\") " Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.858570 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-kolla-config\") pod \"1270da6b-1c9e-41b6-b628-c2eaef5d9daf\" (UID: \"1270da6b-1c9e-41b6-b628-c2eaef5d9daf\") " Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.858686 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-combined-ca-bundle\") pod \"1270da6b-1c9e-41b6-b628-c2eaef5d9daf\" (UID: \"1270da6b-1c9e-41b6-b628-c2eaef5d9daf\") " Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.859160 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jtbjc\" (UniqueName: \"kubernetes.io/projected/833ac15e-6498-4beb-a0da-b8e600653e3e-kube-api-access-jtbjc\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.859186 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/833ac15e-6498-4beb-a0da-b8e600653e3e-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: E0122 06:09:15.859288 4933 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Jan 22 06:09:15 crc kubenswrapper[4933]: E0122 06:09:15.859367 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4d712958-1ece-47de-9798-6e852b03c565-config-data podName:4d712958-1ece-47de-9798-6e852b03c565 nodeName:}" failed. No retries permitted until 2026-01-22 06:09:23.859329974 +0000 UTC m=+1411.696455337 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/4d712958-1ece-47de-9798-6e852b03c565-config-data") pod "rabbitmq-cell1-server-0" (UID: "4d712958-1ece-47de-9798-6e852b03c565") : configmap "rabbitmq-cell1-config-data" not found Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.860800 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "1270da6b-1c9e-41b6-b628-c2eaef5d9daf" (UID: "1270da6b-1c9e-41b6-b628-c2eaef5d9daf"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.861662 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "1270da6b-1c9e-41b6-b628-c2eaef5d9daf" (UID: "1270da6b-1c9e-41b6-b628-c2eaef5d9daf"). InnerVolumeSpecName "config-data-generated". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.861458 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1270da6b-1c9e-41b6-b628-c2eaef5d9daf" (UID: "1270da6b-1c9e-41b6-b628-c2eaef5d9daf"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.862047 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "1270da6b-1c9e-41b6-b628-c2eaef5d9daf" (UID: "1270da6b-1c9e-41b6-b628-c2eaef5d9daf"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.866642 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-kube-api-access-9ttcs" (OuterVolumeSpecName: "kube-api-access-9ttcs") pod "1270da6b-1c9e-41b6-b628-c2eaef5d9daf" (UID: "1270da6b-1c9e-41b6-b628-c2eaef5d9daf"). InnerVolumeSpecName "kube-api-access-9ttcs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.876260 4933 scope.go:117] "RemoveContainer" containerID="480d1b66c4dcca2c067fc5041e62b5b6640e85f510d6d67079fc834d3bbb885a" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.910875 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "mysql-db") pod "1270da6b-1c9e-41b6-b628-c2eaef5d9daf" (UID: "1270da6b-1c9e-41b6-b628-c2eaef5d9daf"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.933694 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1270da6b-1c9e-41b6-b628-c2eaef5d9daf" (UID: "1270da6b-1c9e-41b6-b628-c2eaef5d9daf"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.961180 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.961207 4933 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-config-data-default\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.961218 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.961226 4933 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-config-data-generated\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.961236 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9ttcs\" (UniqueName: \"kubernetes.io/projected/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-kube-api-access-9ttcs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.961264 4933 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" " Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.961274 4933 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-kolla-config\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.973161 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "1270da6b-1c9e-41b6-b628-c2eaef5d9daf" (UID: "1270da6b-1c9e-41b6-b628-c2eaef5d9daf"). InnerVolumeSpecName "galera-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:15 crc kubenswrapper[4933]: I0122 06:09:15.974763 4933 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc" Jan 22 06:09:16 crc kubenswrapper[4933]: I0122 06:09:16.038308 4933 scope.go:117] "RemoveContainer" containerID="bb6a7eef37a247e6e0eaaa17e2b10e4afc7b1b1032f493709b0fed75abf884df" Jan 22 06:09:16 crc kubenswrapper[4933]: E0122 06:09:16.038996 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bb6a7eef37a247e6e0eaaa17e2b10e4afc7b1b1032f493709b0fed75abf884df\": container with ID starting with bb6a7eef37a247e6e0eaaa17e2b10e4afc7b1b1032f493709b0fed75abf884df not found: ID does not exist" containerID="bb6a7eef37a247e6e0eaaa17e2b10e4afc7b1b1032f493709b0fed75abf884df" Jan 22 06:09:16 crc kubenswrapper[4933]: I0122 06:09:16.039041 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb6a7eef37a247e6e0eaaa17e2b10e4afc7b1b1032f493709b0fed75abf884df"} err="failed to get container status \"bb6a7eef37a247e6e0eaaa17e2b10e4afc7b1b1032f493709b0fed75abf884df\": rpc error: code = NotFound desc = could not find container \"bb6a7eef37a247e6e0eaaa17e2b10e4afc7b1b1032f493709b0fed75abf884df\": container with ID starting with bb6a7eef37a247e6e0eaaa17e2b10e4afc7b1b1032f493709b0fed75abf884df not found: ID does not exist" Jan 22 06:09:16 crc kubenswrapper[4933]: I0122 06:09:16.039083 4933 scope.go:117] "RemoveContainer" containerID="480d1b66c4dcca2c067fc5041e62b5b6640e85f510d6d67079fc834d3bbb885a" Jan 22 06:09:16 crc kubenswrapper[4933]: E0122 06:09:16.039578 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"480d1b66c4dcca2c067fc5041e62b5b6640e85f510d6d67079fc834d3bbb885a\": container with ID starting with 480d1b66c4dcca2c067fc5041e62b5b6640e85f510d6d67079fc834d3bbb885a not found: ID does not exist" containerID="480d1b66c4dcca2c067fc5041e62b5b6640e85f510d6d67079fc834d3bbb885a" Jan 22 06:09:16 crc kubenswrapper[4933]: I0122 06:09:16.039601 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"480d1b66c4dcca2c067fc5041e62b5b6640e85f510d6d67079fc834d3bbb885a"} err="failed to get container status \"480d1b66c4dcca2c067fc5041e62b5b6640e85f510d6d67079fc834d3bbb885a\": rpc error: code = NotFound desc = could not find container \"480d1b66c4dcca2c067fc5041e62b5b6640e85f510d6d67079fc834d3bbb885a\": container with ID starting with 480d1b66c4dcca2c067fc5041e62b5b6640e85f510d6d67079fc834d3bbb885a not found: ID does not exist" Jan 22 06:09:16 crc kubenswrapper[4933]: I0122 06:09:16.039614 4933 scope.go:117] "RemoveContainer" containerID="7e0168931d199a81b487119803e1944a792da4293cd792448c0c7c6cb8c0b855" Jan 22 06:09:16 crc kubenswrapper[4933]: I0122 06:09:16.063002 4933 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1270da6b-1c9e-41b6-b628-c2eaef5d9daf-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:16 crc kubenswrapper[4933]: I0122 06:09:16.063033 4933 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:16 crc kubenswrapper[4933]: I0122 06:09:16.066530 4933 scope.go:117] "RemoveContainer" 
containerID="ec11285d9cdded033c8043b14c5171616e5845163e0327ea1a11b2d67c958235" Jan 22 06:09:16 crc kubenswrapper[4933]: I0122 06:09:16.084627 4933 scope.go:117] "RemoveContainer" containerID="38208bba151b5aef67b3039fba534cb60f5a8a95e9c63a84f708a2f3f493fde6" Jan 22 06:09:16 crc kubenswrapper[4933]: I0122 06:09:16.106915 4933 scope.go:117] "RemoveContainer" containerID="38208bba151b5aef67b3039fba534cb60f5a8a95e9c63a84f708a2f3f493fde6" Jan 22 06:09:16 crc kubenswrapper[4933]: E0122 06:09:16.107245 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"38208bba151b5aef67b3039fba534cb60f5a8a95e9c63a84f708a2f3f493fde6\": container with ID starting with 38208bba151b5aef67b3039fba534cb60f5a8a95e9c63a84f708a2f3f493fde6 not found: ID does not exist" containerID="38208bba151b5aef67b3039fba534cb60f5a8a95e9c63a84f708a2f3f493fde6" Jan 22 06:09:16 crc kubenswrapper[4933]: I0122 06:09:16.107274 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38208bba151b5aef67b3039fba534cb60f5a8a95e9c63a84f708a2f3f493fde6"} err="failed to get container status \"38208bba151b5aef67b3039fba534cb60f5a8a95e9c63a84f708a2f3f493fde6\": rpc error: code = NotFound desc = could not find container \"38208bba151b5aef67b3039fba534cb60f5a8a95e9c63a84f708a2f3f493fde6\": container with ID starting with 38208bba151b5aef67b3039fba534cb60f5a8a95e9c63a84f708a2f3f493fde6 not found: ID does not exist" Jan 22 06:09:16 crc kubenswrapper[4933]: I0122 06:09:16.502013 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9" path="/var/lib/kubelet/pods/0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9/volumes" Jan 22 06:09:16 crc kubenswrapper[4933]: I0122 06:09:16.502737 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d8e9d8c-961f-4dc5-84b8-51c486220cdc" path="/var/lib/kubelet/pods/1d8e9d8c-961f-4dc5-84b8-51c486220cdc/volumes" Jan 22 06:09:16 crc kubenswrapper[4933]: I0122 06:09:16.503053 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2a218455-793d-4ccf-880a-d89b28e98b2d" path="/var/lib/kubelet/pods/2a218455-793d-4ccf-880a-d89b28e98b2d/volumes" Jan 22 06:09:16 crc kubenswrapper[4933]: I0122 06:09:16.503511 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4886348b-6078-41dc-8fab-a8e2e1c4898d" path="/var/lib/kubelet/pods/4886348b-6078-41dc-8fab-a8e2e1c4898d/volumes" Jan 22 06:09:16 crc kubenswrapper[4933]: I0122 06:09:16.504488 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="56625c99-64dc-4742-9927-0210d8fe8d9d" path="/var/lib/kubelet/pods/56625c99-64dc-4742-9927-0210d8fe8d9d/volumes" Jan 22 06:09:16 crc kubenswrapper[4933]: I0122 06:09:16.505065 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="578144dd-08a3-4c4b-8dd3-38ebe2d4dde8" path="/var/lib/kubelet/pods/578144dd-08a3-4c4b-8dd3-38ebe2d4dde8/volumes" Jan 22 06:09:16 crc kubenswrapper[4933]: I0122 06:09:16.506173 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="603c9f42-93c4-4268-b513-d2309571ac20" path="/var/lib/kubelet/pods/603c9f42-93c4-4268-b513-d2309571ac20/volumes" Jan 22 06:09:16 crc kubenswrapper[4933]: I0122 06:09:16.506686 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="67bcc01e-7a5e-4f76-889e-d6ed60745cf4" path="/var/lib/kubelet/pods/67bcc01e-7a5e-4f76-889e-d6ed60745cf4/volumes" Jan 22 06:09:16 crc kubenswrapper[4933]: I0122 
06:09:16.507043 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6fae4840-8fac-4192-8358-cbcae518e70d" path="/var/lib/kubelet/pods/6fae4840-8fac-4192-8358-cbcae518e70d/volumes" Jan 22 06:09:16 crc kubenswrapper[4933]: I0122 06:09:16.507464 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="833ac15e-6498-4beb-a0da-b8e600653e3e" path="/var/lib/kubelet/pods/833ac15e-6498-4beb-a0da-b8e600653e3e/volumes" Jan 22 06:09:16 crc kubenswrapper[4933]: I0122 06:09:16.508230 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="841bd4c5-516a-406d-af7f-8d551b970cab" path="/var/lib/kubelet/pods/841bd4c5-516a-406d-af7f-8d551b970cab/volumes" Jan 22 06:09:16 crc kubenswrapper[4933]: I0122 06:09:16.508581 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="91864da0-319b-46e9-b4ef-8ccee4c52d37" path="/var/lib/kubelet/pods/91864da0-319b-46e9-b4ef-8ccee4c52d37/volumes" Jan 22 06:09:16 crc kubenswrapper[4933]: I0122 06:09:16.509157 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9445b2f3-83ea-4e79-8312-ceffa2208f77" path="/var/lib/kubelet/pods/9445b2f3-83ea-4e79-8312-ceffa2208f77/volumes" Jan 22 06:09:16 crc kubenswrapper[4933]: I0122 06:09:16.510173 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a2bcbc4b-30c4-4ec8-81bf-6cba18171506" path="/var/lib/kubelet/pods/a2bcbc4b-30c4-4ec8-81bf-6cba18171506/volumes" Jan 22 06:09:16 crc kubenswrapper[4933]: I0122 06:09:16.510769 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b4c8b893-2e30-4273-bbec-7ff7efee686e" path="/var/lib/kubelet/pods/b4c8b893-2e30-4273-bbec-7ff7efee686e/volumes" Jan 22 06:09:16 crc kubenswrapper[4933]: I0122 06:09:16.511640 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194" path="/var/lib/kubelet/pods/d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194/volumes" Jan 22 06:09:16 crc kubenswrapper[4933]: I0122 06:09:16.512662 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e0d0b42d-5a68-46ba-a0de-f26c8dab1af8" path="/var/lib/kubelet/pods/e0d0b42d-5a68-46ba-a0de-f26c8dab1af8/volumes" Jan 22 06:09:16 crc kubenswrapper[4933]: I0122 06:09:16.568891 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"1270da6b-1c9e-41b6-b628-c2eaef5d9daf","Type":"ContainerDied","Data":"670f97d2d0f9cb510deb369cedda881b1b1cec9255583ae6dea27865a4173a3f"} Jan 22 06:09:16 crc kubenswrapper[4933]: I0122 06:09:16.568947 4933 scope.go:117] "RemoveContainer" containerID="728628962bfae73d3592f2a149b8ee7e86c1b9649701dc3ff1d779b1a0028722" Jan 22 06:09:16 crc kubenswrapper[4933]: I0122 06:09:16.569017 4933 util.go:48] "No ready sandbox for pod can be found. 
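The burst of "Cleaned up orphaned pod volumes dir" entries above is the kubelet's periodic housekeeping: for pod UIDs the node no longer tracks, it removes /var/lib/kubelet/pods/<podUID>/volumes once the volumes are unmounted. A minimal, hypothetical sketch of that scan (not the kubelet's actual implementation, which also verifies the volumes are safely unmounted first):

// Hypothetical sketch of the cleanup recorded by the
// "Cleaned up orphaned pod volumes dir" entries above.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func cleanupOrphanedVolumes(podsRoot string, knownPods map[string]bool) error {
	entries, err := os.ReadDir(podsRoot)
	if err != nil {
		return err
	}
	for _, e := range entries {
		if !e.IsDir() || knownPods[e.Name()] {
			continue // still a live pod, or not a per-pod directory
		}
		volumes := filepath.Join(podsRoot, e.Name(), "volumes")
		// Assumption: volumes under this dir are already unmounted.
		if err := os.RemoveAll(volumes); err != nil {
			return fmt.Errorf("cleaning %s: %w", volumes, err)
		}
		fmt.Printf("Cleaned up orphaned pod volumes dir podUID=%q path=%q\n", e.Name(), volumes)
	}
	return nil
}

func main() {
	_ = cleanupOrphanedVolumes("/var/lib/kubelet/pods", map[string]bool{})
}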
Need to start a new one" pod="openstack/openstack-galera-0" Jan 22 06:09:16 crc kubenswrapper[4933]: I0122 06:09:16.646945 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"] Jan 22 06:09:16 crc kubenswrapper[4933]: I0122 06:09:16.653392 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-galera-0"] Jan 22 06:09:16 crc kubenswrapper[4933]: I0122 06:09:16.657578 4933 scope.go:117] "RemoveContainer" containerID="48a217a83e3da85312b55ef33ee0ad86d71c50c2b1e74dfbffefed794bc3c01e" Jan 22 06:09:16 crc kubenswrapper[4933]: E0122 06:09:16.981185 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="727aeb5a63faa31a31c03450f9d3d8823575f9ab0abd3ff2f9a00a0f91ec8597" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 22 06:09:16 crc kubenswrapper[4933]: E0122 06:09:16.982256 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="727aeb5a63faa31a31c03450f9d3d8823575f9ab0abd3ff2f9a00a0f91ec8597" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 22 06:09:16 crc kubenswrapper[4933]: E0122 06:09:16.983286 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="727aeb5a63faa31a31c03450f9d3d8823575f9ab0abd3ff2f9a00a0f91ec8597" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 22 06:09:16 crc kubenswrapper[4933]: E0122 06:09:16.983324 4933 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="cf956626-51e3-4aff-b24b-4a553160327c" containerName="nova-scheduler-scheduler" Jan 22 06:09:16 crc kubenswrapper[4933]: I0122 06:09:16.993789 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-7d459c58f9-bc2hf" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.079401 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-combined-ca-bundle\") pod \"c16f2ab8-68b3-43fa-a862-c182aaa3dc23\" (UID: \"c16f2ab8-68b3-43fa-a862-c182aaa3dc23\") " Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.079762 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-credential-keys\") pod \"c16f2ab8-68b3-43fa-a862-c182aaa3dc23\" (UID: \"c16f2ab8-68b3-43fa-a862-c182aaa3dc23\") " Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.079823 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-config-data\") pod \"c16f2ab8-68b3-43fa-a862-c182aaa3dc23\" (UID: \"c16f2ab8-68b3-43fa-a862-c182aaa3dc23\") " Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.079859 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lb62m\" (UniqueName: \"kubernetes.io/projected/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-kube-api-access-lb62m\") pod \"c16f2ab8-68b3-43fa-a862-c182aaa3dc23\" (UID: \"c16f2ab8-68b3-43fa-a862-c182aaa3dc23\") " Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.079886 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-fernet-keys\") pod \"c16f2ab8-68b3-43fa-a862-c182aaa3dc23\" (UID: \"c16f2ab8-68b3-43fa-a862-c182aaa3dc23\") " Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.079906 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-scripts\") pod \"c16f2ab8-68b3-43fa-a862-c182aaa3dc23\" (UID: \"c16f2ab8-68b3-43fa-a862-c182aaa3dc23\") " Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.079990 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-internal-tls-certs\") pod \"c16f2ab8-68b3-43fa-a862-c182aaa3dc23\" (UID: \"c16f2ab8-68b3-43fa-a862-c182aaa3dc23\") " Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.080031 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-public-tls-certs\") pod \"c16f2ab8-68b3-43fa-a862-c182aaa3dc23\" (UID: \"c16f2ab8-68b3-43fa-a862-c182aaa3dc23\") " Jan 22 06:09:17 crc kubenswrapper[4933]: E0122 06:09:17.080443 4933 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Jan 22 06:09:17 crc kubenswrapper[4933]: E0122 06:09:17.080497 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/47299478-bcfd-4f21-a56c-efcf7b167999-config-data podName:47299478-bcfd-4f21-a56c-efcf7b167999 nodeName:}" failed. No retries permitted until 2026-01-22 06:09:25.080482354 +0000 UTC m=+1412.917607707 (durationBeforeRetry 8s). 
Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.084667 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-kube-api-access-lb62m" (OuterVolumeSpecName: "kube-api-access-lb62m") pod "c16f2ab8-68b3-43fa-a862-c182aaa3dc23" (UID: "c16f2ab8-68b3-43fa-a862-c182aaa3dc23"). InnerVolumeSpecName "kube-api-access-lb62m". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.085276 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-scripts" (OuterVolumeSpecName: "scripts") pod "c16f2ab8-68b3-43fa-a862-c182aaa3dc23" (UID: "c16f2ab8-68b3-43fa-a862-c182aaa3dc23"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.085357 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "c16f2ab8-68b3-43fa-a862-c182aaa3dc23" (UID: "c16f2ab8-68b3-43fa-a862-c182aaa3dc23"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.095882 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "c16f2ab8-68b3-43fa-a862-c182aaa3dc23" (UID: "c16f2ab8-68b3-43fa-a862-c182aaa3dc23"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.100500 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c16f2ab8-68b3-43fa-a862-c182aaa3dc23" (UID: "c16f2ab8-68b3-43fa-a862-c182aaa3dc23"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.145397 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-config-data" (OuterVolumeSpecName: "config-data") pod "c16f2ab8-68b3-43fa-a862-c182aaa3dc23" (UID: "c16f2ab8-68b3-43fa-a862-c182aaa3dc23"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.151674 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "c16f2ab8-68b3-43fa-a862-c182aaa3dc23" (UID: "c16f2ab8-68b3-43fa-a862-c182aaa3dc23"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.154052 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "c16f2ab8-68b3-43fa-a862-c182aaa3dc23" (UID: "c16f2ab8-68b3-43fa-a862-c182aaa3dc23"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.181534 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.181568 4933 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-credential-keys\") on node \"crc\" DevicePath \"\""
Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.181580 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-config-data\") on node \"crc\" DevicePath \"\""
Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.181591 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lb62m\" (UniqueName: \"kubernetes.io/projected/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-kube-api-access-lb62m\") on node \"crc\" DevicePath \"\""
Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.181603 4933 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-fernet-keys\") on node \"crc\" DevicePath \"\""
Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.181614 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-scripts\") on node \"crc\" DevicePath \"\""
Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.181625 4933 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.181634 4933 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c16f2ab8-68b3-43fa-a862-c182aaa3dc23-public-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.563851 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.581379 4933 generic.go:334] "Generic (PLEG): container finished" podID="c16f2ab8-68b3-43fa-a862-c182aaa3dc23" containerID="abbc72b7cf0aa36b4904038dc83404e06c706971f42e299cdcaa546f6ea3f0e7" exitCode=0
Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.581420 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7d459c58f9-bc2hf" event={"ID":"c16f2ab8-68b3-43fa-a862-c182aaa3dc23","Type":"ContainerDied","Data":"abbc72b7cf0aa36b4904038dc83404e06c706971f42e299cdcaa546f6ea3f0e7"}
Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.581442 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7d459c58f9-bc2hf" event={"ID":"c16f2ab8-68b3-43fa-a862-c182aaa3dc23","Type":"ContainerDied","Data":"ac8e3f1db194ec4cc7df2929d79be99c4c7f4bcbf1e2b486e2ea5a0322300503"}
Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.581458 4933 scope.go:117] "RemoveContainer" containerID="abbc72b7cf0aa36b4904038dc83404e06c706971f42e299cdcaa546f6ea3f0e7"
Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.581536 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-7d459c58f9-bc2hf"
Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.594288 4933 generic.go:334] "Generic (PLEG): container finished" podID="4d712958-1ece-47de-9798-6e852b03c565" containerID="c1710389b2dae67dfbd6fae597c0b78a024bc303c5cb265a90023ca99e2818b9" exitCode=0
Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.594373 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"4d712958-1ece-47de-9798-6e852b03c565","Type":"ContainerDied","Data":"c1710389b2dae67dfbd6fae597c0b78a024bc303c5cb265a90023ca99e2818b9"}
Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.594419 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"4d712958-1ece-47de-9798-6e852b03c565","Type":"ContainerDied","Data":"7e93c9faa209124fdc4fc8894fd120b5e20dc803c2d608ce5d6c1cc292ca8b16"}
Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.594499 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.598609 4933 generic.go:334] "Generic (PLEG): container finished" podID="47299478-bcfd-4f21-a56c-efcf7b167999" containerID="9c99762ed66dc820d592fe5b2a44c175901d1c948185099ec445d18b9d3c9e4e" exitCode=0
Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.598746 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"47299478-bcfd-4f21-a56c-efcf7b167999","Type":"ContainerDied","Data":"9c99762ed66dc820d592fe5b2a44c175901d1c948185099ec445d18b9d3c9e4e"}
Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.629701 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-7d459c58f9-bc2hf"]
Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.633066 4933 scope.go:117] "RemoveContainer" containerID="abbc72b7cf0aa36b4904038dc83404e06c706971f42e299cdcaa546f6ea3f0e7"
Jan 22 06:09:17 crc kubenswrapper[4933]: E0122 06:09:17.633802 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"abbc72b7cf0aa36b4904038dc83404e06c706971f42e299cdcaa546f6ea3f0e7\": container with ID starting with abbc72b7cf0aa36b4904038dc83404e06c706971f42e299cdcaa546f6ea3f0e7 not found: ID does not exist" containerID="abbc72b7cf0aa36b4904038dc83404e06c706971f42e299cdcaa546f6ea3f0e7"
Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.633882 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"abbc72b7cf0aa36b4904038dc83404e06c706971f42e299cdcaa546f6ea3f0e7"} err="failed to get container status \"abbc72b7cf0aa36b4904038dc83404e06c706971f42e299cdcaa546f6ea3f0e7\": rpc error: code = NotFound desc = could not find container \"abbc72b7cf0aa36b4904038dc83404e06c706971f42e299cdcaa546f6ea3f0e7\": container with ID starting with abbc72b7cf0aa36b4904038dc83404e06c706971f42e299cdcaa546f6ea3f0e7 not found: ID does not exist"
Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.633910 4933 scope.go:117] "RemoveContainer" containerID="c1710389b2dae67dfbd6fae597c0b78a024bc303c5cb265a90023ca99e2818b9"
Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.633811 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-7d459c58f9-bc2hf"]
Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.649511 4933 scope.go:117] "RemoveContainer" containerID="452184a465d8d1be0e80a527dbc5f992b7ee47b495271d69908b57e67655f195"
Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.651481 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.673245 4933 scope.go:117] "RemoveContainer" containerID="c1710389b2dae67dfbd6fae597c0b78a024bc303c5cb265a90023ca99e2818b9" Jan 22 06:09:17 crc kubenswrapper[4933]: E0122 06:09:17.673891 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c1710389b2dae67dfbd6fae597c0b78a024bc303c5cb265a90023ca99e2818b9\": container with ID starting with c1710389b2dae67dfbd6fae597c0b78a024bc303c5cb265a90023ca99e2818b9 not found: ID does not exist" containerID="c1710389b2dae67dfbd6fae597c0b78a024bc303c5cb265a90023ca99e2818b9" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.673955 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c1710389b2dae67dfbd6fae597c0b78a024bc303c5cb265a90023ca99e2818b9"} err="failed to get container status \"c1710389b2dae67dfbd6fae597c0b78a024bc303c5cb265a90023ca99e2818b9\": rpc error: code = NotFound desc = could not find container \"c1710389b2dae67dfbd6fae597c0b78a024bc303c5cb265a90023ca99e2818b9\": container with ID starting with c1710389b2dae67dfbd6fae597c0b78a024bc303c5cb265a90023ca99e2818b9 not found: ID does not exist" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.673984 4933 scope.go:117] "RemoveContainer" containerID="452184a465d8d1be0e80a527dbc5f992b7ee47b495271d69908b57e67655f195" Jan 22 06:09:17 crc kubenswrapper[4933]: E0122 06:09:17.674696 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"452184a465d8d1be0e80a527dbc5f992b7ee47b495271d69908b57e67655f195\": container with ID starting with 452184a465d8d1be0e80a527dbc5f992b7ee47b495271d69908b57e67655f195 not found: ID does not exist" containerID="452184a465d8d1be0e80a527dbc5f992b7ee47b495271d69908b57e67655f195" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.674756 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"452184a465d8d1be0e80a527dbc5f992b7ee47b495271d69908b57e67655f195"} err="failed to get container status \"452184a465d8d1be0e80a527dbc5f992b7ee47b495271d69908b57e67655f195\": rpc error: code = NotFound desc = could not find container \"452184a465d8d1be0e80a527dbc5f992b7ee47b495271d69908b57e67655f195\": container with ID starting with 452184a465d8d1be0e80a527dbc5f992b7ee47b495271d69908b57e67655f195 not found: ID does not exist" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.692491 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4d712958-1ece-47de-9798-6e852b03c565-config-data\") pod \"4d712958-1ece-47de-9798-6e852b03c565\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.692559 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4d712958-1ece-47de-9798-6e852b03c565-server-conf\") pod \"4d712958-1ece-47de-9798-6e852b03c565\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.692591 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4d712958-1ece-47de-9798-6e852b03c565-pod-info\") pod \"4d712958-1ece-47de-9798-6e852b03c565\" (UID: 
\"4d712958-1ece-47de-9798-6e852b03c565\") " Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.692626 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c6ctq\" (UniqueName: \"kubernetes.io/projected/4d712958-1ece-47de-9798-6e852b03c565-kube-api-access-c6ctq\") pod \"4d712958-1ece-47de-9798-6e852b03c565\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.692648 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4d712958-1ece-47de-9798-6e852b03c565-plugins-conf\") pod \"4d712958-1ece-47de-9798-6e852b03c565\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.692678 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4d712958-1ece-47de-9798-6e852b03c565-rabbitmq-erlang-cookie\") pod \"4d712958-1ece-47de-9798-6e852b03c565\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.692707 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4d712958-1ece-47de-9798-6e852b03c565-rabbitmq-plugins\") pod \"4d712958-1ece-47de-9798-6e852b03c565\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.692768 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4d712958-1ece-47de-9798-6e852b03c565-rabbitmq-confd\") pod \"4d712958-1ece-47de-9798-6e852b03c565\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.692805 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4d712958-1ece-47de-9798-6e852b03c565-rabbitmq-tls\") pod \"4d712958-1ece-47de-9798-6e852b03c565\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.692895 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4d712958-1ece-47de-9798-6e852b03c565-erlang-cookie-secret\") pod \"4d712958-1ece-47de-9798-6e852b03c565\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.692943 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"4d712958-1ece-47de-9798-6e852b03c565\" (UID: \"4d712958-1ece-47de-9798-6e852b03c565\") " Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.693328 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4d712958-1ece-47de-9798-6e852b03c565-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "4d712958-1ece-47de-9798-6e852b03c565" (UID: "4d712958-1ece-47de-9798-6e852b03c565"). InnerVolumeSpecName "rabbitmq-erlang-cookie". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.693489 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d712958-1ece-47de-9798-6e852b03c565-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "4d712958-1ece-47de-9798-6e852b03c565" (UID: "4d712958-1ece-47de-9798-6e852b03c565"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.693896 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4d712958-1ece-47de-9798-6e852b03c565-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "4d712958-1ece-47de-9798-6e852b03c565" (UID: "4d712958-1ece-47de-9798-6e852b03c565"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.696746 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/4d712958-1ece-47de-9798-6e852b03c565-pod-info" (OuterVolumeSpecName: "pod-info") pod "4d712958-1ece-47de-9798-6e852b03c565" (UID: "4d712958-1ece-47de-9798-6e852b03c565"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.696787 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d712958-1ece-47de-9798-6e852b03c565-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "4d712958-1ece-47de-9798-6e852b03c565" (UID: "4d712958-1ece-47de-9798-6e852b03c565"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.699247 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d712958-1ece-47de-9798-6e852b03c565-kube-api-access-c6ctq" (OuterVolumeSpecName: "kube-api-access-c6ctq") pod "4d712958-1ece-47de-9798-6e852b03c565" (UID: "4d712958-1ece-47de-9798-6e852b03c565"). InnerVolumeSpecName "kube-api-access-c6ctq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.701521 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "persistence") pod "4d712958-1ece-47de-9798-6e852b03c565" (UID: "4d712958-1ece-47de-9798-6e852b03c565"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.702239 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d712958-1ece-47de-9798-6e852b03c565-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "4d712958-1ece-47de-9798-6e852b03c565" (UID: "4d712958-1ece-47de-9798-6e852b03c565"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.712682 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d712958-1ece-47de-9798-6e852b03c565-config-data" (OuterVolumeSpecName: "config-data") pod "4d712958-1ece-47de-9798-6e852b03c565" (UID: "4d712958-1ece-47de-9798-6e852b03c565"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.731033 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d712958-1ece-47de-9798-6e852b03c565-server-conf" (OuterVolumeSpecName: "server-conf") pod "4d712958-1ece-47de-9798-6e852b03c565" (UID: "4d712958-1ece-47de-9798-6e852b03c565"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.761993 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d712958-1ece-47de-9798-6e852b03c565-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "4d712958-1ece-47de-9798-6e852b03c565" (UID: "4d712958-1ece-47de-9798-6e852b03c565"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.794580 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/47299478-bcfd-4f21-a56c-efcf7b167999-config-data\") pod \"47299478-bcfd-4f21-a56c-efcf7b167999\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.794861 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/47299478-bcfd-4f21-a56c-efcf7b167999-plugins-conf\") pod \"47299478-bcfd-4f21-a56c-efcf7b167999\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.794985 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/47299478-bcfd-4f21-a56c-efcf7b167999-pod-info\") pod \"47299478-bcfd-4f21-a56c-efcf7b167999\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.795119 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/47299478-bcfd-4f21-a56c-efcf7b167999-rabbitmq-erlang-cookie\") pod \"47299478-bcfd-4f21-a56c-efcf7b167999\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.795296 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/47299478-bcfd-4f21-a56c-efcf7b167999-server-conf\") pod \"47299478-bcfd-4f21-a56c-efcf7b167999\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.795766 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/47299478-bcfd-4f21-a56c-efcf7b167999-rabbitmq-tls\") pod \"47299478-bcfd-4f21-a56c-efcf7b167999\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.795959 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-blrb2\" (UniqueName: \"kubernetes.io/projected/47299478-bcfd-4f21-a56c-efcf7b167999-kube-api-access-blrb2\") pod \"47299478-bcfd-4f21-a56c-efcf7b167999\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.796145 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/47299478-bcfd-4f21-a56c-efcf7b167999-erlang-cookie-secret\") pod \"47299478-bcfd-4f21-a56c-efcf7b167999\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.795458 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/47299478-bcfd-4f21-a56c-efcf7b167999-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "47299478-bcfd-4f21-a56c-efcf7b167999" (UID: "47299478-bcfd-4f21-a56c-efcf7b167999"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.795535 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/47299478-bcfd-4f21-a56c-efcf7b167999-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "47299478-bcfd-4f21-a56c-efcf7b167999" (UID: "47299478-bcfd-4f21-a56c-efcf7b167999"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.796508 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/47299478-bcfd-4f21-a56c-efcf7b167999-rabbitmq-confd\") pod \"47299478-bcfd-4f21-a56c-efcf7b167999\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.796638 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"47299478-bcfd-4f21-a56c-efcf7b167999\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.796823 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/47299478-bcfd-4f21-a56c-efcf7b167999-rabbitmq-plugins\") pod \"47299478-bcfd-4f21-a56c-efcf7b167999\" (UID: \"47299478-bcfd-4f21-a56c-efcf7b167999\") " Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.797393 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/47299478-bcfd-4f21-a56c-efcf7b167999-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "47299478-bcfd-4f21-a56c-efcf7b167999" (UID: "47299478-bcfd-4f21-a56c-efcf7b167999"). InnerVolumeSpecName "rabbitmq-plugins". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.797973 4933 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4d712958-1ece-47de-9798-6e852b03c565-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.798221 4933 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/47299478-bcfd-4f21-a56c-efcf7b167999-plugins-conf\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.798378 4933 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/47299478-bcfd-4f21-a56c-efcf7b167999-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.798819 4933 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4d712958-1ece-47de-9798-6e852b03c565-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.798952 4933 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4d712958-1ece-47de-9798-6e852b03c565-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.799103 4933 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4d712958-1ece-47de-9798-6e852b03c565-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.798511 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/47299478-bcfd-4f21-a56c-efcf7b167999-pod-info" (OuterVolumeSpecName: "pod-info") pod "47299478-bcfd-4f21-a56c-efcf7b167999" (UID: "47299478-bcfd-4f21-a56c-efcf7b167999"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.798975 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47299478-bcfd-4f21-a56c-efcf7b167999-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "47299478-bcfd-4f21-a56c-efcf7b167999" (UID: "47299478-bcfd-4f21-a56c-efcf7b167999"). InnerVolumeSpecName "erlang-cookie-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.799269 4933 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.799318 4933 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/47299478-bcfd-4f21-a56c-efcf7b167999-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.799331 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4d712958-1ece-47de-9798-6e852b03c565-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.799342 4933 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4d712958-1ece-47de-9798-6e852b03c565-server-conf\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.799352 4933 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4d712958-1ece-47de-9798-6e852b03c565-pod-info\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.799363 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c6ctq\" (UniqueName: \"kubernetes.io/projected/4d712958-1ece-47de-9798-6e852b03c565-kube-api-access-c6ctq\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.799374 4933 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4d712958-1ece-47de-9798-6e852b03c565-plugins-conf\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.799386 4933 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4d712958-1ece-47de-9798-6e852b03c565-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.799708 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/47299478-bcfd-4f21-a56c-efcf7b167999-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "47299478-bcfd-4f21-a56c-efcf7b167999" (UID: "47299478-bcfd-4f21-a56c-efcf7b167999"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.800138 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "persistence") pod "47299478-bcfd-4f21-a56c-efcf7b167999" (UID: "47299478-bcfd-4f21-a56c-efcf7b167999"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.802120 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/47299478-bcfd-4f21-a56c-efcf7b167999-kube-api-access-blrb2" (OuterVolumeSpecName: "kube-api-access-blrb2") pod "47299478-bcfd-4f21-a56c-efcf7b167999" (UID: "47299478-bcfd-4f21-a56c-efcf7b167999"). InnerVolumeSpecName "kube-api-access-blrb2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.811478 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/47299478-bcfd-4f21-a56c-efcf7b167999-config-data" (OuterVolumeSpecName: "config-data") pod "47299478-bcfd-4f21-a56c-efcf7b167999" (UID: "47299478-bcfd-4f21-a56c-efcf7b167999"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.821585 4933 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.827889 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/47299478-bcfd-4f21-a56c-efcf7b167999-server-conf" (OuterVolumeSpecName: "server-conf") pod "47299478-bcfd-4f21-a56c-efcf7b167999" (UID: "47299478-bcfd-4f21-a56c-efcf7b167999"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.865868 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/47299478-bcfd-4f21-a56c-efcf7b167999-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "47299478-bcfd-4f21-a56c-efcf7b167999" (UID: "47299478-bcfd-4f21-a56c-efcf7b167999"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.901213 4933 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/47299478-bcfd-4f21-a56c-efcf7b167999-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.901288 4933 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.901318 4933 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.901345 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/47299478-bcfd-4f21-a56c-efcf7b167999-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.901370 4933 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/47299478-bcfd-4f21-a56c-efcf7b167999-pod-info\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.901394 4933 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/47299478-bcfd-4f21-a56c-efcf7b167999-server-conf\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.901416 4933 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/47299478-bcfd-4f21-a56c-efcf7b167999-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.901442 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-blrb2\" 
(UniqueName: \"kubernetes.io/projected/47299478-bcfd-4f21-a56c-efcf7b167999-kube-api-access-blrb2\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.901465 4933 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/47299478-bcfd-4f21-a56c-efcf7b167999-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:17 crc kubenswrapper[4933]: I0122 06:09:17.932760 4933 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Jan 22 06:09:18 crc kubenswrapper[4933]: I0122 06:09:18.003284 4933 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:18 crc kubenswrapper[4933]: I0122 06:09:18.003930 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 22 06:09:18 crc kubenswrapper[4933]: I0122 06:09:18.012570 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 22 06:09:18 crc kubenswrapper[4933]: E0122 06:09:18.044254 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe is running failed: container process not found" containerID="4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 22 06:09:18 crc kubenswrapper[4933]: E0122 06:09:18.044709 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe is running failed: container process not found" containerID="4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 22 06:09:18 crc kubenswrapper[4933]: E0122 06:09:18.044997 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe is running failed: container process not found" containerID="4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 22 06:09:18 crc kubenswrapper[4933]: E0122 06:09:18.045025 4933 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-rwb6s" podUID="12629e2f-7d6e-417c-a8df-c15b7a3e794e" containerName="ovsdb-server" Jan 22 06:09:18 crc kubenswrapper[4933]: E0122 06:09:18.045666 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="fdbc1e77ca1a1d82a896e3c18520e64cbf781bfdafe1433a48e8546525073c90" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 22 06:09:18 crc kubenswrapper[4933]: E0122 06:09:18.047849 4933 
log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="fdbc1e77ca1a1d82a896e3c18520e64cbf781bfdafe1433a48e8546525073c90" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 22 06:09:18 crc kubenswrapper[4933]: E0122 06:09:18.056768 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="fdbc1e77ca1a1d82a896e3c18520e64cbf781bfdafe1433a48e8546525073c90" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 22 06:09:18 crc kubenswrapper[4933]: E0122 06:09:18.056976 4933 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-rwb6s" podUID="12629e2f-7d6e-417c-a8df-c15b7a3e794e" containerName="ovs-vswitchd" Jan 22 06:09:18 crc kubenswrapper[4933]: I0122 06:09:18.506720 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1270da6b-1c9e-41b6-b628-c2eaef5d9daf" path="/var/lib/kubelet/pods/1270da6b-1c9e-41b6-b628-c2eaef5d9daf/volumes" Jan 22 06:09:18 crc kubenswrapper[4933]: I0122 06:09:18.508021 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4d712958-1ece-47de-9798-6e852b03c565" path="/var/lib/kubelet/pods/4d712958-1ece-47de-9798-6e852b03c565/volumes" Jan 22 06:09:18 crc kubenswrapper[4933]: I0122 06:09:18.509174 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c16f2ab8-68b3-43fa-a862-c182aaa3dc23" path="/var/lib/kubelet/pods/c16f2ab8-68b3-43fa-a862-c182aaa3dc23/volumes" Jan 22 06:09:18 crc kubenswrapper[4933]: I0122 06:09:18.619563 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="603c9f42-93c4-4268-b513-d2309571ac20" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.166:8776/healthcheck\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 22 06:09:18 crc kubenswrapper[4933]: I0122 06:09:18.627545 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"47299478-bcfd-4f21-a56c-efcf7b167999","Type":"ContainerDied","Data":"f4435327f556a1535a4d5a954a3602c39782b2ebe8dfcef70552e445db24c9c0"} Jan 22 06:09:18 crc kubenswrapper[4933]: I0122 06:09:18.627595 4933 scope.go:117] "RemoveContainer" containerID="9c99762ed66dc820d592fe5b2a44c175901d1c948185099ec445d18b9d3c9e4e" Jan 22 06:09:18 crc kubenswrapper[4933]: I0122 06:09:18.627698 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 22 06:09:18 crc kubenswrapper[4933]: I0122 06:09:18.634501 4933 generic.go:334] "Generic (PLEG): container finished" podID="0fecb571-89ee-4d10-a1e3-e3755946df2b" containerID="19349f4fb699d96d982ef68a33cea6a25a5b8d3f3671b4a1b34a92f90876b922" exitCode=0 Jan 22 06:09:18 crc kubenswrapper[4933]: I0122 06:09:18.634551 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7c6944456-lk7l7" event={"ID":"0fecb571-89ee-4d10-a1e3-e3755946df2b","Type":"ContainerDied","Data":"19349f4fb699d96d982ef68a33cea6a25a5b8d3f3671b4a1b34a92f90876b922"} Jan 22 06:09:18 crc kubenswrapper[4933]: I0122 06:09:18.637773 4933 generic.go:334] "Generic (PLEG): container finished" podID="5505bed5-dba3-4067-b94c-acd00b7c37c7" containerID="45526307e9624de914d45bbd929b47ffd667a4044ab9422715456c34fe59622d" exitCode=0 Jan 22 06:09:18 crc kubenswrapper[4933]: I0122 06:09:18.637821 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6448b46975-jx7gp" event={"ID":"5505bed5-dba3-4067-b94c-acd00b7c37c7","Type":"ContainerDied","Data":"45526307e9624de914d45bbd929b47ffd667a4044ab9422715456c34fe59622d"} Jan 22 06:09:18 crc kubenswrapper[4933]: I0122 06:09:18.639390 4933 generic.go:334] "Generic (PLEG): container finished" podID="cf956626-51e3-4aff-b24b-4a553160327c" containerID="727aeb5a63faa31a31c03450f9d3d8823575f9ab0abd3ff2f9a00a0f91ec8597" exitCode=0 Jan 22 06:09:18 crc kubenswrapper[4933]: I0122 06:09:18.639413 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"cf956626-51e3-4aff-b24b-4a553160327c","Type":"ContainerDied","Data":"727aeb5a63faa31a31c03450f9d3d8823575f9ab0abd3ff2f9a00a0f91ec8597"} Jan 22 06:09:18 crc kubenswrapper[4933]: I0122 06:09:18.667155 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 22 06:09:18 crc kubenswrapper[4933]: I0122 06:09:18.684409 4933 scope.go:117] "RemoveContainer" containerID="83e14ee02b552e375ddb43f6d79d9fe6adc343bd3efcacc2ece24e2451dc5275" Jan 22 06:09:18 crc kubenswrapper[4933]: I0122 06:09:18.690623 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 22 06:09:18 crc kubenswrapper[4933]: I0122 06:09:18.817797 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-6448b46975-jx7gp" Jan 22 06:09:18 crc kubenswrapper[4933]: I0122 06:09:18.822840 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-7c6944456-lk7l7" Jan 22 06:09:18 crc kubenswrapper[4933]: I0122 06:09:18.918604 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5505bed5-dba3-4067-b94c-acd00b7c37c7-config-data-custom\") pod \"5505bed5-dba3-4067-b94c-acd00b7c37c7\" (UID: \"5505bed5-dba3-4067-b94c-acd00b7c37c7\") " Jan 22 06:09:18 crc kubenswrapper[4933]: I0122 06:09:18.918665 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5505bed5-dba3-4067-b94c-acd00b7c37c7-logs\") pod \"5505bed5-dba3-4067-b94c-acd00b7c37c7\" (UID: \"5505bed5-dba3-4067-b94c-acd00b7c37c7\") " Jan 22 06:09:18 crc kubenswrapper[4933]: I0122 06:09:18.918709 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rr2vd\" (UniqueName: \"kubernetes.io/projected/5505bed5-dba3-4067-b94c-acd00b7c37c7-kube-api-access-rr2vd\") pod \"5505bed5-dba3-4067-b94c-acd00b7c37c7\" (UID: \"5505bed5-dba3-4067-b94c-acd00b7c37c7\") " Jan 22 06:09:18 crc kubenswrapper[4933]: I0122 06:09:18.918754 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5505bed5-dba3-4067-b94c-acd00b7c37c7-combined-ca-bundle\") pod \"5505bed5-dba3-4067-b94c-acd00b7c37c7\" (UID: \"5505bed5-dba3-4067-b94c-acd00b7c37c7\") " Jan 22 06:09:18 crc kubenswrapper[4933]: I0122 06:09:18.918775 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5505bed5-dba3-4067-b94c-acd00b7c37c7-config-data\") pod \"5505bed5-dba3-4067-b94c-acd00b7c37c7\" (UID: \"5505bed5-dba3-4067-b94c-acd00b7c37c7\") " Jan 22 06:09:18 crc kubenswrapper[4933]: I0122 06:09:18.920788 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5505bed5-dba3-4067-b94c-acd00b7c37c7-logs" (OuterVolumeSpecName: "logs") pod "5505bed5-dba3-4067-b94c-acd00b7c37c7" (UID: "5505bed5-dba3-4067-b94c-acd00b7c37c7"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:09:18 crc kubenswrapper[4933]: I0122 06:09:18.924797 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5505bed5-dba3-4067-b94c-acd00b7c37c7-kube-api-access-rr2vd" (OuterVolumeSpecName: "kube-api-access-rr2vd") pod "5505bed5-dba3-4067-b94c-acd00b7c37c7" (UID: "5505bed5-dba3-4067-b94c-acd00b7c37c7"). InnerVolumeSpecName "kube-api-access-rr2vd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:18 crc kubenswrapper[4933]: I0122 06:09:18.932214 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5505bed5-dba3-4067-b94c-acd00b7c37c7-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "5505bed5-dba3-4067-b94c-acd00b7c37c7" (UID: "5505bed5-dba3-4067-b94c-acd00b7c37c7"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:18 crc kubenswrapper[4933]: I0122 06:09:18.944302 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5505bed5-dba3-4067-b94c-acd00b7c37c7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5505bed5-dba3-4067-b94c-acd00b7c37c7" (UID: "5505bed5-dba3-4067-b94c-acd00b7c37c7"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:18 crc kubenswrapper[4933]: I0122 06:09:18.987428 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5505bed5-dba3-4067-b94c-acd00b7c37c7-config-data" (OuterVolumeSpecName: "config-data") pod "5505bed5-dba3-4067-b94c-acd00b7c37c7" (UID: "5505bed5-dba3-4067-b94c-acd00b7c37c7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.020000 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fecb571-89ee-4d10-a1e3-e3755946df2b-combined-ca-bundle\") pod \"0fecb571-89ee-4d10-a1e3-e3755946df2b\" (UID: \"0fecb571-89ee-4d10-a1e3-e3755946df2b\") " Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.020123 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0fecb571-89ee-4d10-a1e3-e3755946df2b-config-data\") pod \"0fecb571-89ee-4d10-a1e3-e3755946df2b\" (UID: \"0fecb571-89ee-4d10-a1e3-e3755946df2b\") " Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.020185 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r2dqw\" (UniqueName: \"kubernetes.io/projected/0fecb571-89ee-4d10-a1e3-e3755946df2b-kube-api-access-r2dqw\") pod \"0fecb571-89ee-4d10-a1e3-e3755946df2b\" (UID: \"0fecb571-89ee-4d10-a1e3-e3755946df2b\") " Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.020216 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0fecb571-89ee-4d10-a1e3-e3755946df2b-logs\") pod \"0fecb571-89ee-4d10-a1e3-e3755946df2b\" (UID: \"0fecb571-89ee-4d10-a1e3-e3755946df2b\") " Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.020253 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0fecb571-89ee-4d10-a1e3-e3755946df2b-config-data-custom\") pod \"0fecb571-89ee-4d10-a1e3-e3755946df2b\" (UID: \"0fecb571-89ee-4d10-a1e3-e3755946df2b\") " Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.020605 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5505bed5-dba3-4067-b94c-acd00b7c37c7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.020631 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5505bed5-dba3-4067-b94c-acd00b7c37c7-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.020644 4933 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5505bed5-dba3-4067-b94c-acd00b7c37c7-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.020657 4933 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5505bed5-dba3-4067-b94c-acd00b7c37c7-logs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.020669 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rr2vd\" (UniqueName: 
\"kubernetes.io/projected/5505bed5-dba3-4067-b94c-acd00b7c37c7-kube-api-access-rr2vd\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.020758 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0fecb571-89ee-4d10-a1e3-e3755946df2b-logs" (OuterVolumeSpecName: "logs") pod "0fecb571-89ee-4d10-a1e3-e3755946df2b" (UID: "0fecb571-89ee-4d10-a1e3-e3755946df2b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.022688 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0fecb571-89ee-4d10-a1e3-e3755946df2b-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "0fecb571-89ee-4d10-a1e3-e3755946df2b" (UID: "0fecb571-89ee-4d10-a1e3-e3755946df2b"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.025022 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0fecb571-89ee-4d10-a1e3-e3755946df2b-kube-api-access-r2dqw" (OuterVolumeSpecName: "kube-api-access-r2dqw") pod "0fecb571-89ee-4d10-a1e3-e3755946df2b" (UID: "0fecb571-89ee-4d10-a1e3-e3755946df2b"). InnerVolumeSpecName "kube-api-access-r2dqw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.040266 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0fecb571-89ee-4d10-a1e3-e3755946df2b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0fecb571-89ee-4d10-a1e3-e3755946df2b" (UID: "0fecb571-89ee-4d10-a1e3-e3755946df2b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.041142 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.088861 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0fecb571-89ee-4d10-a1e3-e3755946df2b-config-data" (OuterVolumeSpecName: "config-data") pod "0fecb571-89ee-4d10-a1e3-e3755946df2b" (UID: "0fecb571-89ee-4d10-a1e3-e3755946df2b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.122447 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r2dqw\" (UniqueName: \"kubernetes.io/projected/0fecb571-89ee-4d10-a1e3-e3755946df2b-kube-api-access-r2dqw\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.122513 4933 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0fecb571-89ee-4d10-a1e3-e3755946df2b-logs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.122536 4933 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0fecb571-89ee-4d10-a1e3-e3755946df2b-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.122553 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fecb571-89ee-4d10-a1e3-e3755946df2b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.122567 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0fecb571-89ee-4d10-a1e3-e3755946df2b-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.223829 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf956626-51e3-4aff-b24b-4a553160327c-combined-ca-bundle\") pod \"cf956626-51e3-4aff-b24b-4a553160327c\" (UID: \"cf956626-51e3-4aff-b24b-4a553160327c\") " Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.223923 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vxg58\" (UniqueName: \"kubernetes.io/projected/cf956626-51e3-4aff-b24b-4a553160327c-kube-api-access-vxg58\") pod \"cf956626-51e3-4aff-b24b-4a553160327c\" (UID: \"cf956626-51e3-4aff-b24b-4a553160327c\") " Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.223971 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf956626-51e3-4aff-b24b-4a553160327c-config-data\") pod \"cf956626-51e3-4aff-b24b-4a553160327c\" (UID: \"cf956626-51e3-4aff-b24b-4a553160327c\") " Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.226629 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf956626-51e3-4aff-b24b-4a553160327c-kube-api-access-vxg58" (OuterVolumeSpecName: "kube-api-access-vxg58") pod "cf956626-51e3-4aff-b24b-4a553160327c" (UID: "cf956626-51e3-4aff-b24b-4a553160327c"). InnerVolumeSpecName "kube-api-access-vxg58". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.242784 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf956626-51e3-4aff-b24b-4a553160327c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cf956626-51e3-4aff-b24b-4a553160327c" (UID: "cf956626-51e3-4aff-b24b-4a553160327c"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.247666 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf956626-51e3-4aff-b24b-4a553160327c-config-data" (OuterVolumeSpecName: "config-data") pod "cf956626-51e3-4aff-b24b-4a553160327c" (UID: "cf956626-51e3-4aff-b24b-4a553160327c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.326051 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf956626-51e3-4aff-b24b-4a553160327c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.326766 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vxg58\" (UniqueName: \"kubernetes.io/projected/cf956626-51e3-4aff-b24b-4a553160327c-kube-api-access-vxg58\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.326795 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf956626-51e3-4aff-b24b-4a553160327c-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.649980 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6448b46975-jx7gp" event={"ID":"5505bed5-dba3-4067-b94c-acd00b7c37c7","Type":"ContainerDied","Data":"ee2bb284bfa725b795ef8face4a2108b16ca4a0f464eff6c1baa136a33a15c0a"} Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.650027 4933 scope.go:117] "RemoveContainer" containerID="45526307e9624de914d45bbd929b47ffd667a4044ab9422715456c34fe59622d" Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.650160 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-6448b46975-jx7gp" Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.652489 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"cf956626-51e3-4aff-b24b-4a553160327c","Type":"ContainerDied","Data":"518642500f820671c936da21dd017610349e0eef6ffdec8c64931e903ec802eb"} Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.652556 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.658902 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7c6944456-lk7l7" event={"ID":"0fecb571-89ee-4d10-a1e3-e3755946df2b","Type":"ContainerDied","Data":"9ab2c674b5e475c40a5e7792bf4e54e39a93136dd4fe22c3d2e0528252cbef03"} Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.658946 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-7c6944456-lk7l7" Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.687380 4933 scope.go:117] "RemoveContainer" containerID="ae64a47d0a256e71036f25f3770e6938d214f52aa76de1db83e4a3d607be7dbc" Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.701385 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.708448 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.717535 4933 scope.go:117] "RemoveContainer" containerID="727aeb5a63faa31a31c03450f9d3d8823575f9ab0abd3ff2f9a00a0f91ec8597" Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.720649 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-6448b46975-jx7gp"] Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.725512 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-worker-6448b46975-jx7gp"] Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.731347 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-7c6944456-lk7l7"] Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.735469 4933 scope.go:117] "RemoveContainer" containerID="19349f4fb699d96d982ef68a33cea6a25a5b8d3f3671b4a1b34a92f90876b922" Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.737780 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-keystone-listener-7c6944456-lk7l7"] Jan 22 06:09:19 crc kubenswrapper[4933]: I0122 06:09:19.753521 4933 scope.go:117] "RemoveContainer" containerID="11cdb1302043612d4966e8227c66cd55138cd37ef40b24e2659a9776bb49e386" Jan 22 06:09:20 crc kubenswrapper[4933]: I0122 06:09:20.503147 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0fecb571-89ee-4d10-a1e3-e3755946df2b" path="/var/lib/kubelet/pods/0fecb571-89ee-4d10-a1e3-e3755946df2b/volumes" Jan 22 06:09:20 crc kubenswrapper[4933]: I0122 06:09:20.506116 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="47299478-bcfd-4f21-a56c-efcf7b167999" path="/var/lib/kubelet/pods/47299478-bcfd-4f21-a56c-efcf7b167999/volumes" Jan 22 06:09:20 crc kubenswrapper[4933]: I0122 06:09:20.508505 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5505bed5-dba3-4067-b94c-acd00b7c37c7" path="/var/lib/kubelet/pods/5505bed5-dba3-4067-b94c-acd00b7c37c7/volumes" Jan 22 06:09:20 crc kubenswrapper[4933]: I0122 06:09:20.509869 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cf956626-51e3-4aff-b24b-4a553160327c" path="/var/lib/kubelet/pods/cf956626-51e3-4aff-b24b-4a553160327c/volumes" Jan 22 06:09:23 crc kubenswrapper[4933]: E0122 06:09:23.043222 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe is running failed: container process not found" containerID="4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 22 06:09:23 crc kubenswrapper[4933]: E0122 06:09:23.044008 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe 
is running failed: container process not found" containerID="4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 22 06:09:23 crc kubenswrapper[4933]: E0122 06:09:23.044339 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe is running failed: container process not found" containerID="4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 22 06:09:23 crc kubenswrapper[4933]: E0122 06:09:23.044424 4933 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-rwb6s" podUID="12629e2f-7d6e-417c-a8df-c15b7a3e794e" containerName="ovsdb-server" Jan 22 06:09:23 crc kubenswrapper[4933]: E0122 06:09:23.045137 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="fdbc1e77ca1a1d82a896e3c18520e64cbf781bfdafe1433a48e8546525073c90" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 22 06:09:23 crc kubenswrapper[4933]: E0122 06:09:23.047776 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="fdbc1e77ca1a1d82a896e3c18520e64cbf781bfdafe1433a48e8546525073c90" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 22 06:09:23 crc kubenswrapper[4933]: E0122 06:09:23.050474 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="fdbc1e77ca1a1d82a896e3c18520e64cbf781bfdafe1433a48e8546525073c90" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 22 06:09:23 crc kubenswrapper[4933]: E0122 06:09:23.050541 4933 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-rwb6s" podUID="12629e2f-7d6e-417c-a8df-c15b7a3e794e" containerName="ovs-vswitchd" Jan 22 06:09:28 crc kubenswrapper[4933]: E0122 06:09:28.043602 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe is running failed: container process not found" containerID="4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 22 06:09:28 crc kubenswrapper[4933]: E0122 06:09:28.044721 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe is running failed: container process not found" 
containerID="4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 22 06:09:28 crc kubenswrapper[4933]: E0122 06:09:28.045008 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe is running failed: container process not found" containerID="4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 22 06:09:28 crc kubenswrapper[4933]: E0122 06:09:28.045050 4933 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-rwb6s" podUID="12629e2f-7d6e-417c-a8df-c15b7a3e794e" containerName="ovsdb-server" Jan 22 06:09:28 crc kubenswrapper[4933]: E0122 06:09:28.045975 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="fdbc1e77ca1a1d82a896e3c18520e64cbf781bfdafe1433a48e8546525073c90" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 22 06:09:28 crc kubenswrapper[4933]: E0122 06:09:28.047185 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="fdbc1e77ca1a1d82a896e3c18520e64cbf781bfdafe1433a48e8546525073c90" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 22 06:09:28 crc kubenswrapper[4933]: E0122 06:09:28.048751 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="fdbc1e77ca1a1d82a896e3c18520e64cbf781bfdafe1433a48e8546525073c90" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 22 06:09:28 crc kubenswrapper[4933]: E0122 06:09:28.048782 4933 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-rwb6s" podUID="12629e2f-7d6e-417c-a8df-c15b7a3e794e" containerName="ovs-vswitchd" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.749981 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-w8wv6"] Jan 22 06:09:29 crc kubenswrapper[4933]: E0122 06:09:29.753018 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fae4840-8fac-4192-8358-cbcae518e70d" containerName="nova-metadata-metadata" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.753064 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fae4840-8fac-4192-8358-cbcae518e70d" containerName="nova-metadata-metadata" Jan 22 06:09:29 crc kubenswrapper[4933]: E0122 06:09:29.753137 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47299478-bcfd-4f21-a56c-efcf7b167999" containerName="setup-container" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.753156 4933 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="47299478-bcfd-4f21-a56c-efcf7b167999" containerName="setup-container" Jan 22 06:09:29 crc kubenswrapper[4933]: E0122 06:09:29.753183 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9445b2f3-83ea-4e79-8312-ceffa2208f77" containerName="ovn-northd" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.753200 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="9445b2f3-83ea-4e79-8312-ceffa2208f77" containerName="ovn-northd" Jan 22 06:09:29 crc kubenswrapper[4933]: E0122 06:09:29.753224 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fae4840-8fac-4192-8358-cbcae518e70d" containerName="nova-metadata-log" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.753237 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fae4840-8fac-4192-8358-cbcae518e70d" containerName="nova-metadata-log" Jan 22 06:09:29 crc kubenswrapper[4933]: E0122 06:09:29.753259 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2bcbc4b-30c4-4ec8-81bf-6cba18171506" containerName="glance-httpd" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.753271 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2bcbc4b-30c4-4ec8-81bf-6cba18171506" containerName="glance-httpd" Jan 22 06:09:29 crc kubenswrapper[4933]: E0122 06:09:29.753293 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1270da6b-1c9e-41b6-b628-c2eaef5d9daf" containerName="mysql-bootstrap" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.753306 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="1270da6b-1c9e-41b6-b628-c2eaef5d9daf" containerName="mysql-bootstrap" Jan 22 06:09:29 crc kubenswrapper[4933]: E0122 06:09:29.753328 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47299478-bcfd-4f21-a56c-efcf7b167999" containerName="rabbitmq" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.753339 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="47299478-bcfd-4f21-a56c-efcf7b167999" containerName="rabbitmq" Jan 22 06:09:29 crc kubenswrapper[4933]: E0122 06:09:29.753354 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="603c9f42-93c4-4268-b513-d2309571ac20" containerName="cinder-api" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.753366 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="603c9f42-93c4-4268-b513-d2309571ac20" containerName="cinder-api" Jan 22 06:09:29 crc kubenswrapper[4933]: E0122 06:09:29.753381 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="603c9f42-93c4-4268-b513-d2309571ac20" containerName="cinder-api-log" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.753393 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="603c9f42-93c4-4268-b513-d2309571ac20" containerName="cinder-api-log" Jan 22 06:09:29 crc kubenswrapper[4933]: E0122 06:09:29.753413 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf956626-51e3-4aff-b24b-4a553160327c" containerName="nova-scheduler-scheduler" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.753424 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf956626-51e3-4aff-b24b-4a553160327c" containerName="nova-scheduler-scheduler" Jan 22 06:09:29 crc kubenswrapper[4933]: E0122 06:09:29.753448 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d712958-1ece-47de-9798-6e852b03c565" containerName="rabbitmq" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.753463 4933 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="4d712958-1ece-47de-9798-6e852b03c565" containerName="rabbitmq" Jan 22 06:09:29 crc kubenswrapper[4933]: E0122 06:09:29.753483 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9" containerName="cinder-scheduler" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.753499 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9" containerName="cinder-scheduler" Jan 22 06:09:29 crc kubenswrapper[4933]: E0122 06:09:29.753527 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194" containerName="placement-log" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.753543 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194" containerName="placement-log" Jan 22 06:09:29 crc kubenswrapper[4933]: E0122 06:09:29.753562 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5505bed5-dba3-4067-b94c-acd00b7c37c7" containerName="barbican-worker" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.753575 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="5505bed5-dba3-4067-b94c-acd00b7c37c7" containerName="barbican-worker" Jan 22 06:09:29 crc kubenswrapper[4933]: E0122 06:09:29.753602 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4886348b-6078-41dc-8fab-a8e2e1c4898d" containerName="kube-state-metrics" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.753616 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="4886348b-6078-41dc-8fab-a8e2e1c4898d" containerName="kube-state-metrics" Jan 22 06:09:29 crc kubenswrapper[4933]: E0122 06:09:29.753634 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91864da0-319b-46e9-b4ef-8ccee4c52d37" containerName="barbican-api" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.753645 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="91864da0-319b-46e9-b4ef-8ccee4c52d37" containerName="barbican-api" Jan 22 06:09:29 crc kubenswrapper[4933]: E0122 06:09:29.753693 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="578144dd-08a3-4c4b-8dd3-38ebe2d4dde8" containerName="ceilometer-notification-agent" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.753706 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="578144dd-08a3-4c4b-8dd3-38ebe2d4dde8" containerName="ceilometer-notification-agent" Jan 22 06:09:29 crc kubenswrapper[4933]: E0122 06:09:29.753719 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="578144dd-08a3-4c4b-8dd3-38ebe2d4dde8" containerName="sg-core" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.753730 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="578144dd-08a3-4c4b-8dd3-38ebe2d4dde8" containerName="sg-core" Jan 22 06:09:29 crc kubenswrapper[4933]: E0122 06:09:29.753755 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2bcbc4b-30c4-4ec8-81bf-6cba18171506" containerName="glance-log" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.753768 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2bcbc4b-30c4-4ec8-81bf-6cba18171506" containerName="glance-log" Jan 22 06:09:29 crc kubenswrapper[4933]: E0122 06:09:29.753791 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d712958-1ece-47de-9798-6e852b03c565" containerName="setup-container" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.753802 4933 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="4d712958-1ece-47de-9798-6e852b03c565" containerName="setup-container" Jan 22 06:09:29 crc kubenswrapper[4933]: E0122 06:09:29.753818 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4c8b893-2e30-4273-bbec-7ff7efee686e" containerName="glance-httpd" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.753829 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4c8b893-2e30-4273-bbec-7ff7efee686e" containerName="glance-httpd" Jan 22 06:09:29 crc kubenswrapper[4933]: E0122 06:09:29.753859 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4c8b893-2e30-4273-bbec-7ff7efee686e" containerName="glance-log" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.753871 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4c8b893-2e30-4273-bbec-7ff7efee686e" containerName="glance-log" Jan 22 06:09:29 crc kubenswrapper[4933]: E0122 06:09:29.753892 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56625c99-64dc-4742-9927-0210d8fe8d9d" containerName="nova-api-log" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.753904 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="56625c99-64dc-4742-9927-0210d8fe8d9d" containerName="nova-api-log" Jan 22 06:09:29 crc kubenswrapper[4933]: E0122 06:09:29.753919 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5505bed5-dba3-4067-b94c-acd00b7c37c7" containerName="barbican-worker-log" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.753930 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="5505bed5-dba3-4067-b94c-acd00b7c37c7" containerName="barbican-worker-log" Jan 22 06:09:29 crc kubenswrapper[4933]: E0122 06:09:29.753946 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1270da6b-1c9e-41b6-b628-c2eaef5d9daf" containerName="galera" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.753958 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="1270da6b-1c9e-41b6-b628-c2eaef5d9daf" containerName="galera" Jan 22 06:09:29 crc kubenswrapper[4933]: E0122 06:09:29.753972 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9445b2f3-83ea-4e79-8312-ceffa2208f77" containerName="openstack-network-exporter" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.753986 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="9445b2f3-83ea-4e79-8312-ceffa2208f77" containerName="openstack-network-exporter" Jan 22 06:09:29 crc kubenswrapper[4933]: E0122 06:09:29.754001 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a218455-793d-4ccf-880a-d89b28e98b2d" containerName="nova-cell1-conductor-conductor" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.754013 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a218455-793d-4ccf-880a-d89b28e98b2d" containerName="nova-cell1-conductor-conductor" Jan 22 06:09:29 crc kubenswrapper[4933]: E0122 06:09:29.754035 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194" containerName="placement-api" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.754098 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194" containerName="placement-api" Jan 22 06:09:29 crc kubenswrapper[4933]: E0122 06:09:29.754121 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c16f2ab8-68b3-43fa-a862-c182aaa3dc23" containerName="keystone-api" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.754133 4933 
state_mem.go:107] "Deleted CPUSet assignment" podUID="c16f2ab8-68b3-43fa-a862-c182aaa3dc23" containerName="keystone-api" Jan 22 06:09:29 crc kubenswrapper[4933]: E0122 06:09:29.754157 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0d0b42d-5a68-46ba-a0de-f26c8dab1af8" containerName="memcached" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.754171 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0d0b42d-5a68-46ba-a0de-f26c8dab1af8" containerName="memcached" Jan 22 06:09:29 crc kubenswrapper[4933]: E0122 06:09:29.754197 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0fecb571-89ee-4d10-a1e3-e3755946df2b" containerName="barbican-keystone-listener" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.754213 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="0fecb571-89ee-4d10-a1e3-e3755946df2b" containerName="barbican-keystone-listener" Jan 22 06:09:29 crc kubenswrapper[4933]: E0122 06:09:29.754236 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91864da0-319b-46e9-b4ef-8ccee4c52d37" containerName="barbican-api-log" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.754251 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="91864da0-319b-46e9-b4ef-8ccee4c52d37" containerName="barbican-api-log" Jan 22 06:09:29 crc kubenswrapper[4933]: E0122 06:09:29.754272 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9" containerName="probe" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.754287 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9" containerName="probe" Jan 22 06:09:29 crc kubenswrapper[4933]: E0122 06:09:29.754314 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="578144dd-08a3-4c4b-8dd3-38ebe2d4dde8" containerName="ceilometer-central-agent" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.754328 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="578144dd-08a3-4c4b-8dd3-38ebe2d4dde8" containerName="ceilometer-central-agent" Jan 22 06:09:29 crc kubenswrapper[4933]: E0122 06:09:29.754359 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0fecb571-89ee-4d10-a1e3-e3755946df2b" containerName="barbican-keystone-listener-log" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.754374 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="0fecb571-89ee-4d10-a1e3-e3755946df2b" containerName="barbican-keystone-listener-log" Jan 22 06:09:29 crc kubenswrapper[4933]: E0122 06:09:29.754392 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56625c99-64dc-4742-9927-0210d8fe8d9d" containerName="nova-api-api" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.754407 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="56625c99-64dc-4742-9927-0210d8fe8d9d" containerName="nova-api-api" Jan 22 06:09:29 crc kubenswrapper[4933]: E0122 06:09:29.754443 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="578144dd-08a3-4c4b-8dd3-38ebe2d4dde8" containerName="proxy-httpd" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.754470 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="578144dd-08a3-4c4b-8dd3-38ebe2d4dde8" containerName="proxy-httpd" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.754800 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2bcbc4b-30c4-4ec8-81bf-6cba18171506" containerName="glance-httpd" Jan 22 06:09:29 crc 
kubenswrapper[4933]: I0122 06:09:29.754838 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="56625c99-64dc-4742-9927-0210d8fe8d9d" containerName="nova-api-api" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.754874 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194" containerName="placement-api" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.754895 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9" containerName="probe" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.754910 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fae4840-8fac-4192-8358-cbcae518e70d" containerName="nova-metadata-metadata" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.754923 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="9445b2f3-83ea-4e79-8312-ceffa2208f77" containerName="openstack-network-exporter" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.754947 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="603c9f42-93c4-4268-b513-d2309571ac20" containerName="cinder-api-log" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.754963 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="47299478-bcfd-4f21-a56c-efcf7b167999" containerName="rabbitmq" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.754978 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="5505bed5-dba3-4067-b94c-acd00b7c37c7" containerName="barbican-worker-log" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.754995 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf956626-51e3-4aff-b24b-4a553160327c" containerName="nova-scheduler-scheduler" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.755017 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a218455-793d-4ccf-880a-d89b28e98b2d" containerName="nova-cell1-conductor-conductor" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.755036 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2bcbc4b-30c4-4ec8-81bf-6cba18171506" containerName="glance-log" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.755061 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="603c9f42-93c4-4268-b513-d2309571ac20" containerName="cinder-api" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.755115 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="578144dd-08a3-4c4b-8dd3-38ebe2d4dde8" containerName="proxy-httpd" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.755128 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="0fecb571-89ee-4d10-a1e3-e3755946df2b" containerName="barbican-keystone-listener-log" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.755145 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="4886348b-6078-41dc-8fab-a8e2e1c4898d" containerName="kube-state-metrics" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.755163 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="578144dd-08a3-4c4b-8dd3-38ebe2d4dde8" containerName="ceilometer-central-agent" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.755188 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="1270da6b-1c9e-41b6-b628-c2eaef5d9daf" containerName="galera" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.755211 4933 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="0fecb571-89ee-4d10-a1e3-e3755946df2b" containerName="barbican-keystone-listener" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.755230 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="5505bed5-dba3-4067-b94c-acd00b7c37c7" containerName="barbican-worker" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.755271 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4c8b893-2e30-4273-bbec-7ff7efee686e" containerName="glance-log" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.755291 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="9445b2f3-83ea-4e79-8312-ceffa2208f77" containerName="ovn-northd" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.755306 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="e0d0b42d-5a68-46ba-a0de-f26c8dab1af8" containerName="memcached" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.755324 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d712958-1ece-47de-9798-6e852b03c565" containerName="rabbitmq" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.755337 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1bcdbaa-0f6d-4bc8-95ec-892db9c8d194" containerName="placement-log" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.755352 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="578144dd-08a3-4c4b-8dd3-38ebe2d4dde8" containerName="sg-core" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.755366 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="578144dd-08a3-4c4b-8dd3-38ebe2d4dde8" containerName="ceilometer-notification-agent" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.755386 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4c8b893-2e30-4273-bbec-7ff7efee686e" containerName="glance-httpd" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.755399 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="91864da0-319b-46e9-b4ef-8ccee4c52d37" containerName="barbican-api-log" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.755416 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="91864da0-319b-46e9-b4ef-8ccee4c52d37" containerName="barbican-api" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.755433 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="0cbe0b3c-c2f0-4097-a6f4-5ceee78337b9" containerName="cinder-scheduler" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.755456 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="56625c99-64dc-4742-9927-0210d8fe8d9d" containerName="nova-api-log" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.755475 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="c16f2ab8-68b3-43fa-a862-c182aaa3dc23" containerName="keystone-api" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.755495 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fae4840-8fac-4192-8358-cbcae518e70d" containerName="nova-metadata-log" Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.760745 4933 util.go:30] "No sandbox for pod can be found. 
Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.760745 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-w8wv6"
Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.776275 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-w8wv6"]
Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.799574 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/05f0680f-5545-41f5-8036-ae5810a7257e-utilities\") pod \"redhat-operators-w8wv6\" (UID: \"05f0680f-5545-41f5-8036-ae5810a7257e\") " pod="openshift-marketplace/redhat-operators-w8wv6"
Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.799679 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/05f0680f-5545-41f5-8036-ae5810a7257e-catalog-content\") pod \"redhat-operators-w8wv6\" (UID: \"05f0680f-5545-41f5-8036-ae5810a7257e\") " pod="openshift-marketplace/redhat-operators-w8wv6"
Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.799837 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w9kgw\" (UniqueName: \"kubernetes.io/projected/05f0680f-5545-41f5-8036-ae5810a7257e-kube-api-access-w9kgw\") pod \"redhat-operators-w8wv6\" (UID: \"05f0680f-5545-41f5-8036-ae5810a7257e\") " pod="openshift-marketplace/redhat-operators-w8wv6"
Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.900813 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/05f0680f-5545-41f5-8036-ae5810a7257e-utilities\") pod \"redhat-operators-w8wv6\" (UID: \"05f0680f-5545-41f5-8036-ae5810a7257e\") " pod="openshift-marketplace/redhat-operators-w8wv6"
Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.901180 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/05f0680f-5545-41f5-8036-ae5810a7257e-catalog-content\") pod \"redhat-operators-w8wv6\" (UID: \"05f0680f-5545-41f5-8036-ae5810a7257e\") " pod="openshift-marketplace/redhat-operators-w8wv6"
Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.901257 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w9kgw\" (UniqueName: \"kubernetes.io/projected/05f0680f-5545-41f5-8036-ae5810a7257e-kube-api-access-w9kgw\") pod \"redhat-operators-w8wv6\" (UID: \"05f0680f-5545-41f5-8036-ae5810a7257e\") " pod="openshift-marketplace/redhat-operators-w8wv6"
Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.901565 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/05f0680f-5545-41f5-8036-ae5810a7257e-utilities\") pod \"redhat-operators-w8wv6\" (UID: \"05f0680f-5545-41f5-8036-ae5810a7257e\") " pod="openshift-marketplace/redhat-operators-w8wv6"
Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.901754 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/05f0680f-5545-41f5-8036-ae5810a7257e-catalog-content\") pod \"redhat-operators-w8wv6\" (UID: \"05f0680f-5545-41f5-8036-ae5810a7257e\") " pod="openshift-marketplace/redhat-operators-w8wv6"
Jan 22 06:09:29 crc kubenswrapper[4933]: I0122 06:09:29.923845 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w9kgw\" (UniqueName: \"kubernetes.io/projected/05f0680f-5545-41f5-8036-ae5810a7257e-kube-api-access-w9kgw\") pod \"redhat-operators-w8wv6\" (UID: \"05f0680f-5545-41f5-8036-ae5810a7257e\") " pod="openshift-marketplace/redhat-operators-w8wv6"
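Each volume of the new pod walks the same three stages above: VerifyControllerAttachedVolume, MountVolume started, MountVolume.SetUp succeeded. A small hypothetical helper (not part of kubelet; regex and sample are illustrative) for grouping those lines by volume name when reading logs like this one:

package main

import (
	"bufio"
	"fmt"
	"regexp"
	"strings"
)

// volRe extracts the volume name from lines such as
//   ... started for volume \"utilities\" (UniqueName: ...)
var volRe = regexp.MustCompile(`for volume \\?"([^"\\]+)\\?"`)

// byVolume groups mount-lifecycle lines by volume name so the
// verify -> mount -> SetUp progression reads per volume.
func byVolume(log string) map[string][]string {
	out := map[string][]string{}
	sc := bufio.NewScanner(strings.NewReader(log))
	for sc.Scan() {
		if m := volRe.FindStringSubmatch(sc.Text()); m != nil {
			out[m[1]] = append(out[m[1]], sc.Text())
		}
	}
	return out
}

func main() {
	sample := `I0122 06:09:29.799574 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" ..."
I0122 06:09:29.901565 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" ..."`
	for vol, lines := range byVolume(sample) {
		fmt.Printf("%s: %d events\n", vol, len(lines))
	}
}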
\"kube-api-access-w9kgw\" (UniqueName: \"kubernetes.io/projected/05f0680f-5545-41f5-8036-ae5810a7257e-kube-api-access-w9kgw\") pod \"redhat-operators-w8wv6\" (UID: \"05f0680f-5545-41f5-8036-ae5810a7257e\") " pod="openshift-marketplace/redhat-operators-w8wv6" Jan 22 06:09:30 crc kubenswrapper[4933]: I0122 06:09:30.118047 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-w8wv6" Jan 22 06:09:30 crc kubenswrapper[4933]: I0122 06:09:30.558199 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-w8wv6"] Jan 22 06:09:30 crc kubenswrapper[4933]: I0122 06:09:30.768940 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w8wv6" event={"ID":"05f0680f-5545-41f5-8036-ae5810a7257e","Type":"ContainerStarted","Data":"72a3e6fee60ff2e88a5cd4bd635789332335898307ab07aed7492a882c11eca1"} Jan 22 06:09:31 crc kubenswrapper[4933]: I0122 06:09:31.780356 4933 generic.go:334] "Generic (PLEG): container finished" podID="05f0680f-5545-41f5-8036-ae5810a7257e" containerID="45b09c98072d0db057b65e8cca6305c7356579481715ebb7b778a47cfcb21af9" exitCode=0 Jan 22 06:09:31 crc kubenswrapper[4933]: I0122 06:09:31.780579 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w8wv6" event={"ID":"05f0680f-5545-41f5-8036-ae5810a7257e","Type":"ContainerDied","Data":"45b09c98072d0db057b65e8cca6305c7356579481715ebb7b778a47cfcb21af9"} Jan 22 06:09:32 crc kubenswrapper[4933]: I0122 06:09:32.707608 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5f996bcdbf-kwx6s" Jan 22 06:09:32 crc kubenswrapper[4933]: I0122 06:09:32.749874 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k6nrz\" (UniqueName: \"kubernetes.io/projected/fe53ac25-75b3-42c3-802f-5359023b26e7-kube-api-access-k6nrz\") pod \"fe53ac25-75b3-42c3-802f-5359023b26e7\" (UID: \"fe53ac25-75b3-42c3-802f-5359023b26e7\") " Jan 22 06:09:32 crc kubenswrapper[4933]: I0122 06:09:32.749949 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe53ac25-75b3-42c3-802f-5359023b26e7-ovndb-tls-certs\") pod \"fe53ac25-75b3-42c3-802f-5359023b26e7\" (UID: \"fe53ac25-75b3-42c3-802f-5359023b26e7\") " Jan 22 06:09:32 crc kubenswrapper[4933]: I0122 06:09:32.749998 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/fe53ac25-75b3-42c3-802f-5359023b26e7-httpd-config\") pod \"fe53ac25-75b3-42c3-802f-5359023b26e7\" (UID: \"fe53ac25-75b3-42c3-802f-5359023b26e7\") " Jan 22 06:09:32 crc kubenswrapper[4933]: I0122 06:09:32.750040 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe53ac25-75b3-42c3-802f-5359023b26e7-combined-ca-bundle\") pod \"fe53ac25-75b3-42c3-802f-5359023b26e7\" (UID: \"fe53ac25-75b3-42c3-802f-5359023b26e7\") " Jan 22 06:09:32 crc kubenswrapper[4933]: I0122 06:09:32.750107 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/fe53ac25-75b3-42c3-802f-5359023b26e7-config\") pod \"fe53ac25-75b3-42c3-802f-5359023b26e7\" (UID: \"fe53ac25-75b3-42c3-802f-5359023b26e7\") " Jan 22 06:09:32 crc kubenswrapper[4933]: I0122 06:09:32.750153 4933 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe53ac25-75b3-42c3-802f-5359023b26e7-public-tls-certs\") pod \"fe53ac25-75b3-42c3-802f-5359023b26e7\" (UID: \"fe53ac25-75b3-42c3-802f-5359023b26e7\") " Jan 22 06:09:32 crc kubenswrapper[4933]: I0122 06:09:32.750212 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe53ac25-75b3-42c3-802f-5359023b26e7-internal-tls-certs\") pod \"fe53ac25-75b3-42c3-802f-5359023b26e7\" (UID: \"fe53ac25-75b3-42c3-802f-5359023b26e7\") " Jan 22 06:09:32 crc kubenswrapper[4933]: I0122 06:09:32.758011 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fe53ac25-75b3-42c3-802f-5359023b26e7-kube-api-access-k6nrz" (OuterVolumeSpecName: "kube-api-access-k6nrz") pod "fe53ac25-75b3-42c3-802f-5359023b26e7" (UID: "fe53ac25-75b3-42c3-802f-5359023b26e7"). InnerVolumeSpecName "kube-api-access-k6nrz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:32 crc kubenswrapper[4933]: I0122 06:09:32.758697 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe53ac25-75b3-42c3-802f-5359023b26e7-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "fe53ac25-75b3-42c3-802f-5359023b26e7" (UID: "fe53ac25-75b3-42c3-802f-5359023b26e7"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:32 crc kubenswrapper[4933]: I0122 06:09:32.794568 4933 generic.go:334] "Generic (PLEG): container finished" podID="fe53ac25-75b3-42c3-802f-5359023b26e7" containerID="2ae19b8623001ea92970743871f64a042fee6abb1332e83f11c894e81eff91b0" exitCode=0 Jan 22 06:09:32 crc kubenswrapper[4933]: I0122 06:09:32.794685 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5f996bcdbf-kwx6s" Jan 22 06:09:32 crc kubenswrapper[4933]: I0122 06:09:32.794656 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5f996bcdbf-kwx6s" event={"ID":"fe53ac25-75b3-42c3-802f-5359023b26e7","Type":"ContainerDied","Data":"2ae19b8623001ea92970743871f64a042fee6abb1332e83f11c894e81eff91b0"} Jan 22 06:09:32 crc kubenswrapper[4933]: I0122 06:09:32.794810 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5f996bcdbf-kwx6s" event={"ID":"fe53ac25-75b3-42c3-802f-5359023b26e7","Type":"ContainerDied","Data":"92c106dd1cfea34377fcb0355c29ec458885ff5df61981d69adf101c3328fad3"} Jan 22 06:09:32 crc kubenswrapper[4933]: I0122 06:09:32.794843 4933 scope.go:117] "RemoveContainer" containerID="e6d22e9623f38cd68cde6e98c7b0ee2b102f8edb35af1c6667a3464fee551f58" Jan 22 06:09:32 crc kubenswrapper[4933]: I0122 06:09:32.799791 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w8wv6" event={"ID":"05f0680f-5545-41f5-8036-ae5810a7257e","Type":"ContainerStarted","Data":"0d91d72ddece9b1175f3c750e18d5efc9f7a4d7ad35119fcf81b46c1483419df"} Jan 22 06:09:32 crc kubenswrapper[4933]: I0122 06:09:32.801434 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe53ac25-75b3-42c3-802f-5359023b26e7-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "fe53ac25-75b3-42c3-802f-5359023b26e7" (UID: "fe53ac25-75b3-42c3-802f-5359023b26e7"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:32 crc kubenswrapper[4933]: I0122 06:09:32.811769 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe53ac25-75b3-42c3-802f-5359023b26e7-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "fe53ac25-75b3-42c3-802f-5359023b26e7" (UID: "fe53ac25-75b3-42c3-802f-5359023b26e7"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:32 crc kubenswrapper[4933]: I0122 06:09:32.822557 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe53ac25-75b3-42c3-802f-5359023b26e7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fe53ac25-75b3-42c3-802f-5359023b26e7" (UID: "fe53ac25-75b3-42c3-802f-5359023b26e7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:32 crc kubenswrapper[4933]: I0122 06:09:32.826673 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe53ac25-75b3-42c3-802f-5359023b26e7-config" (OuterVolumeSpecName: "config") pod "fe53ac25-75b3-42c3-802f-5359023b26e7" (UID: "fe53ac25-75b3-42c3-802f-5359023b26e7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:32 crc kubenswrapper[4933]: I0122 06:09:32.827631 4933 scope.go:117] "RemoveContainer" containerID="2ae19b8623001ea92970743871f64a042fee6abb1332e83f11c894e81eff91b0" Jan 22 06:09:32 crc kubenswrapper[4933]: I0122 06:09:32.839500 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe53ac25-75b3-42c3-802f-5359023b26e7-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "fe53ac25-75b3-42c3-802f-5359023b26e7" (UID: "fe53ac25-75b3-42c3-802f-5359023b26e7"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:32 crc kubenswrapper[4933]: I0122 06:09:32.846802 4933 scope.go:117] "RemoveContainer" containerID="e6d22e9623f38cd68cde6e98c7b0ee2b102f8edb35af1c6667a3464fee551f58" Jan 22 06:09:32 crc kubenswrapper[4933]: E0122 06:09:32.847229 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e6d22e9623f38cd68cde6e98c7b0ee2b102f8edb35af1c6667a3464fee551f58\": container with ID starting with e6d22e9623f38cd68cde6e98c7b0ee2b102f8edb35af1c6667a3464fee551f58 not found: ID does not exist" containerID="e6d22e9623f38cd68cde6e98c7b0ee2b102f8edb35af1c6667a3464fee551f58" Jan 22 06:09:32 crc kubenswrapper[4933]: I0122 06:09:32.847263 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e6d22e9623f38cd68cde6e98c7b0ee2b102f8edb35af1c6667a3464fee551f58"} err="failed to get container status \"e6d22e9623f38cd68cde6e98c7b0ee2b102f8edb35af1c6667a3464fee551f58\": rpc error: code = NotFound desc = could not find container \"e6d22e9623f38cd68cde6e98c7b0ee2b102f8edb35af1c6667a3464fee551f58\": container with ID starting with e6d22e9623f38cd68cde6e98c7b0ee2b102f8edb35af1c6667a3464fee551f58 not found: ID does not exist" Jan 22 06:09:32 crc kubenswrapper[4933]: I0122 06:09:32.847282 4933 scope.go:117] "RemoveContainer" containerID="2ae19b8623001ea92970743871f64a042fee6abb1332e83f11c894e81eff91b0" Jan 22 06:09:32 crc kubenswrapper[4933]: E0122 06:09:32.847665 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2ae19b8623001ea92970743871f64a042fee6abb1332e83f11c894e81eff91b0\": container with ID starting with 2ae19b8623001ea92970743871f64a042fee6abb1332e83f11c894e81eff91b0 not found: ID does not exist" containerID="2ae19b8623001ea92970743871f64a042fee6abb1332e83f11c894e81eff91b0" Jan 22 06:09:32 crc kubenswrapper[4933]: I0122 06:09:32.847693 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2ae19b8623001ea92970743871f64a042fee6abb1332e83f11c894e81eff91b0"} err="failed to get container status \"2ae19b8623001ea92970743871f64a042fee6abb1332e83f11c894e81eff91b0\": rpc error: code = NotFound desc = could not find container \"2ae19b8623001ea92970743871f64a042fee6abb1332e83f11c894e81eff91b0\": container with ID starting with 2ae19b8623001ea92970743871f64a042fee6abb1332e83f11c894e81eff91b0 not found: ID does not exist" Jan 22 06:09:32 crc kubenswrapper[4933]: I0122 06:09:32.852216 4933 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe53ac25-75b3-42c3-802f-5359023b26e7-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:32 crc kubenswrapper[4933]: I0122 06:09:32.852263 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k6nrz\" (UniqueName: \"kubernetes.io/projected/fe53ac25-75b3-42c3-802f-5359023b26e7-kube-api-access-k6nrz\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:32 crc kubenswrapper[4933]: I0122 06:09:32.852275 4933 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe53ac25-75b3-42c3-802f-5359023b26e7-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:32 crc kubenswrapper[4933]: I0122 06:09:32.852284 4933 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: 
\"kubernetes.io/secret/fe53ac25-75b3-42c3-802f-5359023b26e7-httpd-config\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:32 crc kubenswrapper[4933]: I0122 06:09:32.852294 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe53ac25-75b3-42c3-802f-5359023b26e7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:32 crc kubenswrapper[4933]: I0122 06:09:32.852302 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/fe53ac25-75b3-42c3-802f-5359023b26e7-config\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:32 crc kubenswrapper[4933]: I0122 06:09:32.852309 4933 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe53ac25-75b3-42c3-802f-5359023b26e7-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:33 crc kubenswrapper[4933]: E0122 06:09:33.042856 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe is running failed: container process not found" containerID="4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 22 06:09:33 crc kubenswrapper[4933]: E0122 06:09:33.043679 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe is running failed: container process not found" containerID="4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 22 06:09:33 crc kubenswrapper[4933]: E0122 06:09:33.044136 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe is running failed: container process not found" containerID="4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 22 06:09:33 crc kubenswrapper[4933]: E0122 06:09:33.044191 4933 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-rwb6s" podUID="12629e2f-7d6e-417c-a8df-c15b7a3e794e" containerName="ovsdb-server" Jan 22 06:09:33 crc kubenswrapper[4933]: E0122 06:09:33.044882 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="fdbc1e77ca1a1d82a896e3c18520e64cbf781bfdafe1433a48e8546525073c90" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 22 06:09:33 crc kubenswrapper[4933]: E0122 06:09:33.048460 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="fdbc1e77ca1a1d82a896e3c18520e64cbf781bfdafe1433a48e8546525073c90" 
cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 22 06:09:33 crc kubenswrapper[4933]: E0122 06:09:33.050885 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="fdbc1e77ca1a1d82a896e3c18520e64cbf781bfdafe1433a48e8546525073c90" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 22 06:09:33 crc kubenswrapper[4933]: E0122 06:09:33.050937 4933 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-rwb6s" podUID="12629e2f-7d6e-417c-a8df-c15b7a3e794e" containerName="ovs-vswitchd" Jan 22 06:09:33 crc kubenswrapper[4933]: I0122 06:09:33.140099 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5f996bcdbf-kwx6s"] Jan 22 06:09:33 crc kubenswrapper[4933]: I0122 06:09:33.151424 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-5f996bcdbf-kwx6s"] Jan 22 06:09:33 crc kubenswrapper[4933]: I0122 06:09:33.812341 4933 generic.go:334] "Generic (PLEG): container finished" podID="05f0680f-5545-41f5-8036-ae5810a7257e" containerID="0d91d72ddece9b1175f3c750e18d5efc9f7a4d7ad35119fcf81b46c1483419df" exitCode=0 Jan 22 06:09:33 crc kubenswrapper[4933]: I0122 06:09:33.812418 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w8wv6" event={"ID":"05f0680f-5545-41f5-8036-ae5810a7257e","Type":"ContainerDied","Data":"0d91d72ddece9b1175f3c750e18d5efc9f7a4d7ad35119fcf81b46c1483419df"} Jan 22 06:09:34 crc kubenswrapper[4933]: I0122 06:09:34.502846 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fe53ac25-75b3-42c3-802f-5359023b26e7" path="/var/lib/kubelet/pods/fe53ac25-75b3-42c3-802f-5359023b26e7/volumes" Jan 22 06:09:34 crc kubenswrapper[4933]: I0122 06:09:34.827671 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w8wv6" event={"ID":"05f0680f-5545-41f5-8036-ae5810a7257e","Type":"ContainerStarted","Data":"1f4d27edaed17422428e077ac430ed583b5acbdff2bc34cd50d5c4c1ee1004e2"} Jan 22 06:09:34 crc kubenswrapper[4933]: I0122 06:09:34.858219 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-w8wv6" podStartSLOduration=3.408763102 podStartE2EDuration="5.858202067s" podCreationTimestamp="2026-01-22 06:09:29 +0000 UTC" firstStartedPulling="2026-01-22 06:09:31.782028323 +0000 UTC m=+1419.619153716" lastFinishedPulling="2026-01-22 06:09:34.231467318 +0000 UTC m=+1422.068592681" observedRunningTime="2026-01-22 06:09:34.856995707 +0000 UTC m=+1422.694121070" watchObservedRunningTime="2026-01-22 06:09:34.858202067 +0000 UTC m=+1422.695327420" Jan 22 06:09:38 crc kubenswrapper[4933]: E0122 06:09:38.042943 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe is running failed: container process not found" containerID="4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 22 06:09:38 crc kubenswrapper[4933]: E0122 06:09:38.044036 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: 
Jan 22 06:09:38 crc kubenswrapper[4933]: E0122 06:09:38.042943 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe is running failed: container process not found" containerID="4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 22 06:09:38 crc kubenswrapper[4933]: E0122 06:09:38.044036 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe is running failed: container process not found" containerID="4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 22 06:09:38 crc kubenswrapper[4933]: E0122 06:09:38.044468 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe is running failed: container process not found" containerID="4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 22 06:09:38 crc kubenswrapper[4933]: E0122 06:09:38.044513 4933 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-rwb6s" podUID="12629e2f-7d6e-417c-a8df-c15b7a3e794e" containerName="ovsdb-server"
Jan 22 06:09:38 crc kubenswrapper[4933]: E0122 06:09:38.046191 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="fdbc1e77ca1a1d82a896e3c18520e64cbf781bfdafe1433a48e8546525073c90" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 22 06:09:38 crc kubenswrapper[4933]: E0122 06:09:38.053273 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="fdbc1e77ca1a1d82a896e3c18520e64cbf781bfdafe1433a48e8546525073c90" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 22 06:09:38 crc kubenswrapper[4933]: E0122 06:09:38.055124 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="fdbc1e77ca1a1d82a896e3c18520e64cbf781bfdafe1433a48e8546525073c90" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 22 06:09:38 crc kubenswrapper[4933]: E0122 06:09:38.055230 4933 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-rwb6s" podUID="12629e2f-7d6e-417c-a8df-c15b7a3e794e" containerName="ovs-vswitchd"
Jan 22 06:09:39 crc kubenswrapper[4933]: I0122 06:09:39.883247 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-rwb6s_12629e2f-7d6e-417c-a8df-c15b7a3e794e/ovs-vswitchd/0.log"
Jan 22 06:09:39 crc kubenswrapper[4933]: I0122 06:09:39.884808 4933 generic.go:334] "Generic (PLEG): container finished" podID="12629e2f-7d6e-417c-a8df-c15b7a3e794e" containerID="fdbc1e77ca1a1d82a896e3c18520e64cbf781bfdafe1433a48e8546525073c90" exitCode=137
Jan 22 06:09:39 crc kubenswrapper[4933]: I0122 06:09:39.884886 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-rwb6s" event={"ID":"12629e2f-7d6e-417c-a8df-c15b7a3e794e","Type":"ContainerDied","Data":"fdbc1e77ca1a1d82a896e3c18520e64cbf781bfdafe1433a48e8546525073c90"}
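Both failure signatures repeat every probe period until PLEG records the container's exit (exitCode=137 above): code = NotFound once the container process is already gone, and code = Unknown while CRI-O refuses to register an exec PID in a stopping container. A hypothetical classifier over those two error strings (taken verbatim from the entries above; the function is illustrative, not kubelet code), showing why both amount to an ordinary readiness failure rather than an infrastructure fault:

package main

import (
	"fmt"
	"strings"
)

// classify maps the two ExecSync failure modes above to a probe outcome:
// both mean the container is gone or on its way out.
func classify(err string) string {
	switch {
	case strings.Contains(err, "container process not found"),
		strings.Contains(err, "container is not created or running"):
		return "readiness failure: container already exited"
	case strings.Contains(err, "container is stopping"):
		return "readiness failure: container is shutting down"
	default:
		return "probe error: unexpected runtime failure"
	}
}

func main() {
	fmt.Println(classify("rpc error: code = NotFound desc = container is not created or running: container process not found"))
	fmt.Println(classify("rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1"))
}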
event={"ID":"12629e2f-7d6e-417c-a8df-c15b7a3e794e","Type":"ContainerDied","Data":"fdbc1e77ca1a1d82a896e3c18520e64cbf781bfdafe1433a48e8546525073c90"} Jan 22 06:09:40 crc kubenswrapper[4933]: I0122 06:09:40.118615 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-w8wv6" Jan 22 06:09:40 crc kubenswrapper[4933]: I0122 06:09:40.118898 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-w8wv6" Jan 22 06:09:40 crc kubenswrapper[4933]: I0122 06:09:40.542711 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-rwb6s_12629e2f-7d6e-417c-a8df-c15b7a3e794e/ovs-vswitchd/0.log" Jan 22 06:09:40 crc kubenswrapper[4933]: I0122 06:09:40.543552 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-rwb6s" Jan 22 06:09:40 crc kubenswrapper[4933]: I0122 06:09:40.564167 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/12629e2f-7d6e-417c-a8df-c15b7a3e794e-scripts\") pod \"12629e2f-7d6e-417c-a8df-c15b7a3e794e\" (UID: \"12629e2f-7d6e-417c-a8df-c15b7a3e794e\") " Jan 22 06:09:40 crc kubenswrapper[4933]: I0122 06:09:40.564248 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/12629e2f-7d6e-417c-a8df-c15b7a3e794e-var-run\") pod \"12629e2f-7d6e-417c-a8df-c15b7a3e794e\" (UID: \"12629e2f-7d6e-417c-a8df-c15b7a3e794e\") " Jan 22 06:09:40 crc kubenswrapper[4933]: I0122 06:09:40.564276 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9hf85\" (UniqueName: \"kubernetes.io/projected/12629e2f-7d6e-417c-a8df-c15b7a3e794e-kube-api-access-9hf85\") pod \"12629e2f-7d6e-417c-a8df-c15b7a3e794e\" (UID: \"12629e2f-7d6e-417c-a8df-c15b7a3e794e\") " Jan 22 06:09:40 crc kubenswrapper[4933]: I0122 06:09:40.564311 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/12629e2f-7d6e-417c-a8df-c15b7a3e794e-var-lib\") pod \"12629e2f-7d6e-417c-a8df-c15b7a3e794e\" (UID: \"12629e2f-7d6e-417c-a8df-c15b7a3e794e\") " Jan 22 06:09:40 crc kubenswrapper[4933]: I0122 06:09:40.564375 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/12629e2f-7d6e-417c-a8df-c15b7a3e794e-etc-ovs\") pod \"12629e2f-7d6e-417c-a8df-c15b7a3e794e\" (UID: \"12629e2f-7d6e-417c-a8df-c15b7a3e794e\") " Jan 22 06:09:40 crc kubenswrapper[4933]: I0122 06:09:40.564366 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/12629e2f-7d6e-417c-a8df-c15b7a3e794e-var-run" (OuterVolumeSpecName: "var-run") pod "12629e2f-7d6e-417c-a8df-c15b7a3e794e" (UID: "12629e2f-7d6e-417c-a8df-c15b7a3e794e"). InnerVolumeSpecName "var-run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 06:09:40 crc kubenswrapper[4933]: I0122 06:09:40.564398 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/12629e2f-7d6e-417c-a8df-c15b7a3e794e-var-log\") pod \"12629e2f-7d6e-417c-a8df-c15b7a3e794e\" (UID: \"12629e2f-7d6e-417c-a8df-c15b7a3e794e\") " Jan 22 06:09:40 crc kubenswrapper[4933]: I0122 06:09:40.564431 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/12629e2f-7d6e-417c-a8df-c15b7a3e794e-var-lib" (OuterVolumeSpecName: "var-lib") pod "12629e2f-7d6e-417c-a8df-c15b7a3e794e" (UID: "12629e2f-7d6e-417c-a8df-c15b7a3e794e"). InnerVolumeSpecName "var-lib". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 06:09:40 crc kubenswrapper[4933]: I0122 06:09:40.564772 4933 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/12629e2f-7d6e-417c-a8df-c15b7a3e794e-var-run\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:40 crc kubenswrapper[4933]: I0122 06:09:40.564789 4933 reconciler_common.go:293] "Volume detached for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/12629e2f-7d6e-417c-a8df-c15b7a3e794e-var-lib\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:40 crc kubenswrapper[4933]: I0122 06:09:40.564874 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/12629e2f-7d6e-417c-a8df-c15b7a3e794e-etc-ovs" (OuterVolumeSpecName: "etc-ovs") pod "12629e2f-7d6e-417c-a8df-c15b7a3e794e" (UID: "12629e2f-7d6e-417c-a8df-c15b7a3e794e"). InnerVolumeSpecName "etc-ovs". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 06:09:40 crc kubenswrapper[4933]: I0122 06:09:40.564891 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/12629e2f-7d6e-417c-a8df-c15b7a3e794e-var-log" (OuterVolumeSpecName: "var-log") pod "12629e2f-7d6e-417c-a8df-c15b7a3e794e" (UID: "12629e2f-7d6e-417c-a8df-c15b7a3e794e"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 06:09:40 crc kubenswrapper[4933]: I0122 06:09:40.565907 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/12629e2f-7d6e-417c-a8df-c15b7a3e794e-scripts" (OuterVolumeSpecName: "scripts") pod "12629e2f-7d6e-417c-a8df-c15b7a3e794e" (UID: "12629e2f-7d6e-417c-a8df-c15b7a3e794e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:09:40 crc kubenswrapper[4933]: I0122 06:09:40.583388 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12629e2f-7d6e-417c-a8df-c15b7a3e794e-kube-api-access-9hf85" (OuterVolumeSpecName: "kube-api-access-9hf85") pod "12629e2f-7d6e-417c-a8df-c15b7a3e794e" (UID: "12629e2f-7d6e-417c-a8df-c15b7a3e794e"). InnerVolumeSpecName "kube-api-access-9hf85". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:40 crc kubenswrapper[4933]: I0122 06:09:40.665481 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/12629e2f-7d6e-417c-a8df-c15b7a3e794e-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:40 crc kubenswrapper[4933]: I0122 06:09:40.665812 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9hf85\" (UniqueName: \"kubernetes.io/projected/12629e2f-7d6e-417c-a8df-c15b7a3e794e-kube-api-access-9hf85\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:40 crc kubenswrapper[4933]: I0122 06:09:40.665823 4933 reconciler_common.go:293] "Volume detached for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/12629e2f-7d6e-417c-a8df-c15b7a3e794e-etc-ovs\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:40 crc kubenswrapper[4933]: I0122 06:09:40.665832 4933 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/12629e2f-7d6e-417c-a8df-c15b7a3e794e-var-log\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:40 crc kubenswrapper[4933]: I0122 06:09:40.903276 4933 generic.go:334] "Generic (PLEG): container finished" podID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerID="3dbccd349100017de57314d2ef2e4235aa70b98e80d36f8e602e30cd6b29a896" exitCode=137 Jan 22 06:09:40 crc kubenswrapper[4933]: I0122 06:09:40.903334 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016","Type":"ContainerDied","Data":"3dbccd349100017de57314d2ef2e4235aa70b98e80d36f8e602e30cd6b29a896"} Jan 22 06:09:40 crc kubenswrapper[4933]: I0122 06:09:40.904871 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-rwb6s_12629e2f-7d6e-417c-a8df-c15b7a3e794e/ovs-vswitchd/0.log" Jan 22 06:09:40 crc kubenswrapper[4933]: I0122 06:09:40.906226 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-rwb6s" event={"ID":"12629e2f-7d6e-417c-a8df-c15b7a3e794e","Type":"ContainerDied","Data":"b5c0882c058c1f2711772d0b5426c672a9bb2da60241edfde7264d83aeb76b5b"} Jan 22 06:09:40 crc kubenswrapper[4933]: I0122 06:09:40.906268 4933 scope.go:117] "RemoveContainer" containerID="fdbc1e77ca1a1d82a896e3c18520e64cbf781bfdafe1433a48e8546525073c90" Jan 22 06:09:40 crc kubenswrapper[4933]: I0122 06:09:40.906386 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-rwb6s" Jan 22 06:09:40 crc kubenswrapper[4933]: I0122 06:09:40.940093 4933 scope.go:117] "RemoveContainer" containerID="4851820161e4b14a1c2cb1ff78e9150ad954e9f7c40a7097166675aa2031cffe" Jan 22 06:09:40 crc kubenswrapper[4933]: I0122 06:09:40.940869 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-rwb6s"] Jan 22 06:09:40 crc kubenswrapper[4933]: I0122 06:09:40.943419 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:09:40 crc kubenswrapper[4933]: I0122 06:09:40.943532 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:09:40 crc kubenswrapper[4933]: I0122 06:09:40.948405 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-ovs-rwb6s"] Jan 22 06:09:41 crc kubenswrapper[4933]: I0122 06:09:40.993729 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Jan 22 06:09:41 crc kubenswrapper[4933]: I0122 06:09:40.994464 4933 scope.go:117] "RemoveContainer" containerID="97dd8c1857fe2cca4317c1796aab3e3c32a4e333c89ca50415ee631196e34bde" Jan 22 06:09:41 crc kubenswrapper[4933]: I0122 06:09:41.071508 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-lock\") pod \"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016\" (UID: \"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016\") " Jan 22 06:09:41 crc kubenswrapper[4933]: I0122 06:09:41.071573 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-etc-swift\") pod \"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016\" (UID: \"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016\") " Jan 22 06:09:41 crc kubenswrapper[4933]: I0122 06:09:41.071607 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-combined-ca-bundle\") pod \"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016\" (UID: \"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016\") " Jan 22 06:09:41 crc kubenswrapper[4933]: I0122 06:09:41.071633 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swift\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016\" (UID: \"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016\") " Jan 22 06:09:41 crc kubenswrapper[4933]: I0122 06:09:41.071681 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wdl2r\" (UniqueName: \"kubernetes.io/projected/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-kube-api-access-wdl2r\") pod \"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016\" (UID: \"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016\") " Jan 22 06:09:41 crc kubenswrapper[4933]: I0122 06:09:41.071797 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cache\" 
(UniqueName: \"kubernetes.io/empty-dir/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-cache\") pod \"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016\" (UID: \"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016\") " Jan 22 06:09:41 crc kubenswrapper[4933]: I0122 06:09:41.072625 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-cache" (OuterVolumeSpecName: "cache") pod "4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" (UID: "4d7c7a06-59b1-4cc5-88dd-87bc9bccd016"). InnerVolumeSpecName "cache". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:09:41 crc kubenswrapper[4933]: I0122 06:09:41.072640 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-lock" (OuterVolumeSpecName: "lock") pod "4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" (UID: "4d7c7a06-59b1-4cc5-88dd-87bc9bccd016"). InnerVolumeSpecName "lock". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:09:41 crc kubenswrapper[4933]: I0122 06:09:41.075342 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-kube-api-access-wdl2r" (OuterVolumeSpecName: "kube-api-access-wdl2r") pod "4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" (UID: "4d7c7a06-59b1-4cc5-88dd-87bc9bccd016"). InnerVolumeSpecName "kube-api-access-wdl2r". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:41 crc kubenswrapper[4933]: I0122 06:09:41.076211 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" (UID: "4d7c7a06-59b1-4cc5-88dd-87bc9bccd016"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:41 crc kubenswrapper[4933]: I0122 06:09:41.076239 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "swift") pod "4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" (UID: "4d7c7a06-59b1-4cc5-88dd-87bc9bccd016"). InnerVolumeSpecName "local-storage02-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 22 06:09:41 crc kubenswrapper[4933]: I0122 06:09:41.159741 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-w8wv6" podUID="05f0680f-5545-41f5-8036-ae5810a7257e" containerName="registry-server" probeResult="failure" output=< Jan 22 06:09:41 crc kubenswrapper[4933]: timeout: failed to connect service ":50051" within 1s Jan 22 06:09:41 crc kubenswrapper[4933]: > Jan 22 06:09:41 crc kubenswrapper[4933]: I0122 06:09:41.173967 4933 reconciler_common.go:293] "Volume detached for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-lock\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:41 crc kubenswrapper[4933]: I0122 06:09:41.173995 4933 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:41 crc kubenswrapper[4933]: I0122 06:09:41.174025 4933 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Jan 22 06:09:41 crc kubenswrapper[4933]: I0122 06:09:41.174036 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wdl2r\" (UniqueName: \"kubernetes.io/projected/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-kube-api-access-wdl2r\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:41 crc kubenswrapper[4933]: I0122 06:09:41.174045 4933 reconciler_common.go:293] "Volume detached for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-cache\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:41 crc kubenswrapper[4933]: I0122 06:09:41.201229 4933 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Jan 22 06:09:41 crc kubenswrapper[4933]: I0122 06:09:41.275514 4933 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:41 crc kubenswrapper[4933]: I0122 06:09:41.311281 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" (UID: "4d7c7a06-59b1-4cc5-88dd-87bc9bccd016"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:09:41 crc kubenswrapper[4933]: I0122 06:09:41.376414 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:41 crc kubenswrapper[4933]: I0122 06:09:41.927401 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4d7c7a06-59b1-4cc5-88dd-87bc9bccd016","Type":"ContainerDied","Data":"9adc157c5fe2f46a84e84df9166b88a0ce7debcfa8230363732ceed13db40bd0"} Jan 22 06:09:41 crc kubenswrapper[4933]: I0122 06:09:41.927525 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Jan 22 06:09:41 crc kubenswrapper[4933]: I0122 06:09:41.928290 4933 scope.go:117] "RemoveContainer" containerID="3dbccd349100017de57314d2ef2e4235aa70b98e80d36f8e602e30cd6b29a896" Jan 22 06:09:41 crc kubenswrapper[4933]: I0122 06:09:41.976531 4933 scope.go:117] "RemoveContainer" containerID="4bf3bc4884d64bf94b227e5a3f89d2cd681e2010861ba6ad807f97e6ed46fa36" Jan 22 06:09:41 crc kubenswrapper[4933]: I0122 06:09:41.986176 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"] Jan 22 06:09:41 crc kubenswrapper[4933]: I0122 06:09:41.992031 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-storage-0"] Jan 22 06:09:42 crc kubenswrapper[4933]: I0122 06:09:42.000008 4933 scope.go:117] "RemoveContainer" containerID="9a3a457b6ab2d11ee8b59c4ef3cb0fb0706bc20b89418ab13b9ceffb95fea763" Jan 22 06:09:42 crc kubenswrapper[4933]: I0122 06:09:42.018993 4933 scope.go:117] "RemoveContainer" containerID="7cfffd64b9e03c3d5063b865c0c0af9e8e61d754936c8d6e9bc69e678886a8de" Jan 22 06:09:42 crc kubenswrapper[4933]: I0122 06:09:42.036896 4933 scope.go:117] "RemoveContainer" containerID="0a2c7ba35b45c00194109715a53245977cc22628a5deb202f9c6835fd7a8b075" Jan 22 06:09:42 crc kubenswrapper[4933]: I0122 06:09:42.051889 4933 scope.go:117] "RemoveContainer" containerID="8a27921119da49050071af7c42b3954b7dd3fbf2145808d90887c6de819bffec" Jan 22 06:09:42 crc kubenswrapper[4933]: I0122 06:09:42.104717 4933 scope.go:117] "RemoveContainer" containerID="805b814e2cbc13d8230bd687a77c696f506fa359d8f4364fabf274beca8c9fbe" Jan 22 06:09:42 crc kubenswrapper[4933]: I0122 06:09:42.121653 4933 scope.go:117] "RemoveContainer" containerID="02aabe8bc9d6a787100f261aab25ec19fece062ced3a51ee5af7db32e0476c01" Jan 22 06:09:42 crc kubenswrapper[4933]: I0122 06:09:42.137738 4933 scope.go:117] "RemoveContainer" containerID="5c84341f9cb1713a1792b0f79a08b86f98220a86f1ae11140038b774810dc9bf" Jan 22 06:09:42 crc kubenswrapper[4933]: I0122 06:09:42.152908 4933 scope.go:117] "RemoveContainer" containerID="ce89b1febf7814e26dbdcab688f4151d8251ed7ce3d27c8d2405f7735ce3e4ad" Jan 22 06:09:42 crc kubenswrapper[4933]: I0122 06:09:42.168252 4933 scope.go:117] "RemoveContainer" containerID="263bbaf72a78f3a591d84bbd2a5fddf505db79d66e26fc745570da4a483e5714" Jan 22 06:09:42 crc kubenswrapper[4933]: I0122 06:09:42.187170 4933 scope.go:117] "RemoveContainer" containerID="b474cfa5d681b7ffb201c8ace8fb7d3efb77a53712a69fa5646c7089e6d05e5a" Jan 22 06:09:42 crc kubenswrapper[4933]: I0122 06:09:42.203776 4933 scope.go:117] "RemoveContainer" containerID="89e9ea346551a5c5894ae7469a69c2ea0a9fc34a0adf372fc6b1fea201f66654" Jan 22 06:09:42 crc kubenswrapper[4933]: I0122 06:09:42.225161 4933 scope.go:117] "RemoveContainer" containerID="07d6aad661ae1121fa77133d2f0b4b28385e2d29ec41899d592ae1ee48161fdd" Jan 22 06:09:42 crc kubenswrapper[4933]: I0122 06:09:42.252226 4933 scope.go:117] "RemoveContainer" containerID="88bc3429a376b19172757bdf15fd8015c87d29a4672fc50f7cd63426a4a15deb" Jan 22 06:09:42 crc kubenswrapper[4933]: I0122 06:09:42.512414 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="12629e2f-7d6e-417c-a8df-c15b7a3e794e" path="/var/lib/kubelet/pods/12629e2f-7d6e-417c-a8df-c15b7a3e794e/volumes" Jan 22 06:09:42 crc kubenswrapper[4933]: I0122 06:09:42.513944 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" path="/var/lib/kubelet/pods/4d7c7a06-59b1-4cc5-88dd-87bc9bccd016/volumes" Jan 22 
06:09:50 crc kubenswrapper[4933]: I0122 06:09:50.206246 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-w8wv6" Jan 22 06:09:50 crc kubenswrapper[4933]: I0122 06:09:50.292322 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-w8wv6" Jan 22 06:09:50 crc kubenswrapper[4933]: I0122 06:09:50.456319 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-w8wv6"] Jan 22 06:09:52 crc kubenswrapper[4933]: I0122 06:09:52.054561 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-w8wv6" podUID="05f0680f-5545-41f5-8036-ae5810a7257e" containerName="registry-server" containerID="cri-o://1f4d27edaed17422428e077ac430ed583b5acbdff2bc34cd50d5c4c1ee1004e2" gracePeriod=2 Jan 22 06:09:54 crc kubenswrapper[4933]: I0122 06:09:54.081319 4933 generic.go:334] "Generic (PLEG): container finished" podID="05f0680f-5545-41f5-8036-ae5810a7257e" containerID="1f4d27edaed17422428e077ac430ed583b5acbdff2bc34cd50d5c4c1ee1004e2" exitCode=0 Jan 22 06:09:54 crc kubenswrapper[4933]: I0122 06:09:54.081423 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w8wv6" event={"ID":"05f0680f-5545-41f5-8036-ae5810a7257e","Type":"ContainerDied","Data":"1f4d27edaed17422428e077ac430ed583b5acbdff2bc34cd50d5c4c1ee1004e2"} Jan 22 06:09:54 crc kubenswrapper[4933]: I0122 06:09:54.322803 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-w8wv6" Jan 22 06:09:54 crc kubenswrapper[4933]: I0122 06:09:54.488063 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/05f0680f-5545-41f5-8036-ae5810a7257e-utilities\") pod \"05f0680f-5545-41f5-8036-ae5810a7257e\" (UID: \"05f0680f-5545-41f5-8036-ae5810a7257e\") " Jan 22 06:09:54 crc kubenswrapper[4933]: I0122 06:09:54.488151 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/05f0680f-5545-41f5-8036-ae5810a7257e-catalog-content\") pod \"05f0680f-5545-41f5-8036-ae5810a7257e\" (UID: \"05f0680f-5545-41f5-8036-ae5810a7257e\") " Jan 22 06:09:54 crc kubenswrapper[4933]: I0122 06:09:54.488277 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9kgw\" (UniqueName: \"kubernetes.io/projected/05f0680f-5545-41f5-8036-ae5810a7257e-kube-api-access-w9kgw\") pod \"05f0680f-5545-41f5-8036-ae5810a7257e\" (UID: \"05f0680f-5545-41f5-8036-ae5810a7257e\") " Jan 22 06:09:54 crc kubenswrapper[4933]: I0122 06:09:54.490021 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/05f0680f-5545-41f5-8036-ae5810a7257e-utilities" (OuterVolumeSpecName: "utilities") pod "05f0680f-5545-41f5-8036-ae5810a7257e" (UID: "05f0680f-5545-41f5-8036-ae5810a7257e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:09:54 crc kubenswrapper[4933]: I0122 06:09:54.496369 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/05f0680f-5545-41f5-8036-ae5810a7257e-kube-api-access-w9kgw" (OuterVolumeSpecName: "kube-api-access-w9kgw") pod "05f0680f-5545-41f5-8036-ae5810a7257e" (UID: "05f0680f-5545-41f5-8036-ae5810a7257e"). 
InnerVolumeSpecName "kube-api-access-w9kgw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:09:54 crc kubenswrapper[4933]: I0122 06:09:54.590859 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/05f0680f-5545-41f5-8036-ae5810a7257e-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:54 crc kubenswrapper[4933]: I0122 06:09:54.590912 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9kgw\" (UniqueName: \"kubernetes.io/projected/05f0680f-5545-41f5-8036-ae5810a7257e-kube-api-access-w9kgw\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:54 crc kubenswrapper[4933]: I0122 06:09:54.660609 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/05f0680f-5545-41f5-8036-ae5810a7257e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "05f0680f-5545-41f5-8036-ae5810a7257e" (UID: "05f0680f-5545-41f5-8036-ae5810a7257e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:09:54 crc kubenswrapper[4933]: I0122 06:09:54.692200 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/05f0680f-5545-41f5-8036-ae5810a7257e-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 06:09:55 crc kubenswrapper[4933]: I0122 06:09:55.098493 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w8wv6" event={"ID":"05f0680f-5545-41f5-8036-ae5810a7257e","Type":"ContainerDied","Data":"72a3e6fee60ff2e88a5cd4bd635789332335898307ab07aed7492a882c11eca1"} Jan 22 06:09:55 crc kubenswrapper[4933]: I0122 06:09:55.098589 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-w8wv6" Jan 22 06:09:55 crc kubenswrapper[4933]: I0122 06:09:55.098592 4933 scope.go:117] "RemoveContainer" containerID="1f4d27edaed17422428e077ac430ed583b5acbdff2bc34cd50d5c4c1ee1004e2" Jan 22 06:09:55 crc kubenswrapper[4933]: I0122 06:09:55.126253 4933 scope.go:117] "RemoveContainer" containerID="0d91d72ddece9b1175f3c750e18d5efc9f7a4d7ad35119fcf81b46c1483419df" Jan 22 06:09:55 crc kubenswrapper[4933]: I0122 06:09:55.155380 4933 scope.go:117] "RemoveContainer" containerID="45b09c98072d0db057b65e8cca6305c7356579481715ebb7b778a47cfcb21af9" Jan 22 06:09:55 crc kubenswrapper[4933]: I0122 06:09:55.233101 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-w8wv6"] Jan 22 06:09:55 crc kubenswrapper[4933]: I0122 06:09:55.241477 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-w8wv6"] Jan 22 06:09:56 crc kubenswrapper[4933]: I0122 06:09:56.508687 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="05f0680f-5545-41f5-8036-ae5810a7257e" path="/var/lib/kubelet/pods/05f0680f-5545-41f5-8036-ae5810a7257e/volumes" Jan 22 06:10:10 crc kubenswrapper[4933]: I0122 06:10:10.943404 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:10:10 crc kubenswrapper[4933]: I0122 06:10:10.944118 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:10:40 crc kubenswrapper[4933]: I0122 06:10:40.943494 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:10:40 crc kubenswrapper[4933]: I0122 06:10:40.944189 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:10:40 crc kubenswrapper[4933]: I0122 06:10:40.944259 4933 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" Jan 22 06:10:40 crc kubenswrapper[4933]: I0122 06:10:40.945213 4933 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"be33ec37115cc1139fae624e9dd3a341abdaf1a2979fabd7ece8f9fddf21ac63"} pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 06:10:40 crc kubenswrapper[4933]: I0122 06:10:40.945313 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" 
podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" containerID="cri-o://be33ec37115cc1139fae624e9dd3a341abdaf1a2979fabd7ece8f9fddf21ac63" gracePeriod=600 Jan 22 06:10:41 crc kubenswrapper[4933]: I0122 06:10:41.578613 4933 generic.go:334] "Generic (PLEG): container finished" podID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerID="be33ec37115cc1139fae624e9dd3a341abdaf1a2979fabd7ece8f9fddf21ac63" exitCode=0 Jan 22 06:10:41 crc kubenswrapper[4933]: I0122 06:10:41.578681 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerDied","Data":"be33ec37115cc1139fae624e9dd3a341abdaf1a2979fabd7ece8f9fddf21ac63"} Jan 22 06:10:41 crc kubenswrapper[4933]: I0122 06:10:41.578996 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerStarted","Data":"54e199a5d4d9e08b499d27a10ef530f0e0054285faf2472519a3248f3a305946"} Jan 22 06:10:41 crc kubenswrapper[4933]: I0122 06:10:41.579027 4933 scope.go:117] "RemoveContainer" containerID="55d6c6293cbc3ae4b2571461dfbc5b504ef2bc855f8799fa252e05302735e076" Jan 22 06:11:00 crc kubenswrapper[4933]: I0122 06:11:00.559699 4933 scope.go:117] "RemoveContainer" containerID="753b48659cd3b925e9189ae0fd8ddc7e983a3dbd19ced7ee6277a2f14332223f" Jan 22 06:11:00 crc kubenswrapper[4933]: I0122 06:11:00.597226 4933 scope.go:117] "RemoveContainer" containerID="cf6f2f928d77621dd900f7415a2f45d2131a39b6c456968dd6db4e3f7965e128" Jan 22 06:11:00 crc kubenswrapper[4933]: I0122 06:11:00.623153 4933 scope.go:117] "RemoveContainer" containerID="a1097fa194d6c885505160faaaebf96f845cac25e2ae2b3290a4730eb217e9df" Jan 22 06:11:00 crc kubenswrapper[4933]: I0122 06:11:00.643360 4933 scope.go:117] "RemoveContainer" containerID="54f1e8e12f4686cbd8996c3d96c7684dac1c79b18f44175f59423edaa54dd578" Jan 22 06:11:00 crc kubenswrapper[4933]: I0122 06:11:00.683611 4933 scope.go:117] "RemoveContainer" containerID="7cd00712865e1c6012d1cfde55317eec773f77c6b865e64da93f5386176e7ff3" Jan 22 06:11:00 crc kubenswrapper[4933]: I0122 06:11:00.715023 4933 scope.go:117] "RemoveContainer" containerID="2f98e446bcaa25591ece8baecf26888aeebdd5837b713037e026429681829807" Jan 22 06:11:00 crc kubenswrapper[4933]: I0122 06:11:00.738216 4933 scope.go:117] "RemoveContainer" containerID="421e0d822af799084d1641f728bef335a9738d0d9d662fea4443a2ec830d3aa6" Jan 22 06:11:00 crc kubenswrapper[4933]: I0122 06:11:00.768141 4933 scope.go:117] "RemoveContainer" containerID="03c7665e47c0f6f18ba8efbeca0e9ebdf6cae6b198b21b1db681e429a4a20130" Jan 22 06:11:00 crc kubenswrapper[4933]: I0122 06:11:00.787675 4933 scope.go:117] "RemoveContainer" containerID="5c0bab95c149f1ef6f612891529993e753c76f7bc83546ad768f27bd646dcc8f" Jan 22 06:11:00 crc kubenswrapper[4933]: I0122 06:11:00.810054 4933 scope.go:117] "RemoveContainer" containerID="44f70ca56a2c6d50df7e87607b5f575a8b422a9efb31712590aa445609e19ded" Jan 22 06:11:00 crc kubenswrapper[4933]: I0122 06:11:00.831638 4933 scope.go:117] "RemoveContainer" containerID="8330b5239f77c8b2736000ec9bf3d8a736e59fc675f587da19ce26937bdd8640" Jan 22 06:11:00 crc kubenswrapper[4933]: I0122 06:11:00.849098 4933 scope.go:117] "RemoveContainer" containerID="6183e68a58c9738473d34db92429a47aa5b4507c279bf75ece9abeceb453ca17" Jan 22 06:11:00 crc kubenswrapper[4933]: I0122 06:11:00.864562 4933 scope.go:117] "RemoveContainer" 
containerID="3409b6d93b9700bfe32856ab9cee3f6d9bd53400322f262fa48eebadc6de3a23" Jan 22 06:11:00 crc kubenswrapper[4933]: I0122 06:11:00.891642 4933 scope.go:117] "RemoveContainer" containerID="6fd114928698fbfda1eb0719a5519bfbb9c115b02e0b1ebbaeb3ba4ac6e8e3b4" Jan 22 06:11:00 crc kubenswrapper[4933]: I0122 06:11:00.908687 4933 scope.go:117] "RemoveContainer" containerID="beb6906e88fabf98fbefd441f25673bf4d870ba69a3d6a353004e55d74d7321f" Jan 22 06:11:00 crc kubenswrapper[4933]: I0122 06:11:00.939447 4933 scope.go:117] "RemoveContainer" containerID="705920cc0848428f66fda9619ac71f2c3ab6f952b4da897d8d49e095b329f07e" Jan 22 06:11:00 crc kubenswrapper[4933]: I0122 06:11:00.958394 4933 scope.go:117] "RemoveContainer" containerID="91d245d4ed97af7cc2c3d8b609c00d70bb738ba8e18e4de79db1d408fd33d5f3" Jan 22 06:11:00 crc kubenswrapper[4933]: I0122 06:11:00.975275 4933 scope.go:117] "RemoveContainer" containerID="182b292aa4c399c51ea47eaf2870bdc3ee5bb3f9a6451c535d36593c849e03d6" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.736024 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-49hfj"] Jan 22 06:11:17 crc kubenswrapper[4933]: E0122 06:11:17.736933 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05f0680f-5545-41f5-8036-ae5810a7257e" containerName="extract-utilities" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.736947 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="05f0680f-5545-41f5-8036-ae5810a7257e" containerName="extract-utilities" Jan 22 06:11:17 crc kubenswrapper[4933]: E0122 06:11:17.736965 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="object-replicator" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.736974 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="object-replicator" Jan 22 06:11:17 crc kubenswrapper[4933]: E0122 06:11:17.736988 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="container-auditor" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.736995 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="container-auditor" Jan 22 06:11:17 crc kubenswrapper[4933]: E0122 06:11:17.737006 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="object-server" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.737014 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="object-server" Jan 22 06:11:17 crc kubenswrapper[4933]: E0122 06:11:17.737030 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="container-server" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.737039 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="container-server" Jan 22 06:11:17 crc kubenswrapper[4933]: E0122 06:11:17.737050 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="object-expirer" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.737057 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="object-expirer" Jan 22 06:11:17 crc 
kubenswrapper[4933]: E0122 06:11:17.737067 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="container-updater" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.737090 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="container-updater" Jan 22 06:11:17 crc kubenswrapper[4933]: E0122 06:11:17.737107 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="object-updater" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.737114 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="object-updater" Jan 22 06:11:17 crc kubenswrapper[4933]: E0122 06:11:17.737136 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05f0680f-5545-41f5-8036-ae5810a7257e" containerName="registry-server" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.737144 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="05f0680f-5545-41f5-8036-ae5810a7257e" containerName="registry-server" Jan 22 06:11:17 crc kubenswrapper[4933]: E0122 06:11:17.737159 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe53ac25-75b3-42c3-802f-5359023b26e7" containerName="neutron-httpd" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.737166 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe53ac25-75b3-42c3-802f-5359023b26e7" containerName="neutron-httpd" Jan 22 06:11:17 crc kubenswrapper[4933]: E0122 06:11:17.737185 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05f0680f-5545-41f5-8036-ae5810a7257e" containerName="extract-content" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.737194 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="05f0680f-5545-41f5-8036-ae5810a7257e" containerName="extract-content" Jan 22 06:11:17 crc kubenswrapper[4933]: E0122 06:11:17.737207 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="account-server" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.737215 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="account-server" Jan 22 06:11:17 crc kubenswrapper[4933]: E0122 06:11:17.737225 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="account-reaper" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.737232 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="account-reaper" Jan 22 06:11:17 crc kubenswrapper[4933]: E0122 06:11:17.737245 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12629e2f-7d6e-417c-a8df-c15b7a3e794e" containerName="ovsdb-server-init" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.737252 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="12629e2f-7d6e-417c-a8df-c15b7a3e794e" containerName="ovsdb-server-init" Jan 22 06:11:17 crc kubenswrapper[4933]: E0122 06:11:17.737265 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="account-auditor" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.737272 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="account-auditor" Jan 
22 06:11:17 crc kubenswrapper[4933]: E0122 06:11:17.737282 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="swift-recon-cron" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.737290 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="swift-recon-cron" Jan 22 06:11:17 crc kubenswrapper[4933]: E0122 06:11:17.737302 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="account-replicator" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.737309 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="account-replicator" Jan 22 06:11:17 crc kubenswrapper[4933]: E0122 06:11:17.737322 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="object-auditor" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.737329 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="object-auditor" Jan 22 06:11:17 crc kubenswrapper[4933]: E0122 06:11:17.737343 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="rsync" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.737350 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="rsync" Jan 22 06:11:17 crc kubenswrapper[4933]: E0122 06:11:17.737359 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="container-replicator" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.737366 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="container-replicator" Jan 22 06:11:17 crc kubenswrapper[4933]: E0122 06:11:17.737379 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12629e2f-7d6e-417c-a8df-c15b7a3e794e" containerName="ovsdb-server" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.737387 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="12629e2f-7d6e-417c-a8df-c15b7a3e794e" containerName="ovsdb-server" Jan 22 06:11:17 crc kubenswrapper[4933]: E0122 06:11:17.737396 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe53ac25-75b3-42c3-802f-5359023b26e7" containerName="neutron-api" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.737404 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe53ac25-75b3-42c3-802f-5359023b26e7" containerName="neutron-api" Jan 22 06:11:17 crc kubenswrapper[4933]: E0122 06:11:17.737417 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12629e2f-7d6e-417c-a8df-c15b7a3e794e" containerName="ovs-vswitchd" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.737426 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="12629e2f-7d6e-417c-a8df-c15b7a3e794e" containerName="ovs-vswitchd" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.737585 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="account-server" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.737601 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="object-server" Jan 
22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.737614 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe53ac25-75b3-42c3-802f-5359023b26e7" containerName="neutron-httpd" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.737628 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="container-updater" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.737642 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="12629e2f-7d6e-417c-a8df-c15b7a3e794e" containerName="ovs-vswitchd" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.737658 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="object-replicator" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.737671 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="rsync" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.737680 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="12629e2f-7d6e-417c-a8df-c15b7a3e794e" containerName="ovsdb-server" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.737689 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="object-auditor" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.737702 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="account-replicator" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.737712 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="container-server" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.737723 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="account-reaper" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.737735 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe53ac25-75b3-42c3-802f-5359023b26e7" containerName="neutron-api" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.737743 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="swift-recon-cron" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.737754 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="container-replicator" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.737765 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="container-auditor" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.737774 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="05f0680f-5545-41f5-8036-ae5810a7257e" containerName="registry-server" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.737785 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="object-expirer" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.737795 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="object-updater" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.737805 4933 memory_manager.go:354] "RemoveStaleState removing 
state" podUID="4d7c7a06-59b1-4cc5-88dd-87bc9bccd016" containerName="account-auditor" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.739096 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-49hfj" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.749340 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-49hfj"] Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.885748 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0ff6c3e-d0c4-412e-9534-b708ec85f4d5-utilities\") pod \"community-operators-49hfj\" (UID: \"c0ff6c3e-d0c4-412e-9534-b708ec85f4d5\") " pod="openshift-marketplace/community-operators-49hfj" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.885803 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cnw46\" (UniqueName: \"kubernetes.io/projected/c0ff6c3e-d0c4-412e-9534-b708ec85f4d5-kube-api-access-cnw46\") pod \"community-operators-49hfj\" (UID: \"c0ff6c3e-d0c4-412e-9534-b708ec85f4d5\") " pod="openshift-marketplace/community-operators-49hfj" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.885847 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0ff6c3e-d0c4-412e-9534-b708ec85f4d5-catalog-content\") pod \"community-operators-49hfj\" (UID: \"c0ff6c3e-d0c4-412e-9534-b708ec85f4d5\") " pod="openshift-marketplace/community-operators-49hfj" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.986713 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0ff6c3e-d0c4-412e-9534-b708ec85f4d5-utilities\") pod \"community-operators-49hfj\" (UID: \"c0ff6c3e-d0c4-412e-9534-b708ec85f4d5\") " pod="openshift-marketplace/community-operators-49hfj" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.986782 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cnw46\" (UniqueName: \"kubernetes.io/projected/c0ff6c3e-d0c4-412e-9534-b708ec85f4d5-kube-api-access-cnw46\") pod \"community-operators-49hfj\" (UID: \"c0ff6c3e-d0c4-412e-9534-b708ec85f4d5\") " pod="openshift-marketplace/community-operators-49hfj" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.986810 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0ff6c3e-d0c4-412e-9534-b708ec85f4d5-catalog-content\") pod \"community-operators-49hfj\" (UID: \"c0ff6c3e-d0c4-412e-9534-b708ec85f4d5\") " pod="openshift-marketplace/community-operators-49hfj" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.987321 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0ff6c3e-d0c4-412e-9534-b708ec85f4d5-utilities\") pod \"community-operators-49hfj\" (UID: \"c0ff6c3e-d0c4-412e-9534-b708ec85f4d5\") " pod="openshift-marketplace/community-operators-49hfj" Jan 22 06:11:17 crc kubenswrapper[4933]: I0122 06:11:17.987345 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0ff6c3e-d0c4-412e-9534-b708ec85f4d5-catalog-content\") pod \"community-operators-49hfj\" 
(UID: \"c0ff6c3e-d0c4-412e-9534-b708ec85f4d5\") " pod="openshift-marketplace/community-operators-49hfj" Jan 22 06:11:18 crc kubenswrapper[4933]: I0122 06:11:18.008381 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cnw46\" (UniqueName: \"kubernetes.io/projected/c0ff6c3e-d0c4-412e-9534-b708ec85f4d5-kube-api-access-cnw46\") pod \"community-operators-49hfj\" (UID: \"c0ff6c3e-d0c4-412e-9534-b708ec85f4d5\") " pod="openshift-marketplace/community-operators-49hfj" Jan 22 06:11:18 crc kubenswrapper[4933]: I0122 06:11:18.068896 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-49hfj" Jan 22 06:11:18 crc kubenswrapper[4933]: I0122 06:11:18.330974 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-49hfj"] Jan 22 06:11:19 crc kubenswrapper[4933]: I0122 06:11:19.011166 4933 generic.go:334] "Generic (PLEG): container finished" podID="c0ff6c3e-d0c4-412e-9534-b708ec85f4d5" containerID="3499d43249f85d67c33e66d4c9964bd4c624eebf96894d03b252445852b56a78" exitCode=0 Jan 22 06:11:19 crc kubenswrapper[4933]: I0122 06:11:19.011315 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-49hfj" event={"ID":"c0ff6c3e-d0c4-412e-9534-b708ec85f4d5","Type":"ContainerDied","Data":"3499d43249f85d67c33e66d4c9964bd4c624eebf96894d03b252445852b56a78"} Jan 22 06:11:19 crc kubenswrapper[4933]: I0122 06:11:19.011607 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-49hfj" event={"ID":"c0ff6c3e-d0c4-412e-9534-b708ec85f4d5","Type":"ContainerStarted","Data":"0ea7b483104487f70a4c38fa917f07b376d2f8dd87806b67f80e7bac4beaf87a"} Jan 22 06:11:21 crc kubenswrapper[4933]: I0122 06:11:21.036230 4933 generic.go:334] "Generic (PLEG): container finished" podID="c0ff6c3e-d0c4-412e-9534-b708ec85f4d5" containerID="9d3f672694c1acb7256013de0cfb331cc0b2b83019ec1d63efc7f5ab6adb8048" exitCode=0 Jan 22 06:11:21 crc kubenswrapper[4933]: I0122 06:11:21.036560 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-49hfj" event={"ID":"c0ff6c3e-d0c4-412e-9534-b708ec85f4d5","Type":"ContainerDied","Data":"9d3f672694c1acb7256013de0cfb331cc0b2b83019ec1d63efc7f5ab6adb8048"} Jan 22 06:11:22 crc kubenswrapper[4933]: I0122 06:11:22.050257 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-49hfj" event={"ID":"c0ff6c3e-d0c4-412e-9534-b708ec85f4d5","Type":"ContainerStarted","Data":"8845a08936f278de8fdad0cfb5ccddb5227b9c8df82b45b30f4c4bbcee568363"} Jan 22 06:11:22 crc kubenswrapper[4933]: I0122 06:11:22.074325 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-49hfj" podStartSLOduration=2.588725733 podStartE2EDuration="5.074295268s" podCreationTimestamp="2026-01-22 06:11:17 +0000 UTC" firstStartedPulling="2026-01-22 06:11:19.017582833 +0000 UTC m=+1526.854708196" lastFinishedPulling="2026-01-22 06:11:21.503152368 +0000 UTC m=+1529.340277731" observedRunningTime="2026-01-22 06:11:22.066941859 +0000 UTC m=+1529.904067252" watchObservedRunningTime="2026-01-22 06:11:22.074295268 +0000 UTC m=+1529.911420661" Jan 22 06:11:28 crc kubenswrapper[4933]: I0122 06:11:28.069811 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-49hfj" Jan 22 06:11:28 crc kubenswrapper[4933]: I0122 
06:11:28.070733 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-49hfj" Jan 22 06:11:28 crc kubenswrapper[4933]: I0122 06:11:28.132682 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-49hfj" Jan 22 06:11:28 crc kubenswrapper[4933]: I0122 06:11:28.193524 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-49hfj" Jan 22 06:11:28 crc kubenswrapper[4933]: I0122 06:11:28.376456 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-49hfj"] Jan 22 06:11:30 crc kubenswrapper[4933]: I0122 06:11:30.132854 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-49hfj" podUID="c0ff6c3e-d0c4-412e-9534-b708ec85f4d5" containerName="registry-server" containerID="cri-o://8845a08936f278de8fdad0cfb5ccddb5227b9c8df82b45b30f4c4bbcee568363" gracePeriod=2 Jan 22 06:11:30 crc kubenswrapper[4933]: I0122 06:11:30.561326 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-49hfj" Jan 22 06:11:30 crc kubenswrapper[4933]: I0122 06:11:30.619353 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0ff6c3e-d0c4-412e-9534-b708ec85f4d5-utilities\") pod \"c0ff6c3e-d0c4-412e-9534-b708ec85f4d5\" (UID: \"c0ff6c3e-d0c4-412e-9534-b708ec85f4d5\") " Jan 22 06:11:30 crc kubenswrapper[4933]: I0122 06:11:30.619528 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0ff6c3e-d0c4-412e-9534-b708ec85f4d5-catalog-content\") pod \"c0ff6c3e-d0c4-412e-9534-b708ec85f4d5\" (UID: \"c0ff6c3e-d0c4-412e-9534-b708ec85f4d5\") " Jan 22 06:11:30 crc kubenswrapper[4933]: I0122 06:11:30.619609 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cnw46\" (UniqueName: \"kubernetes.io/projected/c0ff6c3e-d0c4-412e-9534-b708ec85f4d5-kube-api-access-cnw46\") pod \"c0ff6c3e-d0c4-412e-9534-b708ec85f4d5\" (UID: \"c0ff6c3e-d0c4-412e-9534-b708ec85f4d5\") " Jan 22 06:11:30 crc kubenswrapper[4933]: I0122 06:11:30.620282 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c0ff6c3e-d0c4-412e-9534-b708ec85f4d5-utilities" (OuterVolumeSpecName: "utilities") pod "c0ff6c3e-d0c4-412e-9534-b708ec85f4d5" (UID: "c0ff6c3e-d0c4-412e-9534-b708ec85f4d5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:11:30 crc kubenswrapper[4933]: I0122 06:11:30.625226 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c0ff6c3e-d0c4-412e-9534-b708ec85f4d5-kube-api-access-cnw46" (OuterVolumeSpecName: "kube-api-access-cnw46") pod "c0ff6c3e-d0c4-412e-9534-b708ec85f4d5" (UID: "c0ff6c3e-d0c4-412e-9534-b708ec85f4d5"). InnerVolumeSpecName "kube-api-access-cnw46". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:11:30 crc kubenswrapper[4933]: I0122 06:11:30.672884 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c0ff6c3e-d0c4-412e-9534-b708ec85f4d5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c0ff6c3e-d0c4-412e-9534-b708ec85f4d5" (UID: "c0ff6c3e-d0c4-412e-9534-b708ec85f4d5"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:11:30 crc kubenswrapper[4933]: I0122 06:11:30.721481 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0ff6c3e-d0c4-412e-9534-b708ec85f4d5-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 06:11:30 crc kubenswrapper[4933]: I0122 06:11:30.721514 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0ff6c3e-d0c4-412e-9534-b708ec85f4d5-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 06:11:30 crc kubenswrapper[4933]: I0122 06:11:30.721525 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cnw46\" (UniqueName: \"kubernetes.io/projected/c0ff6c3e-d0c4-412e-9534-b708ec85f4d5-kube-api-access-cnw46\") on node \"crc\" DevicePath \"\"" Jan 22 06:11:31 crc kubenswrapper[4933]: I0122 06:11:31.144619 4933 generic.go:334] "Generic (PLEG): container finished" podID="c0ff6c3e-d0c4-412e-9534-b708ec85f4d5" containerID="8845a08936f278de8fdad0cfb5ccddb5227b9c8df82b45b30f4c4bbcee568363" exitCode=0 Jan 22 06:11:31 crc kubenswrapper[4933]: I0122 06:11:31.144709 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-49hfj" Jan 22 06:11:31 crc kubenswrapper[4933]: I0122 06:11:31.144739 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-49hfj" event={"ID":"c0ff6c3e-d0c4-412e-9534-b708ec85f4d5","Type":"ContainerDied","Data":"8845a08936f278de8fdad0cfb5ccddb5227b9c8df82b45b30f4c4bbcee568363"} Jan 22 06:11:31 crc kubenswrapper[4933]: I0122 06:11:31.146128 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-49hfj" event={"ID":"c0ff6c3e-d0c4-412e-9534-b708ec85f4d5","Type":"ContainerDied","Data":"0ea7b483104487f70a4c38fa917f07b376d2f8dd87806b67f80e7bac4beaf87a"} Jan 22 06:11:31 crc kubenswrapper[4933]: I0122 06:11:31.146168 4933 scope.go:117] "RemoveContainer" containerID="8845a08936f278de8fdad0cfb5ccddb5227b9c8df82b45b30f4c4bbcee568363" Jan 22 06:11:31 crc kubenswrapper[4933]: I0122 06:11:31.173485 4933 scope.go:117] "RemoveContainer" containerID="9d3f672694c1acb7256013de0cfb331cc0b2b83019ec1d63efc7f5ab6adb8048" Jan 22 06:11:31 crc kubenswrapper[4933]: I0122 06:11:31.176466 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-49hfj"] Jan 22 06:11:31 crc kubenswrapper[4933]: I0122 06:11:31.204438 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-49hfj"] Jan 22 06:11:31 crc kubenswrapper[4933]: I0122 06:11:31.205444 4933 scope.go:117] "RemoveContainer" containerID="3499d43249f85d67c33e66d4c9964bd4c624eebf96894d03b252445852b56a78" Jan 22 06:11:31 crc kubenswrapper[4933]: I0122 06:11:31.234092 4933 scope.go:117] "RemoveContainer" containerID="8845a08936f278de8fdad0cfb5ccddb5227b9c8df82b45b30f4c4bbcee568363" Jan 22 06:11:31 crc kubenswrapper[4933]: E0122 06:11:31.234583 4933 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8845a08936f278de8fdad0cfb5ccddb5227b9c8df82b45b30f4c4bbcee568363\": container with ID starting with 8845a08936f278de8fdad0cfb5ccddb5227b9c8df82b45b30f4c4bbcee568363 not found: ID does not exist" containerID="8845a08936f278de8fdad0cfb5ccddb5227b9c8df82b45b30f4c4bbcee568363" Jan 22 06:11:31 crc kubenswrapper[4933]: I0122 06:11:31.234620 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8845a08936f278de8fdad0cfb5ccddb5227b9c8df82b45b30f4c4bbcee568363"} err="failed to get container status \"8845a08936f278de8fdad0cfb5ccddb5227b9c8df82b45b30f4c4bbcee568363\": rpc error: code = NotFound desc = could not find container \"8845a08936f278de8fdad0cfb5ccddb5227b9c8df82b45b30f4c4bbcee568363\": container with ID starting with 8845a08936f278de8fdad0cfb5ccddb5227b9c8df82b45b30f4c4bbcee568363 not found: ID does not exist" Jan 22 06:11:31 crc kubenswrapper[4933]: I0122 06:11:31.234641 4933 scope.go:117] "RemoveContainer" containerID="9d3f672694c1acb7256013de0cfb331cc0b2b83019ec1d63efc7f5ab6adb8048" Jan 22 06:11:31 crc kubenswrapper[4933]: E0122 06:11:31.234924 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d3f672694c1acb7256013de0cfb331cc0b2b83019ec1d63efc7f5ab6adb8048\": container with ID starting with 9d3f672694c1acb7256013de0cfb331cc0b2b83019ec1d63efc7f5ab6adb8048 not found: ID does not exist" containerID="9d3f672694c1acb7256013de0cfb331cc0b2b83019ec1d63efc7f5ab6adb8048" Jan 22 06:11:31 crc kubenswrapper[4933]: I0122 06:11:31.234944 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d3f672694c1acb7256013de0cfb331cc0b2b83019ec1d63efc7f5ab6adb8048"} err="failed to get container status \"9d3f672694c1acb7256013de0cfb331cc0b2b83019ec1d63efc7f5ab6adb8048\": rpc error: code = NotFound desc = could not find container \"9d3f672694c1acb7256013de0cfb331cc0b2b83019ec1d63efc7f5ab6adb8048\": container with ID starting with 9d3f672694c1acb7256013de0cfb331cc0b2b83019ec1d63efc7f5ab6adb8048 not found: ID does not exist" Jan 22 06:11:31 crc kubenswrapper[4933]: I0122 06:11:31.234958 4933 scope.go:117] "RemoveContainer" containerID="3499d43249f85d67c33e66d4c9964bd4c624eebf96894d03b252445852b56a78" Jan 22 06:11:31 crc kubenswrapper[4933]: E0122 06:11:31.235282 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3499d43249f85d67c33e66d4c9964bd4c624eebf96894d03b252445852b56a78\": container with ID starting with 3499d43249f85d67c33e66d4c9964bd4c624eebf96894d03b252445852b56a78 not found: ID does not exist" containerID="3499d43249f85d67c33e66d4c9964bd4c624eebf96894d03b252445852b56a78" Jan 22 06:11:31 crc kubenswrapper[4933]: I0122 06:11:31.235370 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3499d43249f85d67c33e66d4c9964bd4c624eebf96894d03b252445852b56a78"} err="failed to get container status \"3499d43249f85d67c33e66d4c9964bd4c624eebf96894d03b252445852b56a78\": rpc error: code = NotFound desc = could not find container \"3499d43249f85d67c33e66d4c9964bd4c624eebf96894d03b252445852b56a78\": container with ID starting with 3499d43249f85d67c33e66d4c9964bd4c624eebf96894d03b252445852b56a78 not found: ID does not exist" Jan 22 06:11:32 crc kubenswrapper[4933]: I0122 06:11:32.506500 4933 kubelet_volumes.go:163] "Cleaned 
up orphaned pod volumes dir" podUID="c0ff6c3e-d0c4-412e-9534-b708ec85f4d5" path="/var/lib/kubelet/pods/c0ff6c3e-d0c4-412e-9534-b708ec85f4d5/volumes"
Jan 22 06:12:01 crc kubenswrapper[4933]: I0122 06:12:01.207178 4933 scope.go:117] "RemoveContainer" containerID="f2721000af5a120f7871e7c19c9dc7ce44e29a702f48b5d2b4de79c19cb595fd"
Jan 22 06:12:01 crc kubenswrapper[4933]: I0122 06:12:01.272673 4933 scope.go:117] "RemoveContainer" containerID="a4f3ce4e811b73a4d05b28e37fa88419dff7ac4f35d8a912a0bf8ee52f0f3503"
Jan 22 06:12:01 crc kubenswrapper[4933]: I0122 06:12:01.320791 4933 scope.go:117] "RemoveContainer" containerID="3917ee58ab8a64812cc4ae1ea20673598a33f8056af415a03f81cd87c862ee77"
Jan 22 06:12:01 crc kubenswrapper[4933]: I0122 06:12:01.368852 4933 scope.go:117] "RemoveContainer" containerID="d8f447e83a7dace27b0cfb5535961cb963b243a669c28f5456502fec405aa42c"
Jan 22 06:12:01 crc kubenswrapper[4933]: I0122 06:12:01.392876 4933 scope.go:117] "RemoveContainer" containerID="8fd13567d632d5099d30b19499d649a020aef880efa93a39cddc58f7ca281d15"
Jan 22 06:12:01 crc kubenswrapper[4933]: I0122 06:12:01.420876 4933 scope.go:117] "RemoveContainer" containerID="2925016a4526e711c7a13ca9b27ae95ab14072a5756f21964d4b6cb4c8838060"
Jan 22 06:12:01 crc kubenswrapper[4933]: I0122 06:12:01.458214 4933 scope.go:117] "RemoveContainer" containerID="6407112093f3e5136e3386e9fa21f2e430a1ab957e3150228ee38dbdebba2e14"
Jan 22 06:12:01 crc kubenswrapper[4933]: I0122 06:12:01.494837 4933 scope.go:117] "RemoveContainer" containerID="25256a7adb222867f28f0101189b07c7deb83968a46968e6cf7492f82b35018b"
Jan 22 06:12:01 crc kubenswrapper[4933]: I0122 06:12:01.531368 4933 scope.go:117] "RemoveContainer" containerID="d01bdab4b1711810909a920cee5293034b71009acdfd620b98e33f77e1c8919b"
Jan 22 06:12:01 crc kubenswrapper[4933]: I0122 06:12:01.549019 4933 scope.go:117] "RemoveContainer" containerID="e0226956031a6164e10e35e1b1744abeea73d2f85ab0cdab0974600cd35c965b"
Jan 22 06:12:03 crc kubenswrapper[4933]: I0122 06:12:03.147041 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-2sbvv"]
Jan 22 06:12:03 crc kubenswrapper[4933]: E0122 06:12:03.149280 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0ff6c3e-d0c4-412e-9534-b708ec85f4d5" containerName="extract-content"
Jan 22 06:12:03 crc kubenswrapper[4933]: I0122 06:12:03.149457 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0ff6c3e-d0c4-412e-9534-b708ec85f4d5" containerName="extract-content"
Jan 22 06:12:03 crc kubenswrapper[4933]: E0122 06:12:03.149587 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0ff6c3e-d0c4-412e-9534-b708ec85f4d5" containerName="registry-server"
Jan 22 06:12:03 crc kubenswrapper[4933]: I0122 06:12:03.149703 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0ff6c3e-d0c4-412e-9534-b708ec85f4d5" containerName="registry-server"
Jan 22 06:12:03 crc kubenswrapper[4933]: E0122 06:12:03.149853 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0ff6c3e-d0c4-412e-9534-b708ec85f4d5" containerName="extract-utilities"
Jan 22 06:12:03 crc kubenswrapper[4933]: I0122 06:12:03.149989 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0ff6c3e-d0c4-412e-9534-b708ec85f4d5" containerName="extract-utilities"
Jan 22 06:12:03 crc kubenswrapper[4933]: I0122 06:12:03.150374 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="c0ff6c3e-d0c4-412e-9534-b708ec85f4d5" containerName="registry-server"
Jan 22 06:12:03 crc kubenswrapper[4933]: I0122 06:12:03.152241 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2sbvv"
Jan 22 06:12:03 crc kubenswrapper[4933]: I0122 06:12:03.158319 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2sbvv"]
Jan 22 06:12:03 crc kubenswrapper[4933]: I0122 06:12:03.250372 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vjxck\" (UniqueName: \"kubernetes.io/projected/0cc8a0e1-ed32-4138-9d50-5a07beb3cf36-kube-api-access-vjxck\") pod \"certified-operators-2sbvv\" (UID: \"0cc8a0e1-ed32-4138-9d50-5a07beb3cf36\") " pod="openshift-marketplace/certified-operators-2sbvv"
Jan 22 06:12:03 crc kubenswrapper[4933]: I0122 06:12:03.250698 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0cc8a0e1-ed32-4138-9d50-5a07beb3cf36-utilities\") pod \"certified-operators-2sbvv\" (UID: \"0cc8a0e1-ed32-4138-9d50-5a07beb3cf36\") " pod="openshift-marketplace/certified-operators-2sbvv"
Jan 22 06:12:03 crc kubenswrapper[4933]: I0122 06:12:03.250893 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0cc8a0e1-ed32-4138-9d50-5a07beb3cf36-catalog-content\") pod \"certified-operators-2sbvv\" (UID: \"0cc8a0e1-ed32-4138-9d50-5a07beb3cf36\") " pod="openshift-marketplace/certified-operators-2sbvv"
Jan 22 06:12:03 crc kubenswrapper[4933]: I0122 06:12:03.352726 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vjxck\" (UniqueName: \"kubernetes.io/projected/0cc8a0e1-ed32-4138-9d50-5a07beb3cf36-kube-api-access-vjxck\") pod \"certified-operators-2sbvv\" (UID: \"0cc8a0e1-ed32-4138-9d50-5a07beb3cf36\") " pod="openshift-marketplace/certified-operators-2sbvv"
Jan 22 06:12:03 crc kubenswrapper[4933]: I0122 06:12:03.353580 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0cc8a0e1-ed32-4138-9d50-5a07beb3cf36-utilities\") pod \"certified-operators-2sbvv\" (UID: \"0cc8a0e1-ed32-4138-9d50-5a07beb3cf36\") " pod="openshift-marketplace/certified-operators-2sbvv"
Jan 22 06:12:03 crc kubenswrapper[4933]: I0122 06:12:03.353802 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0cc8a0e1-ed32-4138-9d50-5a07beb3cf36-catalog-content\") pod \"certified-operators-2sbvv\" (UID: \"0cc8a0e1-ed32-4138-9d50-5a07beb3cf36\") " pod="openshift-marketplace/certified-operators-2sbvv"
Jan 22 06:12:03 crc kubenswrapper[4933]: I0122 06:12:03.354073 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0cc8a0e1-ed32-4138-9d50-5a07beb3cf36-utilities\") pod \"certified-operators-2sbvv\" (UID: \"0cc8a0e1-ed32-4138-9d50-5a07beb3cf36\") " pod="openshift-marketplace/certified-operators-2sbvv"
Jan 22 06:12:03 crc kubenswrapper[4933]: I0122 06:12:03.355763 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0cc8a0e1-ed32-4138-9d50-5a07beb3cf36-catalog-content\") pod \"certified-operators-2sbvv\" (UID: \"0cc8a0e1-ed32-4138-9d50-5a07beb3cf36\") " pod="openshift-marketplace/certified-operators-2sbvv"
Jan 22 06:12:03 crc kubenswrapper[4933]: I0122 06:12:03.377863 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vjxck\" (UniqueName: \"kubernetes.io/projected/0cc8a0e1-ed32-4138-9d50-5a07beb3cf36-kube-api-access-vjxck\") pod \"certified-operators-2sbvv\" (UID: \"0cc8a0e1-ed32-4138-9d50-5a07beb3cf36\") " pod="openshift-marketplace/certified-operators-2sbvv"
Jan 22 06:12:03 crc kubenswrapper[4933]: I0122 06:12:03.483529 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2sbvv"
Jan 22 06:12:03 crc kubenswrapper[4933]: I0122 06:12:03.954000 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2sbvv"]
Jan 22 06:12:04 crc kubenswrapper[4933]: I0122 06:12:04.474391 4933 generic.go:334] "Generic (PLEG): container finished" podID="0cc8a0e1-ed32-4138-9d50-5a07beb3cf36" containerID="13ec5c2ba335e2058c60f3fb542a39e9c931de4b1f7bb66a58fe35c3b2ebae05" exitCode=0
Jan 22 06:12:04 crc kubenswrapper[4933]: I0122 06:12:04.474460 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2sbvv" event={"ID":"0cc8a0e1-ed32-4138-9d50-5a07beb3cf36","Type":"ContainerDied","Data":"13ec5c2ba335e2058c60f3fb542a39e9c931de4b1f7bb66a58fe35c3b2ebae05"}
Jan 22 06:12:04 crc kubenswrapper[4933]: I0122 06:12:04.474807 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2sbvv" event={"ID":"0cc8a0e1-ed32-4138-9d50-5a07beb3cf36","Type":"ContainerStarted","Data":"200222b835337de17705606b9d5596702c20140d2b3c97f87fbac1f9f40d23a4"}
Jan 22 06:12:05 crc kubenswrapper[4933]: I0122 06:12:05.485754 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2sbvv" event={"ID":"0cc8a0e1-ed32-4138-9d50-5a07beb3cf36","Type":"ContainerStarted","Data":"c84eed6ca018701cb449cd78a61a8c80baad868d621a707c92155942db572adc"}
Jan 22 06:12:06 crc kubenswrapper[4933]: I0122 06:12:06.494985 4933 generic.go:334] "Generic (PLEG): container finished" podID="0cc8a0e1-ed32-4138-9d50-5a07beb3cf36" containerID="c84eed6ca018701cb449cd78a61a8c80baad868d621a707c92155942db572adc" exitCode=0
Jan 22 06:12:06 crc kubenswrapper[4933]: I0122 06:12:06.513265 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2sbvv" event={"ID":"0cc8a0e1-ed32-4138-9d50-5a07beb3cf36","Type":"ContainerDied","Data":"c84eed6ca018701cb449cd78a61a8c80baad868d621a707c92155942db572adc"}
Jan 22 06:12:07 crc kubenswrapper[4933]: I0122 06:12:07.505708 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2sbvv" event={"ID":"0cc8a0e1-ed32-4138-9d50-5a07beb3cf36","Type":"ContainerStarted","Data":"ed878a1ab6136906759e43979d97039857d533ada64ca3b123c6c3fcfcd5a6fe"}
Jan 22 06:12:07 crc kubenswrapper[4933]: I0122 06:12:07.536867 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-2sbvv" podStartSLOduration=1.989670375 podStartE2EDuration="4.536840267s" podCreationTimestamp="2026-01-22 06:12:03 +0000 UTC" firstStartedPulling="2026-01-22 06:12:04.477763738 +0000 UTC m=+1572.314889101" lastFinishedPulling="2026-01-22 06:12:07.0249336 +0000 UTC m=+1574.862058993" observedRunningTime="2026-01-22 06:12:07.533096091 +0000 UTC m=+1575.370221454" watchObservedRunningTime="2026-01-22 06:12:07.536840267 +0000 UTC m=+1575.373965660"
Jan 22 06:12:13 crc kubenswrapper[4933]: I0122 06:12:13.484009 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-2sbvv"
Jan 22 06:12:13 crc kubenswrapper[4933]: I0122 06:12:13.484535 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-2sbvv"
Jan 22 06:12:13 crc kubenswrapper[4933]: I0122 06:12:13.537417 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-2sbvv"
Jan 22 06:12:13 crc kubenswrapper[4933]: I0122 06:12:13.625565 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-2sbvv"
Jan 22 06:12:13 crc kubenswrapper[4933]: I0122 06:12:13.777368 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2sbvv"]
Jan 22 06:12:15 crc kubenswrapper[4933]: I0122 06:12:15.586130 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-2sbvv" podUID="0cc8a0e1-ed32-4138-9d50-5a07beb3cf36" containerName="registry-server" containerID="cri-o://ed878a1ab6136906759e43979d97039857d533ada64ca3b123c6c3fcfcd5a6fe" gracePeriod=2
Jan 22 06:12:16 crc kubenswrapper[4933]: I0122 06:12:16.596507 4933 generic.go:334] "Generic (PLEG): container finished" podID="0cc8a0e1-ed32-4138-9d50-5a07beb3cf36" containerID="ed878a1ab6136906759e43979d97039857d533ada64ca3b123c6c3fcfcd5a6fe" exitCode=0
Jan 22 06:12:16 crc kubenswrapper[4933]: I0122 06:12:16.596597 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2sbvv" event={"ID":"0cc8a0e1-ed32-4138-9d50-5a07beb3cf36","Type":"ContainerDied","Data":"ed878a1ab6136906759e43979d97039857d533ada64ca3b123c6c3fcfcd5a6fe"}
Jan 22 06:12:16 crc kubenswrapper[4933]: I0122 06:12:16.597037 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2sbvv" event={"ID":"0cc8a0e1-ed32-4138-9d50-5a07beb3cf36","Type":"ContainerDied","Data":"200222b835337de17705606b9d5596702c20140d2b3c97f87fbac1f9f40d23a4"}
Jan 22 06:12:16 crc kubenswrapper[4933]: I0122 06:12:16.597084 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="200222b835337de17705606b9d5596702c20140d2b3c97f87fbac1f9f40d23a4"
Jan 22 06:12:16 crc kubenswrapper[4933]: I0122 06:12:16.601937 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2sbvv"
Jan 22 06:12:16 crc kubenswrapper[4933]: I0122 06:12:16.778004 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vjxck\" (UniqueName: \"kubernetes.io/projected/0cc8a0e1-ed32-4138-9d50-5a07beb3cf36-kube-api-access-vjxck\") pod \"0cc8a0e1-ed32-4138-9d50-5a07beb3cf36\" (UID: \"0cc8a0e1-ed32-4138-9d50-5a07beb3cf36\") "
Jan 22 06:12:16 crc kubenswrapper[4933]: I0122 06:12:16.778071 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0cc8a0e1-ed32-4138-9d50-5a07beb3cf36-utilities\") pod \"0cc8a0e1-ed32-4138-9d50-5a07beb3cf36\" (UID: \"0cc8a0e1-ed32-4138-9d50-5a07beb3cf36\") "
Jan 22 06:12:16 crc kubenswrapper[4933]: I0122 06:12:16.778146 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0cc8a0e1-ed32-4138-9d50-5a07beb3cf36-catalog-content\") pod \"0cc8a0e1-ed32-4138-9d50-5a07beb3cf36\" (UID: \"0cc8a0e1-ed32-4138-9d50-5a07beb3cf36\") "
Jan 22 06:12:16 crc kubenswrapper[4933]: I0122 06:12:16.779188 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0cc8a0e1-ed32-4138-9d50-5a07beb3cf36-utilities" (OuterVolumeSpecName: "utilities") pod "0cc8a0e1-ed32-4138-9d50-5a07beb3cf36" (UID: "0cc8a0e1-ed32-4138-9d50-5a07beb3cf36"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 06:12:16 crc kubenswrapper[4933]: I0122 06:12:16.783666 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0cc8a0e1-ed32-4138-9d50-5a07beb3cf36-kube-api-access-vjxck" (OuterVolumeSpecName: "kube-api-access-vjxck") pod "0cc8a0e1-ed32-4138-9d50-5a07beb3cf36" (UID: "0cc8a0e1-ed32-4138-9d50-5a07beb3cf36"). InnerVolumeSpecName "kube-api-access-vjxck". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 06:12:16 crc kubenswrapper[4933]: I0122 06:12:16.835771 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0cc8a0e1-ed32-4138-9d50-5a07beb3cf36-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0cc8a0e1-ed32-4138-9d50-5a07beb3cf36" (UID: "0cc8a0e1-ed32-4138-9d50-5a07beb3cf36"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 06:12:16 crc kubenswrapper[4933]: I0122 06:12:16.879311 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vjxck\" (UniqueName: \"kubernetes.io/projected/0cc8a0e1-ed32-4138-9d50-5a07beb3cf36-kube-api-access-vjxck\") on node \"crc\" DevicePath \"\""
Jan 22 06:12:16 crc kubenswrapper[4933]: I0122 06:12:16.879919 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0cc8a0e1-ed32-4138-9d50-5a07beb3cf36-utilities\") on node \"crc\" DevicePath \"\""
Jan 22 06:12:16 crc kubenswrapper[4933]: I0122 06:12:16.880003 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0cc8a0e1-ed32-4138-9d50-5a07beb3cf36-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 22 06:12:18 crc kubenswrapper[4933]: I0122 06:12:18.127531 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2sbvv"
Jan 22 06:12:18 crc kubenswrapper[4933]: I0122 06:12:18.172028 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2sbvv"]
Jan 22 06:12:18 crc kubenswrapper[4933]: I0122 06:12:18.179758 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-2sbvv"]
Jan 22 06:12:18 crc kubenswrapper[4933]: I0122 06:12:18.501945 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0cc8a0e1-ed32-4138-9d50-5a07beb3cf36" path="/var/lib/kubelet/pods/0cc8a0e1-ed32-4138-9d50-5a07beb3cf36/volumes"
Jan 22 06:13:01 crc kubenswrapper[4933]: I0122 06:13:01.742941 4933 scope.go:117] "RemoveContainer" containerID="b688c9df9ddf18e461a754a176dde6318fee2f52bdd4e4de166be8f749491199"
Jan 22 06:13:01 crc kubenswrapper[4933]: I0122 06:13:01.767739 4933 scope.go:117] "RemoveContainer" containerID="3684c1a9364d58aa65ee2109cc86d4b56f3ea349100b97465ad0582a41b254fd"
Jan 22 06:13:01 crc kubenswrapper[4933]: I0122 06:13:01.828728 4933 scope.go:117] "RemoveContainer" containerID="156296118b1b88d53b2355e6b83afe711457599e35d08ad1921101045b462f90"
Jan 22 06:13:01 crc kubenswrapper[4933]: I0122 06:13:01.879837 4933 scope.go:117] "RemoveContainer" containerID="48c61157f4530cf2b5b3b83a7a41b770aa5408744cfbbb089df03b144095d74c"
Jan 22 06:13:01 crc kubenswrapper[4933]: I0122 06:13:01.927815 4933 scope.go:117] "RemoveContainer" containerID="91d388ea73a08e0801ca20093e51fe25125825118703baeb4840b396ba49b75c"
Jan 22 06:13:01 crc kubenswrapper[4933]: I0122 06:13:01.960599 4933 scope.go:117] "RemoveContainer" containerID="37b3e40a183805b926f93991f87b98c3916433b4779fa99bc0dfedd6dbd20491"
Jan 22 06:13:02 crc kubenswrapper[4933]: I0122 06:13:02.008548 4933 scope.go:117] "RemoveContainer" containerID="df7de060c1dad8894f50f45e424929c853494018875cf7b6b592ddd7bb3ce606"
Jan 22 06:13:02 crc kubenswrapper[4933]: I0122 06:13:02.022435 4933 scope.go:117] "RemoveContainer" containerID="090e33e4d3567f753f608b0bfeb07bd49b6d7637884bebf9129dec720755e05d"
Jan 22 06:13:02 crc kubenswrapper[4933]: I0122 06:13:02.530186 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-z8mm2"]
Jan 22 06:13:02 crc kubenswrapper[4933]: E0122 06:13:02.530497 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0cc8a0e1-ed32-4138-9d50-5a07beb3cf36" containerName="extract-content"
Jan 22 06:13:02 crc kubenswrapper[4933]: I0122 06:13:02.530513 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="0cc8a0e1-ed32-4138-9d50-5a07beb3cf36" containerName="extract-content"
Jan 22 06:13:02 crc kubenswrapper[4933]: E0122 06:13:02.530538 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0cc8a0e1-ed32-4138-9d50-5a07beb3cf36" containerName="extract-utilities"
Jan 22 06:13:02 crc kubenswrapper[4933]: I0122 06:13:02.530545 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="0cc8a0e1-ed32-4138-9d50-5a07beb3cf36" containerName="extract-utilities"
Jan 22 06:13:02 crc kubenswrapper[4933]: E0122 06:13:02.530561 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0cc8a0e1-ed32-4138-9d50-5a07beb3cf36" containerName="registry-server"
Jan 22 06:13:02 crc kubenswrapper[4933]: I0122 06:13:02.530567 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="0cc8a0e1-ed32-4138-9d50-5a07beb3cf36" containerName="registry-server"
Jan 22 06:13:02 crc kubenswrapper[4933]: I0122 06:13:02.530694 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="0cc8a0e1-ed32-4138-9d50-5a07beb3cf36" containerName="registry-server"
Jan 22 06:13:02 crc kubenswrapper[4933]: I0122 06:13:02.531646 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z8mm2"
Jan 22 06:13:02 crc kubenswrapper[4933]: I0122 06:13:02.534341 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-z8mm2"]
Jan 22 06:13:02 crc kubenswrapper[4933]: I0122 06:13:02.681689 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea4ae020-67c3-4cf3-a35c-0eac08c8f115-catalog-content\") pod \"redhat-marketplace-z8mm2\" (UID: \"ea4ae020-67c3-4cf3-a35c-0eac08c8f115\") " pod="openshift-marketplace/redhat-marketplace-z8mm2"
Jan 22 06:13:02 crc kubenswrapper[4933]: I0122 06:13:02.681725 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5pfxh\" (UniqueName: \"kubernetes.io/projected/ea4ae020-67c3-4cf3-a35c-0eac08c8f115-kube-api-access-5pfxh\") pod \"redhat-marketplace-z8mm2\" (UID: \"ea4ae020-67c3-4cf3-a35c-0eac08c8f115\") " pod="openshift-marketplace/redhat-marketplace-z8mm2"
Jan 22 06:13:02 crc kubenswrapper[4933]: I0122 06:13:02.681760 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea4ae020-67c3-4cf3-a35c-0eac08c8f115-utilities\") pod \"redhat-marketplace-z8mm2\" (UID: \"ea4ae020-67c3-4cf3-a35c-0eac08c8f115\") " pod="openshift-marketplace/redhat-marketplace-z8mm2"
Jan 22 06:13:02 crc kubenswrapper[4933]: I0122 06:13:02.782818 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea4ae020-67c3-4cf3-a35c-0eac08c8f115-catalog-content\") pod \"redhat-marketplace-z8mm2\" (UID: \"ea4ae020-67c3-4cf3-a35c-0eac08c8f115\") " pod="openshift-marketplace/redhat-marketplace-z8mm2"
Jan 22 06:13:02 crc kubenswrapper[4933]: I0122 06:13:02.783048 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5pfxh\" (UniqueName: \"kubernetes.io/projected/ea4ae020-67c3-4cf3-a35c-0eac08c8f115-kube-api-access-5pfxh\") pod \"redhat-marketplace-z8mm2\" (UID: \"ea4ae020-67c3-4cf3-a35c-0eac08c8f115\") " pod="openshift-marketplace/redhat-marketplace-z8mm2"
Jan 22 06:13:02 crc kubenswrapper[4933]: I0122 06:13:02.783101 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea4ae020-67c3-4cf3-a35c-0eac08c8f115-utilities\") pod \"redhat-marketplace-z8mm2\" (UID: \"ea4ae020-67c3-4cf3-a35c-0eac08c8f115\") " pod="openshift-marketplace/redhat-marketplace-z8mm2"
Jan 22 06:13:02 crc kubenswrapper[4933]: I0122 06:13:02.783748 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea4ae020-67c3-4cf3-a35c-0eac08c8f115-utilities\") pod \"redhat-marketplace-z8mm2\" (UID: \"ea4ae020-67c3-4cf3-a35c-0eac08c8f115\") " pod="openshift-marketplace/redhat-marketplace-z8mm2"
Jan 22 06:13:02 crc kubenswrapper[4933]: I0122 06:13:02.784250 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea4ae020-67c3-4cf3-a35c-0eac08c8f115-catalog-content\") pod \"redhat-marketplace-z8mm2\" (UID: \"ea4ae020-67c3-4cf3-a35c-0eac08c8f115\") " pod="openshift-marketplace/redhat-marketplace-z8mm2"
Jan 22 06:13:02 crc kubenswrapper[4933]: I0122 06:13:02.803023 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5pfxh\" (UniqueName: \"kubernetes.io/projected/ea4ae020-67c3-4cf3-a35c-0eac08c8f115-kube-api-access-5pfxh\") pod \"redhat-marketplace-z8mm2\" (UID: \"ea4ae020-67c3-4cf3-a35c-0eac08c8f115\") " pod="openshift-marketplace/redhat-marketplace-z8mm2"
Jan 22 06:13:02 crc kubenswrapper[4933]: I0122 06:13:02.852698 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z8mm2"
Jan 22 06:13:03 crc kubenswrapper[4933]: I0122 06:13:03.384586 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-z8mm2"]
Jan 22 06:13:03 crc kubenswrapper[4933]: I0122 06:13:03.573231 4933 generic.go:334] "Generic (PLEG): container finished" podID="ea4ae020-67c3-4cf3-a35c-0eac08c8f115" containerID="f53611fcd0734b9ffb3841567c1a33924ab26e8d92d5053efdcccd66b7ac84c9" exitCode=0
Jan 22 06:13:03 crc kubenswrapper[4933]: I0122 06:13:03.573296 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z8mm2" event={"ID":"ea4ae020-67c3-4cf3-a35c-0eac08c8f115","Type":"ContainerDied","Data":"f53611fcd0734b9ffb3841567c1a33924ab26e8d92d5053efdcccd66b7ac84c9"}
Jan 22 06:13:03 crc kubenswrapper[4933]: I0122 06:13:03.573349 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z8mm2" event={"ID":"ea4ae020-67c3-4cf3-a35c-0eac08c8f115","Type":"ContainerStarted","Data":"11c33558728f686b687fa2963237b295f01696a740a999bce73e4a351dc5768a"}
Jan 22 06:13:04 crc kubenswrapper[4933]: I0122 06:13:04.583602 4933 generic.go:334] "Generic (PLEG): container finished" podID="ea4ae020-67c3-4cf3-a35c-0eac08c8f115" containerID="b89bc7ed60ebd13ce269656ecbee67607bc696ad1ced51fa31ceb008c15b5c3f" exitCode=0
Jan 22 06:13:04 crc kubenswrapper[4933]: I0122 06:13:04.583691 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z8mm2" event={"ID":"ea4ae020-67c3-4cf3-a35c-0eac08c8f115","Type":"ContainerDied","Data":"b89bc7ed60ebd13ce269656ecbee67607bc696ad1ced51fa31ceb008c15b5c3f"}
Jan 22 06:13:04 crc kubenswrapper[4933]: I0122 06:13:04.585617 4933 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 22 06:13:05 crc kubenswrapper[4933]: I0122 06:13:05.593052 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z8mm2" event={"ID":"ea4ae020-67c3-4cf3-a35c-0eac08c8f115","Type":"ContainerStarted","Data":"6be71cd8baf1f4efc0c2fae5e5b989ad0adc39409eddfe7708ea6391c589d676"}
Jan 22 06:13:05 crc kubenswrapper[4933]: I0122 06:13:05.619007 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-z8mm2" podStartSLOduration=2.040993588 podStartE2EDuration="3.618985928s" podCreationTimestamp="2026-01-22 06:13:02 +0000 UTC" firstStartedPulling="2026-01-22 06:13:03.574744072 +0000 UTC m=+1631.411869435" lastFinishedPulling="2026-01-22 06:13:05.152736412 +0000 UTC m=+1632.989861775" observedRunningTime="2026-01-22 06:13:05.612151374 +0000 UTC m=+1633.449276737" watchObservedRunningTime="2026-01-22 06:13:05.618985928 +0000 UTC m=+1633.456111281"
Jan 22 06:13:10 crc kubenswrapper[4933]: I0122 06:13:10.943789 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 06:13:10 crc kubenswrapper[4933]: I0122 06:13:10.944202 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 06:13:12 crc kubenswrapper[4933]: I0122 06:13:12.853372 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-z8mm2"
Jan 22 06:13:12 crc kubenswrapper[4933]: I0122 06:13:12.853461 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-z8mm2"
Jan 22 06:13:12 crc kubenswrapper[4933]: I0122 06:13:12.932342 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-z8mm2"
Jan 22 06:13:13 crc kubenswrapper[4933]: I0122 06:13:13.735413 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-z8mm2"
Jan 22 06:13:13 crc kubenswrapper[4933]: I0122 06:13:13.798380 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-z8mm2"]
Jan 22 06:13:15 crc kubenswrapper[4933]: I0122 06:13:15.674552 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-z8mm2" podUID="ea4ae020-67c3-4cf3-a35c-0eac08c8f115" containerName="registry-server" containerID="cri-o://6be71cd8baf1f4efc0c2fae5e5b989ad0adc39409eddfe7708ea6391c589d676" gracePeriod=2
Jan 22 06:13:16 crc kubenswrapper[4933]: I0122 06:13:16.609510 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z8mm2"
Jan 22 06:13:16 crc kubenswrapper[4933]: I0122 06:13:16.681947 4933 generic.go:334] "Generic (PLEG): container finished" podID="ea4ae020-67c3-4cf3-a35c-0eac08c8f115" containerID="6be71cd8baf1f4efc0c2fae5e5b989ad0adc39409eddfe7708ea6391c589d676" exitCode=0
Jan 22 06:13:16 crc kubenswrapper[4933]: I0122 06:13:16.681983 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z8mm2" event={"ID":"ea4ae020-67c3-4cf3-a35c-0eac08c8f115","Type":"ContainerDied","Data":"6be71cd8baf1f4efc0c2fae5e5b989ad0adc39409eddfe7708ea6391c589d676"}
Jan 22 06:13:16 crc kubenswrapper[4933]: I0122 06:13:16.682010 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z8mm2" event={"ID":"ea4ae020-67c3-4cf3-a35c-0eac08c8f115","Type":"ContainerDied","Data":"11c33558728f686b687fa2963237b295f01696a740a999bce73e4a351dc5768a"}
Jan 22 06:13:16 crc kubenswrapper[4933]: I0122 06:13:16.682026 4933 scope.go:117] "RemoveContainer" containerID="6be71cd8baf1f4efc0c2fae5e5b989ad0adc39409eddfe7708ea6391c589d676"
Jan 22 06:13:16 crc kubenswrapper[4933]: I0122 06:13:16.682034 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z8mm2"
Jan 22 06:13:16 crc kubenswrapper[4933]: I0122 06:13:16.705808 4933 scope.go:117] "RemoveContainer" containerID="b89bc7ed60ebd13ce269656ecbee67607bc696ad1ced51fa31ceb008c15b5c3f"
Jan 22 06:13:16 crc kubenswrapper[4933]: I0122 06:13:16.727477 4933 scope.go:117] "RemoveContainer" containerID="f53611fcd0734b9ffb3841567c1a33924ab26e8d92d5053efdcccd66b7ac84c9"
Jan 22 06:13:16 crc kubenswrapper[4933]: I0122 06:13:16.745860 4933 scope.go:117] "RemoveContainer" containerID="6be71cd8baf1f4efc0c2fae5e5b989ad0adc39409eddfe7708ea6391c589d676"
Jan 22 06:13:16 crc kubenswrapper[4933]: E0122 06:13:16.746302 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6be71cd8baf1f4efc0c2fae5e5b989ad0adc39409eddfe7708ea6391c589d676\": container with ID starting with 6be71cd8baf1f4efc0c2fae5e5b989ad0adc39409eddfe7708ea6391c589d676 not found: ID does not exist" containerID="6be71cd8baf1f4efc0c2fae5e5b989ad0adc39409eddfe7708ea6391c589d676"
Jan 22 06:13:16 crc kubenswrapper[4933]: I0122 06:13:16.746334 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6be71cd8baf1f4efc0c2fae5e5b989ad0adc39409eddfe7708ea6391c589d676"} err="failed to get container status \"6be71cd8baf1f4efc0c2fae5e5b989ad0adc39409eddfe7708ea6391c589d676\": rpc error: code = NotFound desc = could not find container \"6be71cd8baf1f4efc0c2fae5e5b989ad0adc39409eddfe7708ea6391c589d676\": container with ID starting with 6be71cd8baf1f4efc0c2fae5e5b989ad0adc39409eddfe7708ea6391c589d676 not found: ID does not exist"
Jan 22 06:13:16 crc kubenswrapper[4933]: I0122 06:13:16.746354 4933 scope.go:117] "RemoveContainer" containerID="b89bc7ed60ebd13ce269656ecbee67607bc696ad1ced51fa31ceb008c15b5c3f"
Jan 22 06:13:16 crc kubenswrapper[4933]: E0122 06:13:16.746669 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b89bc7ed60ebd13ce269656ecbee67607bc696ad1ced51fa31ceb008c15b5c3f\": container with ID starting with b89bc7ed60ebd13ce269656ecbee67607bc696ad1ced51fa31ceb008c15b5c3f not found: ID does not exist" containerID="b89bc7ed60ebd13ce269656ecbee67607bc696ad1ced51fa31ceb008c15b5c3f"
Jan 22 06:13:16 crc kubenswrapper[4933]: I0122 06:13:16.746693 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b89bc7ed60ebd13ce269656ecbee67607bc696ad1ced51fa31ceb008c15b5c3f"} err="failed to get container status \"b89bc7ed60ebd13ce269656ecbee67607bc696ad1ced51fa31ceb008c15b5c3f\": rpc error: code = NotFound desc = could not find container \"b89bc7ed60ebd13ce269656ecbee67607bc696ad1ced51fa31ceb008c15b5c3f\": container with ID starting with b89bc7ed60ebd13ce269656ecbee67607bc696ad1ced51fa31ceb008c15b5c3f not found: ID does not exist"
Jan 22 06:13:16 crc kubenswrapper[4933]: I0122 06:13:16.746707 4933 scope.go:117] "RemoveContainer" containerID="f53611fcd0734b9ffb3841567c1a33924ab26e8d92d5053efdcccd66b7ac84c9"
Jan 22 06:13:16 crc kubenswrapper[4933]: E0122 06:13:16.746982 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f53611fcd0734b9ffb3841567c1a33924ab26e8d92d5053efdcccd66b7ac84c9\": container with ID starting with f53611fcd0734b9ffb3841567c1a33924ab26e8d92d5053efdcccd66b7ac84c9 not found: ID does not exist" containerID="f53611fcd0734b9ffb3841567c1a33924ab26e8d92d5053efdcccd66b7ac84c9"
Jan 22 06:13:16 crc kubenswrapper[4933]: I0122 06:13:16.747007 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f53611fcd0734b9ffb3841567c1a33924ab26e8d92d5053efdcccd66b7ac84c9"} err="failed to get container status \"f53611fcd0734b9ffb3841567c1a33924ab26e8d92d5053efdcccd66b7ac84c9\": rpc error: code = NotFound desc = could not find container \"f53611fcd0734b9ffb3841567c1a33924ab26e8d92d5053efdcccd66b7ac84c9\": container with ID starting with f53611fcd0734b9ffb3841567c1a33924ab26e8d92d5053efdcccd66b7ac84c9 not found: ID does not exist"
Jan 22 06:13:16 crc kubenswrapper[4933]: I0122 06:13:16.795104 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5pfxh\" (UniqueName: \"kubernetes.io/projected/ea4ae020-67c3-4cf3-a35c-0eac08c8f115-kube-api-access-5pfxh\") pod \"ea4ae020-67c3-4cf3-a35c-0eac08c8f115\" (UID: \"ea4ae020-67c3-4cf3-a35c-0eac08c8f115\") "
Jan 22 06:13:16 crc kubenswrapper[4933]: I0122 06:13:16.795170 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea4ae020-67c3-4cf3-a35c-0eac08c8f115-catalog-content\") pod \"ea4ae020-67c3-4cf3-a35c-0eac08c8f115\" (UID: \"ea4ae020-67c3-4cf3-a35c-0eac08c8f115\") "
Jan 22 06:13:16 crc kubenswrapper[4933]: I0122 06:13:16.795216 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea4ae020-67c3-4cf3-a35c-0eac08c8f115-utilities\") pod \"ea4ae020-67c3-4cf3-a35c-0eac08c8f115\" (UID: \"ea4ae020-67c3-4cf3-a35c-0eac08c8f115\") "
Jan 22 06:13:16 crc kubenswrapper[4933]: I0122 06:13:16.796342 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ea4ae020-67c3-4cf3-a35c-0eac08c8f115-utilities" (OuterVolumeSpecName: "utilities") pod "ea4ae020-67c3-4cf3-a35c-0eac08c8f115" (UID: "ea4ae020-67c3-4cf3-a35c-0eac08c8f115"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 06:13:16 crc kubenswrapper[4933]: I0122 06:13:16.803893 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ea4ae020-67c3-4cf3-a35c-0eac08c8f115-kube-api-access-5pfxh" (OuterVolumeSpecName: "kube-api-access-5pfxh") pod "ea4ae020-67c3-4cf3-a35c-0eac08c8f115" (UID: "ea4ae020-67c3-4cf3-a35c-0eac08c8f115"). InnerVolumeSpecName "kube-api-access-5pfxh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 06:13:16 crc kubenswrapper[4933]: I0122 06:13:16.824178 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ea4ae020-67c3-4cf3-a35c-0eac08c8f115-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ea4ae020-67c3-4cf3-a35c-0eac08c8f115" (UID: "ea4ae020-67c3-4cf3-a35c-0eac08c8f115"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 06:13:16 crc kubenswrapper[4933]: I0122 06:13:16.897733 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5pfxh\" (UniqueName: \"kubernetes.io/projected/ea4ae020-67c3-4cf3-a35c-0eac08c8f115-kube-api-access-5pfxh\") on node \"crc\" DevicePath \"\""
Jan 22 06:13:16 crc kubenswrapper[4933]: I0122 06:13:16.897798 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea4ae020-67c3-4cf3-a35c-0eac08c8f115-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 22 06:13:16 crc kubenswrapper[4933]: I0122 06:13:16.897821 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea4ae020-67c3-4cf3-a35c-0eac08c8f115-utilities\") on node \"crc\" DevicePath \"\""
Jan 22 06:13:17 crc kubenswrapper[4933]: I0122 06:13:17.032432 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-z8mm2"]
Jan 22 06:13:17 crc kubenswrapper[4933]: I0122 06:13:17.040721 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-z8mm2"]
Jan 22 06:13:18 crc kubenswrapper[4933]: I0122 06:13:18.500306 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ea4ae020-67c3-4cf3-a35c-0eac08c8f115" path="/var/lib/kubelet/pods/ea4ae020-67c3-4cf3-a35c-0eac08c8f115/volumes"
Jan 22 06:13:40 crc kubenswrapper[4933]: I0122 06:13:40.943043 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 06:13:40 crc kubenswrapper[4933]: I0122 06:13:40.943712 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 06:14:02 crc kubenswrapper[4933]: I0122 06:14:02.130278 4933 scope.go:117] "RemoveContainer" containerID="61f6493fabb950de8e2af3aae3dd7bf181fc3faf59f89e9a6161a8bab9d020e0"
Jan 22 06:14:02 crc kubenswrapper[4933]: I0122 06:14:02.149264 4933 scope.go:117] "RemoveContainer" containerID="b22c6743199404c4213ddd23786b457d932b0b9434a7c83fd6bb4caba5288b2f"
Jan 22 06:14:02 crc kubenswrapper[4933]: I0122 06:14:02.164501 4933 scope.go:117] "RemoveContainer" containerID="cb606c1f6f736102fcf0079ab6c6d4f8465529171bfeb5a249a5ba85855f1bce"
Jan 22 06:14:02 crc kubenswrapper[4933]: I0122 06:14:02.224897 4933 scope.go:117] "RemoveContainer" containerID="17cce92623155f7d8d1837e57204a6dfb1d65a9db999fe215e989e68c83edf20"
Jan 22 06:14:02 crc kubenswrapper[4933]: I0122 06:14:02.275354 4933 scope.go:117] "RemoveContainer" containerID="c5ed2708b994d64972c77f751483b57c4a7c6592049bb6b8066c9ae126dd2fe3"
Jan 22 06:14:02 crc kubenswrapper[4933]: I0122 06:14:02.297864 4933 scope.go:117] "RemoveContainer" containerID="5b9c14d83c800061f8051520912f10415507b56ca2e8aee72a7d1ee8a0ebee79"
Jan 22 06:14:10 crc kubenswrapper[4933]: I0122 06:14:10.942718 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 06:14:10 crc kubenswrapper[4933]: I0122 06:14:10.943450 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 06:14:10 crc kubenswrapper[4933]: I0122 06:14:10.943514 4933 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx"
Jan 22 06:14:10 crc kubenswrapper[4933]: I0122 06:14:10.944445 4933 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"54e199a5d4d9e08b499d27a10ef530f0e0054285faf2472519a3248f3a305946"} pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 22 06:14:10 crc kubenswrapper[4933]: I0122 06:14:10.944544 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" containerID="cri-o://54e199a5d4d9e08b499d27a10ef530f0e0054285faf2472519a3248f3a305946" gracePeriod=600
Jan 22 06:14:11 crc kubenswrapper[4933]: E0122 06:14:11.083863 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:14:11 crc kubenswrapper[4933]: I0122 06:14:11.407157 4933 generic.go:334] "Generic (PLEG): container finished" podID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerID="54e199a5d4d9e08b499d27a10ef530f0e0054285faf2472519a3248f3a305946" exitCode=0
Jan 22 06:14:11 crc kubenswrapper[4933]: I0122 06:14:11.407243 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerDied","Data":"54e199a5d4d9e08b499d27a10ef530f0e0054285faf2472519a3248f3a305946"}
Jan 22 06:14:11 crc kubenswrapper[4933]: I0122 06:14:11.407659 4933 scope.go:117] "RemoveContainer" containerID="be33ec37115cc1139fae624e9dd3a341abdaf1a2979fabd7ece8f9fddf21ac63"
Jan 22 06:14:11 crc kubenswrapper[4933]: I0122 06:14:11.408204 4933 scope.go:117] "RemoveContainer" containerID="54e199a5d4d9e08b499d27a10ef530f0e0054285faf2472519a3248f3a305946"
Jan 22 06:14:11 crc kubenswrapper[4933]: E0122 06:14:11.408445 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:14:22 crc kubenswrapper[4933]: I0122 06:14:22.495596 4933 scope.go:117] "RemoveContainer" containerID="54e199a5d4d9e08b499d27a10ef530f0e0054285faf2472519a3248f3a305946"
Jan 22 06:14:22 crc kubenswrapper[4933]: E0122 06:14:22.496551 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:14:35 crc kubenswrapper[4933]: I0122 06:14:35.491867 4933 scope.go:117] "RemoveContainer" containerID="54e199a5d4d9e08b499d27a10ef530f0e0054285faf2472519a3248f3a305946"
Jan 22 06:14:35 crc kubenswrapper[4933]: E0122 06:14:35.492863 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:14:46 crc kubenswrapper[4933]: I0122 06:14:46.491756 4933 scope.go:117] "RemoveContainer" containerID="54e199a5d4d9e08b499d27a10ef530f0e0054285faf2472519a3248f3a305946"
Jan 22 06:14:46 crc kubenswrapper[4933]: E0122 06:14:46.492802 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:14:59 crc kubenswrapper[4933]: I0122 06:14:59.491314 4933 scope.go:117] "RemoveContainer" containerID="54e199a5d4d9e08b499d27a10ef530f0e0054285faf2472519a3248f3a305946"
Jan 22 06:14:59 crc kubenswrapper[4933]: E0122 06:14:59.492552 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:15:00 crc kubenswrapper[4933]: I0122 06:15:00.162566 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484375-lhpj4"]
Jan 22 06:15:00 crc kubenswrapper[4933]: E0122 06:15:00.162992 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea4ae020-67c3-4cf3-a35c-0eac08c8f115" containerName="extract-content"
Jan 22 06:15:00 crc kubenswrapper[4933]: I0122 06:15:00.163023 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea4ae020-67c3-4cf3-a35c-0eac08c8f115" containerName="extract-content"
Jan 22 06:15:00 crc kubenswrapper[4933]: E0122 06:15:00.163063 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea4ae020-67c3-4cf3-a35c-0eac08c8f115" containerName="extract-utilities"
Jan 22 06:15:00 crc kubenswrapper[4933]: I0122 06:15:00.163102 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea4ae020-67c3-4cf3-a35c-0eac08c8f115" containerName="extract-utilities"
Jan 22 06:15:00 crc kubenswrapper[4933]: E0122 06:15:00.163125 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea4ae020-67c3-4cf3-a35c-0eac08c8f115" containerName="registry-server"
Jan 22 06:15:00 crc kubenswrapper[4933]: I0122 06:15:00.163136 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea4ae020-67c3-4cf3-a35c-0eac08c8f115" containerName="registry-server"
Jan 22 06:15:00 crc kubenswrapper[4933]: I0122 06:15:00.163416 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="ea4ae020-67c3-4cf3-a35c-0eac08c8f115" containerName="registry-server"
Jan 22 06:15:00 crc kubenswrapper[4933]: I0122 06:15:00.164306 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484375-lhpj4"
Jan 22 06:15:00 crc kubenswrapper[4933]: I0122 06:15:00.166312 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Jan 22 06:15:00 crc kubenswrapper[4933]: I0122 06:15:00.166642 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Jan 22 06:15:00 crc kubenswrapper[4933]: I0122 06:15:00.175934 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484375-lhpj4"]
Jan 22 06:15:00 crc kubenswrapper[4933]: I0122 06:15:00.266065 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-448dp\" (UniqueName: \"kubernetes.io/projected/3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1-kube-api-access-448dp\") pod \"collect-profiles-29484375-lhpj4\" (UID: \"3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484375-lhpj4"
Jan 22 06:15:00 crc kubenswrapper[4933]: I0122 06:15:00.266221 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1-secret-volume\") pod \"collect-profiles-29484375-lhpj4\" (UID: \"3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484375-lhpj4"
Jan 22 06:15:00 crc kubenswrapper[4933]: I0122 06:15:00.266334 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1-config-volume\") pod \"collect-profiles-29484375-lhpj4\" (UID: \"3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484375-lhpj4"
Jan 22 06:15:00 crc kubenswrapper[4933]: I0122 06:15:00.368919 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-448dp\" (UniqueName: \"kubernetes.io/projected/3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1-kube-api-access-448dp\") pod \"collect-profiles-29484375-lhpj4\" (UID: \"3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484375-lhpj4"
Jan 22 06:15:00 crc kubenswrapper[4933]: I0122 06:15:00.369406 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1-secret-volume\") pod \"collect-profiles-29484375-lhpj4\" (UID: \"3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484375-lhpj4"
Jan 22 06:15:00 crc kubenswrapper[4933]: I0122 06:15:00.369573 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1-config-volume\") pod \"collect-profiles-29484375-lhpj4\" (UID: \"3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484375-lhpj4"
Jan 22 06:15:00 crc kubenswrapper[4933]: I0122 06:15:00.371864 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1-config-volume\") pod \"collect-profiles-29484375-lhpj4\" (UID: \"3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484375-lhpj4"
Jan 22 06:15:00 crc kubenswrapper[4933]: I0122 06:15:00.380758 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1-secret-volume\") pod \"collect-profiles-29484375-lhpj4\" (UID: \"3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484375-lhpj4"
Jan 22 06:15:00 crc kubenswrapper[4933]: I0122 06:15:00.389377 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-448dp\" (UniqueName: \"kubernetes.io/projected/3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1-kube-api-access-448dp\") pod \"collect-profiles-29484375-lhpj4\" (UID: \"3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484375-lhpj4"
Jan 22 06:15:00 crc kubenswrapper[4933]: I0122 06:15:00.490220 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484375-lhpj4"
Jan 22 06:15:00 crc kubenswrapper[4933]: I0122 06:15:00.963883 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484375-lhpj4"]
Jan 22 06:15:01 crc kubenswrapper[4933]: I0122 06:15:01.839012 4933 generic.go:334] "Generic (PLEG): container finished" podID="3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1" containerID="d448ee6ab88e31963d507d2b89e8f43643f2d6bdf6a926e176b9b5fd1cf4bca9" exitCode=0
Jan 22 06:15:01 crc kubenswrapper[4933]: I0122 06:15:01.839160 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484375-lhpj4" event={"ID":"3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1","Type":"ContainerDied","Data":"d448ee6ab88e31963d507d2b89e8f43643f2d6bdf6a926e176b9b5fd1cf4bca9"}
Jan 22 06:15:01 crc kubenswrapper[4933]: I0122 06:15:01.839570 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484375-lhpj4" event={"ID":"3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1","Type":"ContainerStarted","Data":"c5f2b4c4f4d8fbd06b60e62ab2ff01436bf50723052da9a765f28b6661ec1cc6"}
Jan 22 06:15:02 crc kubenswrapper[4933]: I0122 06:15:02.385893 4933 scope.go:117] "RemoveContainer" containerID="0fdc5152f900a8420f5cb44deece538cdbfc86883d7690cc9abc8456c127e4ab"
Jan 22 06:15:03 crc kubenswrapper[4933]: I0122 06:15:03.164477 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484375-lhpj4"
Jan 22 06:15:03 crc kubenswrapper[4933]: I0122 06:15:03.225897 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-448dp\" (UniqueName: \"kubernetes.io/projected/3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1-kube-api-access-448dp\") pod \"3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1\" (UID: \"3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1\") "
Jan 22 06:15:03 crc kubenswrapper[4933]: I0122 06:15:03.225958 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1-secret-volume\") pod \"3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1\" (UID: \"3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1\") "
Jan 22 06:15:03 crc kubenswrapper[4933]: I0122 06:15:03.226039 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1-config-volume\") pod \"3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1\" (UID: \"3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1\") "
Jan 22 06:15:03 crc kubenswrapper[4933]: I0122 06:15:03.227444 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1-config-volume" (OuterVolumeSpecName: "config-volume") pod "3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1" (UID: "3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 06:15:03 crc kubenswrapper[4933]: I0122 06:15:03.232878 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1-kube-api-access-448dp" (OuterVolumeSpecName: "kube-api-access-448dp") pod "3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1" (UID: "3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1"). InnerVolumeSpecName "kube-api-access-448dp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 06:15:03 crc kubenswrapper[4933]: I0122 06:15:03.233178 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1" (UID: "3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:15:03 crc kubenswrapper[4933]: I0122 06:15:03.327418 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-448dp\" (UniqueName: \"kubernetes.io/projected/3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1-kube-api-access-448dp\") on node \"crc\" DevicePath \"\""
Jan 22 06:15:03 crc kubenswrapper[4933]: I0122 06:15:03.327454 4933 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1-secret-volume\") on node \"crc\" DevicePath \"\""
Jan 22 06:15:03 crc kubenswrapper[4933]: I0122 06:15:03.327464 4933 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1-config-volume\") on node \"crc\" DevicePath \"\""
Jan 22 06:15:03 crc kubenswrapper[4933]: I0122 06:15:03.861451 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484375-lhpj4" event={"ID":"3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1","Type":"ContainerDied","Data":"c5f2b4c4f4d8fbd06b60e62ab2ff01436bf50723052da9a765f28b6661ec1cc6"}
Jan 22 06:15:03 crc kubenswrapper[4933]: I0122 06:15:03.861500 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c5f2b4c4f4d8fbd06b60e62ab2ff01436bf50723052da9a765f28b6661ec1cc6"
Jan 22 06:15:03 crc kubenswrapper[4933]: I0122 06:15:03.861530 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484375-lhpj4"
Jan 22 06:15:12 crc kubenswrapper[4933]: I0122 06:15:12.498288 4933 scope.go:117] "RemoveContainer" containerID="54e199a5d4d9e08b499d27a10ef530f0e0054285faf2472519a3248f3a305946"
Jan 22 06:15:12 crc kubenswrapper[4933]: E0122 06:15:12.499485 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:15:27 crc kubenswrapper[4933]: I0122 06:15:27.491203 4933 scope.go:117] "RemoveContainer" containerID="54e199a5d4d9e08b499d27a10ef530f0e0054285faf2472519a3248f3a305946"
Jan 22 06:15:27 crc kubenswrapper[4933]: E0122 06:15:27.493660 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:15:38 crc kubenswrapper[4933]: I0122 06:15:38.491713 4933 scope.go:117] "RemoveContainer" containerID="54e199a5d4d9e08b499d27a10ef530f0e0054285faf2472519a3248f3a305946"
Jan 22 06:15:38 crc kubenswrapper[4933]: E0122 06:15:38.494963 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:15:50 crc kubenswrapper[4933]: I0122 06:15:50.491137 4933 scope.go:117] "RemoveContainer" containerID="54e199a5d4d9e08b499d27a10ef530f0e0054285faf2472519a3248f3a305946"
Jan 22 06:15:50 crc kubenswrapper[4933]: E0122 06:15:50.492284 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:16:02 crc kubenswrapper[4933]: I0122 06:16:02.494403 4933 scope.go:117] "RemoveContainer" containerID="54e199a5d4d9e08b499d27a10ef530f0e0054285faf2472519a3248f3a305946"
Jan 22 06:16:02 crc kubenswrapper[4933]: E0122 06:16:02.495125 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:16:14 crc kubenswrapper[4933]: I0122 06:16:14.490444 4933 scope.go:117] "RemoveContainer" containerID="54e199a5d4d9e08b499d27a10ef530f0e0054285faf2472519a3248f3a305946"
Jan 22 06:16:14 crc kubenswrapper[4933]: E0122 06:16:14.491105 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:16:26 crc kubenswrapper[4933]: I0122 06:16:26.490580 4933 scope.go:117] "RemoveContainer" containerID="54e199a5d4d9e08b499d27a10ef530f0e0054285faf2472519a3248f3a305946"
Jan 22 06:16:26 crc kubenswrapper[4933]: E0122 06:16:26.491494 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:16:39 crc kubenswrapper[4933]: I0122 06:16:39.491303 4933 scope.go:117] "RemoveContainer" containerID="54e199a5d4d9e08b499d27a10ef530f0e0054285faf2472519a3248f3a305946"
Jan 22 06:16:39 crc kubenswrapper[4933]: E0122 06:16:39.492278 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:16:51 crc kubenswrapper[4933]: I0122 06:16:51.490342 4933 scope.go:117] "RemoveContainer" containerID="54e199a5d4d9e08b499d27a10ef530f0e0054285faf2472519a3248f3a305946"
Jan 22 06:16:51 crc kubenswrapper[4933]: E0122 06:16:51.491158 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:17:02 crc kubenswrapper[4933]: I0122 06:17:02.497714 4933 scope.go:117] "RemoveContainer" containerID="54e199a5d4d9e08b499d27a10ef530f0e0054285faf2472519a3248f3a305946"
Jan 22 06:17:02 crc kubenswrapper[4933]: E0122 06:17:02.498985 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:17:16 crc kubenswrapper[4933]: I0122 06:17:16.490621 4933 scope.go:117] "RemoveContainer" containerID="54e199a5d4d9e08b499d27a10ef530f0e0054285faf2472519a3248f3a305946"
Jan 22 06:17:16 crc kubenswrapper[4933]: E0122 06:17:16.491665 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:17:29 crc kubenswrapper[4933]: I0122 06:17:29.491369 4933 scope.go:117] "RemoveContainer" containerID="54e199a5d4d9e08b499d27a10ef530f0e0054285faf2472519a3248f3a305946"
Jan 22 06:17:29 crc kubenswrapper[4933]: E0122 06:17:29.492138 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:17:41 crc kubenswrapper[4933]: I0122 06:17:41.491177 4933 scope.go:117] "RemoveContainer" containerID="54e199a5d4d9e08b499d27a10ef530f0e0054285faf2472519a3248f3a305946"
Jan 22 06:17:41 crc kubenswrapper[4933]: E0122 06:17:41.491952 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:17:56 crc kubenswrapper[4933]: I0122 06:17:56.491247 4933 scope.go:117] "RemoveContainer" containerID="54e199a5d4d9e08b499d27a10ef530f0e0054285faf2472519a3248f3a305946"
Jan 22 06:17:56 crc kubenswrapper[4933]: E0122 06:17:56.492333 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:18:11 crc kubenswrapper[4933]: I0122 06:18:11.491123 4933 scope.go:117] "RemoveContainer" containerID="54e199a5d4d9e08b499d27a10ef530f0e0054285faf2472519a3248f3a305946"
Jan 22 06:18:11 crc kubenswrapper[4933]: E0122 06:18:11.492405 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:18:25 crc kubenswrapper[4933]: I0122 06:18:25.491337 4933 scope.go:117] "RemoveContainer" containerID="54e199a5d4d9e08b499d27a10ef530f0e0054285faf2472519a3248f3a305946"
Jan 22 06:18:25 crc kubenswrapper[4933]: E0122 06:18:25.492551 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:18:39 crc kubenswrapper[4933]: I0122 06:18:39.491284 4933 scope.go:117] "RemoveContainer" containerID="54e199a5d4d9e08b499d27a10ef530f0e0054285faf2472519a3248f3a305946"
Jan 22 06:18:39 crc kubenswrapper[4933]: E0122 06:18:39.492031 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:18:51 crc kubenswrapper[4933]: I0122 06:18:51.491582 4933 scope.go:117] "RemoveContainer" containerID="54e199a5d4d9e08b499d27a10ef530f0e0054285faf2472519a3248f3a305946"
Jan 22 06:18:51 crc kubenswrapper[4933]: E0122 06:18:51.492704 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:19:02 crc kubenswrapper[4933]: I0122 06:19:02.516571 4933 scope.go:117] "RemoveContainer" containerID="c84eed6ca018701cb449cd78a61a8c80baad868d621a707c92155942db572adc"
Jan 22 06:19:02 crc kubenswrapper[4933]: I0122 06:19:02.536059 4933 scope.go:117] "RemoveContainer" containerID="13ec5c2ba335e2058c60f3fb542a39e9c931de4b1f7bb66a58fe35c3b2ebae05"
Jan 22 06:19:02 crc kubenswrapper[4933]: I0122 06:19:02.563024 4933 scope.go:117] 
"RemoveContainer" containerID="ed878a1ab6136906759e43979d97039857d533ada64ca3b123c6c3fcfcd5a6fe" Jan 22 06:19:04 crc kubenswrapper[4933]: I0122 06:19:04.491139 4933 scope.go:117] "RemoveContainer" containerID="54e199a5d4d9e08b499d27a10ef530f0e0054285faf2472519a3248f3a305946" Jan 22 06:19:04 crc kubenswrapper[4933]: E0122 06:19:04.491420 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:19:17 crc kubenswrapper[4933]: I0122 06:19:17.490520 4933 scope.go:117] "RemoveContainer" containerID="54e199a5d4d9e08b499d27a10ef530f0e0054285faf2472519a3248f3a305946" Jan 22 06:19:18 crc kubenswrapper[4933]: I0122 06:19:18.128065 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerStarted","Data":"fe8d3968014e868d088b1477d12be705e72f0a7b1c028581fa7e7552a65574d9"} Jan 22 06:21:40 crc kubenswrapper[4933]: I0122 06:21:40.943111 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:21:40 crc kubenswrapper[4933]: I0122 06:21:40.943923 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:21:52 crc kubenswrapper[4933]: I0122 06:21:52.510772 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-9h8kx"] Jan 22 06:21:52 crc kubenswrapper[4933]: E0122 06:21:52.511996 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1" containerName="collect-profiles" Jan 22 06:21:52 crc kubenswrapper[4933]: I0122 06:21:52.512012 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1" containerName="collect-profiles" Jan 22 06:21:52 crc kubenswrapper[4933]: I0122 06:21:52.512227 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1" containerName="collect-profiles" Jan 22 06:21:52 crc kubenswrapper[4933]: I0122 06:21:52.513477 4933 util.go:30] "No sandbox for pod can be found. 
Jan 22 06:21:40 crc kubenswrapper[4933]: I0122 06:21:40.943111 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 06:21:40 crc kubenswrapper[4933]: I0122 06:21:40.943923 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 06:21:52 crc kubenswrapper[4933]: I0122 06:21:52.510772 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-9h8kx"]
Jan 22 06:21:52 crc kubenswrapper[4933]: E0122 06:21:52.511996 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1" containerName="collect-profiles"
Jan 22 06:21:52 crc kubenswrapper[4933]: I0122 06:21:52.512012 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1" containerName="collect-profiles"
Jan 22 06:21:52 crc kubenswrapper[4933]: I0122 06:21:52.512227 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1" containerName="collect-profiles"
Jan 22 06:21:52 crc kubenswrapper[4933]: I0122 06:21:52.513477 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9h8kx"
Jan 22 06:21:52 crc kubenswrapper[4933]: I0122 06:21:52.519885 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9h8kx"]
Jan 22 06:21:52 crc kubenswrapper[4933]: I0122 06:21:52.609121 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ggd5k\" (UniqueName: \"kubernetes.io/projected/3f509cda-4999-47f8-97dc-9ce0b968b8d8-kube-api-access-ggd5k\") pod \"community-operators-9h8kx\" (UID: \"3f509cda-4999-47f8-97dc-9ce0b968b8d8\") " pod="openshift-marketplace/community-operators-9h8kx"
Jan 22 06:21:52 crc kubenswrapper[4933]: I0122 06:21:52.609191 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3f509cda-4999-47f8-97dc-9ce0b968b8d8-catalog-content\") pod \"community-operators-9h8kx\" (UID: \"3f509cda-4999-47f8-97dc-9ce0b968b8d8\") " pod="openshift-marketplace/community-operators-9h8kx"
Jan 22 06:21:52 crc kubenswrapper[4933]: I0122 06:21:52.609324 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3f509cda-4999-47f8-97dc-9ce0b968b8d8-utilities\") pod \"community-operators-9h8kx\" (UID: \"3f509cda-4999-47f8-97dc-9ce0b968b8d8\") " pod="openshift-marketplace/community-operators-9h8kx"
Jan 22 06:21:52 crc kubenswrapper[4933]: I0122 06:21:52.710704 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3f509cda-4999-47f8-97dc-9ce0b968b8d8-utilities\") pod \"community-operators-9h8kx\" (UID: \"3f509cda-4999-47f8-97dc-9ce0b968b8d8\") " pod="openshift-marketplace/community-operators-9h8kx"
Jan 22 06:21:52 crc kubenswrapper[4933]: I0122 06:21:52.710771 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ggd5k\" (UniqueName: \"kubernetes.io/projected/3f509cda-4999-47f8-97dc-9ce0b968b8d8-kube-api-access-ggd5k\") pod \"community-operators-9h8kx\" (UID: \"3f509cda-4999-47f8-97dc-9ce0b968b8d8\") " pod="openshift-marketplace/community-operators-9h8kx"
Jan 22 06:21:52 crc kubenswrapper[4933]: I0122 06:21:52.710792 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3f509cda-4999-47f8-97dc-9ce0b968b8d8-catalog-content\") pod \"community-operators-9h8kx\" (UID: \"3f509cda-4999-47f8-97dc-9ce0b968b8d8\") " pod="openshift-marketplace/community-operators-9h8kx"
Jan 22 06:21:52 crc kubenswrapper[4933]: I0122 06:21:52.711325 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3f509cda-4999-47f8-97dc-9ce0b968b8d8-utilities\") pod \"community-operators-9h8kx\" (UID: \"3f509cda-4999-47f8-97dc-9ce0b968b8d8\") " pod="openshift-marketplace/community-operators-9h8kx"
Jan 22 06:21:52 crc kubenswrapper[4933]: I0122 06:21:52.711350 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3f509cda-4999-47f8-97dc-9ce0b968b8d8-catalog-content\") pod \"community-operators-9h8kx\" (UID: \"3f509cda-4999-47f8-97dc-9ce0b968b8d8\") " pod="openshift-marketplace/community-operators-9h8kx"
Jan 22 06:21:52 crc kubenswrapper[4933]: I0122 06:21:52.729742 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ggd5k\" (UniqueName: \"kubernetes.io/projected/3f509cda-4999-47f8-97dc-9ce0b968b8d8-kube-api-access-ggd5k\") pod \"community-operators-9h8kx\" (UID: \"3f509cda-4999-47f8-97dc-9ce0b968b8d8\") " pod="openshift-marketplace/community-operators-9h8kx"
Jan 22 06:21:52 crc kubenswrapper[4933]: I0122 06:21:52.835366 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9h8kx"
Jan 22 06:21:53 crc kubenswrapper[4933]: I0122 06:21:53.337736 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9h8kx"]
Jan 22 06:21:53 crc kubenswrapper[4933]: W0122 06:21:53.344136 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3f509cda_4999_47f8_97dc_9ce0b968b8d8.slice/crio-37746c98ec80ec04be4e3413861915c34d451cb080d6dcbd08b52e80faec0718 WatchSource:0}: Error finding container 37746c98ec80ec04be4e3413861915c34d451cb080d6dcbd08b52e80faec0718: Status 404 returned error can't find the container with id 37746c98ec80ec04be4e3413861915c34d451cb080d6dcbd08b52e80faec0718
Jan 22 06:21:53 crc kubenswrapper[4933]: I0122 06:21:53.407926 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9h8kx" event={"ID":"3f509cda-4999-47f8-97dc-9ce0b968b8d8","Type":"ContainerStarted","Data":"37746c98ec80ec04be4e3413861915c34d451cb080d6dcbd08b52e80faec0718"}
Jan 22 06:21:54 crc kubenswrapper[4933]: I0122 06:21:54.414995 4933 generic.go:334] "Generic (PLEG): container finished" podID="3f509cda-4999-47f8-97dc-9ce0b968b8d8" containerID="c620717366dba28aa7e04c97119becd720c527ba8c1d43643c13771ed8d54e73" exitCode=0
Jan 22 06:21:54 crc kubenswrapper[4933]: I0122 06:21:54.415116 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9h8kx" event={"ID":"3f509cda-4999-47f8-97dc-9ce0b968b8d8","Type":"ContainerDied","Data":"c620717366dba28aa7e04c97119becd720c527ba8c1d43643c13771ed8d54e73"}
Jan 22 06:21:54 crc kubenswrapper[4933]: I0122 06:21:54.416981 4933 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 22 06:21:55 crc kubenswrapper[4933]: I0122 06:21:55.422774 4933 generic.go:334] "Generic (PLEG): container finished" podID="3f509cda-4999-47f8-97dc-9ce0b968b8d8" containerID="85259b5d369acecaa3c49d21017843561bcecc4da48e2aa6e29f165cc9f7f98f" exitCode=0
Jan 22 06:21:55 crc kubenswrapper[4933]: I0122 06:21:55.422819 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9h8kx" event={"ID":"3f509cda-4999-47f8-97dc-9ce0b968b8d8","Type":"ContainerDied","Data":"85259b5d369acecaa3c49d21017843561bcecc4da48e2aa6e29f165cc9f7f98f"}
Jan 22 06:21:56 crc kubenswrapper[4933]: I0122 06:21:56.432153 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9h8kx" event={"ID":"3f509cda-4999-47f8-97dc-9ce0b968b8d8","Type":"ContainerStarted","Data":"d1823d439b41e540d58ff60f552a6b3a50adfb3b3d78c50d90df1f47ef971204"}
Jan 22 06:21:56 crc kubenswrapper[4933]: I0122 06:21:56.456950 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-9h8kx" podStartSLOduration=3.071629357 podStartE2EDuration="4.456916499s" podCreationTimestamp="2026-01-22 06:21:52 +0000 UTC" firstStartedPulling="2026-01-22 06:21:54.41670378 +0000 UTC m=+2162.253829133" lastFinishedPulling="2026-01-22 06:21:55.801990881 +0000 UTC m=+2163.639116275" observedRunningTime="2026-01-22 06:21:56.449989639 +0000 UTC m=+2164.287115002" watchObservedRunningTime="2026-01-22 06:21:56.456916499 +0000 UTC m=+2164.294041892"
Jan 22 06:22:02 crc kubenswrapper[4933]: I0122 06:22:02.836699 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-9h8kx"
Jan 22 06:22:02 crc kubenswrapper[4933]: I0122 06:22:02.838012 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-9h8kx"
Jan 22 06:22:02 crc kubenswrapper[4933]: I0122 06:22:02.888127 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-9h8kx"
Jan 22 06:22:03 crc kubenswrapper[4933]: I0122 06:22:03.532996 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-9h8kx"
Jan 22 06:22:03 crc kubenswrapper[4933]: I0122 06:22:03.581460 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-9h8kx"]
Jan 22 06:22:05 crc kubenswrapper[4933]: I0122 06:22:05.499242 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-9h8kx" podUID="3f509cda-4999-47f8-97dc-9ce0b968b8d8" containerName="registry-server" containerID="cri-o://d1823d439b41e540d58ff60f552a6b3a50adfb3b3d78c50d90df1f47ef971204" gracePeriod=2
Jan 22 06:22:06 crc kubenswrapper[4933]: I0122 06:22:06.435416 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9h8kx"
Jan 22 06:22:06 crc kubenswrapper[4933]: I0122 06:22:06.505848 4933 generic.go:334] "Generic (PLEG): container finished" podID="3f509cda-4999-47f8-97dc-9ce0b968b8d8" containerID="d1823d439b41e540d58ff60f552a6b3a50adfb3b3d78c50d90df1f47ef971204" exitCode=0
Jan 22 06:22:06 crc kubenswrapper[4933]: I0122 06:22:06.505891 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9h8kx" event={"ID":"3f509cda-4999-47f8-97dc-9ce0b968b8d8","Type":"ContainerDied","Data":"d1823d439b41e540d58ff60f552a6b3a50adfb3b3d78c50d90df1f47ef971204"}
Jan 22 06:22:06 crc kubenswrapper[4933]: I0122 06:22:06.505912 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9h8kx"
Jan 22 06:22:06 crc kubenswrapper[4933]: I0122 06:22:06.505930 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9h8kx" event={"ID":"3f509cda-4999-47f8-97dc-9ce0b968b8d8","Type":"ContainerDied","Data":"37746c98ec80ec04be4e3413861915c34d451cb080d6dcbd08b52e80faec0718"}
Jan 22 06:22:06 crc kubenswrapper[4933]: I0122 06:22:06.505955 4933 scope.go:117] "RemoveContainer" containerID="d1823d439b41e540d58ff60f552a6b3a50adfb3b3d78c50d90df1f47ef971204"
Jan 22 06:22:06 crc kubenswrapper[4933]: I0122 06:22:06.524799 4933 scope.go:117] "RemoveContainer" containerID="85259b5d369acecaa3c49d21017843561bcecc4da48e2aa6e29f165cc9f7f98f"
Jan 22 06:22:06 crc kubenswrapper[4933]: I0122 06:22:06.549247 4933 scope.go:117] "RemoveContainer" containerID="c620717366dba28aa7e04c97119becd720c527ba8c1d43643c13771ed8d54e73"
Jan 22 06:22:06 crc kubenswrapper[4933]: I0122 06:22:06.571100 4933 scope.go:117] "RemoveContainer" containerID="d1823d439b41e540d58ff60f552a6b3a50adfb3b3d78c50d90df1f47ef971204"
Jan 22 06:22:06 crc kubenswrapper[4933]: E0122 06:22:06.572258 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d1823d439b41e540d58ff60f552a6b3a50adfb3b3d78c50d90df1f47ef971204\": container with ID starting with d1823d439b41e540d58ff60f552a6b3a50adfb3b3d78c50d90df1f47ef971204 not found: ID does not exist" containerID="d1823d439b41e540d58ff60f552a6b3a50adfb3b3d78c50d90df1f47ef971204"
Jan 22 06:22:06 crc kubenswrapper[4933]: I0122 06:22:06.572300 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d1823d439b41e540d58ff60f552a6b3a50adfb3b3d78c50d90df1f47ef971204"} err="failed to get container status \"d1823d439b41e540d58ff60f552a6b3a50adfb3b3d78c50d90df1f47ef971204\": rpc error: code = NotFound desc = could not find container \"d1823d439b41e540d58ff60f552a6b3a50adfb3b3d78c50d90df1f47ef971204\": container with ID starting with d1823d439b41e540d58ff60f552a6b3a50adfb3b3d78c50d90df1f47ef971204 not found: ID does not exist"
Jan 22 06:22:06 crc kubenswrapper[4933]: I0122 06:22:06.572349 4933 scope.go:117] "RemoveContainer" containerID="85259b5d369acecaa3c49d21017843561bcecc4da48e2aa6e29f165cc9f7f98f"
Jan 22 06:22:06 crc kubenswrapper[4933]: E0122 06:22:06.572850 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"85259b5d369acecaa3c49d21017843561bcecc4da48e2aa6e29f165cc9f7f98f\": container with ID starting with 85259b5d369acecaa3c49d21017843561bcecc4da48e2aa6e29f165cc9f7f98f not found: ID does not exist" containerID="85259b5d369acecaa3c49d21017843561bcecc4da48e2aa6e29f165cc9f7f98f"
Jan 22 06:22:06 crc kubenswrapper[4933]: I0122 06:22:06.572884 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"85259b5d369acecaa3c49d21017843561bcecc4da48e2aa6e29f165cc9f7f98f"} err="failed to get container status \"85259b5d369acecaa3c49d21017843561bcecc4da48e2aa6e29f165cc9f7f98f\": rpc error: code = NotFound desc = could not find container \"85259b5d369acecaa3c49d21017843561bcecc4da48e2aa6e29f165cc9f7f98f\": container with ID starting with 85259b5d369acecaa3c49d21017843561bcecc4da48e2aa6e29f165cc9f7f98f not found: ID does not exist"
Jan 22 06:22:06 crc kubenswrapper[4933]: I0122 06:22:06.572922 4933 scope.go:117] "RemoveContainer" containerID="c620717366dba28aa7e04c97119becd720c527ba8c1d43643c13771ed8d54e73"
Jan 22 06:22:06 crc kubenswrapper[4933]: E0122 06:22:06.573229 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c620717366dba28aa7e04c97119becd720c527ba8c1d43643c13771ed8d54e73\": container with ID starting with c620717366dba28aa7e04c97119becd720c527ba8c1d43643c13771ed8d54e73 not found: ID does not exist" containerID="c620717366dba28aa7e04c97119becd720c527ba8c1d43643c13771ed8d54e73"
Jan 22 06:22:06 crc kubenswrapper[4933]: I0122 06:22:06.573273 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c620717366dba28aa7e04c97119becd720c527ba8c1d43643c13771ed8d54e73"} err="failed to get container status \"c620717366dba28aa7e04c97119becd720c527ba8c1d43643c13771ed8d54e73\": rpc error: code = NotFound desc = could not find container \"c620717366dba28aa7e04c97119becd720c527ba8c1d43643c13771ed8d54e73\": container with ID starting with c620717366dba28aa7e04c97119becd720c527ba8c1d43643c13771ed8d54e73 not found: ID does not exist"
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:22:06 crc kubenswrapper[4933]: I0122 06:22:06.711364 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3f509cda-4999-47f8-97dc-9ce0b968b8d8-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 06:22:06 crc kubenswrapper[4933]: I0122 06:22:06.711406 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3f509cda-4999-47f8-97dc-9ce0b968b8d8-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 06:22:06 crc kubenswrapper[4933]: I0122 06:22:06.711417 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ggd5k\" (UniqueName: \"kubernetes.io/projected/3f509cda-4999-47f8-97dc-9ce0b968b8d8-kube-api-access-ggd5k\") on node \"crc\" DevicePath \"\"" Jan 22 06:22:06 crc kubenswrapper[4933]: I0122 06:22:06.859328 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-9h8kx"] Jan 22 06:22:06 crc kubenswrapper[4933]: I0122 06:22:06.872260 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-9h8kx"] Jan 22 06:22:08 crc kubenswrapper[4933]: I0122 06:22:08.505332 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f509cda-4999-47f8-97dc-9ce0b968b8d8" path="/var/lib/kubelet/pods/3f509cda-4999-47f8-97dc-9ce0b968b8d8/volumes" Jan 22 06:22:10 crc kubenswrapper[4933]: I0122 06:22:10.943763 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:22:10 crc kubenswrapper[4933]: I0122 06:22:10.944142 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:22:40 crc kubenswrapper[4933]: I0122 06:22:40.943261 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:22:40 crc kubenswrapper[4933]: I0122 06:22:40.943957 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:22:40 crc kubenswrapper[4933]: I0122 06:22:40.944041 4933 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" Jan 22 06:22:40 crc kubenswrapper[4933]: I0122 06:22:40.945150 4933 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"fe8d3968014e868d088b1477d12be705e72f0a7b1c028581fa7e7552a65574d9"} pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" containerMessage="Container machine-config-daemon 
failed liveness probe, will be restarted" Jan 22 06:22:40 crc kubenswrapper[4933]: I0122 06:22:40.945253 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" containerID="cri-o://fe8d3968014e868d088b1477d12be705e72f0a7b1c028581fa7e7552a65574d9" gracePeriod=600 Jan 22 06:22:41 crc kubenswrapper[4933]: I0122 06:22:41.890019 4933 generic.go:334] "Generic (PLEG): container finished" podID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerID="fe8d3968014e868d088b1477d12be705e72f0a7b1c028581fa7e7552a65574d9" exitCode=0 Jan 22 06:22:41 crc kubenswrapper[4933]: I0122 06:22:41.890116 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerDied","Data":"fe8d3968014e868d088b1477d12be705e72f0a7b1c028581fa7e7552a65574d9"} Jan 22 06:22:41 crc kubenswrapper[4933]: I0122 06:22:41.890908 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerStarted","Data":"9441aafeefcbaee830d62084b2728e4a2bab7fe497b4fa5480565e507d8741df"} Jan 22 06:22:41 crc kubenswrapper[4933]: I0122 06:22:41.891026 4933 scope.go:117] "RemoveContainer" containerID="54e199a5d4d9e08b499d27a10ef530f0e0054285faf2472519a3248f3a305946" Jan 22 06:23:02 crc kubenswrapper[4933]: I0122 06:23:02.099136 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-p6xw2"] Jan 22 06:23:02 crc kubenswrapper[4933]: E0122 06:23:02.100029 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f509cda-4999-47f8-97dc-9ce0b968b8d8" containerName="extract-content" Jan 22 06:23:02 crc kubenswrapper[4933]: I0122 06:23:02.100049 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f509cda-4999-47f8-97dc-9ce0b968b8d8" containerName="extract-content" Jan 22 06:23:02 crc kubenswrapper[4933]: E0122 06:23:02.100077 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f509cda-4999-47f8-97dc-9ce0b968b8d8" containerName="extract-utilities" Jan 22 06:23:02 crc kubenswrapper[4933]: I0122 06:23:02.100089 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f509cda-4999-47f8-97dc-9ce0b968b8d8" containerName="extract-utilities" Jan 22 06:23:02 crc kubenswrapper[4933]: E0122 06:23:02.100132 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f509cda-4999-47f8-97dc-9ce0b968b8d8" containerName="registry-server" Jan 22 06:23:02 crc kubenswrapper[4933]: I0122 06:23:02.100140 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f509cda-4999-47f8-97dc-9ce0b968b8d8" containerName="registry-server" Jan 22 06:23:02 crc kubenswrapper[4933]: I0122 06:23:02.100293 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f509cda-4999-47f8-97dc-9ce0b968b8d8" containerName="registry-server" Jan 22 06:23:02 crc kubenswrapper[4933]: I0122 06:23:02.102960 4933 util.go:30] "No sandbox for pod can be found. 
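The Liveness probe entries above fire every 30 seconds against http://127.0.0.1:8798/health and fail with connection refused; once enough consecutive failures accumulate, the kubelet records "failed liveness probe, will be restarted" and kills the container with the pod's termination grace period. A minimal Go sketch of such an HTTP liveness loop (the 30s period is read off the timestamps above; the failure threshold of 3 is an assumed default, not read from this pod's spec):

    package main

    import (
    	"fmt"
    	"net/http"
    	"time"
    )

    // A periodic HTTP health check in the shape of the probes above. The
    // endpoint comes from the log; period and threshold are noted
    // assumptions, and the restart itself is out of scope for the sketch.
    func main() {
    	const url = "http://127.0.0.1:8798/health"
    	failures := 0
    	for range time.Tick(30 * time.Second) {
    		resp, err := http.Get(url)
    		if err == nil {
    			resp.Body.Close()
    		}
    		if err != nil || resp.StatusCode >= 400 {
    			failures++ // e.g. "connect: connection refused" as in the log
    		} else {
    			failures = 0
    		}
    		if failures >= 3 {
    			fmt.Println("failed liveness probe, will be restarted")
    			return
    		}
    	}
    }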
Jan 22 06:23:02 crc kubenswrapper[4933]: I0122 06:23:02.099136 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-p6xw2"]
Jan 22 06:23:02 crc kubenswrapper[4933]: E0122 06:23:02.100029 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f509cda-4999-47f8-97dc-9ce0b968b8d8" containerName="extract-content"
Jan 22 06:23:02 crc kubenswrapper[4933]: I0122 06:23:02.100049 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f509cda-4999-47f8-97dc-9ce0b968b8d8" containerName="extract-content"
Jan 22 06:23:02 crc kubenswrapper[4933]: E0122 06:23:02.100077 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f509cda-4999-47f8-97dc-9ce0b968b8d8" containerName="extract-utilities"
Jan 22 06:23:02 crc kubenswrapper[4933]: I0122 06:23:02.100089 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f509cda-4999-47f8-97dc-9ce0b968b8d8" containerName="extract-utilities"
Jan 22 06:23:02 crc kubenswrapper[4933]: E0122 06:23:02.100132 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f509cda-4999-47f8-97dc-9ce0b968b8d8" containerName="registry-server"
Jan 22 06:23:02 crc kubenswrapper[4933]: I0122 06:23:02.100140 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f509cda-4999-47f8-97dc-9ce0b968b8d8" containerName="registry-server"
Jan 22 06:23:02 crc kubenswrapper[4933]: I0122 06:23:02.100293 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f509cda-4999-47f8-97dc-9ce0b968b8d8" containerName="registry-server"
Jan 22 06:23:02 crc kubenswrapper[4933]: I0122 06:23:02.102960 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-p6xw2"
Jan 22 06:23:02 crc kubenswrapper[4933]: I0122 06:23:02.137223 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-p6xw2"]
Jan 22 06:23:02 crc kubenswrapper[4933]: I0122 06:23:02.164729 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2vvz8\" (UniqueName: \"kubernetes.io/projected/87572054-1a21-4939-af6f-442c0b4c59b2-kube-api-access-2vvz8\") pod \"certified-operators-p6xw2\" (UID: \"87572054-1a21-4939-af6f-442c0b4c59b2\") " pod="openshift-marketplace/certified-operators-p6xw2"
Jan 22 06:23:02 crc kubenswrapper[4933]: I0122 06:23:02.164834 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87572054-1a21-4939-af6f-442c0b4c59b2-catalog-content\") pod \"certified-operators-p6xw2\" (UID: \"87572054-1a21-4939-af6f-442c0b4c59b2\") " pod="openshift-marketplace/certified-operators-p6xw2"
Jan 22 06:23:02 crc kubenswrapper[4933]: I0122 06:23:02.164894 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87572054-1a21-4939-af6f-442c0b4c59b2-utilities\") pod \"certified-operators-p6xw2\" (UID: \"87572054-1a21-4939-af6f-442c0b4c59b2\") " pod="openshift-marketplace/certified-operators-p6xw2"
Jan 22 06:23:02 crc kubenswrapper[4933]: I0122 06:23:02.266690 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2vvz8\" (UniqueName: \"kubernetes.io/projected/87572054-1a21-4939-af6f-442c0b4c59b2-kube-api-access-2vvz8\") pod \"certified-operators-p6xw2\" (UID: \"87572054-1a21-4939-af6f-442c0b4c59b2\") " pod="openshift-marketplace/certified-operators-p6xw2"
Jan 22 06:23:02 crc kubenswrapper[4933]: I0122 06:23:02.266748 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87572054-1a21-4939-af6f-442c0b4c59b2-catalog-content\") pod \"certified-operators-p6xw2\" (UID: \"87572054-1a21-4939-af6f-442c0b4c59b2\") " pod="openshift-marketplace/certified-operators-p6xw2"
Jan 22 06:23:02 crc kubenswrapper[4933]: I0122 06:23:02.266783 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87572054-1a21-4939-af6f-442c0b4c59b2-utilities\") pod \"certified-operators-p6xw2\" (UID: \"87572054-1a21-4939-af6f-442c0b4c59b2\") " pod="openshift-marketplace/certified-operators-p6xw2"
Jan 22 06:23:02 crc kubenswrapper[4933]: I0122 06:23:02.267290 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87572054-1a21-4939-af6f-442c0b4c59b2-catalog-content\") pod \"certified-operators-p6xw2\" (UID: \"87572054-1a21-4939-af6f-442c0b4c59b2\") " pod="openshift-marketplace/certified-operators-p6xw2"
Jan 22 06:23:02 crc kubenswrapper[4933]: I0122 06:23:02.267313 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87572054-1a21-4939-af6f-442c0b4c59b2-utilities\") pod \"certified-operators-p6xw2\" (UID: \"87572054-1a21-4939-af6f-442c0b4c59b2\") " pod="openshift-marketplace/certified-operators-p6xw2"
Jan 22 06:23:02 crc kubenswrapper[4933]: I0122 06:23:02.289977 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2vvz8\" (UniqueName: \"kubernetes.io/projected/87572054-1a21-4939-af6f-442c0b4c59b2-kube-api-access-2vvz8\") pod \"certified-operators-p6xw2\" (UID: \"87572054-1a21-4939-af6f-442c0b4c59b2\") " pod="openshift-marketplace/certified-operators-p6xw2"
Jan 22 06:23:02 crc kubenswrapper[4933]: I0122 06:23:02.438586 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-p6xw2"
Jan 22 06:23:02 crc kubenswrapper[4933]: I0122 06:23:02.904490 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-p6xw2"]
Jan 22 06:23:03 crc kubenswrapper[4933]: I0122 06:23:03.135118 4933 generic.go:334] "Generic (PLEG): container finished" podID="87572054-1a21-4939-af6f-442c0b4c59b2" containerID="f88362845bbb717133a04b8a3ab2d94017db0c372bc25f9c94cd30d93575b759" exitCode=0
Jan 22 06:23:03 crc kubenswrapper[4933]: I0122 06:23:03.135200 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p6xw2" event={"ID":"87572054-1a21-4939-af6f-442c0b4c59b2","Type":"ContainerDied","Data":"f88362845bbb717133a04b8a3ab2d94017db0c372bc25f9c94cd30d93575b759"}
Jan 22 06:23:03 crc kubenswrapper[4933]: I0122 06:23:03.137766 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p6xw2" event={"ID":"87572054-1a21-4939-af6f-442c0b4c59b2","Type":"ContainerStarted","Data":"9586edc5351a97a07941d36c02184fe294710c09f89b5b2f591fb3caee07734f"}
Jan 22 06:23:04 crc kubenswrapper[4933]: I0122 06:23:04.148973 4933 generic.go:334] "Generic (PLEG): container finished" podID="87572054-1a21-4939-af6f-442c0b4c59b2" containerID="f79fbf28cd140aa8a7ce3530452feddd83429192572a4aa7e78b8da581d715ef" exitCode=0
Jan 22 06:23:04 crc kubenswrapper[4933]: I0122 06:23:04.149057 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p6xw2" event={"ID":"87572054-1a21-4939-af6f-442c0b4c59b2","Type":"ContainerDied","Data":"f79fbf28cd140aa8a7ce3530452feddd83429192572a4aa7e78b8da581d715ef"}
Jan 22 06:23:04 crc kubenswrapper[4933]: I0122 06:23:04.901121 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-lmxqn"]
Jan 22 06:23:04 crc kubenswrapper[4933]: I0122 06:23:04.904534 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lmxqn"
Jan 22 06:23:04 crc kubenswrapper[4933]: I0122 06:23:04.911344 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lmxqn"]
Jan 22 06:23:05 crc kubenswrapper[4933]: I0122 06:23:05.089137 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-bwbph"]
Jan 22 06:23:05 crc kubenswrapper[4933]: I0122 06:23:05.090429 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bwbph"
Jan 22 06:23:05 crc kubenswrapper[4933]: I0122 06:23:05.104033 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bwbph"]
Jan 22 06:23:05 crc kubenswrapper[4933]: I0122 06:23:05.106858 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4da7aab-73a5-4545-89b9-ef150983d178-utilities\") pod \"redhat-marketplace-lmxqn\" (UID: \"f4da7aab-73a5-4545-89b9-ef150983d178\") " pod="openshift-marketplace/redhat-marketplace-lmxqn"
Jan 22 06:23:05 crc kubenswrapper[4933]: I0122 06:23:05.106931 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/23d8b2c6-29f2-4606-8b9c-578ea344bbd9-utilities\") pod \"redhat-operators-bwbph\" (UID: \"23d8b2c6-29f2-4606-8b9c-578ea344bbd9\") " pod="openshift-marketplace/redhat-operators-bwbph"
Jan 22 06:23:05 crc kubenswrapper[4933]: I0122 06:23:05.106973 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dmlf9\" (UniqueName: \"kubernetes.io/projected/f4da7aab-73a5-4545-89b9-ef150983d178-kube-api-access-dmlf9\") pod \"redhat-marketplace-lmxqn\" (UID: \"f4da7aab-73a5-4545-89b9-ef150983d178\") " pod="openshift-marketplace/redhat-marketplace-lmxqn"
Jan 22 06:23:05 crc kubenswrapper[4933]: I0122 06:23:05.106997 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/23d8b2c6-29f2-4606-8b9c-578ea344bbd9-catalog-content\") pod \"redhat-operators-bwbph\" (UID: \"23d8b2c6-29f2-4606-8b9c-578ea344bbd9\") " pod="openshift-marketplace/redhat-operators-bwbph"
Jan 22 06:23:05 crc kubenswrapper[4933]: I0122 06:23:05.107020 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fh9nz\" (UniqueName: \"kubernetes.io/projected/23d8b2c6-29f2-4606-8b9c-578ea344bbd9-kube-api-access-fh9nz\") pod \"redhat-operators-bwbph\" (UID: \"23d8b2c6-29f2-4606-8b9c-578ea344bbd9\") " pod="openshift-marketplace/redhat-operators-bwbph"
Jan 22 06:23:05 crc kubenswrapper[4933]: I0122 06:23:05.107112 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4da7aab-73a5-4545-89b9-ef150983d178-catalog-content\") pod \"redhat-marketplace-lmxqn\" (UID: \"f4da7aab-73a5-4545-89b9-ef150983d178\") " pod="openshift-marketplace/redhat-marketplace-lmxqn"
Jan 22 06:23:05 crc kubenswrapper[4933]: I0122 06:23:05.157996 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p6xw2" event={"ID":"87572054-1a21-4939-af6f-442c0b4c59b2","Type":"ContainerStarted","Data":"970d7f08373261c0c3fbbc722b326d9b30ba43ba9b22a13341ffda40cd08f551"}
Jan 22 06:23:05 crc kubenswrapper[4933]: I0122 06:23:05.179393 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-p6xw2" podStartSLOduration=1.760849362 podStartE2EDuration="3.17937699s" podCreationTimestamp="2026-01-22 06:23:02 +0000 UTC" firstStartedPulling="2026-01-22 06:23:03.137495268 +0000 UTC m=+2230.974620661" lastFinishedPulling="2026-01-22 06:23:04.556022926 +0000 UTC m=+2232.393148289" observedRunningTime="2026-01-22 06:23:05.174531652 +0000 UTC m=+2233.011657015" watchObservedRunningTime="2026-01-22 06:23:05.17937699 +0000 UTC m=+2233.016502343"
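Each marketplace catalog pod above runs the same pipeline: extract-utilities and extract-content each finish with exitCode=0 (the "container finished" / ContainerDied pairs) before the next step starts, and only then does registry-server get its ContainerStarted event. A toy Go sketch of that exit-code gating (the run function is a hypothetical stand-in for starting a container and waiting on it):

    package main

    import "fmt"

    // run stands in for starting a container and waiting for it to exit;
    // only a zero exit code lets the pipeline advance, matching the
    // exitCode=0 / ContainerDied pairs logged before registry-server starts.
    func run(name string) int {
    	fmt.Printf("container %s finished exitCode=0\n", name)
    	return 0
    }

    func main() {
    	for _, init := range []string{"extract-utilities", "extract-content"} {
    		if code := run(init); code != 0 {
    			fmt.Printf("init container %s failed (exit %d); pod stays pending\n", init, code)
    			return
    		}
    	}
    	fmt.Println("ContainerStarted: registry-server")
    }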
Jan 22 06:23:05 crc kubenswrapper[4933]: I0122 06:23:05.207900 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/23d8b2c6-29f2-4606-8b9c-578ea344bbd9-utilities\") pod \"redhat-operators-bwbph\" (UID: \"23d8b2c6-29f2-4606-8b9c-578ea344bbd9\") " pod="openshift-marketplace/redhat-operators-bwbph"
Jan 22 06:23:05 crc kubenswrapper[4933]: I0122 06:23:05.207969 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dmlf9\" (UniqueName: \"kubernetes.io/projected/f4da7aab-73a5-4545-89b9-ef150983d178-kube-api-access-dmlf9\") pod \"redhat-marketplace-lmxqn\" (UID: \"f4da7aab-73a5-4545-89b9-ef150983d178\") " pod="openshift-marketplace/redhat-marketplace-lmxqn"
Jan 22 06:23:05 crc kubenswrapper[4933]: I0122 06:23:05.207992 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/23d8b2c6-29f2-4606-8b9c-578ea344bbd9-catalog-content\") pod \"redhat-operators-bwbph\" (UID: \"23d8b2c6-29f2-4606-8b9c-578ea344bbd9\") " pod="openshift-marketplace/redhat-operators-bwbph"
Jan 22 06:23:05 crc kubenswrapper[4933]: I0122 06:23:05.208017 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fh9nz\" (UniqueName: \"kubernetes.io/projected/23d8b2c6-29f2-4606-8b9c-578ea344bbd9-kube-api-access-fh9nz\") pod \"redhat-operators-bwbph\" (UID: \"23d8b2c6-29f2-4606-8b9c-578ea344bbd9\") " pod="openshift-marketplace/redhat-operators-bwbph"
Jan 22 06:23:05 crc kubenswrapper[4933]: I0122 06:23:05.208105 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4da7aab-73a5-4545-89b9-ef150983d178-catalog-content\") pod \"redhat-marketplace-lmxqn\" (UID: \"f4da7aab-73a5-4545-89b9-ef150983d178\") " pod="openshift-marketplace/redhat-marketplace-lmxqn"
Jan 22 06:23:05 crc kubenswrapper[4933]: I0122 06:23:05.208136 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4da7aab-73a5-4545-89b9-ef150983d178-utilities\") pod \"redhat-marketplace-lmxqn\" (UID: \"f4da7aab-73a5-4545-89b9-ef150983d178\") " pod="openshift-marketplace/redhat-marketplace-lmxqn"
Jan 22 06:23:05 crc kubenswrapper[4933]: I0122 06:23:05.208596 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4da7aab-73a5-4545-89b9-ef150983d178-utilities\") pod \"redhat-marketplace-lmxqn\" (UID: \"f4da7aab-73a5-4545-89b9-ef150983d178\") " pod="openshift-marketplace/redhat-marketplace-lmxqn"
Jan 22 06:23:05 crc kubenswrapper[4933]: I0122 06:23:05.208705 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/23d8b2c6-29f2-4606-8b9c-578ea344bbd9-catalog-content\") pod \"redhat-operators-bwbph\" (UID: \"23d8b2c6-29f2-4606-8b9c-578ea344bbd9\") " pod="openshift-marketplace/redhat-operators-bwbph"
Jan 22 06:23:05 crc kubenswrapper[4933]: I0122 06:23:05.208737 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/23d8b2c6-29f2-4606-8b9c-578ea344bbd9-utilities\") pod \"redhat-operators-bwbph\" (UID: \"23d8b2c6-29f2-4606-8b9c-578ea344bbd9\") " pod="openshift-marketplace/redhat-operators-bwbph"
Jan 22 06:23:05 crc kubenswrapper[4933]: I0122 06:23:05.209151 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4da7aab-73a5-4545-89b9-ef150983d178-catalog-content\") pod \"redhat-marketplace-lmxqn\" (UID: \"f4da7aab-73a5-4545-89b9-ef150983d178\") " pod="openshift-marketplace/redhat-marketplace-lmxqn"
Jan 22 06:23:05 crc kubenswrapper[4933]: I0122 06:23:05.226229 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fh9nz\" (UniqueName: \"kubernetes.io/projected/23d8b2c6-29f2-4606-8b9c-578ea344bbd9-kube-api-access-fh9nz\") pod \"redhat-operators-bwbph\" (UID: \"23d8b2c6-29f2-4606-8b9c-578ea344bbd9\") " pod="openshift-marketplace/redhat-operators-bwbph"
Jan 22 06:23:05 crc kubenswrapper[4933]: I0122 06:23:05.226479 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dmlf9\" (UniqueName: \"kubernetes.io/projected/f4da7aab-73a5-4545-89b9-ef150983d178-kube-api-access-dmlf9\") pod \"redhat-marketplace-lmxqn\" (UID: \"f4da7aab-73a5-4545-89b9-ef150983d178\") " pod="openshift-marketplace/redhat-marketplace-lmxqn"
Jan 22 06:23:05 crc kubenswrapper[4933]: I0122 06:23:05.232340 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lmxqn"
Jan 22 06:23:05 crc kubenswrapper[4933]: I0122 06:23:05.408117 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bwbph"
Jan 22 06:23:05 crc kubenswrapper[4933]: I0122 06:23:05.447306 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lmxqn"]
Jan 22 06:23:05 crc kubenswrapper[4933]: W0122 06:23:05.452033 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4da7aab_73a5_4545_89b9_ef150983d178.slice/crio-58ff73c8934f49563cb9586a532f98bbde8943cf3cff4b7884a6753b8b1716a8 WatchSource:0}: Error finding container 58ff73c8934f49563cb9586a532f98bbde8943cf3cff4b7884a6753b8b1716a8: Status 404 returned error can't find the container with id 58ff73c8934f49563cb9586a532f98bbde8943cf3cff4b7884a6753b8b1716a8
Jan 22 06:23:05 crc kubenswrapper[4933]: I0122 06:23:05.866967 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bwbph"]
Jan 22 06:23:05 crc kubenswrapper[4933]: W0122 06:23:05.871769 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod23d8b2c6_29f2_4606_8b9c_578ea344bbd9.slice/crio-1933993eb3e98896884f570ea3b41d059269c66a88ee61ca4938feff07208ed5 WatchSource:0}: Error finding container 1933993eb3e98896884f570ea3b41d059269c66a88ee61ca4938feff07208ed5: Status 404 returned error can't find the container with id 1933993eb3e98896884f570ea3b41d059269c66a88ee61ca4938feff07208ed5
Jan 22 06:23:06 crc kubenswrapper[4933]: I0122 06:23:06.166430 4933 generic.go:334] "Generic (PLEG): container finished" podID="23d8b2c6-29f2-4606-8b9c-578ea344bbd9" containerID="01c2c3d25a4b6b1e38829e93ee9446cebd4182f4a006463a59d5707b9c7dbb3d" exitCode=0
Jan 22 06:23:06 crc kubenswrapper[4933]: I0122 06:23:06.166511 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bwbph" event={"ID":"23d8b2c6-29f2-4606-8b9c-578ea344bbd9","Type":"ContainerDied","Data":"01c2c3d25a4b6b1e38829e93ee9446cebd4182f4a006463a59d5707b9c7dbb3d"}
Jan 22 06:23:06 crc kubenswrapper[4933]: I0122 06:23:06.166762 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bwbph" event={"ID":"23d8b2c6-29f2-4606-8b9c-578ea344bbd9","Type":"ContainerStarted","Data":"1933993eb3e98896884f570ea3b41d059269c66a88ee61ca4938feff07208ed5"}
Jan 22 06:23:06 crc kubenswrapper[4933]: I0122 06:23:06.170277 4933 generic.go:334] "Generic (PLEG): container finished" podID="f4da7aab-73a5-4545-89b9-ef150983d178" containerID="8465ba96d61ef7bb447c05b020668e5d4798c6d1bfe8049e40320ee6361463c2" exitCode=0
Jan 22 06:23:06 crc kubenswrapper[4933]: I0122 06:23:06.170334 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lmxqn" event={"ID":"f4da7aab-73a5-4545-89b9-ef150983d178","Type":"ContainerDied","Data":"8465ba96d61ef7bb447c05b020668e5d4798c6d1bfe8049e40320ee6361463c2"}
Jan 22 06:23:06 crc kubenswrapper[4933]: I0122 06:23:06.170391 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lmxqn" event={"ID":"f4da7aab-73a5-4545-89b9-ef150983d178","Type":"ContainerStarted","Data":"58ff73c8934f49563cb9586a532f98bbde8943cf3cff4b7884a6753b8b1716a8"}
Jan 22 06:23:08 crc kubenswrapper[4933]: I0122 06:23:08.190434 4933 generic.go:334] "Generic (PLEG): container finished" podID="23d8b2c6-29f2-4606-8b9c-578ea344bbd9" containerID="3757f37111d1a982711d5e2c7d836818779997357e6f84f0d878a23e2b055799" exitCode=0
Jan 22 06:23:08 crc kubenswrapper[4933]: I0122 06:23:08.190490 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bwbph" event={"ID":"23d8b2c6-29f2-4606-8b9c-578ea344bbd9","Type":"ContainerDied","Data":"3757f37111d1a982711d5e2c7d836818779997357e6f84f0d878a23e2b055799"}
Jan 22 06:23:08 crc kubenswrapper[4933]: I0122 06:23:08.194572 4933 generic.go:334] "Generic (PLEG): container finished" podID="f4da7aab-73a5-4545-89b9-ef150983d178" containerID="667eab074e461e132a37d8aaa7deadbd94e4666d7cbd1a75cbb2b9cef2686b82" exitCode=0
Jan 22 06:23:08 crc kubenswrapper[4933]: I0122 06:23:08.194637 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lmxqn" event={"ID":"f4da7aab-73a5-4545-89b9-ef150983d178","Type":"ContainerDied","Data":"667eab074e461e132a37d8aaa7deadbd94e4666d7cbd1a75cbb2b9cef2686b82"}
Jan 22 06:23:09 crc kubenswrapper[4933]: I0122 06:23:09.203140 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lmxqn" event={"ID":"f4da7aab-73a5-4545-89b9-ef150983d178","Type":"ContainerStarted","Data":"5c03254ff3dcfb1eeceb31d6331e4aedc973593247e9baa1544fc79534a71fd3"}
Jan 22 06:23:09 crc kubenswrapper[4933]: I0122 06:23:09.205539 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bwbph" event={"ID":"23d8b2c6-29f2-4606-8b9c-578ea344bbd9","Type":"ContainerStarted","Data":"d69be314d3c21eba69743dfe6914d718f002b6cfd2267843c8660f9e41c813f7"}
Jan 22 06:23:09 crc kubenswrapper[4933]: I0122 06:23:09.229055 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-lmxqn" podStartSLOduration=2.785580915 podStartE2EDuration="5.229038683s" podCreationTimestamp="2026-01-22 06:23:04 +0000 UTC" firstStartedPulling="2026-01-22 06:23:06.171381775 +0000 UTC m=+2234.008507118" lastFinishedPulling="2026-01-22 06:23:08.614839513 +0000 UTC m=+2236.451964886" observedRunningTime="2026-01-22 06:23:09.225983088 +0000 UTC m=+2237.063108481" watchObservedRunningTime="2026-01-22 06:23:09.229038683 +0000 UTC m=+2237.066164036"
Jan 22 06:23:09 crc kubenswrapper[4933]: I0122 06:23:09.248673 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-bwbph" podStartSLOduration=1.834347597 podStartE2EDuration="4.248654042s" podCreationTimestamp="2026-01-22 06:23:05 +0000 UTC" firstStartedPulling="2026-01-22 06:23:06.168839213 +0000 UTC m=+2234.005964566" lastFinishedPulling="2026-01-22 06:23:08.583145618 +0000 UTC m=+2236.420271011" observedRunningTime="2026-01-22 06:23:09.245334061 +0000 UTC m=+2237.082459444" watchObservedRunningTime="2026-01-22 06:23:09.248654042 +0000 UTC m=+2237.085779395"
Jan 22 06:23:12 crc kubenswrapper[4933]: I0122 06:23:12.442558 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-p6xw2"
Jan 22 06:23:12 crc kubenswrapper[4933]: I0122 06:23:12.442902 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-p6xw2"
Jan 22 06:23:12 crc kubenswrapper[4933]: I0122 06:23:12.485655 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-p6xw2"
Jan 22 06:23:13 crc kubenswrapper[4933]: I0122 06:23:13.292800 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-p6xw2"
Jan 22 06:23:15 crc kubenswrapper[4933]: I0122 06:23:15.232774 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-lmxqn"
Jan 22 06:23:15 crc kubenswrapper[4933]: I0122 06:23:15.233209 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-lmxqn"
Jan 22 06:23:15 crc kubenswrapper[4933]: I0122 06:23:15.291756 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-lmxqn"
Jan 22 06:23:15 crc kubenswrapper[4933]: I0122 06:23:15.367959 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-lmxqn"
Jan 22 06:23:15 crc kubenswrapper[4933]: I0122 06:23:15.409013 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-bwbph"
Jan 22 06:23:15 crc kubenswrapper[4933]: I0122 06:23:15.409067 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-bwbph"
Jan 22 06:23:15 crc kubenswrapper[4933]: I0122 06:23:15.460559 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-bwbph"
Jan 22 06:23:16 crc kubenswrapper[4933]: I0122 06:23:16.287835 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lmxqn"]
Jan 22 06:23:16 crc kubenswrapper[4933]: I0122 06:23:16.318019 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-bwbph"
Jan 22 06:23:17 crc kubenswrapper[4933]: I0122 06:23:17.270175 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-lmxqn" podUID="f4da7aab-73a5-4545-89b9-ef150983d178" containerName="registry-server" containerID="cri-o://5c03254ff3dcfb1eeceb31d6331e4aedc973593247e9baa1544fc79534a71fd3" gracePeriod=2
Jan 22 06:23:17 crc kubenswrapper[4933]: I0122 06:23:17.683297 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bwbph"]
Jan 22 06:23:18 crc kubenswrapper[4933]: I0122 06:23:18.299413 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-bwbph" podUID="23d8b2c6-29f2-4606-8b9c-578ea344bbd9" containerName="registry-server" containerID="cri-o://d69be314d3c21eba69743dfe6914d718f002b6cfd2267843c8660f9e41c813f7" gracePeriod=2
Jan 22 06:23:19 crc kubenswrapper[4933]: I0122 06:23:19.307978 4933 generic.go:334] "Generic (PLEG): container finished" podID="23d8b2c6-29f2-4606-8b9c-578ea344bbd9" containerID="d69be314d3c21eba69743dfe6914d718f002b6cfd2267843c8660f9e41c813f7" exitCode=0
Jan 22 06:23:19 crc kubenswrapper[4933]: I0122 06:23:19.308123 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bwbph" event={"ID":"23d8b2c6-29f2-4606-8b9c-578ea344bbd9","Type":"ContainerDied","Data":"d69be314d3c21eba69743dfe6914d718f002b6cfd2267843c8660f9e41c813f7"}
Jan 22 06:23:19 crc kubenswrapper[4933]: I0122 06:23:19.310584 4933 generic.go:334] "Generic (PLEG): container finished" podID="f4da7aab-73a5-4545-89b9-ef150983d178" containerID="5c03254ff3dcfb1eeceb31d6331e4aedc973593247e9baa1544fc79534a71fd3" exitCode=0
Jan 22 06:23:19 crc kubenswrapper[4933]: I0122 06:23:19.310622 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lmxqn" event={"ID":"f4da7aab-73a5-4545-89b9-ef150983d178","Type":"ContainerDied","Data":"5c03254ff3dcfb1eeceb31d6331e4aedc973593247e9baa1544fc79534a71fd3"}
Jan 22 06:23:19 crc kubenswrapper[4933]: I0122 06:23:19.609630 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lmxqn"
Jan 22 06:23:19 crc kubenswrapper[4933]: I0122 06:23:19.713051 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4da7aab-73a5-4545-89b9-ef150983d178-utilities\") pod \"f4da7aab-73a5-4545-89b9-ef150983d178\" (UID: \"f4da7aab-73a5-4545-89b9-ef150983d178\") "
Jan 22 06:23:19 crc kubenswrapper[4933]: I0122 06:23:19.713210 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dmlf9\" (UniqueName: \"kubernetes.io/projected/f4da7aab-73a5-4545-89b9-ef150983d178-kube-api-access-dmlf9\") pod \"f4da7aab-73a5-4545-89b9-ef150983d178\" (UID: \"f4da7aab-73a5-4545-89b9-ef150983d178\") "
Jan 22 06:23:19 crc kubenswrapper[4933]: I0122 06:23:19.713426 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4da7aab-73a5-4545-89b9-ef150983d178-catalog-content\") pod \"f4da7aab-73a5-4545-89b9-ef150983d178\" (UID: \"f4da7aab-73a5-4545-89b9-ef150983d178\") "
Jan 22 06:23:19 crc kubenswrapper[4933]: I0122 06:23:19.714152 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f4da7aab-73a5-4545-89b9-ef150983d178-utilities" (OuterVolumeSpecName: "utilities") pod "f4da7aab-73a5-4545-89b9-ef150983d178" (UID: "f4da7aab-73a5-4545-89b9-ef150983d178"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:23:19 crc kubenswrapper[4933]: I0122 06:23:19.721613 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f4da7aab-73a5-4545-89b9-ef150983d178-kube-api-access-dmlf9" (OuterVolumeSpecName: "kube-api-access-dmlf9") pod "f4da7aab-73a5-4545-89b9-ef150983d178" (UID: "f4da7aab-73a5-4545-89b9-ef150983d178"). InnerVolumeSpecName "kube-api-access-dmlf9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:23:19 crc kubenswrapper[4933]: I0122 06:23:19.745013 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f4da7aab-73a5-4545-89b9-ef150983d178-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f4da7aab-73a5-4545-89b9-ef150983d178" (UID: "f4da7aab-73a5-4545-89b9-ef150983d178"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:23:19 crc kubenswrapper[4933]: I0122 06:23:19.801746 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bwbph" Jan 22 06:23:19 crc kubenswrapper[4933]: I0122 06:23:19.819297 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4da7aab-73a5-4545-89b9-ef150983d178-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 06:23:19 crc kubenswrapper[4933]: I0122 06:23:19.819370 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4da7aab-73a5-4545-89b9-ef150983d178-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 06:23:19 crc kubenswrapper[4933]: I0122 06:23:19.819395 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dmlf9\" (UniqueName: \"kubernetes.io/projected/f4da7aab-73a5-4545-89b9-ef150983d178-kube-api-access-dmlf9\") on node \"crc\" DevicePath \"\"" Jan 22 06:23:19 crc kubenswrapper[4933]: I0122 06:23:19.920484 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/23d8b2c6-29f2-4606-8b9c-578ea344bbd9-catalog-content\") pod \"23d8b2c6-29f2-4606-8b9c-578ea344bbd9\" (UID: \"23d8b2c6-29f2-4606-8b9c-578ea344bbd9\") " Jan 22 06:23:19 crc kubenswrapper[4933]: I0122 06:23:19.920552 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/23d8b2c6-29f2-4606-8b9c-578ea344bbd9-utilities\") pod \"23d8b2c6-29f2-4606-8b9c-578ea344bbd9\" (UID: \"23d8b2c6-29f2-4606-8b9c-578ea344bbd9\") " Jan 22 06:23:19 crc kubenswrapper[4933]: I0122 06:23:19.920654 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fh9nz\" (UniqueName: \"kubernetes.io/projected/23d8b2c6-29f2-4606-8b9c-578ea344bbd9-kube-api-access-fh9nz\") pod \"23d8b2c6-29f2-4606-8b9c-578ea344bbd9\" (UID: \"23d8b2c6-29f2-4606-8b9c-578ea344bbd9\") " Jan 22 06:23:19 crc kubenswrapper[4933]: I0122 06:23:19.922188 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/23d8b2c6-29f2-4606-8b9c-578ea344bbd9-utilities" (OuterVolumeSpecName: "utilities") pod "23d8b2c6-29f2-4606-8b9c-578ea344bbd9" (UID: "23d8b2c6-29f2-4606-8b9c-578ea344bbd9"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:23:19 crc kubenswrapper[4933]: I0122 06:23:19.924683 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/23d8b2c6-29f2-4606-8b9c-578ea344bbd9-kube-api-access-fh9nz" (OuterVolumeSpecName: "kube-api-access-fh9nz") pod "23d8b2c6-29f2-4606-8b9c-578ea344bbd9" (UID: "23d8b2c6-29f2-4606-8b9c-578ea344bbd9"). InnerVolumeSpecName "kube-api-access-fh9nz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:23:20 crc kubenswrapper[4933]: I0122 06:23:20.022394 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fh9nz\" (UniqueName: \"kubernetes.io/projected/23d8b2c6-29f2-4606-8b9c-578ea344bbd9-kube-api-access-fh9nz\") on node \"crc\" DevicePath \"\"" Jan 22 06:23:20 crc kubenswrapper[4933]: I0122 06:23:20.022460 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/23d8b2c6-29f2-4606-8b9c-578ea344bbd9-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 06:23:20 crc kubenswrapper[4933]: I0122 06:23:20.094593 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/23d8b2c6-29f2-4606-8b9c-578ea344bbd9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "23d8b2c6-29f2-4606-8b9c-578ea344bbd9" (UID: "23d8b2c6-29f2-4606-8b9c-578ea344bbd9"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:23:20 crc kubenswrapper[4933]: I0122 06:23:20.123920 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/23d8b2c6-29f2-4606-8b9c-578ea344bbd9-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 06:23:20 crc kubenswrapper[4933]: I0122 06:23:20.319586 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bwbph" event={"ID":"23d8b2c6-29f2-4606-8b9c-578ea344bbd9","Type":"ContainerDied","Data":"1933993eb3e98896884f570ea3b41d059269c66a88ee61ca4938feff07208ed5"} Jan 22 06:23:20 crc kubenswrapper[4933]: I0122 06:23:20.319645 4933 scope.go:117] "RemoveContainer" containerID="d69be314d3c21eba69743dfe6914d718f002b6cfd2267843c8660f9e41c813f7" Jan 22 06:23:20 crc kubenswrapper[4933]: I0122 06:23:20.319640 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bwbph" Jan 22 06:23:20 crc kubenswrapper[4933]: I0122 06:23:20.323440 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lmxqn" event={"ID":"f4da7aab-73a5-4545-89b9-ef150983d178","Type":"ContainerDied","Data":"58ff73c8934f49563cb9586a532f98bbde8943cf3cff4b7884a6753b8b1716a8"} Jan 22 06:23:20 crc kubenswrapper[4933]: I0122 06:23:20.323521 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lmxqn" Jan 22 06:23:20 crc kubenswrapper[4933]: I0122 06:23:20.341132 4933 scope.go:117] "RemoveContainer" containerID="3757f37111d1a982711d5e2c7d836818779997357e6f84f0d878a23e2b055799" Jan 22 06:23:20 crc kubenswrapper[4933]: I0122 06:23:20.373320 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bwbph"] Jan 22 06:23:20 crc kubenswrapper[4933]: I0122 06:23:20.375514 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-bwbph"] Jan 22 06:23:20 crc kubenswrapper[4933]: I0122 06:23:20.380257 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lmxqn"] Jan 22 06:23:20 crc kubenswrapper[4933]: I0122 06:23:20.385549 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-lmxqn"] Jan 22 06:23:20 crc kubenswrapper[4933]: I0122 06:23:20.387635 4933 scope.go:117] "RemoveContainer" containerID="01c2c3d25a4b6b1e38829e93ee9446cebd4182f4a006463a59d5707b9c7dbb3d" Jan 22 06:23:20 crc kubenswrapper[4933]: I0122 06:23:20.408113 4933 scope.go:117] "RemoveContainer" containerID="5c03254ff3dcfb1eeceb31d6331e4aedc973593247e9baa1544fc79534a71fd3" Jan 22 06:23:20 crc kubenswrapper[4933]: I0122 06:23:20.434466 4933 scope.go:117] "RemoveContainer" containerID="667eab074e461e132a37d8aaa7deadbd94e4666d7cbd1a75cbb2b9cef2686b82" Jan 22 06:23:20 crc kubenswrapper[4933]: I0122 06:23:20.449619 4933 scope.go:117] "RemoveContainer" containerID="8465ba96d61ef7bb447c05b020668e5d4798c6d1bfe8049e40320ee6361463c2" Jan 22 06:23:20 crc kubenswrapper[4933]: I0122 06:23:20.497723 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="23d8b2c6-29f2-4606-8b9c-578ea344bbd9" path="/var/lib/kubelet/pods/23d8b2c6-29f2-4606-8b9c-578ea344bbd9/volumes" Jan 22 06:23:20 crc kubenswrapper[4933]: I0122 06:23:20.498302 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4da7aab-73a5-4545-89b9-ef150983d178" path="/var/lib/kubelet/pods/f4da7aab-73a5-4545-89b9-ef150983d178/volumes" Jan 22 06:23:20 crc kubenswrapper[4933]: I0122 06:23:20.685991 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-p6xw2"] Jan 22 06:23:20 crc kubenswrapper[4933]: I0122 06:23:20.686628 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-p6xw2" podUID="87572054-1a21-4939-af6f-442c0b4c59b2" containerName="registry-server" containerID="cri-o://970d7f08373261c0c3fbbc722b326d9b30ba43ba9b22a13341ffda40cd08f551" gracePeriod=2 Jan 22 06:23:21 crc kubenswrapper[4933]: I0122 06:23:21.123509 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-p6xw2" Jan 22 06:23:21 crc kubenswrapper[4933]: I0122 06:23:21.242412 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87572054-1a21-4939-af6f-442c0b4c59b2-utilities\") pod \"87572054-1a21-4939-af6f-442c0b4c59b2\" (UID: \"87572054-1a21-4939-af6f-442c0b4c59b2\") " Jan 22 06:23:21 crc kubenswrapper[4933]: I0122 06:23:21.242549 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87572054-1a21-4939-af6f-442c0b4c59b2-catalog-content\") pod \"87572054-1a21-4939-af6f-442c0b4c59b2\" (UID: \"87572054-1a21-4939-af6f-442c0b4c59b2\") " Jan 22 06:23:21 crc kubenswrapper[4933]: I0122 06:23:21.242586 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2vvz8\" (UniqueName: \"kubernetes.io/projected/87572054-1a21-4939-af6f-442c0b4c59b2-kube-api-access-2vvz8\") pod \"87572054-1a21-4939-af6f-442c0b4c59b2\" (UID: \"87572054-1a21-4939-af6f-442c0b4c59b2\") " Jan 22 06:23:21 crc kubenswrapper[4933]: I0122 06:23:21.243984 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/87572054-1a21-4939-af6f-442c0b4c59b2-utilities" (OuterVolumeSpecName: "utilities") pod "87572054-1a21-4939-af6f-442c0b4c59b2" (UID: "87572054-1a21-4939-af6f-442c0b4c59b2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:23:21 crc kubenswrapper[4933]: I0122 06:23:21.249381 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87572054-1a21-4939-af6f-442c0b4c59b2-kube-api-access-2vvz8" (OuterVolumeSpecName: "kube-api-access-2vvz8") pod "87572054-1a21-4939-af6f-442c0b4c59b2" (UID: "87572054-1a21-4939-af6f-442c0b4c59b2"). InnerVolumeSpecName "kube-api-access-2vvz8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:23:21 crc kubenswrapper[4933]: I0122 06:23:21.315197 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/87572054-1a21-4939-af6f-442c0b4c59b2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "87572054-1a21-4939-af6f-442c0b4c59b2" (UID: "87572054-1a21-4939-af6f-442c0b4c59b2"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:23:21 crc kubenswrapper[4933]: I0122 06:23:21.338008 4933 generic.go:334] "Generic (PLEG): container finished" podID="87572054-1a21-4939-af6f-442c0b4c59b2" containerID="970d7f08373261c0c3fbbc722b326d9b30ba43ba9b22a13341ffda40cd08f551" exitCode=0 Jan 22 06:23:21 crc kubenswrapper[4933]: I0122 06:23:21.338071 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p6xw2" event={"ID":"87572054-1a21-4939-af6f-442c0b4c59b2","Type":"ContainerDied","Data":"970d7f08373261c0c3fbbc722b326d9b30ba43ba9b22a13341ffda40cd08f551"} Jan 22 06:23:21 crc kubenswrapper[4933]: I0122 06:23:21.338123 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p6xw2" event={"ID":"87572054-1a21-4939-af6f-442c0b4c59b2","Type":"ContainerDied","Data":"9586edc5351a97a07941d36c02184fe294710c09f89b5b2f591fb3caee07734f"} Jan 22 06:23:21 crc kubenswrapper[4933]: I0122 06:23:21.338141 4933 scope.go:117] "RemoveContainer" containerID="970d7f08373261c0c3fbbc722b326d9b30ba43ba9b22a13341ffda40cd08f551" Jan 22 06:23:21 crc kubenswrapper[4933]: I0122 06:23:21.338148 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-p6xw2" Jan 22 06:23:21 crc kubenswrapper[4933]: I0122 06:23:21.344766 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87572054-1a21-4939-af6f-442c0b4c59b2-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 06:23:21 crc kubenswrapper[4933]: I0122 06:23:21.344856 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2vvz8\" (UniqueName: \"kubernetes.io/projected/87572054-1a21-4939-af6f-442c0b4c59b2-kube-api-access-2vvz8\") on node \"crc\" DevicePath \"\"" Jan 22 06:23:21 crc kubenswrapper[4933]: I0122 06:23:21.344885 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87572054-1a21-4939-af6f-442c0b4c59b2-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 06:23:21 crc kubenswrapper[4933]: I0122 06:23:21.365414 4933 scope.go:117] "RemoveContainer" containerID="f79fbf28cd140aa8a7ce3530452feddd83429192572a4aa7e78b8da581d715ef" Jan 22 06:23:21 crc kubenswrapper[4933]: I0122 06:23:21.395330 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-p6xw2"] Jan 22 06:23:21 crc kubenswrapper[4933]: I0122 06:23:21.403490 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-p6xw2"] Jan 22 06:23:21 crc kubenswrapper[4933]: I0122 06:23:21.406069 4933 scope.go:117] "RemoveContainer" containerID="f88362845bbb717133a04b8a3ab2d94017db0c372bc25f9c94cd30d93575b759" Jan 22 06:23:21 crc kubenswrapper[4933]: I0122 06:23:21.436774 4933 scope.go:117] "RemoveContainer" containerID="970d7f08373261c0c3fbbc722b326d9b30ba43ba9b22a13341ffda40cd08f551" Jan 22 06:23:21 crc kubenswrapper[4933]: E0122 06:23:21.437455 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"970d7f08373261c0c3fbbc722b326d9b30ba43ba9b22a13341ffda40cd08f551\": container with ID starting with 970d7f08373261c0c3fbbc722b326d9b30ba43ba9b22a13341ffda40cd08f551 not found: ID does not exist" containerID="970d7f08373261c0c3fbbc722b326d9b30ba43ba9b22a13341ffda40cd08f551" Jan 22 06:23:21 crc kubenswrapper[4933]: I0122 06:23:21.437502 
4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"970d7f08373261c0c3fbbc722b326d9b30ba43ba9b22a13341ffda40cd08f551"} err="failed to get container status \"970d7f08373261c0c3fbbc722b326d9b30ba43ba9b22a13341ffda40cd08f551\": rpc error: code = NotFound desc = could not find container \"970d7f08373261c0c3fbbc722b326d9b30ba43ba9b22a13341ffda40cd08f551\": container with ID starting with 970d7f08373261c0c3fbbc722b326d9b30ba43ba9b22a13341ffda40cd08f551 not found: ID does not exist" Jan 22 06:23:21 crc kubenswrapper[4933]: I0122 06:23:21.437531 4933 scope.go:117] "RemoveContainer" containerID="f79fbf28cd140aa8a7ce3530452feddd83429192572a4aa7e78b8da581d715ef" Jan 22 06:23:21 crc kubenswrapper[4933]: E0122 06:23:21.437995 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f79fbf28cd140aa8a7ce3530452feddd83429192572a4aa7e78b8da581d715ef\": container with ID starting with f79fbf28cd140aa8a7ce3530452feddd83429192572a4aa7e78b8da581d715ef not found: ID does not exist" containerID="f79fbf28cd140aa8a7ce3530452feddd83429192572a4aa7e78b8da581d715ef" Jan 22 06:23:21 crc kubenswrapper[4933]: I0122 06:23:21.438040 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f79fbf28cd140aa8a7ce3530452feddd83429192572a4aa7e78b8da581d715ef"} err="failed to get container status \"f79fbf28cd140aa8a7ce3530452feddd83429192572a4aa7e78b8da581d715ef\": rpc error: code = NotFound desc = could not find container \"f79fbf28cd140aa8a7ce3530452feddd83429192572a4aa7e78b8da581d715ef\": container with ID starting with f79fbf28cd140aa8a7ce3530452feddd83429192572a4aa7e78b8da581d715ef not found: ID does not exist" Jan 22 06:23:21 crc kubenswrapper[4933]: I0122 06:23:21.438064 4933 scope.go:117] "RemoveContainer" containerID="f88362845bbb717133a04b8a3ab2d94017db0c372bc25f9c94cd30d93575b759" Jan 22 06:23:21 crc kubenswrapper[4933]: E0122 06:23:21.438391 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f88362845bbb717133a04b8a3ab2d94017db0c372bc25f9c94cd30d93575b759\": container with ID starting with f88362845bbb717133a04b8a3ab2d94017db0c372bc25f9c94cd30d93575b759 not found: ID does not exist" containerID="f88362845bbb717133a04b8a3ab2d94017db0c372bc25f9c94cd30d93575b759" Jan 22 06:23:21 crc kubenswrapper[4933]: I0122 06:23:21.438424 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f88362845bbb717133a04b8a3ab2d94017db0c372bc25f9c94cd30d93575b759"} err="failed to get container status \"f88362845bbb717133a04b8a3ab2d94017db0c372bc25f9c94cd30d93575b759\": rpc error: code = NotFound desc = could not find container \"f88362845bbb717133a04b8a3ab2d94017db0c372bc25f9c94cd30d93575b759\": container with ID starting with f88362845bbb717133a04b8a3ab2d94017db0c372bc25f9c94cd30d93575b759 not found: ID does not exist" Jan 22 06:23:22 crc kubenswrapper[4933]: I0122 06:23:22.505194 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87572054-1a21-4939-af6f-442c0b4c59b2" path="/var/lib/kubelet/pods/87572054-1a21-4939-af6f-442c0b4c59b2/volumes" Jan 22 06:25:10 crc kubenswrapper[4933]: I0122 06:25:10.943344 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:25:10 crc kubenswrapper[4933]: I0122 06:25:10.943865 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:25:40 crc kubenswrapper[4933]: I0122 06:25:40.943333 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:25:40 crc kubenswrapper[4933]: I0122 06:25:40.943926 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:26:10 crc kubenswrapper[4933]: I0122 06:26:10.942932 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:26:10 crc kubenswrapper[4933]: I0122 06:26:10.943876 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:26:10 crc kubenswrapper[4933]: I0122 06:26:10.943951 4933 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" Jan 22 06:26:10 crc kubenswrapper[4933]: I0122 06:26:10.944867 4933 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9441aafeefcbaee830d62084b2728e4a2bab7fe497b4fa5480565e507d8741df"} pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 06:26:10 crc kubenswrapper[4933]: I0122 06:26:10.944972 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" containerID="cri-o://9441aafeefcbaee830d62084b2728e4a2bab7fe497b4fa5480565e507d8741df" gracePeriod=600 Jan 22 06:26:11 crc kubenswrapper[4933]: E0122 06:26:11.075060 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:26:11 crc kubenswrapper[4933]: I0122 06:26:11.871277 4933 
generic.go:334] "Generic (PLEG): container finished" podID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerID="9441aafeefcbaee830d62084b2728e4a2bab7fe497b4fa5480565e507d8741df" exitCode=0 Jan 22 06:26:11 crc kubenswrapper[4933]: I0122 06:26:11.871366 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerDied","Data":"9441aafeefcbaee830d62084b2728e4a2bab7fe497b4fa5480565e507d8741df"} Jan 22 06:26:11 crc kubenswrapper[4933]: I0122 06:26:11.871479 4933 scope.go:117] "RemoveContainer" containerID="fe8d3968014e868d088b1477d12be705e72f0a7b1c028581fa7e7552a65574d9" Jan 22 06:26:11 crc kubenswrapper[4933]: I0122 06:26:11.873634 4933 scope.go:117] "RemoveContainer" containerID="9441aafeefcbaee830d62084b2728e4a2bab7fe497b4fa5480565e507d8741df" Jan 22 06:26:11 crc kubenswrapper[4933]: E0122 06:26:11.874609 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:26:26 crc kubenswrapper[4933]: I0122 06:26:26.491679 4933 scope.go:117] "RemoveContainer" containerID="9441aafeefcbaee830d62084b2728e4a2bab7fe497b4fa5480565e507d8741df" Jan 22 06:26:26 crc kubenswrapper[4933]: E0122 06:26:26.493031 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:26:41 crc kubenswrapper[4933]: I0122 06:26:41.490695 4933 scope.go:117] "RemoveContainer" containerID="9441aafeefcbaee830d62084b2728e4a2bab7fe497b4fa5480565e507d8741df" Jan 22 06:26:41 crc kubenswrapper[4933]: E0122 06:26:41.491592 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:26:53 crc kubenswrapper[4933]: I0122 06:26:53.490721 4933 scope.go:117] "RemoveContainer" containerID="9441aafeefcbaee830d62084b2728e4a2bab7fe497b4fa5480565e507d8741df" Jan 22 06:26:53 crc kubenswrapper[4933]: E0122 06:26:53.491419 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:27:07 crc kubenswrapper[4933]: I0122 06:27:07.490890 4933 scope.go:117] "RemoveContainer" 
containerID="9441aafeefcbaee830d62084b2728e4a2bab7fe497b4fa5480565e507d8741df" Jan 22 06:27:07 crc kubenswrapper[4933]: E0122 06:27:07.491847 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:27:22 crc kubenswrapper[4933]: I0122 06:27:22.498618 4933 scope.go:117] "RemoveContainer" containerID="9441aafeefcbaee830d62084b2728e4a2bab7fe497b4fa5480565e507d8741df" Jan 22 06:27:22 crc kubenswrapper[4933]: E0122 06:27:22.499494 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:27:35 crc kubenswrapper[4933]: I0122 06:27:35.491819 4933 scope.go:117] "RemoveContainer" containerID="9441aafeefcbaee830d62084b2728e4a2bab7fe497b4fa5480565e507d8741df" Jan 22 06:27:35 crc kubenswrapper[4933]: E0122 06:27:35.492650 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:27:50 crc kubenswrapper[4933]: I0122 06:27:50.491426 4933 scope.go:117] "RemoveContainer" containerID="9441aafeefcbaee830d62084b2728e4a2bab7fe497b4fa5480565e507d8741df" Jan 22 06:27:50 crc kubenswrapper[4933]: E0122 06:27:50.492357 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:28:05 crc kubenswrapper[4933]: I0122 06:28:05.491639 4933 scope.go:117] "RemoveContainer" containerID="9441aafeefcbaee830d62084b2728e4a2bab7fe497b4fa5480565e507d8741df" Jan 22 06:28:05 crc kubenswrapper[4933]: E0122 06:28:05.493051 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:28:17 crc kubenswrapper[4933]: I0122 06:28:17.490961 4933 scope.go:117] "RemoveContainer" containerID="9441aafeefcbaee830d62084b2728e4a2bab7fe497b4fa5480565e507d8741df" Jan 22 06:28:17 crc kubenswrapper[4933]: E0122 06:28:17.492130 4933 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:28:31 crc kubenswrapper[4933]: I0122 06:28:31.490860 4933 scope.go:117] "RemoveContainer" containerID="9441aafeefcbaee830d62084b2728e4a2bab7fe497b4fa5480565e507d8741df" Jan 22 06:28:31 crc kubenswrapper[4933]: E0122 06:28:31.491942 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:28:44 crc kubenswrapper[4933]: I0122 06:28:44.491544 4933 scope.go:117] "RemoveContainer" containerID="9441aafeefcbaee830d62084b2728e4a2bab7fe497b4fa5480565e507d8741df" Jan 22 06:28:44 crc kubenswrapper[4933]: E0122 06:28:44.492270 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:28:55 crc kubenswrapper[4933]: I0122 06:28:55.491041 4933 scope.go:117] "RemoveContainer" containerID="9441aafeefcbaee830d62084b2728e4a2bab7fe497b4fa5480565e507d8741df" Jan 22 06:28:55 crc kubenswrapper[4933]: E0122 06:28:55.492227 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:29:10 crc kubenswrapper[4933]: I0122 06:29:10.490654 4933 scope.go:117] "RemoveContainer" containerID="9441aafeefcbaee830d62084b2728e4a2bab7fe497b4fa5480565e507d8741df" Jan 22 06:29:10 crc kubenswrapper[4933]: E0122 06:29:10.491456 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:29:23 crc kubenswrapper[4933]: I0122 06:29:23.490937 4933 scope.go:117] "RemoveContainer" containerID="9441aafeefcbaee830d62084b2728e4a2bab7fe497b4fa5480565e507d8741df" Jan 22 06:29:23 crc kubenswrapper[4933]: E0122 06:29:23.491775 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:29:35 crc kubenswrapper[4933]: I0122 06:29:35.491740 4933 scope.go:117] "RemoveContainer" containerID="9441aafeefcbaee830d62084b2728e4a2bab7fe497b4fa5480565e507d8741df" Jan 22 06:29:35 crc kubenswrapper[4933]: E0122 06:29:35.492754 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:29:49 crc kubenswrapper[4933]: I0122 06:29:49.490717 4933 scope.go:117] "RemoveContainer" containerID="9441aafeefcbaee830d62084b2728e4a2bab7fe497b4fa5480565e507d8741df" Jan 22 06:29:49 crc kubenswrapper[4933]: E0122 06:29:49.491746 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:30:00 crc kubenswrapper[4933]: I0122 06:30:00.165803 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484390-dhpzk"] Jan 22 06:30:00 crc kubenswrapper[4933]: E0122 06:30:00.167044 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4da7aab-73a5-4545-89b9-ef150983d178" containerName="extract-utilities" Jan 22 06:30:00 crc kubenswrapper[4933]: I0122 06:30:00.167068 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4da7aab-73a5-4545-89b9-ef150983d178" containerName="extract-utilities" Jan 22 06:30:00 crc kubenswrapper[4933]: E0122 06:30:00.167118 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4da7aab-73a5-4545-89b9-ef150983d178" containerName="extract-content" Jan 22 06:30:00 crc kubenswrapper[4933]: I0122 06:30:00.167130 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4da7aab-73a5-4545-89b9-ef150983d178" containerName="extract-content" Jan 22 06:30:00 crc kubenswrapper[4933]: E0122 06:30:00.167146 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23d8b2c6-29f2-4606-8b9c-578ea344bbd9" containerName="extract-content" Jan 22 06:30:00 crc kubenswrapper[4933]: I0122 06:30:00.167157 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="23d8b2c6-29f2-4606-8b9c-578ea344bbd9" containerName="extract-content" Jan 22 06:30:00 crc kubenswrapper[4933]: E0122 06:30:00.167184 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87572054-1a21-4939-af6f-442c0b4c59b2" containerName="extract-content" Jan 22 06:30:00 crc kubenswrapper[4933]: I0122 06:30:00.167193 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="87572054-1a21-4939-af6f-442c0b4c59b2" containerName="extract-content" Jan 22 06:30:00 crc kubenswrapper[4933]: E0122 06:30:00.167209 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23d8b2c6-29f2-4606-8b9c-578ea344bbd9" containerName="extract-utilities" 
Jan 22 06:30:00 crc kubenswrapper[4933]: I0122 06:30:00.167217 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="23d8b2c6-29f2-4606-8b9c-578ea344bbd9" containerName="extract-utilities" Jan 22 06:30:00 crc kubenswrapper[4933]: E0122 06:30:00.167232 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23d8b2c6-29f2-4606-8b9c-578ea344bbd9" containerName="registry-server" Jan 22 06:30:00 crc kubenswrapper[4933]: I0122 06:30:00.167239 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="23d8b2c6-29f2-4606-8b9c-578ea344bbd9" containerName="registry-server" Jan 22 06:30:00 crc kubenswrapper[4933]: E0122 06:30:00.167252 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87572054-1a21-4939-af6f-442c0b4c59b2" containerName="extract-utilities" Jan 22 06:30:00 crc kubenswrapper[4933]: I0122 06:30:00.167260 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="87572054-1a21-4939-af6f-442c0b4c59b2" containerName="extract-utilities" Jan 22 06:30:00 crc kubenswrapper[4933]: E0122 06:30:00.167270 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4da7aab-73a5-4545-89b9-ef150983d178" containerName="registry-server" Jan 22 06:30:00 crc kubenswrapper[4933]: I0122 06:30:00.167278 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4da7aab-73a5-4545-89b9-ef150983d178" containerName="registry-server" Jan 22 06:30:00 crc kubenswrapper[4933]: E0122 06:30:00.167290 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87572054-1a21-4939-af6f-442c0b4c59b2" containerName="registry-server" Jan 22 06:30:00 crc kubenswrapper[4933]: I0122 06:30:00.167297 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="87572054-1a21-4939-af6f-442c0b4c59b2" containerName="registry-server" Jan 22 06:30:00 crc kubenswrapper[4933]: I0122 06:30:00.167455 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4da7aab-73a5-4545-89b9-ef150983d178" containerName="registry-server" Jan 22 06:30:00 crc kubenswrapper[4933]: I0122 06:30:00.167484 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="87572054-1a21-4939-af6f-442c0b4c59b2" containerName="registry-server" Jan 22 06:30:00 crc kubenswrapper[4933]: I0122 06:30:00.167503 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="23d8b2c6-29f2-4606-8b9c-578ea344bbd9" containerName="registry-server" Jan 22 06:30:00 crc kubenswrapper[4933]: I0122 06:30:00.168017 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484390-dhpzk" Jan 22 06:30:00 crc kubenswrapper[4933]: I0122 06:30:00.171584 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 22 06:30:00 crc kubenswrapper[4933]: I0122 06:30:00.171998 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 22 06:30:00 crc kubenswrapper[4933]: I0122 06:30:00.180800 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484390-dhpzk"] Jan 22 06:30:00 crc kubenswrapper[4933]: I0122 06:30:00.247817 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f5s2x\" (UniqueName: \"kubernetes.io/projected/ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed-kube-api-access-f5s2x\") pod \"collect-profiles-29484390-dhpzk\" (UID: \"ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484390-dhpzk" Jan 22 06:30:00 crc kubenswrapper[4933]: I0122 06:30:00.247913 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed-config-volume\") pod \"collect-profiles-29484390-dhpzk\" (UID: \"ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484390-dhpzk" Jan 22 06:30:00 crc kubenswrapper[4933]: I0122 06:30:00.247998 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed-secret-volume\") pod \"collect-profiles-29484390-dhpzk\" (UID: \"ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484390-dhpzk" Jan 22 06:30:00 crc kubenswrapper[4933]: I0122 06:30:00.349780 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed-secret-volume\") pod \"collect-profiles-29484390-dhpzk\" (UID: \"ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484390-dhpzk" Jan 22 06:30:00 crc kubenswrapper[4933]: I0122 06:30:00.349880 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f5s2x\" (UniqueName: \"kubernetes.io/projected/ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed-kube-api-access-f5s2x\") pod \"collect-profiles-29484390-dhpzk\" (UID: \"ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484390-dhpzk" Jan 22 06:30:00 crc kubenswrapper[4933]: I0122 06:30:00.349934 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed-config-volume\") pod \"collect-profiles-29484390-dhpzk\" (UID: \"ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484390-dhpzk" Jan 22 06:30:00 crc kubenswrapper[4933]: I0122 06:30:00.352396 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed-config-volume\") pod 
\"collect-profiles-29484390-dhpzk\" (UID: \"ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484390-dhpzk" Jan 22 06:30:00 crc kubenswrapper[4933]: I0122 06:30:00.360358 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed-secret-volume\") pod \"collect-profiles-29484390-dhpzk\" (UID: \"ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484390-dhpzk" Jan 22 06:30:00 crc kubenswrapper[4933]: I0122 06:30:00.383967 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f5s2x\" (UniqueName: \"kubernetes.io/projected/ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed-kube-api-access-f5s2x\") pod \"collect-profiles-29484390-dhpzk\" (UID: \"ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484390-dhpzk" Jan 22 06:30:00 crc kubenswrapper[4933]: I0122 06:30:00.489884 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484390-dhpzk" Jan 22 06:30:01 crc kubenswrapper[4933]: I0122 06:30:01.002670 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484390-dhpzk"] Jan 22 06:30:01 crc kubenswrapper[4933]: W0122 06:30:01.007763 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podee57b5f6_0cd3_4922_bd54_8a8dfb7ab2ed.slice/crio-61ba67ce8e268afd55fb9ef0bcedb6c55066964d6ca7a7b1db7492d931a4a3ea WatchSource:0}: Error finding container 61ba67ce8e268afd55fb9ef0bcedb6c55066964d6ca7a7b1db7492d931a4a3ea: Status 404 returned error can't find the container with id 61ba67ce8e268afd55fb9ef0bcedb6c55066964d6ca7a7b1db7492d931a4a3ea Jan 22 06:30:01 crc kubenswrapper[4933]: I0122 06:30:01.437544 4933 generic.go:334] "Generic (PLEG): container finished" podID="ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed" containerID="d5f99abad65dd8d93d1e50f12735bdb64270cfdce58d1c0a32903403958dc0e0" exitCode=0 Jan 22 06:30:01 crc kubenswrapper[4933]: I0122 06:30:01.437584 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484390-dhpzk" event={"ID":"ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed","Type":"ContainerDied","Data":"d5f99abad65dd8d93d1e50f12735bdb64270cfdce58d1c0a32903403958dc0e0"} Jan 22 06:30:01 crc kubenswrapper[4933]: I0122 06:30:01.437625 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484390-dhpzk" event={"ID":"ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed","Type":"ContainerStarted","Data":"61ba67ce8e268afd55fb9ef0bcedb6c55066964d6ca7a7b1db7492d931a4a3ea"} Jan 22 06:30:01 crc kubenswrapper[4933]: I0122 06:30:01.490687 4933 scope.go:117] "RemoveContainer" containerID="9441aafeefcbaee830d62084b2728e4a2bab7fe497b4fa5480565e507d8741df" Jan 22 06:30:01 crc kubenswrapper[4933]: E0122 06:30:01.491004 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" 
podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:30:02 crc kubenswrapper[4933]: I0122 06:30:02.743956 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484390-dhpzk" Jan 22 06:30:02 crc kubenswrapper[4933]: I0122 06:30:02.797444 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed-secret-volume\") pod \"ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed\" (UID: \"ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed\") " Jan 22 06:30:02 crc kubenswrapper[4933]: I0122 06:30:02.797592 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f5s2x\" (UniqueName: \"kubernetes.io/projected/ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed-kube-api-access-f5s2x\") pod \"ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed\" (UID: \"ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed\") " Jan 22 06:30:02 crc kubenswrapper[4933]: I0122 06:30:02.797621 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed-config-volume\") pod \"ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed\" (UID: \"ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed\") " Jan 22 06:30:02 crc kubenswrapper[4933]: I0122 06:30:02.798155 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed-config-volume" (OuterVolumeSpecName: "config-volume") pod "ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed" (UID: "ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:30:02 crc kubenswrapper[4933]: I0122 06:30:02.802932 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed" (UID: "ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:30:02 crc kubenswrapper[4933]: I0122 06:30:02.803011 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed-kube-api-access-f5s2x" (OuterVolumeSpecName: "kube-api-access-f5s2x") pod "ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed" (UID: "ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed"). InnerVolumeSpecName "kube-api-access-f5s2x". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:30:02 crc kubenswrapper[4933]: I0122 06:30:02.898981 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f5s2x\" (UniqueName: \"kubernetes.io/projected/ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed-kube-api-access-f5s2x\") on node \"crc\" DevicePath \"\"" Jan 22 06:30:02 crc kubenswrapper[4933]: I0122 06:30:02.899022 4933 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed-config-volume\") on node \"crc\" DevicePath \"\"" Jan 22 06:30:02 crc kubenswrapper[4933]: I0122 06:30:02.899035 4933 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 22 06:30:03 crc kubenswrapper[4933]: I0122 06:30:03.457161 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484390-dhpzk" event={"ID":"ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed","Type":"ContainerDied","Data":"61ba67ce8e268afd55fb9ef0bcedb6c55066964d6ca7a7b1db7492d931a4a3ea"} Jan 22 06:30:03 crc kubenswrapper[4933]: I0122 06:30:03.457217 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="61ba67ce8e268afd55fb9ef0bcedb6c55066964d6ca7a7b1db7492d931a4a3ea" Jan 22 06:30:03 crc kubenswrapper[4933]: I0122 06:30:03.457321 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484390-dhpzk" Jan 22 06:30:03 crc kubenswrapper[4933]: I0122 06:30:03.825863 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484345-n7rrt"] Jan 22 06:30:03 crc kubenswrapper[4933]: I0122 06:30:03.830987 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484345-n7rrt"] Jan 22 06:30:04 crc kubenswrapper[4933]: I0122 06:30:04.504227 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ae503948-5876-4b1e-ba9f-23ebb0e05b94" path="/var/lib/kubelet/pods/ae503948-5876-4b1e-ba9f-23ebb0e05b94/volumes" Jan 22 06:30:14 crc kubenswrapper[4933]: I0122 06:30:14.491570 4933 scope.go:117] "RemoveContainer" containerID="9441aafeefcbaee830d62084b2728e4a2bab7fe497b4fa5480565e507d8741df" Jan 22 06:30:14 crc kubenswrapper[4933]: E0122 06:30:14.492680 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:30:27 crc kubenswrapper[4933]: I0122 06:30:27.491319 4933 scope.go:117] "RemoveContainer" containerID="9441aafeefcbaee830d62084b2728e4a2bab7fe497b4fa5480565e507d8741df" Jan 22 06:30:27 crc kubenswrapper[4933]: E0122 06:30:27.492330 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:30:38 crc kubenswrapper[4933]: I0122 06:30:38.490813 4933 scope.go:117] "RemoveContainer" containerID="9441aafeefcbaee830d62084b2728e4a2bab7fe497b4fa5480565e507d8741df" Jan 22 06:30:38 crc kubenswrapper[4933]: E0122 06:30:38.491800 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:30:51 crc kubenswrapper[4933]: I0122 06:30:51.490870 4933 scope.go:117] "RemoveContainer" containerID="9441aafeefcbaee830d62084b2728e4a2bab7fe497b4fa5480565e507d8741df" Jan 22 06:30:51 crc kubenswrapper[4933]: E0122 06:30:51.491721 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:31:02 crc kubenswrapper[4933]: I0122 06:31:02.862626 4933 scope.go:117] "RemoveContainer" containerID="7eb8cf5f766b8084fb5d0a37be26d574a884da83588b699a614e4561c39f4d54" Jan 22 06:31:05 crc kubenswrapper[4933]: I0122 06:31:05.491656 4933 scope.go:117] "RemoveContainer" containerID="9441aafeefcbaee830d62084b2728e4a2bab7fe497b4fa5480565e507d8741df" Jan 22 06:31:05 crc kubenswrapper[4933]: E0122 06:31:05.492795 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:31:19 crc kubenswrapper[4933]: I0122 06:31:19.491479 4933 scope.go:117] "RemoveContainer" containerID="9441aafeefcbaee830d62084b2728e4a2bab7fe497b4fa5480565e507d8741df" Jan 22 06:31:20 crc kubenswrapper[4933]: I0122 06:31:20.665910 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerStarted","Data":"30d623b3b05ccfca699be78d903490dfc9f2de1ca28a85a83a15fc90af4e5258"} Jan 22 06:32:17 crc kubenswrapper[4933]: I0122 06:32:17.328883 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-8vwvb"] Jan 22 06:32:17 crc kubenswrapper[4933]: E0122 06:32:17.329978 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed" containerName="collect-profiles" Jan 22 06:32:17 crc kubenswrapper[4933]: I0122 06:32:17.329998 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed" containerName="collect-profiles" Jan 22 06:32:17 crc kubenswrapper[4933]: I0122 06:32:17.330245 4933 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed" containerName="collect-profiles" Jan 22 06:32:17 crc kubenswrapper[4933]: I0122 06:32:17.331856 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8vwvb" Jan 22 06:32:17 crc kubenswrapper[4933]: I0122 06:32:17.345408 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-8vwvb"] Jan 22 06:32:17 crc kubenswrapper[4933]: I0122 06:32:17.472368 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/744665f9-a527-48f7-bc49-b3f9ef752383-utilities\") pod \"community-operators-8vwvb\" (UID: \"744665f9-a527-48f7-bc49-b3f9ef752383\") " pod="openshift-marketplace/community-operators-8vwvb" Jan 22 06:32:17 crc kubenswrapper[4933]: I0122 06:32:17.472680 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w8v7k\" (UniqueName: \"kubernetes.io/projected/744665f9-a527-48f7-bc49-b3f9ef752383-kube-api-access-w8v7k\") pod \"community-operators-8vwvb\" (UID: \"744665f9-a527-48f7-bc49-b3f9ef752383\") " pod="openshift-marketplace/community-operators-8vwvb" Jan 22 06:32:17 crc kubenswrapper[4933]: I0122 06:32:17.472734 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/744665f9-a527-48f7-bc49-b3f9ef752383-catalog-content\") pod \"community-operators-8vwvb\" (UID: \"744665f9-a527-48f7-bc49-b3f9ef752383\") " pod="openshift-marketplace/community-operators-8vwvb" Jan 22 06:32:17 crc kubenswrapper[4933]: I0122 06:32:17.574301 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w8v7k\" (UniqueName: \"kubernetes.io/projected/744665f9-a527-48f7-bc49-b3f9ef752383-kube-api-access-w8v7k\") pod \"community-operators-8vwvb\" (UID: \"744665f9-a527-48f7-bc49-b3f9ef752383\") " pod="openshift-marketplace/community-operators-8vwvb" Jan 22 06:32:17 crc kubenswrapper[4933]: I0122 06:32:17.574413 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/744665f9-a527-48f7-bc49-b3f9ef752383-catalog-content\") pod \"community-operators-8vwvb\" (UID: \"744665f9-a527-48f7-bc49-b3f9ef752383\") " pod="openshift-marketplace/community-operators-8vwvb" Jan 22 06:32:17 crc kubenswrapper[4933]: I0122 06:32:17.574448 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/744665f9-a527-48f7-bc49-b3f9ef752383-utilities\") pod \"community-operators-8vwvb\" (UID: \"744665f9-a527-48f7-bc49-b3f9ef752383\") " pod="openshift-marketplace/community-operators-8vwvb" Jan 22 06:32:17 crc kubenswrapper[4933]: I0122 06:32:17.574956 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/744665f9-a527-48f7-bc49-b3f9ef752383-utilities\") pod \"community-operators-8vwvb\" (UID: \"744665f9-a527-48f7-bc49-b3f9ef752383\") " pod="openshift-marketplace/community-operators-8vwvb" Jan 22 06:32:17 crc kubenswrapper[4933]: I0122 06:32:17.575572 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/744665f9-a527-48f7-bc49-b3f9ef752383-catalog-content\") pod \"community-operators-8vwvb\" (UID: 
\"744665f9-a527-48f7-bc49-b3f9ef752383\") " pod="openshift-marketplace/community-operators-8vwvb" Jan 22 06:32:17 crc kubenswrapper[4933]: I0122 06:32:17.596941 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w8v7k\" (UniqueName: \"kubernetes.io/projected/744665f9-a527-48f7-bc49-b3f9ef752383-kube-api-access-w8v7k\") pod \"community-operators-8vwvb\" (UID: \"744665f9-a527-48f7-bc49-b3f9ef752383\") " pod="openshift-marketplace/community-operators-8vwvb" Jan 22 06:32:17 crc kubenswrapper[4933]: I0122 06:32:17.657823 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8vwvb" Jan 22 06:32:18 crc kubenswrapper[4933]: I0122 06:32:18.074766 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-8vwvb"] Jan 22 06:32:18 crc kubenswrapper[4933]: I0122 06:32:18.163266 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8vwvb" event={"ID":"744665f9-a527-48f7-bc49-b3f9ef752383","Type":"ContainerStarted","Data":"bb322a0507af2d3ca0104dcbb1f1f470235438fd1b234727adbc2f0915996634"} Jan 22 06:32:19 crc kubenswrapper[4933]: I0122 06:32:19.171248 4933 generic.go:334] "Generic (PLEG): container finished" podID="744665f9-a527-48f7-bc49-b3f9ef752383" containerID="1c67eb527c132d4c178244b370be766aa3f2f6ba27f31d3644bdf407086e2658" exitCode=0 Jan 22 06:32:19 crc kubenswrapper[4933]: I0122 06:32:19.171291 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8vwvb" event={"ID":"744665f9-a527-48f7-bc49-b3f9ef752383","Type":"ContainerDied","Data":"1c67eb527c132d4c178244b370be766aa3f2f6ba27f31d3644bdf407086e2658"} Jan 22 06:32:19 crc kubenswrapper[4933]: I0122 06:32:19.173828 4933 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 06:32:21 crc kubenswrapper[4933]: I0122 06:32:21.192992 4933 generic.go:334] "Generic (PLEG): container finished" podID="744665f9-a527-48f7-bc49-b3f9ef752383" containerID="659be2b50ed590aaf3497d6d36b46739ce0087cdca95160073619d47d427964f" exitCode=0 Jan 22 06:32:21 crc kubenswrapper[4933]: I0122 06:32:21.194360 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8vwvb" event={"ID":"744665f9-a527-48f7-bc49-b3f9ef752383","Type":"ContainerDied","Data":"659be2b50ed590aaf3497d6d36b46739ce0087cdca95160073619d47d427964f"} Jan 22 06:32:22 crc kubenswrapper[4933]: I0122 06:32:22.205148 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8vwvb" event={"ID":"744665f9-a527-48f7-bc49-b3f9ef752383","Type":"ContainerStarted","Data":"b8dfc4e722bf58ad3649fadd790450df8c29ca9bf9b32dc9b8e46f67e93672b5"} Jan 22 06:32:22 crc kubenswrapper[4933]: I0122 06:32:22.230769 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-8vwvb" podStartSLOduration=2.839829364 podStartE2EDuration="5.230742443s" podCreationTimestamp="2026-01-22 06:32:17 +0000 UTC" firstStartedPulling="2026-01-22 06:32:19.173488242 +0000 UTC m=+2787.010613615" lastFinishedPulling="2026-01-22 06:32:21.564401331 +0000 UTC m=+2789.401526694" observedRunningTime="2026-01-22 06:32:22.23061666 +0000 UTC m=+2790.067742093" watchObservedRunningTime="2026-01-22 06:32:22.230742443 +0000 UTC m=+2790.067867816" Jan 22 06:32:27 crc kubenswrapper[4933]: I0122 06:32:27.658670 4933 kubelet.go:2542] 
"SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-8vwvb" Jan 22 06:32:27 crc kubenswrapper[4933]: I0122 06:32:27.659321 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-8vwvb" Jan 22 06:32:27 crc kubenswrapper[4933]: I0122 06:32:27.729187 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-8vwvb" Jan 22 06:32:28 crc kubenswrapper[4933]: I0122 06:32:28.332849 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-8vwvb" Jan 22 06:32:28 crc kubenswrapper[4933]: I0122 06:32:28.399232 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-8vwvb"] Jan 22 06:32:30 crc kubenswrapper[4933]: I0122 06:32:30.268883 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-8vwvb" podUID="744665f9-a527-48f7-bc49-b3f9ef752383" containerName="registry-server" containerID="cri-o://b8dfc4e722bf58ad3649fadd790450df8c29ca9bf9b32dc9b8e46f67e93672b5" gracePeriod=2 Jan 22 06:32:30 crc kubenswrapper[4933]: I0122 06:32:30.685194 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8vwvb" Jan 22 06:32:30 crc kubenswrapper[4933]: I0122 06:32:30.826459 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w8v7k\" (UniqueName: \"kubernetes.io/projected/744665f9-a527-48f7-bc49-b3f9ef752383-kube-api-access-w8v7k\") pod \"744665f9-a527-48f7-bc49-b3f9ef752383\" (UID: \"744665f9-a527-48f7-bc49-b3f9ef752383\") " Jan 22 06:32:30 crc kubenswrapper[4933]: I0122 06:32:30.826547 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/744665f9-a527-48f7-bc49-b3f9ef752383-catalog-content\") pod \"744665f9-a527-48f7-bc49-b3f9ef752383\" (UID: \"744665f9-a527-48f7-bc49-b3f9ef752383\") " Jan 22 06:32:30 crc kubenswrapper[4933]: I0122 06:32:30.826581 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/744665f9-a527-48f7-bc49-b3f9ef752383-utilities\") pod \"744665f9-a527-48f7-bc49-b3f9ef752383\" (UID: \"744665f9-a527-48f7-bc49-b3f9ef752383\") " Jan 22 06:32:30 crc kubenswrapper[4933]: I0122 06:32:30.828113 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/744665f9-a527-48f7-bc49-b3f9ef752383-utilities" (OuterVolumeSpecName: "utilities") pod "744665f9-a527-48f7-bc49-b3f9ef752383" (UID: "744665f9-a527-48f7-bc49-b3f9ef752383"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:32:30 crc kubenswrapper[4933]: I0122 06:32:30.832252 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/744665f9-a527-48f7-bc49-b3f9ef752383-kube-api-access-w8v7k" (OuterVolumeSpecName: "kube-api-access-w8v7k") pod "744665f9-a527-48f7-bc49-b3f9ef752383" (UID: "744665f9-a527-48f7-bc49-b3f9ef752383"). InnerVolumeSpecName "kube-api-access-w8v7k". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:32:30 crc kubenswrapper[4933]: I0122 06:32:30.890185 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/744665f9-a527-48f7-bc49-b3f9ef752383-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "744665f9-a527-48f7-bc49-b3f9ef752383" (UID: "744665f9-a527-48f7-bc49-b3f9ef752383"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:32:30 crc kubenswrapper[4933]: I0122 06:32:30.927770 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w8v7k\" (UniqueName: \"kubernetes.io/projected/744665f9-a527-48f7-bc49-b3f9ef752383-kube-api-access-w8v7k\") on node \"crc\" DevicePath \"\"" Jan 22 06:32:30 crc kubenswrapper[4933]: I0122 06:32:30.927807 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/744665f9-a527-48f7-bc49-b3f9ef752383-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 06:32:30 crc kubenswrapper[4933]: I0122 06:32:30.927822 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/744665f9-a527-48f7-bc49-b3f9ef752383-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 06:32:31 crc kubenswrapper[4933]: I0122 06:32:31.281962 4933 generic.go:334] "Generic (PLEG): container finished" podID="744665f9-a527-48f7-bc49-b3f9ef752383" containerID="b8dfc4e722bf58ad3649fadd790450df8c29ca9bf9b32dc9b8e46f67e93672b5" exitCode=0 Jan 22 06:32:31 crc kubenswrapper[4933]: I0122 06:32:31.282009 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8vwvb" event={"ID":"744665f9-a527-48f7-bc49-b3f9ef752383","Type":"ContainerDied","Data":"b8dfc4e722bf58ad3649fadd790450df8c29ca9bf9b32dc9b8e46f67e93672b5"} Jan 22 06:32:31 crc kubenswrapper[4933]: I0122 06:32:31.282038 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8vwvb" event={"ID":"744665f9-a527-48f7-bc49-b3f9ef752383","Type":"ContainerDied","Data":"bb322a0507af2d3ca0104dcbb1f1f470235438fd1b234727adbc2f0915996634"} Jan 22 06:32:31 crc kubenswrapper[4933]: I0122 06:32:31.282059 4933 scope.go:117] "RemoveContainer" containerID="b8dfc4e722bf58ad3649fadd790450df8c29ca9bf9b32dc9b8e46f67e93672b5" Jan 22 06:32:31 crc kubenswrapper[4933]: I0122 06:32:31.282190 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-8vwvb" Jan 22 06:32:31 crc kubenswrapper[4933]: I0122 06:32:31.313158 4933 scope.go:117] "RemoveContainer" containerID="659be2b50ed590aaf3497d6d36b46739ce0087cdca95160073619d47d427964f" Jan 22 06:32:31 crc kubenswrapper[4933]: I0122 06:32:31.338843 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-8vwvb"] Jan 22 06:32:31 crc kubenswrapper[4933]: I0122 06:32:31.352964 4933 scope.go:117] "RemoveContainer" containerID="1c67eb527c132d4c178244b370be766aa3f2f6ba27f31d3644bdf407086e2658" Jan 22 06:32:31 crc kubenswrapper[4933]: I0122 06:32:31.356686 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-8vwvb"] Jan 22 06:32:31 crc kubenswrapper[4933]: I0122 06:32:31.381861 4933 scope.go:117] "RemoveContainer" containerID="b8dfc4e722bf58ad3649fadd790450df8c29ca9bf9b32dc9b8e46f67e93672b5" Jan 22 06:32:31 crc kubenswrapper[4933]: E0122 06:32:31.382461 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b8dfc4e722bf58ad3649fadd790450df8c29ca9bf9b32dc9b8e46f67e93672b5\": container with ID starting with b8dfc4e722bf58ad3649fadd790450df8c29ca9bf9b32dc9b8e46f67e93672b5 not found: ID does not exist" containerID="b8dfc4e722bf58ad3649fadd790450df8c29ca9bf9b32dc9b8e46f67e93672b5" Jan 22 06:32:31 crc kubenswrapper[4933]: I0122 06:32:31.382505 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b8dfc4e722bf58ad3649fadd790450df8c29ca9bf9b32dc9b8e46f67e93672b5"} err="failed to get container status \"b8dfc4e722bf58ad3649fadd790450df8c29ca9bf9b32dc9b8e46f67e93672b5\": rpc error: code = NotFound desc = could not find container \"b8dfc4e722bf58ad3649fadd790450df8c29ca9bf9b32dc9b8e46f67e93672b5\": container with ID starting with b8dfc4e722bf58ad3649fadd790450df8c29ca9bf9b32dc9b8e46f67e93672b5 not found: ID does not exist" Jan 22 06:32:31 crc kubenswrapper[4933]: I0122 06:32:31.382536 4933 scope.go:117] "RemoveContainer" containerID="659be2b50ed590aaf3497d6d36b46739ce0087cdca95160073619d47d427964f" Jan 22 06:32:31 crc kubenswrapper[4933]: E0122 06:32:31.382913 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"659be2b50ed590aaf3497d6d36b46739ce0087cdca95160073619d47d427964f\": container with ID starting with 659be2b50ed590aaf3497d6d36b46739ce0087cdca95160073619d47d427964f not found: ID does not exist" containerID="659be2b50ed590aaf3497d6d36b46739ce0087cdca95160073619d47d427964f" Jan 22 06:32:31 crc kubenswrapper[4933]: I0122 06:32:31.382944 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"659be2b50ed590aaf3497d6d36b46739ce0087cdca95160073619d47d427964f"} err="failed to get container status \"659be2b50ed590aaf3497d6d36b46739ce0087cdca95160073619d47d427964f\": rpc error: code = NotFound desc = could not find container \"659be2b50ed590aaf3497d6d36b46739ce0087cdca95160073619d47d427964f\": container with ID starting with 659be2b50ed590aaf3497d6d36b46739ce0087cdca95160073619d47d427964f not found: ID does not exist" Jan 22 06:32:31 crc kubenswrapper[4933]: I0122 06:32:31.382965 4933 scope.go:117] "RemoveContainer" containerID="1c67eb527c132d4c178244b370be766aa3f2f6ba27f31d3644bdf407086e2658" Jan 22 06:32:31 crc kubenswrapper[4933]: E0122 06:32:31.383297 4933 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"1c67eb527c132d4c178244b370be766aa3f2f6ba27f31d3644bdf407086e2658\": container with ID starting with 1c67eb527c132d4c178244b370be766aa3f2f6ba27f31d3644bdf407086e2658 not found: ID does not exist" containerID="1c67eb527c132d4c178244b370be766aa3f2f6ba27f31d3644bdf407086e2658" Jan 22 06:32:31 crc kubenswrapper[4933]: I0122 06:32:31.383317 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c67eb527c132d4c178244b370be766aa3f2f6ba27f31d3644bdf407086e2658"} err="failed to get container status \"1c67eb527c132d4c178244b370be766aa3f2f6ba27f31d3644bdf407086e2658\": rpc error: code = NotFound desc = could not find container \"1c67eb527c132d4c178244b370be766aa3f2f6ba27f31d3644bdf407086e2658\": container with ID starting with 1c67eb527c132d4c178244b370be766aa3f2f6ba27f31d3644bdf407086e2658 not found: ID does not exist" Jan 22 06:32:32 crc kubenswrapper[4933]: I0122 06:32:32.507366 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="744665f9-a527-48f7-bc49-b3f9ef752383" path="/var/lib/kubelet/pods/744665f9-a527-48f7-bc49-b3f9ef752383/volumes" Jan 22 06:33:06 crc kubenswrapper[4933]: I0122 06:33:06.968474 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-jlk64"] Jan 22 06:33:06 crc kubenswrapper[4933]: E0122 06:33:06.969821 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="744665f9-a527-48f7-bc49-b3f9ef752383" containerName="extract-utilities" Jan 22 06:33:06 crc kubenswrapper[4933]: I0122 06:33:06.969854 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="744665f9-a527-48f7-bc49-b3f9ef752383" containerName="extract-utilities" Jan 22 06:33:06 crc kubenswrapper[4933]: E0122 06:33:06.969895 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="744665f9-a527-48f7-bc49-b3f9ef752383" containerName="extract-content" Jan 22 06:33:06 crc kubenswrapper[4933]: I0122 06:33:06.969913 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="744665f9-a527-48f7-bc49-b3f9ef752383" containerName="extract-content" Jan 22 06:33:06 crc kubenswrapper[4933]: E0122 06:33:06.969933 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="744665f9-a527-48f7-bc49-b3f9ef752383" containerName="registry-server" Jan 22 06:33:06 crc kubenswrapper[4933]: I0122 06:33:06.969952 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="744665f9-a527-48f7-bc49-b3f9ef752383" containerName="registry-server" Jan 22 06:33:06 crc kubenswrapper[4933]: I0122 06:33:06.970372 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="744665f9-a527-48f7-bc49-b3f9ef752383" containerName="registry-server" Jan 22 06:33:06 crc kubenswrapper[4933]: I0122 06:33:06.972688 4933 util.go:30] "No sandbox for pod can be found. 
Jan 22 06:33:06 crc kubenswrapper[4933]: I0122 06:33:06.968474 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-jlk64"]
Jan 22 06:33:06 crc kubenswrapper[4933]: E0122 06:33:06.969821 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="744665f9-a527-48f7-bc49-b3f9ef752383" containerName="extract-utilities"
Jan 22 06:33:06 crc kubenswrapper[4933]: I0122 06:33:06.969854 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="744665f9-a527-48f7-bc49-b3f9ef752383" containerName="extract-utilities"
Jan 22 06:33:06 crc kubenswrapper[4933]: E0122 06:33:06.969895 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="744665f9-a527-48f7-bc49-b3f9ef752383" containerName="extract-content"
Jan 22 06:33:06 crc kubenswrapper[4933]: I0122 06:33:06.969913 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="744665f9-a527-48f7-bc49-b3f9ef752383" containerName="extract-content"
Jan 22 06:33:06 crc kubenswrapper[4933]: E0122 06:33:06.969933 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="744665f9-a527-48f7-bc49-b3f9ef752383" containerName="registry-server"
Jan 22 06:33:06 crc kubenswrapper[4933]: I0122 06:33:06.969952 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="744665f9-a527-48f7-bc49-b3f9ef752383" containerName="registry-server"
Jan 22 06:33:06 crc kubenswrapper[4933]: I0122 06:33:06.970372 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="744665f9-a527-48f7-bc49-b3f9ef752383" containerName="registry-server"
Jan 22 06:33:06 crc kubenswrapper[4933]: I0122 06:33:06.972688 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jlk64"
Jan 22 06:33:06 crc kubenswrapper[4933]: I0122 06:33:06.999395 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jlk64"]
Jan 22 06:33:07 crc kubenswrapper[4933]: I0122 06:33:07.109004 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/22898585-7306-43e3-b12d-6491514659e2-catalog-content\") pod \"certified-operators-jlk64\" (UID: \"22898585-7306-43e3-b12d-6491514659e2\") " pod="openshift-marketplace/certified-operators-jlk64"
Jan 22 06:33:07 crc kubenswrapper[4933]: I0122 06:33:07.109279 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/22898585-7306-43e3-b12d-6491514659e2-utilities\") pod \"certified-operators-jlk64\" (UID: \"22898585-7306-43e3-b12d-6491514659e2\") " pod="openshift-marketplace/certified-operators-jlk64"
Jan 22 06:33:07 crc kubenswrapper[4933]: I0122 06:33:07.109372 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m5xvn\" (UniqueName: \"kubernetes.io/projected/22898585-7306-43e3-b12d-6491514659e2-kube-api-access-m5xvn\") pod \"certified-operators-jlk64\" (UID: \"22898585-7306-43e3-b12d-6491514659e2\") " pod="openshift-marketplace/certified-operators-jlk64"
Jan 22 06:33:07 crc kubenswrapper[4933]: I0122 06:33:07.210670 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m5xvn\" (UniqueName: \"kubernetes.io/projected/22898585-7306-43e3-b12d-6491514659e2-kube-api-access-m5xvn\") pod \"certified-operators-jlk64\" (UID: \"22898585-7306-43e3-b12d-6491514659e2\") " pod="openshift-marketplace/certified-operators-jlk64"
Jan 22 06:33:07 crc kubenswrapper[4933]: I0122 06:33:07.210816 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/22898585-7306-43e3-b12d-6491514659e2-catalog-content\") pod \"certified-operators-jlk64\" (UID: \"22898585-7306-43e3-b12d-6491514659e2\") " pod="openshift-marketplace/certified-operators-jlk64"
Jan 22 06:33:07 crc kubenswrapper[4933]: I0122 06:33:07.210857 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/22898585-7306-43e3-b12d-6491514659e2-utilities\") pod \"certified-operators-jlk64\" (UID: \"22898585-7306-43e3-b12d-6491514659e2\") " pod="openshift-marketplace/certified-operators-jlk64"
Jan 22 06:33:07 crc kubenswrapper[4933]: I0122 06:33:07.211579 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/22898585-7306-43e3-b12d-6491514659e2-utilities\") pod \"certified-operators-jlk64\" (UID: \"22898585-7306-43e3-b12d-6491514659e2\") " pod="openshift-marketplace/certified-operators-jlk64"
Jan 22 06:33:07 crc kubenswrapper[4933]: I0122 06:33:07.211875 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/22898585-7306-43e3-b12d-6491514659e2-catalog-content\") pod \"certified-operators-jlk64\" (UID: \"22898585-7306-43e3-b12d-6491514659e2\") " pod="openshift-marketplace/certified-operators-jlk64"
Jan 22 06:33:07 crc kubenswrapper[4933]: I0122 06:33:07.230851 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m5xvn\" (UniqueName: \"kubernetes.io/projected/22898585-7306-43e3-b12d-6491514659e2-kube-api-access-m5xvn\") pod \"certified-operators-jlk64\" (UID: \"22898585-7306-43e3-b12d-6491514659e2\") " pod="openshift-marketplace/certified-operators-jlk64"
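Editor's note: each volume above walks the same fixed progression (VerifyControllerAttachedVolume, then MountVolume started, then MountVolume.SetUp succeeded), driven by diffing the desired state from the pod spec against what is actually mounted. An illustrative Go sketch of that desired-vs-actual diff; the function and types are mine, not the kubelet's:

```go
package main

import "fmt"

// reconcile computes the two set differences behind the reconciler entries:
// desired-but-not-actual volumes get mounted, actual-but-not-desired get
// unmounted. Purely illustrative of the pattern, not kubelet code.
func reconcile(desired, actual map[string]bool) (mount, unmount []string) {
	for v := range desired {
		if !actual[v] {
			mount = append(mount, v)
		}
	}
	for v := range actual {
		if !desired[v] {
			unmount = append(unmount, v)
		}
	}
	return mount, unmount
}

func main() {
	desired := map[string]bool{
		"utilities": true, "catalog-content": true, "kube-api-access-m5xvn": true,
	}
	actual := map[string]bool{} // freshly admitted pod: nothing mounted yet
	mount, _ := reconcile(desired, actual)
	fmt.Println("to mount:", mount) // all three, matching the MountVolume lines above
}
```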
"MountVolume.SetUp succeeded for volume \"kube-api-access-m5xvn\" (UniqueName: \"kubernetes.io/projected/22898585-7306-43e3-b12d-6491514659e2-kube-api-access-m5xvn\") pod \"certified-operators-jlk64\" (UID: \"22898585-7306-43e3-b12d-6491514659e2\") " pod="openshift-marketplace/certified-operators-jlk64" Jan 22 06:33:07 crc kubenswrapper[4933]: I0122 06:33:07.313614 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jlk64" Jan 22 06:33:07 crc kubenswrapper[4933]: I0122 06:33:07.727880 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jlk64"] Jan 22 06:33:08 crc kubenswrapper[4933]: I0122 06:33:08.604129 4933 generic.go:334] "Generic (PLEG): container finished" podID="22898585-7306-43e3-b12d-6491514659e2" containerID="55a4ab4cc0cb0bd4afbad0411e4d5533dd84437f59e387abc9b3356e122ee3c7" exitCode=0 Jan 22 06:33:08 crc kubenswrapper[4933]: I0122 06:33:08.604193 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jlk64" event={"ID":"22898585-7306-43e3-b12d-6491514659e2","Type":"ContainerDied","Data":"55a4ab4cc0cb0bd4afbad0411e4d5533dd84437f59e387abc9b3356e122ee3c7"} Jan 22 06:33:08 crc kubenswrapper[4933]: I0122 06:33:08.604225 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jlk64" event={"ID":"22898585-7306-43e3-b12d-6491514659e2","Type":"ContainerStarted","Data":"c0712e062fc48b8858367dc82945f5bdf55b0c778dff2eeefcffc87a19f33352"} Jan 22 06:33:09 crc kubenswrapper[4933]: I0122 06:33:09.614346 4933 generic.go:334] "Generic (PLEG): container finished" podID="22898585-7306-43e3-b12d-6491514659e2" containerID="6444ef17178da3210de22543950cafdbf41a697cf1b97db5fc7a214a5c4d62da" exitCode=0 Jan 22 06:33:09 crc kubenswrapper[4933]: I0122 06:33:09.614401 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jlk64" event={"ID":"22898585-7306-43e3-b12d-6491514659e2","Type":"ContainerDied","Data":"6444ef17178da3210de22543950cafdbf41a697cf1b97db5fc7a214a5c4d62da"} Jan 22 06:33:10 crc kubenswrapper[4933]: I0122 06:33:10.627521 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jlk64" event={"ID":"22898585-7306-43e3-b12d-6491514659e2","Type":"ContainerStarted","Data":"b4fb07a329158918759d3619773c94f0c1fde43db65dfd6f8aa127ef56411eb4"} Jan 22 06:33:10 crc kubenswrapper[4933]: I0122 06:33:10.658314 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-jlk64" podStartSLOduration=3.2193771079999998 podStartE2EDuration="4.658297243s" podCreationTimestamp="2026-01-22 06:33:06 +0000 UTC" firstStartedPulling="2026-01-22 06:33:08.606722005 +0000 UTC m=+2836.443847388" lastFinishedPulling="2026-01-22 06:33:10.04564214 +0000 UTC m=+2837.882767523" observedRunningTime="2026-01-22 06:33:10.64779974 +0000 UTC m=+2838.484925113" watchObservedRunningTime="2026-01-22 06:33:10.658297243 +0000 UTC m=+2838.495422596" Jan 22 06:33:17 crc kubenswrapper[4933]: I0122 06:33:17.314878 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-jlk64" Jan 22 06:33:17 crc kubenswrapper[4933]: I0122 06:33:17.315428 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-jlk64" Jan 22 06:33:17 crc kubenswrapper[4933]: I0122 06:33:17.361981 
Jan 22 06:33:17 crc kubenswrapper[4933]: I0122 06:33:17.361981 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-jlk64"
Jan 22 06:33:17 crc kubenswrapper[4933]: I0122 06:33:17.730453 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-jlk64"
Jan 22 06:33:17 crc kubenswrapper[4933]: I0122 06:33:17.775260 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-jlk64"]
Jan 22 06:33:19 crc kubenswrapper[4933]: I0122 06:33:19.704020 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-jlk64" podUID="22898585-7306-43e3-b12d-6491514659e2" containerName="registry-server" containerID="cri-o://b4fb07a329158918759d3619773c94f0c1fde43db65dfd6f8aa127ef56411eb4" gracePeriod=2
Jan 22 06:33:20 crc kubenswrapper[4933]: I0122 06:33:20.651561 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jlk64"
Jan 22 06:33:20 crc kubenswrapper[4933]: I0122 06:33:20.713455 4933 generic.go:334] "Generic (PLEG): container finished" podID="22898585-7306-43e3-b12d-6491514659e2" containerID="b4fb07a329158918759d3619773c94f0c1fde43db65dfd6f8aa127ef56411eb4" exitCode=0
Jan 22 06:33:20 crc kubenswrapper[4933]: I0122 06:33:20.713501 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jlk64" event={"ID":"22898585-7306-43e3-b12d-6491514659e2","Type":"ContainerDied","Data":"b4fb07a329158918759d3619773c94f0c1fde43db65dfd6f8aa127ef56411eb4"}
Jan 22 06:33:20 crc kubenswrapper[4933]: I0122 06:33:20.713540 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jlk64" event={"ID":"22898585-7306-43e3-b12d-6491514659e2","Type":"ContainerDied","Data":"c0712e062fc48b8858367dc82945f5bdf55b0c778dff2eeefcffc87a19f33352"}
Jan 22 06:33:20 crc kubenswrapper[4933]: I0122 06:33:20.713563 4933 scope.go:117] "RemoveContainer" containerID="b4fb07a329158918759d3619773c94f0c1fde43db65dfd6f8aa127ef56411eb4"
Jan 22 06:33:20 crc kubenswrapper[4933]: I0122 06:33:20.713502 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jlk64"
Jan 22 06:33:20 crc kubenswrapper[4933]: I0122 06:33:20.736783 4933 scope.go:117] "RemoveContainer" containerID="6444ef17178da3210de22543950cafdbf41a697cf1b97db5fc7a214a5c4d62da"
Jan 22 06:33:20 crc kubenswrapper[4933]: I0122 06:33:20.739558 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/22898585-7306-43e3-b12d-6491514659e2-catalog-content\") pod \"22898585-7306-43e3-b12d-6491514659e2\" (UID: \"22898585-7306-43e3-b12d-6491514659e2\") "
Jan 22 06:33:20 crc kubenswrapper[4933]: I0122 06:33:20.739684 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/22898585-7306-43e3-b12d-6491514659e2-utilities\") pod \"22898585-7306-43e3-b12d-6491514659e2\" (UID: \"22898585-7306-43e3-b12d-6491514659e2\") "
Jan 22 06:33:20 crc kubenswrapper[4933]: I0122 06:33:20.739725 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m5xvn\" (UniqueName: \"kubernetes.io/projected/22898585-7306-43e3-b12d-6491514659e2-kube-api-access-m5xvn\") pod \"22898585-7306-43e3-b12d-6491514659e2\" (UID: \"22898585-7306-43e3-b12d-6491514659e2\") "
Jan 22 06:33:20 crc kubenswrapper[4933]: I0122 06:33:20.741846 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/22898585-7306-43e3-b12d-6491514659e2-utilities" (OuterVolumeSpecName: "utilities") pod "22898585-7306-43e3-b12d-6491514659e2" (UID: "22898585-7306-43e3-b12d-6491514659e2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 06:33:20 crc kubenswrapper[4933]: I0122 06:33:20.749329 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22898585-7306-43e3-b12d-6491514659e2-kube-api-access-m5xvn" (OuterVolumeSpecName: "kube-api-access-m5xvn") pod "22898585-7306-43e3-b12d-6491514659e2" (UID: "22898585-7306-43e3-b12d-6491514659e2"). InnerVolumeSpecName "kube-api-access-m5xvn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 06:33:20 crc kubenswrapper[4933]: I0122 06:33:20.767547 4933 scope.go:117] "RemoveContainer" containerID="55a4ab4cc0cb0bd4afbad0411e4d5533dd84437f59e387abc9b3356e122ee3c7"
Jan 22 06:33:20 crc kubenswrapper[4933]: I0122 06:33:20.803982 4933 scope.go:117] "RemoveContainer" containerID="b4fb07a329158918759d3619773c94f0c1fde43db65dfd6f8aa127ef56411eb4"
Jan 22 06:33:20 crc kubenswrapper[4933]: E0122 06:33:20.804670 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b4fb07a329158918759d3619773c94f0c1fde43db65dfd6f8aa127ef56411eb4\": container with ID starting with b4fb07a329158918759d3619773c94f0c1fde43db65dfd6f8aa127ef56411eb4 not found: ID does not exist" containerID="b4fb07a329158918759d3619773c94f0c1fde43db65dfd6f8aa127ef56411eb4"
Jan 22 06:33:20 crc kubenswrapper[4933]: I0122 06:33:20.804720 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b4fb07a329158918759d3619773c94f0c1fde43db65dfd6f8aa127ef56411eb4"} err="failed to get container status \"b4fb07a329158918759d3619773c94f0c1fde43db65dfd6f8aa127ef56411eb4\": rpc error: code = NotFound desc = could not find container \"b4fb07a329158918759d3619773c94f0c1fde43db65dfd6f8aa127ef56411eb4\": container with ID starting with b4fb07a329158918759d3619773c94f0c1fde43db65dfd6f8aa127ef56411eb4 not found: ID does not exist"
Jan 22 06:33:20 crc kubenswrapper[4933]: I0122 06:33:20.804747 4933 scope.go:117] "RemoveContainer" containerID="6444ef17178da3210de22543950cafdbf41a697cf1b97db5fc7a214a5c4d62da"
Jan 22 06:33:20 crc kubenswrapper[4933]: E0122 06:33:20.805108 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6444ef17178da3210de22543950cafdbf41a697cf1b97db5fc7a214a5c4d62da\": container with ID starting with 6444ef17178da3210de22543950cafdbf41a697cf1b97db5fc7a214a5c4d62da not found: ID does not exist" containerID="6444ef17178da3210de22543950cafdbf41a697cf1b97db5fc7a214a5c4d62da"
Jan 22 06:33:20 crc kubenswrapper[4933]: I0122 06:33:20.805157 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6444ef17178da3210de22543950cafdbf41a697cf1b97db5fc7a214a5c4d62da"} err="failed to get container status \"6444ef17178da3210de22543950cafdbf41a697cf1b97db5fc7a214a5c4d62da\": rpc error: code = NotFound desc = could not find container \"6444ef17178da3210de22543950cafdbf41a697cf1b97db5fc7a214a5c4d62da\": container with ID starting with 6444ef17178da3210de22543950cafdbf41a697cf1b97db5fc7a214a5c4d62da not found: ID does not exist"
Jan 22 06:33:20 crc kubenswrapper[4933]: I0122 06:33:20.805189 4933 scope.go:117] "RemoveContainer" containerID="55a4ab4cc0cb0bd4afbad0411e4d5533dd84437f59e387abc9b3356e122ee3c7"
Jan 22 06:33:20 crc kubenswrapper[4933]: E0122 06:33:20.805514 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"55a4ab4cc0cb0bd4afbad0411e4d5533dd84437f59e387abc9b3356e122ee3c7\": container with ID starting with 55a4ab4cc0cb0bd4afbad0411e4d5533dd84437f59e387abc9b3356e122ee3c7 not found: ID does not exist" containerID="55a4ab4cc0cb0bd4afbad0411e4d5533dd84437f59e387abc9b3356e122ee3c7"
Jan 22 06:33:20 crc kubenswrapper[4933]: I0122 06:33:20.805550 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"55a4ab4cc0cb0bd4afbad0411e4d5533dd84437f59e387abc9b3356e122ee3c7"} err="failed to get container status \"55a4ab4cc0cb0bd4afbad0411e4d5533dd84437f59e387abc9b3356e122ee3c7\": rpc error: code = NotFound desc = could not find container \"55a4ab4cc0cb0bd4afbad0411e4d5533dd84437f59e387abc9b3356e122ee3c7\": container with ID starting with 55a4ab4cc0cb0bd4afbad0411e4d5533dd84437f59e387abc9b3356e122ee3c7 not found: ID does not exist"
Jan 22 06:33:20 crc kubenswrapper[4933]: I0122 06:33:20.815874 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/22898585-7306-43e3-b12d-6491514659e2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "22898585-7306-43e3-b12d-6491514659e2" (UID: "22898585-7306-43e3-b12d-6491514659e2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 06:33:20 crc kubenswrapper[4933]: I0122 06:33:20.840772 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/22898585-7306-43e3-b12d-6491514659e2-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 22 06:33:20 crc kubenswrapper[4933]: I0122 06:33:20.840803 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/22898585-7306-43e3-b12d-6491514659e2-utilities\") on node \"crc\" DevicePath \"\""
Jan 22 06:33:20 crc kubenswrapper[4933]: I0122 06:33:20.840814 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m5xvn\" (UniqueName: \"kubernetes.io/projected/22898585-7306-43e3-b12d-6491514659e2-kube-api-access-m5xvn\") on node \"crc\" DevicePath \"\""
Jan 22 06:33:21 crc kubenswrapper[4933]: I0122 06:33:21.061992 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-jlk64"]
Jan 22 06:33:21 crc kubenswrapper[4933]: I0122 06:33:21.069250 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-jlk64"]
Jan 22 06:33:22 crc kubenswrapper[4933]: I0122 06:33:22.502614 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22898585-7306-43e3-b12d-6491514659e2" path="/var/lib/kubelet/pods/22898585-7306-43e3-b12d-6491514659e2/volumes"
Jan 22 06:33:31 crc kubenswrapper[4933]: I0122 06:33:31.489620 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-kr5ht"]
Jan 22 06:33:31 crc kubenswrapper[4933]: E0122 06:33:31.494581 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="22898585-7306-43e3-b12d-6491514659e2" containerName="extract-utilities"
Jan 22 06:33:31 crc kubenswrapper[4933]: I0122 06:33:31.494607 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="22898585-7306-43e3-b12d-6491514659e2" containerName="extract-utilities"
Jan 22 06:33:31 crc kubenswrapper[4933]: E0122 06:33:31.494640 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="22898585-7306-43e3-b12d-6491514659e2" containerName="registry-server"
Jan 22 06:33:31 crc kubenswrapper[4933]: I0122 06:33:31.494653 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="22898585-7306-43e3-b12d-6491514659e2" containerName="registry-server"
Jan 22 06:33:31 crc kubenswrapper[4933]: E0122 06:33:31.494677 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="22898585-7306-43e3-b12d-6491514659e2" containerName="extract-content"
Jan 22 06:33:31 crc kubenswrapper[4933]: I0122 06:33:31.494690 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="22898585-7306-43e3-b12d-6491514659e2" containerName="extract-content"
Jan 22 06:33:31 crc kubenswrapper[4933]: I0122 06:33:31.494969 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="22898585-7306-43e3-b12d-6491514659e2" containerName="registry-server"
Jan 22 06:33:31 crc kubenswrapper[4933]: I0122 06:33:31.496962 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kr5ht"
Jan 22 06:33:31 crc kubenswrapper[4933]: I0122 06:33:31.554407 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-kr5ht"]
Jan 22 06:33:31 crc kubenswrapper[4933]: I0122 06:33:31.607297 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d890ca07-0e4f-4da9-84df-9642860ae113-catalog-content\") pod \"redhat-operators-kr5ht\" (UID: \"d890ca07-0e4f-4da9-84df-9642860ae113\") " pod="openshift-marketplace/redhat-operators-kr5ht"
Jan 22 06:33:31 crc kubenswrapper[4933]: I0122 06:33:31.607349 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d890ca07-0e4f-4da9-84df-9642860ae113-utilities\") pod \"redhat-operators-kr5ht\" (UID: \"d890ca07-0e4f-4da9-84df-9642860ae113\") " pod="openshift-marketplace/redhat-operators-kr5ht"
Jan 22 06:33:31 crc kubenswrapper[4933]: I0122 06:33:31.607408 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9wjsk\" (UniqueName: \"kubernetes.io/projected/d890ca07-0e4f-4da9-84df-9642860ae113-kube-api-access-9wjsk\") pod \"redhat-operators-kr5ht\" (UID: \"d890ca07-0e4f-4da9-84df-9642860ae113\") " pod="openshift-marketplace/redhat-operators-kr5ht"
Jan 22 06:33:31 crc kubenswrapper[4933]: I0122 06:33:31.708264 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d890ca07-0e4f-4da9-84df-9642860ae113-catalog-content\") pod \"redhat-operators-kr5ht\" (UID: \"d890ca07-0e4f-4da9-84df-9642860ae113\") " pod="openshift-marketplace/redhat-operators-kr5ht"
Jan 22 06:33:31 crc kubenswrapper[4933]: I0122 06:33:31.708316 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d890ca07-0e4f-4da9-84df-9642860ae113-utilities\") pod \"redhat-operators-kr5ht\" (UID: \"d890ca07-0e4f-4da9-84df-9642860ae113\") " pod="openshift-marketplace/redhat-operators-kr5ht"
Jan 22 06:33:31 crc kubenswrapper[4933]: I0122 06:33:31.708365 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9wjsk\" (UniqueName: \"kubernetes.io/projected/d890ca07-0e4f-4da9-84df-9642860ae113-kube-api-access-9wjsk\") pod \"redhat-operators-kr5ht\" (UID: \"d890ca07-0e4f-4da9-84df-9642860ae113\") " pod="openshift-marketplace/redhat-operators-kr5ht"
Jan 22 06:33:31 crc kubenswrapper[4933]: I0122 06:33:31.708923 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d890ca07-0e4f-4da9-84df-9642860ae113-utilities\") pod \"redhat-operators-kr5ht\" (UID: \"d890ca07-0e4f-4da9-84df-9642860ae113\") " pod="openshift-marketplace/redhat-operators-kr5ht"
Jan 22 06:33:31 crc kubenswrapper[4933]: I0122 06:33:31.709098 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d890ca07-0e4f-4da9-84df-9642860ae113-catalog-content\") pod \"redhat-operators-kr5ht\" (UID: \"d890ca07-0e4f-4da9-84df-9642860ae113\") " pod="openshift-marketplace/redhat-operators-kr5ht"
Jan 22 06:33:31 crc kubenswrapper[4933]: I0122 06:33:31.733964 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9wjsk\" (UniqueName: \"kubernetes.io/projected/d890ca07-0e4f-4da9-84df-9642860ae113-kube-api-access-9wjsk\") pod \"redhat-operators-kr5ht\" (UID: \"d890ca07-0e4f-4da9-84df-9642860ae113\") " pod="openshift-marketplace/redhat-operators-kr5ht"
Jan 22 06:33:31 crc kubenswrapper[4933]: I0122 06:33:31.866096 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kr5ht"
Jan 22 06:33:32 crc kubenswrapper[4933]: I0122 06:33:32.306834 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-kr5ht"]
Jan 22 06:33:32 crc kubenswrapper[4933]: I0122 06:33:32.817433 4933 generic.go:334] "Generic (PLEG): container finished" podID="d890ca07-0e4f-4da9-84df-9642860ae113" containerID="0b85e9cc6bf06273d5ef030bc2713dcdd815e629930c12ea500857f82bfa1948" exitCode=0
Jan 22 06:33:32 crc kubenswrapper[4933]: I0122 06:33:32.817485 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kr5ht" event={"ID":"d890ca07-0e4f-4da9-84df-9642860ae113","Type":"ContainerDied","Data":"0b85e9cc6bf06273d5ef030bc2713dcdd815e629930c12ea500857f82bfa1948"}
Jan 22 06:33:32 crc kubenswrapper[4933]: I0122 06:33:32.817514 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kr5ht" event={"ID":"d890ca07-0e4f-4da9-84df-9642860ae113","Type":"ContainerStarted","Data":"41c38c22291f13fc4a0a1654cb173923ea317a3dbbdbd93e656269fad68897eb"}
Jan 22 06:33:33 crc kubenswrapper[4933]: I0122 06:33:33.829729 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kr5ht" event={"ID":"d890ca07-0e4f-4da9-84df-9642860ae113","Type":"ContainerStarted","Data":"9a53f45f5611c54e1809b7fdd893fbccaea9950de9f398b81269220fbff2c430"}
Jan 22 06:33:34 crc kubenswrapper[4933]: I0122 06:33:34.841379 4933 generic.go:334] "Generic (PLEG): container finished" podID="d890ca07-0e4f-4da9-84df-9642860ae113" containerID="9a53f45f5611c54e1809b7fdd893fbccaea9950de9f398b81269220fbff2c430" exitCode=0
Jan 22 06:33:34 crc kubenswrapper[4933]: I0122 06:33:34.841446 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kr5ht" event={"ID":"d890ca07-0e4f-4da9-84df-9642860ae113","Type":"ContainerDied","Data":"9a53f45f5611c54e1809b7fdd893fbccaea9950de9f398b81269220fbff2c430"}
Jan 22 06:33:35 crc kubenswrapper[4933]: I0122 06:33:35.852172 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kr5ht" event={"ID":"d890ca07-0e4f-4da9-84df-9642860ae113","Type":"ContainerStarted","Data":"d1885f5719d701cf2ded09a4347755e9d267496532366580e82f1fcd118cf563"}
Jan 22 06:33:35 crc kubenswrapper[4933]: I0122 06:33:35.878674 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-kr5ht" podStartSLOduration=2.448121925 podStartE2EDuration="4.878654442s" podCreationTimestamp="2026-01-22 06:33:31 +0000 UTC" firstStartedPulling="2026-01-22 06:33:32.819011394 +0000 UTC m=+2860.656136747" lastFinishedPulling="2026-01-22 06:33:35.249543901 +0000 UTC m=+2863.086669264" observedRunningTime="2026-01-22 06:33:35.873446006 +0000 UTC m=+2863.710571449" watchObservedRunningTime="2026-01-22 06:33:35.878654442 +0000 UTC m=+2863.715779795"
Jan 22 06:33:40 crc kubenswrapper[4933]: I0122 06:33:40.943203 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 06:33:40 crc kubenswrapper[4933]: I0122 06:33:40.943618 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 06:33:41 crc kubenswrapper[4933]: I0122 06:33:41.866496 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-kr5ht"
Jan 22 06:33:41 crc kubenswrapper[4933]: I0122 06:33:41.866642 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-kr5ht"
Jan 22 06:33:42 crc kubenswrapper[4933]: I0122 06:33:42.932366 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-kr5ht" podUID="d890ca07-0e4f-4da9-84df-9642860ae113" containerName="registry-server" probeResult="failure" output=<
Jan 22 06:33:42 crc kubenswrapper[4933]: 	timeout: failed to connect service ":50051" within 1s
Jan 22 06:33:42 crc kubenswrapper[4933]: >
Jan 22 06:33:51 crc kubenswrapper[4933]: I0122 06:33:51.940467 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-kr5ht"
Jan 22 06:33:51 crc kubenswrapper[4933]: I0122 06:33:51.997204 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-kr5ht"
Jan 22 06:33:52 crc kubenswrapper[4933]: I0122 06:33:52.178833 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-kr5ht"]
Jan 22 06:33:53 crc kubenswrapper[4933]: I0122 06:33:53.001436 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-kr5ht" podUID="d890ca07-0e4f-4da9-84df-9642860ae113" containerName="registry-server" containerID="cri-o://d1885f5719d701cf2ded09a4347755e9d267496532366580e82f1fcd118cf563" gracePeriod=2
Need to start a new one" pod="openshift-marketplace/redhat-operators-kr5ht" Jan 22 06:33:53 crc kubenswrapper[4933]: I0122 06:33:53.470220 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9wjsk\" (UniqueName: \"kubernetes.io/projected/d890ca07-0e4f-4da9-84df-9642860ae113-kube-api-access-9wjsk\") pod \"d890ca07-0e4f-4da9-84df-9642860ae113\" (UID: \"d890ca07-0e4f-4da9-84df-9642860ae113\") " Jan 22 06:33:53 crc kubenswrapper[4933]: I0122 06:33:53.470277 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d890ca07-0e4f-4da9-84df-9642860ae113-utilities\") pod \"d890ca07-0e4f-4da9-84df-9642860ae113\" (UID: \"d890ca07-0e4f-4da9-84df-9642860ae113\") " Jan 22 06:33:53 crc kubenswrapper[4933]: I0122 06:33:53.470398 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d890ca07-0e4f-4da9-84df-9642860ae113-catalog-content\") pod \"d890ca07-0e4f-4da9-84df-9642860ae113\" (UID: \"d890ca07-0e4f-4da9-84df-9642860ae113\") " Jan 22 06:33:53 crc kubenswrapper[4933]: I0122 06:33:53.476260 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d890ca07-0e4f-4da9-84df-9642860ae113-utilities" (OuterVolumeSpecName: "utilities") pod "d890ca07-0e4f-4da9-84df-9642860ae113" (UID: "d890ca07-0e4f-4da9-84df-9642860ae113"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:33:53 crc kubenswrapper[4933]: I0122 06:33:53.481564 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d890ca07-0e4f-4da9-84df-9642860ae113-kube-api-access-9wjsk" (OuterVolumeSpecName: "kube-api-access-9wjsk") pod "d890ca07-0e4f-4da9-84df-9642860ae113" (UID: "d890ca07-0e4f-4da9-84df-9642860ae113"). InnerVolumeSpecName "kube-api-access-9wjsk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:33:53 crc kubenswrapper[4933]: I0122 06:33:53.572048 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9wjsk\" (UniqueName: \"kubernetes.io/projected/d890ca07-0e4f-4da9-84df-9642860ae113-kube-api-access-9wjsk\") on node \"crc\" DevicePath \"\"" Jan 22 06:33:53 crc kubenswrapper[4933]: I0122 06:33:53.572095 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d890ca07-0e4f-4da9-84df-9642860ae113-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 06:33:53 crc kubenswrapper[4933]: I0122 06:33:53.583560 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d890ca07-0e4f-4da9-84df-9642860ae113-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d890ca07-0e4f-4da9-84df-9642860ae113" (UID: "d890ca07-0e4f-4da9-84df-9642860ae113"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:33:53 crc kubenswrapper[4933]: I0122 06:33:53.673739 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d890ca07-0e4f-4da9-84df-9642860ae113-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 06:33:54 crc kubenswrapper[4933]: I0122 06:33:54.012518 4933 generic.go:334] "Generic (PLEG): container finished" podID="d890ca07-0e4f-4da9-84df-9642860ae113" containerID="d1885f5719d701cf2ded09a4347755e9d267496532366580e82f1fcd118cf563" exitCode=0 Jan 22 06:33:54 crc kubenswrapper[4933]: I0122 06:33:54.012599 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kr5ht" event={"ID":"d890ca07-0e4f-4da9-84df-9642860ae113","Type":"ContainerDied","Data":"d1885f5719d701cf2ded09a4347755e9d267496532366580e82f1fcd118cf563"} Jan 22 06:33:54 crc kubenswrapper[4933]: I0122 06:33:54.012645 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kr5ht" event={"ID":"d890ca07-0e4f-4da9-84df-9642860ae113","Type":"ContainerDied","Data":"41c38c22291f13fc4a0a1654cb173923ea317a3dbbdbd93e656269fad68897eb"} Jan 22 06:33:54 crc kubenswrapper[4933]: I0122 06:33:54.012681 4933 scope.go:117] "RemoveContainer" containerID="d1885f5719d701cf2ded09a4347755e9d267496532366580e82f1fcd118cf563" Jan 22 06:33:54 crc kubenswrapper[4933]: I0122 06:33:54.012906 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kr5ht" Jan 22 06:33:54 crc kubenswrapper[4933]: I0122 06:33:54.061291 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-kr5ht"] Jan 22 06:33:54 crc kubenswrapper[4933]: I0122 06:33:54.067306 4933 scope.go:117] "RemoveContainer" containerID="9a53f45f5611c54e1809b7fdd893fbccaea9950de9f398b81269220fbff2c430" Jan 22 06:33:54 crc kubenswrapper[4933]: I0122 06:33:54.068206 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-kr5ht"] Jan 22 06:33:54 crc kubenswrapper[4933]: I0122 06:33:54.093512 4933 scope.go:117] "RemoveContainer" containerID="0b85e9cc6bf06273d5ef030bc2713dcdd815e629930c12ea500857f82bfa1948" Jan 22 06:33:54 crc kubenswrapper[4933]: I0122 06:33:54.118604 4933 scope.go:117] "RemoveContainer" containerID="d1885f5719d701cf2ded09a4347755e9d267496532366580e82f1fcd118cf563" Jan 22 06:33:54 crc kubenswrapper[4933]: E0122 06:33:54.119511 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d1885f5719d701cf2ded09a4347755e9d267496532366580e82f1fcd118cf563\": container with ID starting with d1885f5719d701cf2ded09a4347755e9d267496532366580e82f1fcd118cf563 not found: ID does not exist" containerID="d1885f5719d701cf2ded09a4347755e9d267496532366580e82f1fcd118cf563" Jan 22 06:33:54 crc kubenswrapper[4933]: I0122 06:33:54.119575 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d1885f5719d701cf2ded09a4347755e9d267496532366580e82f1fcd118cf563"} err="failed to get container status \"d1885f5719d701cf2ded09a4347755e9d267496532366580e82f1fcd118cf563\": rpc error: code = NotFound desc = could not find container \"d1885f5719d701cf2ded09a4347755e9d267496532366580e82f1fcd118cf563\": container with ID starting with d1885f5719d701cf2ded09a4347755e9d267496532366580e82f1fcd118cf563 not found: ID does not exist" Jan 22 06:33:54 crc 
kubenswrapper[4933]: I0122 06:33:54.119614 4933 scope.go:117] "RemoveContainer" containerID="9a53f45f5611c54e1809b7fdd893fbccaea9950de9f398b81269220fbff2c430" Jan 22 06:33:54 crc kubenswrapper[4933]: E0122 06:33:54.120341 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9a53f45f5611c54e1809b7fdd893fbccaea9950de9f398b81269220fbff2c430\": container with ID starting with 9a53f45f5611c54e1809b7fdd893fbccaea9950de9f398b81269220fbff2c430 not found: ID does not exist" containerID="9a53f45f5611c54e1809b7fdd893fbccaea9950de9f398b81269220fbff2c430" Jan 22 06:33:54 crc kubenswrapper[4933]: I0122 06:33:54.120554 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a53f45f5611c54e1809b7fdd893fbccaea9950de9f398b81269220fbff2c430"} err="failed to get container status \"9a53f45f5611c54e1809b7fdd893fbccaea9950de9f398b81269220fbff2c430\": rpc error: code = NotFound desc = could not find container \"9a53f45f5611c54e1809b7fdd893fbccaea9950de9f398b81269220fbff2c430\": container with ID starting with 9a53f45f5611c54e1809b7fdd893fbccaea9950de9f398b81269220fbff2c430 not found: ID does not exist" Jan 22 06:33:54 crc kubenswrapper[4933]: I0122 06:33:54.120792 4933 scope.go:117] "RemoveContainer" containerID="0b85e9cc6bf06273d5ef030bc2713dcdd815e629930c12ea500857f82bfa1948" Jan 22 06:33:54 crc kubenswrapper[4933]: E0122 06:33:54.121564 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0b85e9cc6bf06273d5ef030bc2713dcdd815e629930c12ea500857f82bfa1948\": container with ID starting with 0b85e9cc6bf06273d5ef030bc2713dcdd815e629930c12ea500857f82bfa1948 not found: ID does not exist" containerID="0b85e9cc6bf06273d5ef030bc2713dcdd815e629930c12ea500857f82bfa1948" Jan 22 06:33:54 crc kubenswrapper[4933]: I0122 06:33:54.121649 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0b85e9cc6bf06273d5ef030bc2713dcdd815e629930c12ea500857f82bfa1948"} err="failed to get container status \"0b85e9cc6bf06273d5ef030bc2713dcdd815e629930c12ea500857f82bfa1948\": rpc error: code = NotFound desc = could not find container \"0b85e9cc6bf06273d5ef030bc2713dcdd815e629930c12ea500857f82bfa1948\": container with ID starting with 0b85e9cc6bf06273d5ef030bc2713dcdd815e629930c12ea500857f82bfa1948 not found: ID does not exist" Jan 22 06:33:54 crc kubenswrapper[4933]: I0122 06:33:54.499545 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d890ca07-0e4f-4da9-84df-9642860ae113" path="/var/lib/kubelet/pods/d890ca07-0e4f-4da9-84df-9642860ae113/volumes" Jan 22 06:33:57 crc kubenswrapper[4933]: I0122 06:33:57.659952 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-bfc9x"] Jan 22 06:33:57 crc kubenswrapper[4933]: E0122 06:33:57.660991 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d890ca07-0e4f-4da9-84df-9642860ae113" containerName="registry-server" Jan 22 06:33:57 crc kubenswrapper[4933]: I0122 06:33:57.661028 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="d890ca07-0e4f-4da9-84df-9642860ae113" containerName="registry-server" Jan 22 06:33:57 crc kubenswrapper[4933]: E0122 06:33:57.661068 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d890ca07-0e4f-4da9-84df-9642860ae113" containerName="extract-content" Jan 22 06:33:57 crc kubenswrapper[4933]: I0122 06:33:57.661137 4933 
Jan 22 06:33:57 crc kubenswrapper[4933]: E0122 06:33:57.661208 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d890ca07-0e4f-4da9-84df-9642860ae113" containerName="extract-utilities"
Jan 22 06:33:57 crc kubenswrapper[4933]: I0122 06:33:57.661227 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="d890ca07-0e4f-4da9-84df-9642860ae113" containerName="extract-utilities"
Jan 22 06:33:57 crc kubenswrapper[4933]: I0122 06:33:57.661562 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="d890ca07-0e4f-4da9-84df-9642860ae113" containerName="registry-server"
Jan 22 06:33:57 crc kubenswrapper[4933]: I0122 06:33:57.663973 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bfc9x"
Jan 22 06:33:57 crc kubenswrapper[4933]: I0122 06:33:57.669607 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bfc9x"]
Jan 22 06:33:57 crc kubenswrapper[4933]: I0122 06:33:57.730663 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4kg77\" (UniqueName: \"kubernetes.io/projected/cedada7a-1b26-437f-86c3-e0770b8f6b21-kube-api-access-4kg77\") pod \"redhat-marketplace-bfc9x\" (UID: \"cedada7a-1b26-437f-86c3-e0770b8f6b21\") " pod="openshift-marketplace/redhat-marketplace-bfc9x"
Jan 22 06:33:57 crc kubenswrapper[4933]: I0122 06:33:57.730941 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cedada7a-1b26-437f-86c3-e0770b8f6b21-utilities\") pod \"redhat-marketplace-bfc9x\" (UID: \"cedada7a-1b26-437f-86c3-e0770b8f6b21\") " pod="openshift-marketplace/redhat-marketplace-bfc9x"
Jan 22 06:33:57 crc kubenswrapper[4933]: I0122 06:33:57.730987 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cedada7a-1b26-437f-86c3-e0770b8f6b21-catalog-content\") pod \"redhat-marketplace-bfc9x\" (UID: \"cedada7a-1b26-437f-86c3-e0770b8f6b21\") " pod="openshift-marketplace/redhat-marketplace-bfc9x"
Jan 22 06:33:57 crc kubenswrapper[4933]: I0122 06:33:57.831925 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4kg77\" (UniqueName: \"kubernetes.io/projected/cedada7a-1b26-437f-86c3-e0770b8f6b21-kube-api-access-4kg77\") pod \"redhat-marketplace-bfc9x\" (UID: \"cedada7a-1b26-437f-86c3-e0770b8f6b21\") " pod="openshift-marketplace/redhat-marketplace-bfc9x"
Jan 22 06:33:57 crc kubenswrapper[4933]: I0122 06:33:57.832004 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cedada7a-1b26-437f-86c3-e0770b8f6b21-utilities\") pod \"redhat-marketplace-bfc9x\" (UID: \"cedada7a-1b26-437f-86c3-e0770b8f6b21\") " pod="openshift-marketplace/redhat-marketplace-bfc9x"
Jan 22 06:33:57 crc kubenswrapper[4933]: I0122 06:33:57.832067 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cedada7a-1b26-437f-86c3-e0770b8f6b21-catalog-content\") pod \"redhat-marketplace-bfc9x\" (UID: \"cedada7a-1b26-437f-86c3-e0770b8f6b21\") " pod="openshift-marketplace/redhat-marketplace-bfc9x"
Jan 22 06:33:57 crc kubenswrapper[4933]: I0122 06:33:57.832617 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cedada7a-1b26-437f-86c3-e0770b8f6b21-catalog-content\") pod \"redhat-marketplace-bfc9x\" (UID: \"cedada7a-1b26-437f-86c3-e0770b8f6b21\") " pod="openshift-marketplace/redhat-marketplace-bfc9x"
Jan 22 06:33:57 crc kubenswrapper[4933]: I0122 06:33:57.832799 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cedada7a-1b26-437f-86c3-e0770b8f6b21-utilities\") pod \"redhat-marketplace-bfc9x\" (UID: \"cedada7a-1b26-437f-86c3-e0770b8f6b21\") " pod="openshift-marketplace/redhat-marketplace-bfc9x"
Jan 22 06:33:57 crc kubenswrapper[4933]: I0122 06:33:57.854030 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4kg77\" (UniqueName: \"kubernetes.io/projected/cedada7a-1b26-437f-86c3-e0770b8f6b21-kube-api-access-4kg77\") pod \"redhat-marketplace-bfc9x\" (UID: \"cedada7a-1b26-437f-86c3-e0770b8f6b21\") " pod="openshift-marketplace/redhat-marketplace-bfc9x"
Jan 22 06:33:57 crc kubenswrapper[4933]: I0122 06:33:57.983498 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bfc9x"
Jan 22 06:33:58 crc kubenswrapper[4933]: I0122 06:33:58.282656 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bfc9x"]
Jan 22 06:33:59 crc kubenswrapper[4933]: I0122 06:33:59.055943 4933 generic.go:334] "Generic (PLEG): container finished" podID="cedada7a-1b26-437f-86c3-e0770b8f6b21" containerID="506afcf2b9c00fa7467865059c6aca384055d663d3d6477cf84571f3f9b4030e" exitCode=0
Jan 22 06:33:59 crc kubenswrapper[4933]: I0122 06:33:59.056013 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bfc9x" event={"ID":"cedada7a-1b26-437f-86c3-e0770b8f6b21","Type":"ContainerDied","Data":"506afcf2b9c00fa7467865059c6aca384055d663d3d6477cf84571f3f9b4030e"}
Jan 22 06:33:59 crc kubenswrapper[4933]: I0122 06:33:59.056868 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bfc9x" event={"ID":"cedada7a-1b26-437f-86c3-e0770b8f6b21","Type":"ContainerStarted","Data":"4ddf701d0bd8f6bd6a1c0095b7b2ffa09ebabdd3b5f995708246b26c9860acff"}
Jan 22 06:34:00 crc kubenswrapper[4933]: I0122 06:34:00.068538 4933 generic.go:334] "Generic (PLEG): container finished" podID="cedada7a-1b26-437f-86c3-e0770b8f6b21" containerID="e5b68aec6b66ec5ddd28044cae6d80748ae4c7cca5a4771a4d9ce9101fb32557" exitCode=0
Jan 22 06:34:00 crc kubenswrapper[4933]: I0122 06:34:00.068720 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bfc9x" event={"ID":"cedada7a-1b26-437f-86c3-e0770b8f6b21","Type":"ContainerDied","Data":"e5b68aec6b66ec5ddd28044cae6d80748ae4c7cca5a4771a4d9ce9101fb32557"}
Jan 22 06:34:01 crc kubenswrapper[4933]: I0122 06:34:01.082636 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bfc9x" event={"ID":"cedada7a-1b26-437f-86c3-e0770b8f6b21","Type":"ContainerStarted","Data":"1ec8d48e6ceea405c5ffd538aa3de83fc5096f38a615e66a6a524dd9b360dc12"}
Jan 22 06:34:01 crc kubenswrapper[4933]: I0122 06:34:01.115498 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-bfc9x" podStartSLOduration=2.705803584 podStartE2EDuration="4.115472921s" podCreationTimestamp="2026-01-22 06:33:57 +0000 UTC" firstStartedPulling="2026-01-22 06:33:59.058972673 +0000 UTC m=+2886.896098046" lastFinishedPulling="2026-01-22 06:34:00.46864203 +0000 UTC m=+2888.305767383" observedRunningTime="2026-01-22 06:34:01.111592557 +0000 UTC m=+2888.948717920" watchObservedRunningTime="2026-01-22 06:34:01.115472921 +0000 UTC m=+2888.952598284"
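[Editor's note] The pod_startup_latency_tracker line above can be checked by hand: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration appears to subtract the image-pull window, taken from the monotonic m=+ offsets rather than the wall-clock stamps. A worked check (values copied from the log; the formulas are inferred, not kubelet source):

package main

import (
	"fmt"
	"time"
)

func main() {
	layout := "2006-01-02 15:04:05.999999999 -0700 MST"
	created, _ := time.Parse(layout, "2026-01-22 06:33:57 +0000 UTC")
	running, _ := time.Parse(layout, "2026-01-22 06:34:01.115472921 +0000 UTC")

	// End-to-end startup: watchObservedRunningTime - podCreationTimestamp.
	e2e := running.Sub(created)
	fmt.Println(e2e) // 4.115472921s, matching podStartE2EDuration

	// SLO startup excludes image-pull time; the pull window matches the
	// monotonic m=+ offsets of lastFinishedPulling and firstStartedPulling.
	pull := 2888.305767383 - 2886.896098046 // ~1.409669337s
	slo := e2e.Seconds() - pull
	fmt.Printf("%.9f\n", slo) // 2.705803584, matching podStartSLOduration
}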
Jan 22 06:34:07 crc kubenswrapper[4933]: I0122 06:34:07.984866 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-bfc9x"
Jan 22 06:34:07 crc kubenswrapper[4933]: I0122 06:34:07.985565 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-bfc9x"
Jan 22 06:34:08 crc kubenswrapper[4933]: I0122 06:34:08.045397 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-bfc9x"
Jan 22 06:34:08 crc kubenswrapper[4933]: I0122 06:34:08.182680 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-bfc9x"
Jan 22 06:34:08 crc kubenswrapper[4933]: I0122 06:34:08.284121 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bfc9x"]
Jan 22 06:34:10 crc kubenswrapper[4933]: I0122 06:34:10.158361 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-bfc9x" podUID="cedada7a-1b26-437f-86c3-e0770b8f6b21" containerName="registry-server" containerID="cri-o://1ec8d48e6ceea405c5ffd538aa3de83fc5096f38a615e66a6a524dd9b360dc12" gracePeriod=2
Jan 22 06:34:10 crc kubenswrapper[4933]: I0122 06:34:10.943000 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 06:34:10 crc kubenswrapper[4933]: I0122 06:34:10.943436 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 06:34:11 crc kubenswrapper[4933]: I0122 06:34:11.145674 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bfc9x"
Jan 22 06:34:11 crc kubenswrapper[4933]: I0122 06:34:11.174853 4933 generic.go:334] "Generic (PLEG): container finished" podID="cedada7a-1b26-437f-86c3-e0770b8f6b21" containerID="1ec8d48e6ceea405c5ffd538aa3de83fc5096f38a615e66a6a524dd9b360dc12" exitCode=0
Jan 22 06:34:11 crc kubenswrapper[4933]: I0122 06:34:11.174915 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bfc9x" event={"ID":"cedada7a-1b26-437f-86c3-e0770b8f6b21","Type":"ContainerDied","Data":"1ec8d48e6ceea405c5ffd538aa3de83fc5096f38a615e66a6a524dd9b360dc12"}
Jan 22 06:34:11 crc kubenswrapper[4933]: I0122 06:34:11.174946 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bfc9x"
Jan 22 06:34:11 crc kubenswrapper[4933]: I0122 06:34:11.174978 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bfc9x" event={"ID":"cedada7a-1b26-437f-86c3-e0770b8f6b21","Type":"ContainerDied","Data":"4ddf701d0bd8f6bd6a1c0095b7b2ffa09ebabdd3b5f995708246b26c9860acff"}
Jan 22 06:34:11 crc kubenswrapper[4933]: I0122 06:34:11.175004 4933 scope.go:117] "RemoveContainer" containerID="1ec8d48e6ceea405c5ffd538aa3de83fc5096f38a615e66a6a524dd9b360dc12"
Jan 22 06:34:11 crc kubenswrapper[4933]: I0122 06:34:11.196672 4933 scope.go:117] "RemoveContainer" containerID="e5b68aec6b66ec5ddd28044cae6d80748ae4c7cca5a4771a4d9ce9101fb32557"
Jan 22 06:34:11 crc kubenswrapper[4933]: I0122 06:34:11.227186 4933 scope.go:117] "RemoveContainer" containerID="506afcf2b9c00fa7467865059c6aca384055d663d3d6477cf84571f3f9b4030e"
Jan 22 06:34:11 crc kubenswrapper[4933]: I0122 06:34:11.228374 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cedada7a-1b26-437f-86c3-e0770b8f6b21-catalog-content\") pod \"cedada7a-1b26-437f-86c3-e0770b8f6b21\" (UID: \"cedada7a-1b26-437f-86c3-e0770b8f6b21\") "
Jan 22 06:34:11 crc kubenswrapper[4933]: I0122 06:34:11.228427 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4kg77\" (UniqueName: \"kubernetes.io/projected/cedada7a-1b26-437f-86c3-e0770b8f6b21-kube-api-access-4kg77\") pod \"cedada7a-1b26-437f-86c3-e0770b8f6b21\" (UID: \"cedada7a-1b26-437f-86c3-e0770b8f6b21\") "
Jan 22 06:34:11 crc kubenswrapper[4933]: I0122 06:34:11.228470 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cedada7a-1b26-437f-86c3-e0770b8f6b21-utilities\") pod \"cedada7a-1b26-437f-86c3-e0770b8f6b21\" (UID: \"cedada7a-1b26-437f-86c3-e0770b8f6b21\") "
Jan 22 06:34:11 crc kubenswrapper[4933]: I0122 06:34:11.232338 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cedada7a-1b26-437f-86c3-e0770b8f6b21-utilities" (OuterVolumeSpecName: "utilities") pod "cedada7a-1b26-437f-86c3-e0770b8f6b21" (UID: "cedada7a-1b26-437f-86c3-e0770b8f6b21"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 06:34:11 crc kubenswrapper[4933]: I0122 06:34:11.235134 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cedada7a-1b26-437f-86c3-e0770b8f6b21-kube-api-access-4kg77" (OuterVolumeSpecName: "kube-api-access-4kg77") pod "cedada7a-1b26-437f-86c3-e0770b8f6b21" (UID: "cedada7a-1b26-437f-86c3-e0770b8f6b21"). InnerVolumeSpecName "kube-api-access-4kg77". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 06:34:11 crc kubenswrapper[4933]: I0122 06:34:11.250988 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cedada7a-1b26-437f-86c3-e0770b8f6b21-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cedada7a-1b26-437f-86c3-e0770b8f6b21" (UID: "cedada7a-1b26-437f-86c3-e0770b8f6b21"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
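[Editor's note] The VerifyControllerAttachedVolume / MountVolume lines at pod creation and the UnmountVolume / TearDown lines above are two sides of one desired-state-versus-actual-state reconciler: volumes present in the desired world get mounted, volumes left only in the actual world get torn down, and a failed operation is simply retried on the next pass. A minimal sketch of the mount side (names are invented for illustration, not kubelet source; the unmount side mirrors it):

package main

import "fmt"

// reconcile mounts every desired volume that is not yet in the actual
// (mounted) state; failures are left for the next reconciler pass.
func reconcile(desired []string, mounted map[string]bool, setUp func(string) error) {
	for _, vol := range desired {
		if mounted[vol] {
			continue // actual state already matches desired state
		}
		fmt.Printf("operationExecutor.MountVolume started for volume %q\n", vol)
		if err := setUp(vol); err != nil {
			fmt.Printf("MountVolume.SetUp failed for %q: %v\n", vol, err)
			continue
		}
		mounted[vol] = true
		fmt.Printf("MountVolume.SetUp succeeded for volume %q\n", vol)
	}
}

func main() {
	mounted := map[string]bool{}
	vols := []string{"kube-api-access-4kg77", "utilities", "catalog-content"}
	reconcile(vols, mounted, func(string) error { return nil })
}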
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:34:11 crc kubenswrapper[4933]: I0122 06:34:11.265324 4933 scope.go:117] "RemoveContainer" containerID="1ec8d48e6ceea405c5ffd538aa3de83fc5096f38a615e66a6a524dd9b360dc12" Jan 22 06:34:11 crc kubenswrapper[4933]: E0122 06:34:11.265746 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1ec8d48e6ceea405c5ffd538aa3de83fc5096f38a615e66a6a524dd9b360dc12\": container with ID starting with 1ec8d48e6ceea405c5ffd538aa3de83fc5096f38a615e66a6a524dd9b360dc12 not found: ID does not exist" containerID="1ec8d48e6ceea405c5ffd538aa3de83fc5096f38a615e66a6a524dd9b360dc12" Jan 22 06:34:11 crc kubenswrapper[4933]: I0122 06:34:11.265792 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1ec8d48e6ceea405c5ffd538aa3de83fc5096f38a615e66a6a524dd9b360dc12"} err="failed to get container status \"1ec8d48e6ceea405c5ffd538aa3de83fc5096f38a615e66a6a524dd9b360dc12\": rpc error: code = NotFound desc = could not find container \"1ec8d48e6ceea405c5ffd538aa3de83fc5096f38a615e66a6a524dd9b360dc12\": container with ID starting with 1ec8d48e6ceea405c5ffd538aa3de83fc5096f38a615e66a6a524dd9b360dc12 not found: ID does not exist" Jan 22 06:34:11 crc kubenswrapper[4933]: I0122 06:34:11.265817 4933 scope.go:117] "RemoveContainer" containerID="e5b68aec6b66ec5ddd28044cae6d80748ae4c7cca5a4771a4d9ce9101fb32557" Jan 22 06:34:11 crc kubenswrapper[4933]: E0122 06:34:11.266228 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e5b68aec6b66ec5ddd28044cae6d80748ae4c7cca5a4771a4d9ce9101fb32557\": container with ID starting with e5b68aec6b66ec5ddd28044cae6d80748ae4c7cca5a4771a4d9ce9101fb32557 not found: ID does not exist" containerID="e5b68aec6b66ec5ddd28044cae6d80748ae4c7cca5a4771a4d9ce9101fb32557" Jan 22 06:34:11 crc kubenswrapper[4933]: I0122 06:34:11.266243 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e5b68aec6b66ec5ddd28044cae6d80748ae4c7cca5a4771a4d9ce9101fb32557"} err="failed to get container status \"e5b68aec6b66ec5ddd28044cae6d80748ae4c7cca5a4771a4d9ce9101fb32557\": rpc error: code = NotFound desc = could not find container \"e5b68aec6b66ec5ddd28044cae6d80748ae4c7cca5a4771a4d9ce9101fb32557\": container with ID starting with e5b68aec6b66ec5ddd28044cae6d80748ae4c7cca5a4771a4d9ce9101fb32557 not found: ID does not exist" Jan 22 06:34:11 crc kubenswrapper[4933]: I0122 06:34:11.266254 4933 scope.go:117] "RemoveContainer" containerID="506afcf2b9c00fa7467865059c6aca384055d663d3d6477cf84571f3f9b4030e" Jan 22 06:34:11 crc kubenswrapper[4933]: E0122 06:34:11.266615 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"506afcf2b9c00fa7467865059c6aca384055d663d3d6477cf84571f3f9b4030e\": container with ID starting with 506afcf2b9c00fa7467865059c6aca384055d663d3d6477cf84571f3f9b4030e not found: ID does not exist" containerID="506afcf2b9c00fa7467865059c6aca384055d663d3d6477cf84571f3f9b4030e" Jan 22 06:34:11 crc kubenswrapper[4933]: I0122 06:34:11.266644 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"506afcf2b9c00fa7467865059c6aca384055d663d3d6477cf84571f3f9b4030e"} err="failed to get container status \"506afcf2b9c00fa7467865059c6aca384055d663d3d6477cf84571f3f9b4030e\": rpc error: code = NotFound desc = could not 
find container \"506afcf2b9c00fa7467865059c6aca384055d663d3d6477cf84571f3f9b4030e\": container with ID starting with 506afcf2b9c00fa7467865059c6aca384055d663d3d6477cf84571f3f9b4030e not found: ID does not exist" Jan 22 06:34:11 crc kubenswrapper[4933]: I0122 06:34:11.329965 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cedada7a-1b26-437f-86c3-e0770b8f6b21-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 06:34:11 crc kubenswrapper[4933]: I0122 06:34:11.330010 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4kg77\" (UniqueName: \"kubernetes.io/projected/cedada7a-1b26-437f-86c3-e0770b8f6b21-kube-api-access-4kg77\") on node \"crc\" DevicePath \"\"" Jan 22 06:34:11 crc kubenswrapper[4933]: I0122 06:34:11.330025 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cedada7a-1b26-437f-86c3-e0770b8f6b21-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 06:34:11 crc kubenswrapper[4933]: I0122 06:34:11.521874 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bfc9x"] Jan 22 06:34:11 crc kubenswrapper[4933]: I0122 06:34:11.528826 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-bfc9x"] Jan 22 06:34:12 crc kubenswrapper[4933]: I0122 06:34:12.501740 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cedada7a-1b26-437f-86c3-e0770b8f6b21" path="/var/lib/kubelet/pods/cedada7a-1b26-437f-86c3-e0770b8f6b21/volumes" Jan 22 06:34:40 crc kubenswrapper[4933]: I0122 06:34:40.943169 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:34:40 crc kubenswrapper[4933]: I0122 06:34:40.943631 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:34:40 crc kubenswrapper[4933]: I0122 06:34:40.943669 4933 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" Jan 22 06:34:40 crc kubenswrapper[4933]: I0122 06:34:40.944179 4933 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"30d623b3b05ccfca699be78d903490dfc9f2de1ca28a85a83a15fc90af4e5258"} pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 06:34:40 crc kubenswrapper[4933]: I0122 06:34:40.944225 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" containerID="cri-o://30d623b3b05ccfca699be78d903490dfc9f2de1ca28a85a83a15fc90af4e5258" gracePeriod=600 Jan 22 06:34:41 crc kubenswrapper[4933]: I0122 06:34:41.408210 4933 generic.go:334] "Generic (PLEG): container finished" 
podID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerID="30d623b3b05ccfca699be78d903490dfc9f2de1ca28a85a83a15fc90af4e5258" exitCode=0 Jan 22 06:34:41 crc kubenswrapper[4933]: I0122 06:34:41.408286 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerDied","Data":"30d623b3b05ccfca699be78d903490dfc9f2de1ca28a85a83a15fc90af4e5258"} Jan 22 06:34:41 crc kubenswrapper[4933]: I0122 06:34:41.408529 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerStarted","Data":"23ddd73fbcac7612e5d13f424d3f1cd3924020e592e3e838e891b8ea94bbc718"} Jan 22 06:34:41 crc kubenswrapper[4933]: I0122 06:34:41.408559 4933 scope.go:117] "RemoveContainer" containerID="9441aafeefcbaee830d62084b2728e4a2bab7fe497b4fa5480565e507d8741df" Jan 22 06:37:10 crc kubenswrapper[4933]: I0122 06:37:10.943094 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:37:10 crc kubenswrapper[4933]: I0122 06:37:10.943596 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:37:40 crc kubenswrapper[4933]: I0122 06:37:40.943622 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:37:40 crc kubenswrapper[4933]: I0122 06:37:40.944465 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:38:10 crc kubenswrapper[4933]: I0122 06:38:10.943809 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:38:10 crc kubenswrapper[4933]: I0122 06:38:10.944593 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:38:10 crc kubenswrapper[4933]: I0122 06:38:10.944674 4933 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" Jan 22 06:38:10 crc kubenswrapper[4933]: I0122 06:38:10.945698 4933 kuberuntime_manager.go:1027] "Message for 
Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"23ddd73fbcac7612e5d13f424d3f1cd3924020e592e3e838e891b8ea94bbc718"} pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 06:38:10 crc kubenswrapper[4933]: I0122 06:38:10.945800 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" containerID="cri-o://23ddd73fbcac7612e5d13f424d3f1cd3924020e592e3e838e891b8ea94bbc718" gracePeriod=600 Jan 22 06:38:11 crc kubenswrapper[4933]: E0122 06:38:11.593533 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:38:11 crc kubenswrapper[4933]: I0122 06:38:11.983753 4933 generic.go:334] "Generic (PLEG): container finished" podID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerID="23ddd73fbcac7612e5d13f424d3f1cd3924020e592e3e838e891b8ea94bbc718" exitCode=0 Jan 22 06:38:11 crc kubenswrapper[4933]: I0122 06:38:11.983853 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerDied","Data":"23ddd73fbcac7612e5d13f424d3f1cd3924020e592e3e838e891b8ea94bbc718"} Jan 22 06:38:11 crc kubenswrapper[4933]: I0122 06:38:11.984064 4933 scope.go:117] "RemoveContainer" containerID="30d623b3b05ccfca699be78d903490dfc9f2de1ca28a85a83a15fc90af4e5258" Jan 22 06:38:11 crc kubenswrapper[4933]: I0122 06:38:11.984952 4933 scope.go:117] "RemoveContainer" containerID="23ddd73fbcac7612e5d13f424d3f1cd3924020e592e3e838e891b8ea94bbc718" Jan 22 06:38:11 crc kubenswrapper[4933]: E0122 06:38:11.985404 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:38:26 crc kubenswrapper[4933]: I0122 06:38:26.498225 4933 scope.go:117] "RemoveContainer" containerID="23ddd73fbcac7612e5d13f424d3f1cd3924020e592e3e838e891b8ea94bbc718" Jan 22 06:38:26 crc kubenswrapper[4933]: E0122 06:38:26.499011 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:38:39 crc kubenswrapper[4933]: I0122 06:38:39.490521 4933 scope.go:117] "RemoveContainer" containerID="23ddd73fbcac7612e5d13f424d3f1cd3924020e592e3e838e891b8ea94bbc718" Jan 22 06:38:39 crc kubenswrapper[4933]: E0122 
Jan 22 06:38:51 crc kubenswrapper[4933]: I0122 06:38:51.491854 4933 scope.go:117] "RemoveContainer" containerID="23ddd73fbcac7612e5d13f424d3f1cd3924020e592e3e838e891b8ea94bbc718"
Jan 22 06:38:51 crc kubenswrapper[4933]: E0122 06:38:51.492522 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:39:03 crc kubenswrapper[4933]: I0122 06:39:03.490674 4933 scope.go:117] "RemoveContainer" containerID="23ddd73fbcac7612e5d13f424d3f1cd3924020e592e3e838e891b8ea94bbc718"
Jan 22 06:39:03 crc kubenswrapper[4933]: E0122 06:39:03.491472 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:39:18 crc kubenswrapper[4933]: I0122 06:39:18.247591 4933 scope.go:117] "RemoveContainer" containerID="23ddd73fbcac7612e5d13f424d3f1cd3924020e592e3e838e891b8ea94bbc718"
Jan 22 06:39:18 crc kubenswrapper[4933]: E0122 06:39:18.248291 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:39:30 crc kubenswrapper[4933]: I0122 06:39:30.491271 4933 scope.go:117] "RemoveContainer" containerID="23ddd73fbcac7612e5d13f424d3f1cd3924020e592e3e838e891b8ea94bbc718"
Jan 22 06:39:30 crc kubenswrapper[4933]: E0122 06:39:30.492002 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:39:44 crc kubenswrapper[4933]: I0122 06:39:44.491000 4933 scope.go:117] "RemoveContainer" containerID="23ddd73fbcac7612e5d13f424d3f1cd3924020e592e3e838e891b8ea94bbc718"
Jan 22 06:39:44 crc kubenswrapper[4933]: E0122 06:39:44.491996 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:39:56 crc kubenswrapper[4933]: I0122 06:39:56.491236 4933 scope.go:117] "RemoveContainer" containerID="23ddd73fbcac7612e5d13f424d3f1cd3924020e592e3e838e891b8ea94bbc718"
Jan 22 06:39:56 crc kubenswrapper[4933]: E0122 06:39:56.492547 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:40:07 crc kubenswrapper[4933]: I0122 06:40:07.491733 4933 scope.go:117] "RemoveContainer" containerID="23ddd73fbcac7612e5d13f424d3f1cd3924020e592e3e838e891b8ea94bbc718"
Jan 22 06:40:07 crc kubenswrapper[4933]: E0122 06:40:07.492891 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:40:18 crc kubenswrapper[4933]: I0122 06:40:18.491521 4933 scope.go:117] "RemoveContainer" containerID="23ddd73fbcac7612e5d13f424d3f1cd3924020e592e3e838e891b8ea94bbc718"
Jan 22 06:40:18 crc kubenswrapper[4933]: E0122 06:40:18.492594 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:40:29 crc kubenswrapper[4933]: I0122 06:40:29.490492 4933 scope.go:117] "RemoveContainer" containerID="23ddd73fbcac7612e5d13f424d3f1cd3924020e592e3e838e891b8ea94bbc718"
Jan 22 06:40:29 crc kubenswrapper[4933]: E0122 06:40:29.491291 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:40:43 crc kubenswrapper[4933]: I0122 06:40:43.490266 4933 scope.go:117] "RemoveContainer" containerID="23ddd73fbcac7612e5d13f424d3f1cd3924020e592e3e838e891b8ea94bbc718"
Jan 22 06:40:43 crc kubenswrapper[4933]: E0122 06:40:43.490905 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:40:56 crc kubenswrapper[4933]: I0122 06:40:56.492710 4933 scope.go:117] "RemoveContainer" containerID="23ddd73fbcac7612e5d13f424d3f1cd3924020e592e3e838e891b8ea94bbc718"
Jan 22 06:40:56 crc kubenswrapper[4933]: E0122 06:40:56.493313 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:41:07 crc kubenswrapper[4933]: I0122 06:41:07.491658 4933 scope.go:117] "RemoveContainer" containerID="23ddd73fbcac7612e5d13f424d3f1cd3924020e592e3e838e891b8ea94bbc718"
Jan 22 06:41:07 crc kubenswrapper[4933]: E0122 06:41:07.492724 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:41:21 crc kubenswrapper[4933]: I0122 06:41:21.491015 4933 scope.go:117] "RemoveContainer" containerID="23ddd73fbcac7612e5d13f424d3f1cd3924020e592e3e838e891b8ea94bbc718"
Jan 22 06:41:21 crc kubenswrapper[4933]: E0122 06:41:21.491996 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:41:35 crc kubenswrapper[4933]: I0122 06:41:35.491144 4933 scope.go:117] "RemoveContainer" containerID="23ddd73fbcac7612e5d13f424d3f1cd3924020e592e3e838e891b8ea94bbc718"
Jan 22 06:41:35 crc kubenswrapper[4933]: E0122 06:41:35.492261 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:41:49 crc kubenswrapper[4933]: I0122 06:41:49.492035 4933 scope.go:117] "RemoveContainer" containerID="23ddd73fbcac7612e5d13f424d3f1cd3924020e592e3e838e891b8ea94bbc718"
Jan 22 06:41:49 crc kubenswrapper[4933]: E0122 06:41:49.493198 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:42:00 crc kubenswrapper[4933]: I0122 06:42:00.492121 4933 scope.go:117] "RemoveContainer" containerID="23ddd73fbcac7612e5d13f424d3f1cd3924020e592e3e838e891b8ea94bbc718"
Jan 22 06:42:00 crc kubenswrapper[4933]: E0122 06:42:00.493033 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:42:13 crc kubenswrapper[4933]: I0122 06:42:13.491062 4933 scope.go:117] "RemoveContainer" containerID="23ddd73fbcac7612e5d13f424d3f1cd3924020e592e3e838e891b8ea94bbc718"
Jan 22 06:42:13 crc kubenswrapper[4933]: E0122 06:42:13.491790 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:42:28 crc kubenswrapper[4933]: I0122 06:42:28.492168 4933 scope.go:117] "RemoveContainer" containerID="23ddd73fbcac7612e5d13f424d3f1cd3924020e592e3e838e891b8ea94bbc718"
Jan 22 06:42:28 crc kubenswrapper[4933]: E0122 06:42:28.493561 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:42:40 crc kubenswrapper[4933]: I0122 06:42:40.491979 4933 scope.go:117] "RemoveContainer" containerID="23ddd73fbcac7612e5d13f424d3f1cd3924020e592e3e838e891b8ea94bbc718"
Jan 22 06:42:40 crc kubenswrapper[4933]: E0122 06:42:40.492697 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:42:54 crc kubenswrapper[4933]: I0122 06:42:54.491748 4933 scope.go:117] "RemoveContainer" containerID="23ddd73fbcac7612e5d13f424d3f1cd3924020e592e3e838e891b8ea94bbc718"
Jan 22 06:42:54 crc kubenswrapper[4933]: E0122 06:42:54.492672 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:43:06 crc kubenswrapper[4933]: I0122 06:43:06.490812 4933 scope.go:117] "RemoveContainer" containerID="23ddd73fbcac7612e5d13f424d3f1cd3924020e592e3e838e891b8ea94bbc718"
Jan 22 06:43:06 crc kubenswrapper[4933]: E0122 06:43:06.491692 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:43:21 crc kubenswrapper[4933]: I0122 06:43:21.490879 4933 scope.go:117] "RemoveContainer" containerID="23ddd73fbcac7612e5d13f424d3f1cd3924020e592e3e838e891b8ea94bbc718"
Jan 22 06:43:21 crc kubenswrapper[4933]: I0122 06:43:21.736761 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerStarted","Data":"79ef21632b14ac76fe171d79c1f273dc38302cc41d7397ad9ad0c62b8ec99f30"}
Jan 22 06:43:29 crc kubenswrapper[4933]: I0122 06:43:29.611927 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-c7g62"]
Jan 22 06:43:29 crc kubenswrapper[4933]: E0122 06:43:29.619288 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cedada7a-1b26-437f-86c3-e0770b8f6b21" containerName="registry-server"
Jan 22 06:43:29 crc kubenswrapper[4933]: I0122 06:43:29.619344 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="cedada7a-1b26-437f-86c3-e0770b8f6b21" containerName="registry-server"
Jan 22 06:43:29 crc kubenswrapper[4933]: E0122 06:43:29.619374 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cedada7a-1b26-437f-86c3-e0770b8f6b21" containerName="extract-utilities"
Jan 22 06:43:29 crc kubenswrapper[4933]: I0122 06:43:29.619389 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="cedada7a-1b26-437f-86c3-e0770b8f6b21" containerName="extract-utilities"
Jan 22 06:43:29 crc kubenswrapper[4933]: E0122 06:43:29.619427 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cedada7a-1b26-437f-86c3-e0770b8f6b21" containerName="extract-content"
Jan 22 06:43:29 crc kubenswrapper[4933]: I0122 06:43:29.619439 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="cedada7a-1b26-437f-86c3-e0770b8f6b21" containerName="extract-content"
Jan 22 06:43:29 crc kubenswrapper[4933]: I0122 06:43:29.619818 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="cedada7a-1b26-437f-86c3-e0770b8f6b21" containerName="registry-server"
Jan 22 06:43:29 crc kubenswrapper[4933]: I0122 06:43:29.622056 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-c7g62"
Jan 22 06:43:29 crc kubenswrapper[4933]: I0122 06:43:29.623917 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-c7g62"]
Jan 22 06:43:29 crc kubenswrapper[4933]: I0122 06:43:29.745209 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/751a1180-87cc-4c8a-abde-4b822d8016f2-catalog-content\") pod \"community-operators-c7g62\" (UID: \"751a1180-87cc-4c8a-abde-4b822d8016f2\") " pod="openshift-marketplace/community-operators-c7g62"
Jan 22 06:43:29 crc kubenswrapper[4933]: I0122 06:43:29.745570 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/751a1180-87cc-4c8a-abde-4b822d8016f2-utilities\") pod \"community-operators-c7g62\" (UID: \"751a1180-87cc-4c8a-abde-4b822d8016f2\") " pod="openshift-marketplace/community-operators-c7g62"
Jan 22 06:43:29 crc kubenswrapper[4933]: I0122 06:43:29.745623 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wczmm\" (UniqueName: \"kubernetes.io/projected/751a1180-87cc-4c8a-abde-4b822d8016f2-kube-api-access-wczmm\") pod \"community-operators-c7g62\" (UID: \"751a1180-87cc-4c8a-abde-4b822d8016f2\") " pod="openshift-marketplace/community-operators-c7g62"
Jan 22 06:43:29 crc kubenswrapper[4933]: I0122 06:43:29.846959 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/751a1180-87cc-4c8a-abde-4b822d8016f2-utilities\") pod \"community-operators-c7g62\" (UID: \"751a1180-87cc-4c8a-abde-4b822d8016f2\") " pod="openshift-marketplace/community-operators-c7g62"
Jan 22 06:43:29 crc kubenswrapper[4933]: I0122 06:43:29.847030 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wczmm\" (UniqueName: \"kubernetes.io/projected/751a1180-87cc-4c8a-abde-4b822d8016f2-kube-api-access-wczmm\") pod \"community-operators-c7g62\" (UID: \"751a1180-87cc-4c8a-abde-4b822d8016f2\") " pod="openshift-marketplace/community-operators-c7g62"
Jan 22 06:43:29 crc kubenswrapper[4933]: I0122 06:43:29.847116 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/751a1180-87cc-4c8a-abde-4b822d8016f2-catalog-content\") pod \"community-operators-c7g62\" (UID: \"751a1180-87cc-4c8a-abde-4b822d8016f2\") " pod="openshift-marketplace/community-operators-c7g62"
Jan 22 06:43:29 crc kubenswrapper[4933]: I0122 06:43:29.847580 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/751a1180-87cc-4c8a-abde-4b822d8016f2-catalog-content\") pod \"community-operators-c7g62\" (UID: \"751a1180-87cc-4c8a-abde-4b822d8016f2\") " pod="openshift-marketplace/community-operators-c7g62"
Jan 22 06:43:29 crc kubenswrapper[4933]: I0122 06:43:29.847919 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/751a1180-87cc-4c8a-abde-4b822d8016f2-utilities\") pod \"community-operators-c7g62\" (UID: \"751a1180-87cc-4c8a-abde-4b822d8016f2\") " pod="openshift-marketplace/community-operators-c7g62"
Jan 22 06:43:29 crc kubenswrapper[4933]: I0122 06:43:29.871228 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wczmm\" (UniqueName: \"kubernetes.io/projected/751a1180-87cc-4c8a-abde-4b822d8016f2-kube-api-access-wczmm\") pod \"community-operators-c7g62\" (UID: \"751a1180-87cc-4c8a-abde-4b822d8016f2\") " pod="openshift-marketplace/community-operators-c7g62"
Jan 22 06:43:29 crc kubenswrapper[4933]: I0122 06:43:29.958625 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-c7g62"
Jan 22 06:43:30 crc kubenswrapper[4933]: I0122 06:43:30.436797 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-c7g62"]
Jan 22 06:43:30 crc kubenswrapper[4933]: W0122 06:43:30.446480 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod751a1180_87cc_4c8a_abde_4b822d8016f2.slice/crio-0b70eb0e59f07ff5eb3a48d2a738bc1b1c97c664c08f23390344daa38c81074e WatchSource:0}: Error finding container 0b70eb0e59f07ff5eb3a48d2a738bc1b1c97c664c08f23390344daa38c81074e: Status 404 returned error can't find the container with id 0b70eb0e59f07ff5eb3a48d2a738bc1b1c97c664c08f23390344daa38c81074e
Jan 22 06:43:30 crc kubenswrapper[4933]: I0122 06:43:30.826197 4933 generic.go:334] "Generic (PLEG): container finished" podID="751a1180-87cc-4c8a-abde-4b822d8016f2" containerID="fae8ef10a28e0696cb316631fa019f7af6f538b3219301e90fb19dadf4071a9f" exitCode=0
Jan 22 06:43:30 crc kubenswrapper[4933]: I0122 06:43:30.826296 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c7g62" event={"ID":"751a1180-87cc-4c8a-abde-4b822d8016f2","Type":"ContainerDied","Data":"fae8ef10a28e0696cb316631fa019f7af6f538b3219301e90fb19dadf4071a9f"}
Jan 22 06:43:30 crc kubenswrapper[4933]: I0122 06:43:30.827548 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c7g62" event={"ID":"751a1180-87cc-4c8a-abde-4b822d8016f2","Type":"ContainerStarted","Data":"0b70eb0e59f07ff5eb3a48d2a738bc1b1c97c664c08f23390344daa38c81074e"}
Jan 22 06:43:30 crc kubenswrapper[4933]: I0122 06:43:30.828575 4933 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 22 06:43:31 crc kubenswrapper[4933]: I0122 06:43:31.843148 4933 generic.go:334] "Generic (PLEG): container finished" podID="751a1180-87cc-4c8a-abde-4b822d8016f2" containerID="fdfddddde22036ffb9f50018beec71fe8d3cc8ae814c0a98fa55256a4750b363" exitCode=0
Jan 22 06:43:31 crc kubenswrapper[4933]: I0122 06:43:31.843239 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c7g62" event={"ID":"751a1180-87cc-4c8a-abde-4b822d8016f2","Type":"ContainerDied","Data":"fdfddddde22036ffb9f50018beec71fe8d3cc8ae814c0a98fa55256a4750b363"}
Jan 22 06:43:32 crc kubenswrapper[4933]: I0122 06:43:32.863983 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c7g62" event={"ID":"751a1180-87cc-4c8a-abde-4b822d8016f2","Type":"ContainerStarted","Data":"7ee9e85368745aac5b5c77b97fc8290257060eaf89640016679c7dcc048c8e6f"}
Jan 22 06:43:32 crc kubenswrapper[4933]: I0122 06:43:32.887022 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-c7g62" podStartSLOduration=2.411205511 podStartE2EDuration="3.886990981s" podCreationTimestamp="2026-01-22 06:43:29 +0000 UTC" firstStartedPulling="2026-01-22 06:43:30.82836174 +0000 UTC m=+3458.665487093" lastFinishedPulling="2026-01-22 06:43:32.30414717 +0000 UTC m=+3460.141272563" observedRunningTime="2026-01-22 06:43:32.885749751 +0000 UTC m=+3460.722875114" watchObservedRunningTime="2026-01-22 06:43:32.886990981 +0000 UTC m=+3460.724116334"
m=+3458.665487093" lastFinishedPulling="2026-01-22 06:43:32.30414717 +0000 UTC m=+3460.141272563" observedRunningTime="2026-01-22 06:43:32.885749751 +0000 UTC m=+3460.722875114" watchObservedRunningTime="2026-01-22 06:43:32.886990981 +0000 UTC m=+3460.724116334" Jan 22 06:43:39 crc kubenswrapper[4933]: I0122 06:43:39.959352 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-c7g62" Jan 22 06:43:39 crc kubenswrapper[4933]: I0122 06:43:39.959720 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-c7g62" Jan 22 06:43:40 crc kubenswrapper[4933]: I0122 06:43:40.016041 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-c7g62" Jan 22 06:43:40 crc kubenswrapper[4933]: I0122 06:43:40.970936 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-c7g62" Jan 22 06:43:41 crc kubenswrapper[4933]: I0122 06:43:41.027173 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-c7g62"] Jan 22 06:43:42 crc kubenswrapper[4933]: I0122 06:43:42.940445 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-c7g62" podUID="751a1180-87cc-4c8a-abde-4b822d8016f2" containerName="registry-server" containerID="cri-o://7ee9e85368745aac5b5c77b97fc8290257060eaf89640016679c7dcc048c8e6f" gracePeriod=2 Jan 22 06:43:43 crc kubenswrapper[4933]: I0122 06:43:43.884412 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-c7g62" Jan 22 06:43:43 crc kubenswrapper[4933]: I0122 06:43:43.949200 4933 generic.go:334] "Generic (PLEG): container finished" podID="751a1180-87cc-4c8a-abde-4b822d8016f2" containerID="7ee9e85368745aac5b5c77b97fc8290257060eaf89640016679c7dcc048c8e6f" exitCode=0 Jan 22 06:43:43 crc kubenswrapper[4933]: I0122 06:43:43.949282 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c7g62" event={"ID":"751a1180-87cc-4c8a-abde-4b822d8016f2","Type":"ContainerDied","Data":"7ee9e85368745aac5b5c77b97fc8290257060eaf89640016679c7dcc048c8e6f"} Jan 22 06:43:43 crc kubenswrapper[4933]: I0122 06:43:43.949296 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-c7g62" Jan 22 06:43:43 crc kubenswrapper[4933]: I0122 06:43:43.949324 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c7g62" event={"ID":"751a1180-87cc-4c8a-abde-4b822d8016f2","Type":"ContainerDied","Data":"0b70eb0e59f07ff5eb3a48d2a738bc1b1c97c664c08f23390344daa38c81074e"} Jan 22 06:43:43 crc kubenswrapper[4933]: I0122 06:43:43.949353 4933 scope.go:117] "RemoveContainer" containerID="7ee9e85368745aac5b5c77b97fc8290257060eaf89640016679c7dcc048c8e6f" Jan 22 06:43:43 crc kubenswrapper[4933]: I0122 06:43:43.988988 4933 scope.go:117] "RemoveContainer" containerID="fdfddddde22036ffb9f50018beec71fe8d3cc8ae814c0a98fa55256a4750b363" Jan 22 06:43:44 crc kubenswrapper[4933]: I0122 06:43:44.049135 4933 scope.go:117] "RemoveContainer" containerID="fae8ef10a28e0696cb316631fa019f7af6f538b3219301e90fb19dadf4071a9f" Jan 22 06:43:44 crc kubenswrapper[4933]: I0122 06:43:44.064148 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/751a1180-87cc-4c8a-abde-4b822d8016f2-utilities\") pod \"751a1180-87cc-4c8a-abde-4b822d8016f2\" (UID: \"751a1180-87cc-4c8a-abde-4b822d8016f2\") " Jan 22 06:43:44 crc kubenswrapper[4933]: I0122 06:43:44.064195 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wczmm\" (UniqueName: \"kubernetes.io/projected/751a1180-87cc-4c8a-abde-4b822d8016f2-kube-api-access-wczmm\") pod \"751a1180-87cc-4c8a-abde-4b822d8016f2\" (UID: \"751a1180-87cc-4c8a-abde-4b822d8016f2\") " Jan 22 06:43:44 crc kubenswrapper[4933]: I0122 06:43:44.064237 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/751a1180-87cc-4c8a-abde-4b822d8016f2-catalog-content\") pod \"751a1180-87cc-4c8a-abde-4b822d8016f2\" (UID: \"751a1180-87cc-4c8a-abde-4b822d8016f2\") " Jan 22 06:43:44 crc kubenswrapper[4933]: I0122 06:43:44.065438 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/751a1180-87cc-4c8a-abde-4b822d8016f2-utilities" (OuterVolumeSpecName: "utilities") pod "751a1180-87cc-4c8a-abde-4b822d8016f2" (UID: "751a1180-87cc-4c8a-abde-4b822d8016f2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:43:44 crc kubenswrapper[4933]: I0122 06:43:44.075575 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/751a1180-87cc-4c8a-abde-4b822d8016f2-kube-api-access-wczmm" (OuterVolumeSpecName: "kube-api-access-wczmm") pod "751a1180-87cc-4c8a-abde-4b822d8016f2" (UID: "751a1180-87cc-4c8a-abde-4b822d8016f2"). InnerVolumeSpecName "kube-api-access-wczmm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:43:44 crc kubenswrapper[4933]: I0122 06:43:44.075748 4933 scope.go:117] "RemoveContainer" containerID="7ee9e85368745aac5b5c77b97fc8290257060eaf89640016679c7dcc048c8e6f" Jan 22 06:43:44 crc kubenswrapper[4933]: E0122 06:43:44.076378 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7ee9e85368745aac5b5c77b97fc8290257060eaf89640016679c7dcc048c8e6f\": container with ID starting with 7ee9e85368745aac5b5c77b97fc8290257060eaf89640016679c7dcc048c8e6f not found: ID does not exist" containerID="7ee9e85368745aac5b5c77b97fc8290257060eaf89640016679c7dcc048c8e6f" Jan 22 06:43:44 crc kubenswrapper[4933]: I0122 06:43:44.076480 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7ee9e85368745aac5b5c77b97fc8290257060eaf89640016679c7dcc048c8e6f"} err="failed to get container status \"7ee9e85368745aac5b5c77b97fc8290257060eaf89640016679c7dcc048c8e6f\": rpc error: code = NotFound desc = could not find container \"7ee9e85368745aac5b5c77b97fc8290257060eaf89640016679c7dcc048c8e6f\": container with ID starting with 7ee9e85368745aac5b5c77b97fc8290257060eaf89640016679c7dcc048c8e6f not found: ID does not exist" Jan 22 06:43:44 crc kubenswrapper[4933]: I0122 06:43:44.076569 4933 scope.go:117] "RemoveContainer" containerID="fdfddddde22036ffb9f50018beec71fe8d3cc8ae814c0a98fa55256a4750b363" Jan 22 06:43:44 crc kubenswrapper[4933]: E0122 06:43:44.076988 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fdfddddde22036ffb9f50018beec71fe8d3cc8ae814c0a98fa55256a4750b363\": container with ID starting with fdfddddde22036ffb9f50018beec71fe8d3cc8ae814c0a98fa55256a4750b363 not found: ID does not exist" containerID="fdfddddde22036ffb9f50018beec71fe8d3cc8ae814c0a98fa55256a4750b363" Jan 22 06:43:44 crc kubenswrapper[4933]: I0122 06:43:44.077056 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fdfddddde22036ffb9f50018beec71fe8d3cc8ae814c0a98fa55256a4750b363"} err="failed to get container status \"fdfddddde22036ffb9f50018beec71fe8d3cc8ae814c0a98fa55256a4750b363\": rpc error: code = NotFound desc = could not find container \"fdfddddde22036ffb9f50018beec71fe8d3cc8ae814c0a98fa55256a4750b363\": container with ID starting with fdfddddde22036ffb9f50018beec71fe8d3cc8ae814c0a98fa55256a4750b363 not found: ID does not exist" Jan 22 06:43:44 crc kubenswrapper[4933]: I0122 06:43:44.077122 4933 scope.go:117] "RemoveContainer" containerID="fae8ef10a28e0696cb316631fa019f7af6f538b3219301e90fb19dadf4071a9f" Jan 22 06:43:44 crc kubenswrapper[4933]: E0122 06:43:44.077563 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fae8ef10a28e0696cb316631fa019f7af6f538b3219301e90fb19dadf4071a9f\": container with ID starting with fae8ef10a28e0696cb316631fa019f7af6f538b3219301e90fb19dadf4071a9f not found: ID does not exist" containerID="fae8ef10a28e0696cb316631fa019f7af6f538b3219301e90fb19dadf4071a9f" Jan 22 06:43:44 crc kubenswrapper[4933]: I0122 06:43:44.077674 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fae8ef10a28e0696cb316631fa019f7af6f538b3219301e90fb19dadf4071a9f"} err="failed to get container status \"fae8ef10a28e0696cb316631fa019f7af6f538b3219301e90fb19dadf4071a9f\": rpc error: code = NotFound desc = could not 
find container \"fae8ef10a28e0696cb316631fa019f7af6f538b3219301e90fb19dadf4071a9f\": container with ID starting with fae8ef10a28e0696cb316631fa019f7af6f538b3219301e90fb19dadf4071a9f not found: ID does not exist" Jan 22 06:43:44 crc kubenswrapper[4933]: I0122 06:43:44.141784 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/751a1180-87cc-4c8a-abde-4b822d8016f2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "751a1180-87cc-4c8a-abde-4b822d8016f2" (UID: "751a1180-87cc-4c8a-abde-4b822d8016f2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:43:44 crc kubenswrapper[4933]: I0122 06:43:44.165448 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/751a1180-87cc-4c8a-abde-4b822d8016f2-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 06:43:44 crc kubenswrapper[4933]: I0122 06:43:44.165489 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wczmm\" (UniqueName: \"kubernetes.io/projected/751a1180-87cc-4c8a-abde-4b822d8016f2-kube-api-access-wczmm\") on node \"crc\" DevicePath \"\"" Jan 22 06:43:44 crc kubenswrapper[4933]: I0122 06:43:44.165508 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/751a1180-87cc-4c8a-abde-4b822d8016f2-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 06:43:44 crc kubenswrapper[4933]: I0122 06:43:44.288629 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-c7g62"] Jan 22 06:43:44 crc kubenswrapper[4933]: I0122 06:43:44.295175 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-c7g62"] Jan 22 06:43:44 crc kubenswrapper[4933]: I0122 06:43:44.508897 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="751a1180-87cc-4c8a-abde-4b822d8016f2" path="/var/lib/kubelet/pods/751a1180-87cc-4c8a-abde-4b822d8016f2/volumes" Jan 22 06:44:00 crc kubenswrapper[4933]: I0122 06:44:00.586899 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-n4qlq"] Jan 22 06:44:00 crc kubenswrapper[4933]: E0122 06:44:00.589458 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="751a1180-87cc-4c8a-abde-4b822d8016f2" containerName="extract-content" Jan 22 06:44:00 crc kubenswrapper[4933]: I0122 06:44:00.589511 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="751a1180-87cc-4c8a-abde-4b822d8016f2" containerName="extract-content" Jan 22 06:44:00 crc kubenswrapper[4933]: E0122 06:44:00.589545 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="751a1180-87cc-4c8a-abde-4b822d8016f2" containerName="registry-server" Jan 22 06:44:00 crc kubenswrapper[4933]: I0122 06:44:00.589565 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="751a1180-87cc-4c8a-abde-4b822d8016f2" containerName="registry-server" Jan 22 06:44:00 crc kubenswrapper[4933]: E0122 06:44:00.589592 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="751a1180-87cc-4c8a-abde-4b822d8016f2" containerName="extract-utilities" Jan 22 06:44:00 crc kubenswrapper[4933]: I0122 06:44:00.589610 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="751a1180-87cc-4c8a-abde-4b822d8016f2" containerName="extract-utilities" Jan 22 06:44:00 crc kubenswrapper[4933]: I0122 06:44:00.590067 4933 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="751a1180-87cc-4c8a-abde-4b822d8016f2" containerName="registry-server" Jan 22 06:44:00 crc kubenswrapper[4933]: I0122 06:44:00.592393 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-n4qlq" Jan 22 06:44:00 crc kubenswrapper[4933]: I0122 06:44:00.603322 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-n4qlq"] Jan 22 06:44:00 crc kubenswrapper[4933]: I0122 06:44:00.765829 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52ddc498-f3be-412b-91d2-645e1f4b4835-utilities\") pod \"certified-operators-n4qlq\" (UID: \"52ddc498-f3be-412b-91d2-645e1f4b4835\") " pod="openshift-marketplace/certified-operators-n4qlq" Jan 22 06:44:00 crc kubenswrapper[4933]: I0122 06:44:00.765920 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dp9dp\" (UniqueName: \"kubernetes.io/projected/52ddc498-f3be-412b-91d2-645e1f4b4835-kube-api-access-dp9dp\") pod \"certified-operators-n4qlq\" (UID: \"52ddc498-f3be-412b-91d2-645e1f4b4835\") " pod="openshift-marketplace/certified-operators-n4qlq" Jan 22 06:44:00 crc kubenswrapper[4933]: I0122 06:44:00.766017 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52ddc498-f3be-412b-91d2-645e1f4b4835-catalog-content\") pod \"certified-operators-n4qlq\" (UID: \"52ddc498-f3be-412b-91d2-645e1f4b4835\") " pod="openshift-marketplace/certified-operators-n4qlq" Jan 22 06:44:00 crc kubenswrapper[4933]: I0122 06:44:00.867187 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52ddc498-f3be-412b-91d2-645e1f4b4835-utilities\") pod \"certified-operators-n4qlq\" (UID: \"52ddc498-f3be-412b-91d2-645e1f4b4835\") " pod="openshift-marketplace/certified-operators-n4qlq" Jan 22 06:44:00 crc kubenswrapper[4933]: I0122 06:44:00.867252 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dp9dp\" (UniqueName: \"kubernetes.io/projected/52ddc498-f3be-412b-91d2-645e1f4b4835-kube-api-access-dp9dp\") pod \"certified-operators-n4qlq\" (UID: \"52ddc498-f3be-412b-91d2-645e1f4b4835\") " pod="openshift-marketplace/certified-operators-n4qlq" Jan 22 06:44:00 crc kubenswrapper[4933]: I0122 06:44:00.867288 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52ddc498-f3be-412b-91d2-645e1f4b4835-catalog-content\") pod \"certified-operators-n4qlq\" (UID: \"52ddc498-f3be-412b-91d2-645e1f4b4835\") " pod="openshift-marketplace/certified-operators-n4qlq" Jan 22 06:44:00 crc kubenswrapper[4933]: I0122 06:44:00.867833 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52ddc498-f3be-412b-91d2-645e1f4b4835-utilities\") pod \"certified-operators-n4qlq\" (UID: \"52ddc498-f3be-412b-91d2-645e1f4b4835\") " pod="openshift-marketplace/certified-operators-n4qlq" Jan 22 06:44:00 crc kubenswrapper[4933]: I0122 06:44:00.867885 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52ddc498-f3be-412b-91d2-645e1f4b4835-catalog-content\") pod \"certified-operators-n4qlq\" (UID: 
\"52ddc498-f3be-412b-91d2-645e1f4b4835\") " pod="openshift-marketplace/certified-operators-n4qlq" Jan 22 06:44:00 crc kubenswrapper[4933]: I0122 06:44:00.895155 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dp9dp\" (UniqueName: \"kubernetes.io/projected/52ddc498-f3be-412b-91d2-645e1f4b4835-kube-api-access-dp9dp\") pod \"certified-operators-n4qlq\" (UID: \"52ddc498-f3be-412b-91d2-645e1f4b4835\") " pod="openshift-marketplace/certified-operators-n4qlq" Jan 22 06:44:00 crc kubenswrapper[4933]: I0122 06:44:00.932617 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-n4qlq" Jan 22 06:44:01 crc kubenswrapper[4933]: I0122 06:44:01.432051 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-n4qlq"] Jan 22 06:44:01 crc kubenswrapper[4933]: W0122 06:44:01.437008 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod52ddc498_f3be_412b_91d2_645e1f4b4835.slice/crio-2f0997825e684d33736543311d5538d15a8bd04fbfa569c7fa21bbe0222018f4 WatchSource:0}: Error finding container 2f0997825e684d33736543311d5538d15a8bd04fbfa569c7fa21bbe0222018f4: Status 404 returned error can't find the container with id 2f0997825e684d33736543311d5538d15a8bd04fbfa569c7fa21bbe0222018f4 Jan 22 06:44:01 crc kubenswrapper[4933]: I0122 06:44:01.967011 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-d62d5"] Jan 22 06:44:01 crc kubenswrapper[4933]: I0122 06:44:01.969045 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d62d5" Jan 22 06:44:01 crc kubenswrapper[4933]: I0122 06:44:01.990124 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-d62d5"] Jan 22 06:44:02 crc kubenswrapper[4933]: I0122 06:44:02.107796 4933 generic.go:334] "Generic (PLEG): container finished" podID="52ddc498-f3be-412b-91d2-645e1f4b4835" containerID="e5bd783623a8bf72fc1c56facc067673d23c95784388f06ebd019b4b6b39a88d" exitCode=0 Jan 22 06:44:02 crc kubenswrapper[4933]: I0122 06:44:02.107849 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n4qlq" event={"ID":"52ddc498-f3be-412b-91d2-645e1f4b4835","Type":"ContainerDied","Data":"e5bd783623a8bf72fc1c56facc067673d23c95784388f06ebd019b4b6b39a88d"} Jan 22 06:44:02 crc kubenswrapper[4933]: I0122 06:44:02.107877 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n4qlq" event={"ID":"52ddc498-f3be-412b-91d2-645e1f4b4835","Type":"ContainerStarted","Data":"2f0997825e684d33736543311d5538d15a8bd04fbfa569c7fa21bbe0222018f4"} Jan 22 06:44:02 crc kubenswrapper[4933]: I0122 06:44:02.114789 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/075cf213-0a91-4352-b3d5-9073e6b5ccba-catalog-content\") pod \"redhat-marketplace-d62d5\" (UID: \"075cf213-0a91-4352-b3d5-9073e6b5ccba\") " pod="openshift-marketplace/redhat-marketplace-d62d5" Jan 22 06:44:02 crc kubenswrapper[4933]: I0122 06:44:02.115201 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/075cf213-0a91-4352-b3d5-9073e6b5ccba-utilities\") pod \"redhat-marketplace-d62d5\" 
(UID: \"075cf213-0a91-4352-b3d5-9073e6b5ccba\") " pod="openshift-marketplace/redhat-marketplace-d62d5" Jan 22 06:44:02 crc kubenswrapper[4933]: I0122 06:44:02.115247 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nw4bv\" (UniqueName: \"kubernetes.io/projected/075cf213-0a91-4352-b3d5-9073e6b5ccba-kube-api-access-nw4bv\") pod \"redhat-marketplace-d62d5\" (UID: \"075cf213-0a91-4352-b3d5-9073e6b5ccba\") " pod="openshift-marketplace/redhat-marketplace-d62d5" Jan 22 06:44:02 crc kubenswrapper[4933]: I0122 06:44:02.216124 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nw4bv\" (UniqueName: \"kubernetes.io/projected/075cf213-0a91-4352-b3d5-9073e6b5ccba-kube-api-access-nw4bv\") pod \"redhat-marketplace-d62d5\" (UID: \"075cf213-0a91-4352-b3d5-9073e6b5ccba\") " pod="openshift-marketplace/redhat-marketplace-d62d5" Jan 22 06:44:02 crc kubenswrapper[4933]: I0122 06:44:02.216181 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/075cf213-0a91-4352-b3d5-9073e6b5ccba-catalog-content\") pod \"redhat-marketplace-d62d5\" (UID: \"075cf213-0a91-4352-b3d5-9073e6b5ccba\") " pod="openshift-marketplace/redhat-marketplace-d62d5" Jan 22 06:44:02 crc kubenswrapper[4933]: I0122 06:44:02.216264 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/075cf213-0a91-4352-b3d5-9073e6b5ccba-utilities\") pod \"redhat-marketplace-d62d5\" (UID: \"075cf213-0a91-4352-b3d5-9073e6b5ccba\") " pod="openshift-marketplace/redhat-marketplace-d62d5" Jan 22 06:44:02 crc kubenswrapper[4933]: I0122 06:44:02.216740 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/075cf213-0a91-4352-b3d5-9073e6b5ccba-utilities\") pod \"redhat-marketplace-d62d5\" (UID: \"075cf213-0a91-4352-b3d5-9073e6b5ccba\") " pod="openshift-marketplace/redhat-marketplace-d62d5" Jan 22 06:44:02 crc kubenswrapper[4933]: I0122 06:44:02.216912 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/075cf213-0a91-4352-b3d5-9073e6b5ccba-catalog-content\") pod \"redhat-marketplace-d62d5\" (UID: \"075cf213-0a91-4352-b3d5-9073e6b5ccba\") " pod="openshift-marketplace/redhat-marketplace-d62d5" Jan 22 06:44:02 crc kubenswrapper[4933]: I0122 06:44:02.239852 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nw4bv\" (UniqueName: \"kubernetes.io/projected/075cf213-0a91-4352-b3d5-9073e6b5ccba-kube-api-access-nw4bv\") pod \"redhat-marketplace-d62d5\" (UID: \"075cf213-0a91-4352-b3d5-9073e6b5ccba\") " pod="openshift-marketplace/redhat-marketplace-d62d5" Jan 22 06:44:02 crc kubenswrapper[4933]: I0122 06:44:02.310691 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d62d5" Jan 22 06:44:02 crc kubenswrapper[4933]: I0122 06:44:02.557156 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-d62d5"] Jan 22 06:44:02 crc kubenswrapper[4933]: I0122 06:44:02.970214 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-qnsgx"] Jan 22 06:44:02 crc kubenswrapper[4933]: I0122 06:44:02.972269 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-qnsgx" Jan 22 06:44:02 crc kubenswrapper[4933]: I0122 06:44:02.978497 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qnsgx"] Jan 22 06:44:03 crc kubenswrapper[4933]: I0122 06:44:03.031987 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a182749-dece-407f-8fd0-2ce9fe079303-catalog-content\") pod \"redhat-operators-qnsgx\" (UID: \"2a182749-dece-407f-8fd0-2ce9fe079303\") " pod="openshift-marketplace/redhat-operators-qnsgx" Jan 22 06:44:03 crc kubenswrapper[4933]: I0122 06:44:03.032128 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vrvh5\" (UniqueName: \"kubernetes.io/projected/2a182749-dece-407f-8fd0-2ce9fe079303-kube-api-access-vrvh5\") pod \"redhat-operators-qnsgx\" (UID: \"2a182749-dece-407f-8fd0-2ce9fe079303\") " pod="openshift-marketplace/redhat-operators-qnsgx" Jan 22 06:44:03 crc kubenswrapper[4933]: I0122 06:44:03.032167 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a182749-dece-407f-8fd0-2ce9fe079303-utilities\") pod \"redhat-operators-qnsgx\" (UID: \"2a182749-dece-407f-8fd0-2ce9fe079303\") " pod="openshift-marketplace/redhat-operators-qnsgx" Jan 22 06:44:03 crc kubenswrapper[4933]: I0122 06:44:03.125442 4933 generic.go:334] "Generic (PLEG): container finished" podID="075cf213-0a91-4352-b3d5-9073e6b5ccba" containerID="a46bc2b2b2d9a080f47fefe1ccd992ced9d196c8a06b72d00f2ba5749c162e93" exitCode=0 Jan 22 06:44:03 crc kubenswrapper[4933]: I0122 06:44:03.125518 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d62d5" event={"ID":"075cf213-0a91-4352-b3d5-9073e6b5ccba","Type":"ContainerDied","Data":"a46bc2b2b2d9a080f47fefe1ccd992ced9d196c8a06b72d00f2ba5749c162e93"} Jan 22 06:44:03 crc kubenswrapper[4933]: I0122 06:44:03.125550 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d62d5" event={"ID":"075cf213-0a91-4352-b3d5-9073e6b5ccba","Type":"ContainerStarted","Data":"a2db2be85285ce62ff197115bb23291aaa90da0e0b2d3808e4c308436e3e4949"} Jan 22 06:44:03 crc kubenswrapper[4933]: I0122 06:44:03.133107 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a182749-dece-407f-8fd0-2ce9fe079303-catalog-content\") pod \"redhat-operators-qnsgx\" (UID: \"2a182749-dece-407f-8fd0-2ce9fe079303\") " pod="openshift-marketplace/redhat-operators-qnsgx" Jan 22 06:44:03 crc kubenswrapper[4933]: I0122 06:44:03.133219 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vrvh5\" (UniqueName: \"kubernetes.io/projected/2a182749-dece-407f-8fd0-2ce9fe079303-kube-api-access-vrvh5\") pod \"redhat-operators-qnsgx\" (UID: \"2a182749-dece-407f-8fd0-2ce9fe079303\") " pod="openshift-marketplace/redhat-operators-qnsgx" Jan 22 06:44:03 crc kubenswrapper[4933]: I0122 06:44:03.133261 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a182749-dece-407f-8fd0-2ce9fe079303-utilities\") pod \"redhat-operators-qnsgx\" (UID: \"2a182749-dece-407f-8fd0-2ce9fe079303\") " pod="openshift-marketplace/redhat-operators-qnsgx" Jan 22 
06:44:03 crc kubenswrapper[4933]: I0122 06:44:03.133517 4933 generic.go:334] "Generic (PLEG): container finished" podID="52ddc498-f3be-412b-91d2-645e1f4b4835" containerID="b7d0092ff43d88862b3313cfed1572cc298aeb2ddccacfeaa7fcd14879fda77d" exitCode=0 Jan 22 06:44:03 crc kubenswrapper[4933]: I0122 06:44:03.133564 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n4qlq" event={"ID":"52ddc498-f3be-412b-91d2-645e1f4b4835","Type":"ContainerDied","Data":"b7d0092ff43d88862b3313cfed1572cc298aeb2ddccacfeaa7fcd14879fda77d"} Jan 22 06:44:03 crc kubenswrapper[4933]: I0122 06:44:03.133758 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a182749-dece-407f-8fd0-2ce9fe079303-utilities\") pod \"redhat-operators-qnsgx\" (UID: \"2a182749-dece-407f-8fd0-2ce9fe079303\") " pod="openshift-marketplace/redhat-operators-qnsgx" Jan 22 06:44:03 crc kubenswrapper[4933]: I0122 06:44:03.134179 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a182749-dece-407f-8fd0-2ce9fe079303-catalog-content\") pod \"redhat-operators-qnsgx\" (UID: \"2a182749-dece-407f-8fd0-2ce9fe079303\") " pod="openshift-marketplace/redhat-operators-qnsgx" Jan 22 06:44:03 crc kubenswrapper[4933]: I0122 06:44:03.170314 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vrvh5\" (UniqueName: \"kubernetes.io/projected/2a182749-dece-407f-8fd0-2ce9fe079303-kube-api-access-vrvh5\") pod \"redhat-operators-qnsgx\" (UID: \"2a182749-dece-407f-8fd0-2ce9fe079303\") " pod="openshift-marketplace/redhat-operators-qnsgx" Jan 22 06:44:03 crc kubenswrapper[4933]: I0122 06:44:03.294132 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-qnsgx" Jan 22 06:44:03 crc kubenswrapper[4933]: I0122 06:44:03.708701 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qnsgx"] Jan 22 06:44:03 crc kubenswrapper[4933]: W0122 06:44:03.723608 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2a182749_dece_407f_8fd0_2ce9fe079303.slice/crio-3304cbcdfd217275528a532953c396e25dc3ce853e29ab0c1789192d317dd740 WatchSource:0}: Error finding container 3304cbcdfd217275528a532953c396e25dc3ce853e29ab0c1789192d317dd740: Status 404 returned error can't find the container with id 3304cbcdfd217275528a532953c396e25dc3ce853e29ab0c1789192d317dd740 Jan 22 06:44:04 crc kubenswrapper[4933]: I0122 06:44:04.148671 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n4qlq" event={"ID":"52ddc498-f3be-412b-91d2-645e1f4b4835","Type":"ContainerStarted","Data":"3d40b018c01c9d16b31d199e1adb50777bdefe5fc9ff66ea732f0ba6e713a507"} Jan 22 06:44:04 crc kubenswrapper[4933]: I0122 06:44:04.150467 4933 generic.go:334] "Generic (PLEG): container finished" podID="075cf213-0a91-4352-b3d5-9073e6b5ccba" containerID="f6855d9f59e75bf1bc7b1365337d2c04e7329c987e9321ab7b356814402c9be3" exitCode=0 Jan 22 06:44:04 crc kubenswrapper[4933]: I0122 06:44:04.150528 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d62d5" event={"ID":"075cf213-0a91-4352-b3d5-9073e6b5ccba","Type":"ContainerDied","Data":"f6855d9f59e75bf1bc7b1365337d2c04e7329c987e9321ab7b356814402c9be3"} Jan 22 06:44:04 crc kubenswrapper[4933]: I0122 06:44:04.153708 4933 generic.go:334] "Generic (PLEG): container finished" podID="2a182749-dece-407f-8fd0-2ce9fe079303" containerID="b674556209247b36bb3affb06d8e8f02030518de4bb95f4253ead57c13c5eea3" exitCode=0 Jan 22 06:44:04 crc kubenswrapper[4933]: I0122 06:44:04.153733 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qnsgx" event={"ID":"2a182749-dece-407f-8fd0-2ce9fe079303","Type":"ContainerDied","Data":"b674556209247b36bb3affb06d8e8f02030518de4bb95f4253ead57c13c5eea3"} Jan 22 06:44:04 crc kubenswrapper[4933]: I0122 06:44:04.153751 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qnsgx" event={"ID":"2a182749-dece-407f-8fd0-2ce9fe079303","Type":"ContainerStarted","Data":"3304cbcdfd217275528a532953c396e25dc3ce853e29ab0c1789192d317dd740"} Jan 22 06:44:04 crc kubenswrapper[4933]: I0122 06:44:04.172956 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-n4qlq" podStartSLOduration=2.750592647 podStartE2EDuration="4.172939902s" podCreationTimestamp="2026-01-22 06:44:00 +0000 UTC" firstStartedPulling="2026-01-22 06:44:02.10916438 +0000 UTC m=+3489.946289753" lastFinishedPulling="2026-01-22 06:44:03.531511655 +0000 UTC m=+3491.368637008" observedRunningTime="2026-01-22 06:44:04.164577564 +0000 UTC m=+3492.001702927" watchObservedRunningTime="2026-01-22 06:44:04.172939902 +0000 UTC m=+3492.010065255" Jan 22 06:44:05 crc kubenswrapper[4933]: I0122 06:44:05.165956 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qnsgx" event={"ID":"2a182749-dece-407f-8fd0-2ce9fe079303","Type":"ContainerStarted","Data":"69773cd35bd2cb7ed85a7097f9d516b69b9586d48e115a657b4a350fcb10caaa"} Jan 22 06:44:05 crc 
kubenswrapper[4933]: I0122 06:44:05.170005 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d62d5" event={"ID":"075cf213-0a91-4352-b3d5-9073e6b5ccba","Type":"ContainerStarted","Data":"0ef4c04eb1f9234d2197b7260bdc338bfa97e9a1436eb00a830188941652fc5a"} Jan 22 06:44:05 crc kubenswrapper[4933]: I0122 06:44:05.220352 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-d62d5" podStartSLOduration=2.6627776 podStartE2EDuration="4.220331454s" podCreationTimestamp="2026-01-22 06:44:01 +0000 UTC" firstStartedPulling="2026-01-22 06:44:03.129128753 +0000 UTC m=+3490.966254106" lastFinishedPulling="2026-01-22 06:44:04.686682607 +0000 UTC m=+3492.523807960" observedRunningTime="2026-01-22 06:44:05.214055695 +0000 UTC m=+3493.051181068" watchObservedRunningTime="2026-01-22 06:44:05.220331454 +0000 UTC m=+3493.057456807" Jan 22 06:44:06 crc kubenswrapper[4933]: I0122 06:44:06.181206 4933 generic.go:334] "Generic (PLEG): container finished" podID="2a182749-dece-407f-8fd0-2ce9fe079303" containerID="69773cd35bd2cb7ed85a7097f9d516b69b9586d48e115a657b4a350fcb10caaa" exitCode=0 Jan 22 06:44:06 crc kubenswrapper[4933]: I0122 06:44:06.181303 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qnsgx" event={"ID":"2a182749-dece-407f-8fd0-2ce9fe079303","Type":"ContainerDied","Data":"69773cd35bd2cb7ed85a7097f9d516b69b9586d48e115a657b4a350fcb10caaa"} Jan 22 06:44:07 crc kubenswrapper[4933]: I0122 06:44:07.192866 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qnsgx" event={"ID":"2a182749-dece-407f-8fd0-2ce9fe079303","Type":"ContainerStarted","Data":"6a6ef3c202a177b5a5c5a758ebf2a1c8ba0a2d87df00e3cbe4d5c4a23afca80b"} Jan 22 06:44:07 crc kubenswrapper[4933]: I0122 06:44:07.217649 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-qnsgx" podStartSLOduration=2.8093587700000002 podStartE2EDuration="5.217628792s" podCreationTimestamp="2026-01-22 06:44:02 +0000 UTC" firstStartedPulling="2026-01-22 06:44:04.154860504 +0000 UTC m=+3491.991985857" lastFinishedPulling="2026-01-22 06:44:06.563130516 +0000 UTC m=+3494.400255879" observedRunningTime="2026-01-22 06:44:07.215508712 +0000 UTC m=+3495.052634075" watchObservedRunningTime="2026-01-22 06:44:07.217628792 +0000 UTC m=+3495.054754155" Jan 22 06:44:10 crc kubenswrapper[4933]: I0122 06:44:10.933336 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-n4qlq" Jan 22 06:44:10 crc kubenswrapper[4933]: I0122 06:44:10.933595 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-n4qlq" Jan 22 06:44:10 crc kubenswrapper[4933]: I0122 06:44:10.989429 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-n4qlq" Jan 22 06:44:11 crc kubenswrapper[4933]: I0122 06:44:11.289360 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-n4qlq" Jan 22 06:44:11 crc kubenswrapper[4933]: I0122 06:44:11.951952 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-n4qlq"] Jan 22 06:44:12 crc kubenswrapper[4933]: I0122 06:44:12.311033 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/redhat-marketplace-d62d5" Jan 22 06:44:12 crc kubenswrapper[4933]: I0122 06:44:12.311922 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-d62d5" Jan 22 06:44:12 crc kubenswrapper[4933]: I0122 06:44:12.360955 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-d62d5" Jan 22 06:44:13 crc kubenswrapper[4933]: I0122 06:44:13.242540 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-n4qlq" podUID="52ddc498-f3be-412b-91d2-645e1f4b4835" containerName="registry-server" containerID="cri-o://3d40b018c01c9d16b31d199e1adb50777bdefe5fc9ff66ea732f0ba6e713a507" gracePeriod=2 Jan 22 06:44:13 crc kubenswrapper[4933]: I0122 06:44:13.296532 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-qnsgx" Jan 22 06:44:13 crc kubenswrapper[4933]: I0122 06:44:13.296713 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-qnsgx" Jan 22 06:44:13 crc kubenswrapper[4933]: I0122 06:44:13.300591 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-d62d5" Jan 22 06:44:13 crc kubenswrapper[4933]: I0122 06:44:13.357023 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-qnsgx" Jan 22 06:44:13 crc kubenswrapper[4933]: I0122 06:44:13.756185 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-d62d5"] Jan 22 06:44:14 crc kubenswrapper[4933]: I0122 06:44:14.298609 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-qnsgx" Jan 22 06:44:15 crc kubenswrapper[4933]: I0122 06:44:15.158654 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qnsgx"] Jan 22 06:44:15 crc kubenswrapper[4933]: I0122 06:44:15.264995 4933 generic.go:334] "Generic (PLEG): container finished" podID="52ddc498-f3be-412b-91d2-645e1f4b4835" containerID="3d40b018c01c9d16b31d199e1adb50777bdefe5fc9ff66ea732f0ba6e713a507" exitCode=0 Jan 22 06:44:15 crc kubenswrapper[4933]: I0122 06:44:15.265155 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n4qlq" event={"ID":"52ddc498-f3be-412b-91d2-645e1f4b4835","Type":"ContainerDied","Data":"3d40b018c01c9d16b31d199e1adb50777bdefe5fc9ff66ea732f0ba6e713a507"} Jan 22 06:44:15 crc kubenswrapper[4933]: I0122 06:44:15.265328 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-d62d5" podUID="075cf213-0a91-4352-b3d5-9073e6b5ccba" containerName="registry-server" containerID="cri-o://0ef4c04eb1f9234d2197b7260bdc338bfa97e9a1436eb00a830188941652fc5a" gracePeriod=2 Jan 22 06:44:15 crc kubenswrapper[4933]: I0122 06:44:15.530085 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-n4qlq" Jan 22 06:44:15 crc kubenswrapper[4933]: I0122 06:44:15.541185 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52ddc498-f3be-412b-91d2-645e1f4b4835-catalog-content\") pod \"52ddc498-f3be-412b-91d2-645e1f4b4835\" (UID: \"52ddc498-f3be-412b-91d2-645e1f4b4835\") " Jan 22 06:44:15 crc kubenswrapper[4933]: I0122 06:44:15.541340 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52ddc498-f3be-412b-91d2-645e1f4b4835-utilities\") pod \"52ddc498-f3be-412b-91d2-645e1f4b4835\" (UID: \"52ddc498-f3be-412b-91d2-645e1f4b4835\") " Jan 22 06:44:15 crc kubenswrapper[4933]: I0122 06:44:15.541415 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dp9dp\" (UniqueName: \"kubernetes.io/projected/52ddc498-f3be-412b-91d2-645e1f4b4835-kube-api-access-dp9dp\") pod \"52ddc498-f3be-412b-91d2-645e1f4b4835\" (UID: \"52ddc498-f3be-412b-91d2-645e1f4b4835\") " Jan 22 06:44:15 crc kubenswrapper[4933]: I0122 06:44:15.543820 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/52ddc498-f3be-412b-91d2-645e1f4b4835-utilities" (OuterVolumeSpecName: "utilities") pod "52ddc498-f3be-412b-91d2-645e1f4b4835" (UID: "52ddc498-f3be-412b-91d2-645e1f4b4835"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:44:15 crc kubenswrapper[4933]: I0122 06:44:15.552323 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52ddc498-f3be-412b-91d2-645e1f4b4835-kube-api-access-dp9dp" (OuterVolumeSpecName: "kube-api-access-dp9dp") pod "52ddc498-f3be-412b-91d2-645e1f4b4835" (UID: "52ddc498-f3be-412b-91d2-645e1f4b4835"). InnerVolumeSpecName "kube-api-access-dp9dp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:44:15 crc kubenswrapper[4933]: I0122 06:44:15.603474 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/52ddc498-f3be-412b-91d2-645e1f4b4835-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "52ddc498-f3be-412b-91d2-645e1f4b4835" (UID: "52ddc498-f3be-412b-91d2-645e1f4b4835"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:44:15 crc kubenswrapper[4933]: I0122 06:44:15.637560 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d62d5" Jan 22 06:44:15 crc kubenswrapper[4933]: I0122 06:44:15.643240 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dp9dp\" (UniqueName: \"kubernetes.io/projected/52ddc498-f3be-412b-91d2-645e1f4b4835-kube-api-access-dp9dp\") on node \"crc\" DevicePath \"\"" Jan 22 06:44:15 crc kubenswrapper[4933]: I0122 06:44:15.643272 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52ddc498-f3be-412b-91d2-645e1f4b4835-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 06:44:15 crc kubenswrapper[4933]: I0122 06:44:15.643281 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52ddc498-f3be-412b-91d2-645e1f4b4835-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 06:44:15 crc kubenswrapper[4933]: I0122 06:44:15.744696 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nw4bv\" (UniqueName: \"kubernetes.io/projected/075cf213-0a91-4352-b3d5-9073e6b5ccba-kube-api-access-nw4bv\") pod \"075cf213-0a91-4352-b3d5-9073e6b5ccba\" (UID: \"075cf213-0a91-4352-b3d5-9073e6b5ccba\") " Jan 22 06:44:15 crc kubenswrapper[4933]: I0122 06:44:15.744942 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/075cf213-0a91-4352-b3d5-9073e6b5ccba-catalog-content\") pod \"075cf213-0a91-4352-b3d5-9073e6b5ccba\" (UID: \"075cf213-0a91-4352-b3d5-9073e6b5ccba\") " Jan 22 06:44:15 crc kubenswrapper[4933]: I0122 06:44:15.744989 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/075cf213-0a91-4352-b3d5-9073e6b5ccba-utilities\") pod \"075cf213-0a91-4352-b3d5-9073e6b5ccba\" (UID: \"075cf213-0a91-4352-b3d5-9073e6b5ccba\") " Jan 22 06:44:15 crc kubenswrapper[4933]: I0122 06:44:15.746257 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/075cf213-0a91-4352-b3d5-9073e6b5ccba-utilities" (OuterVolumeSpecName: "utilities") pod "075cf213-0a91-4352-b3d5-9073e6b5ccba" (UID: "075cf213-0a91-4352-b3d5-9073e6b5ccba"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:44:15 crc kubenswrapper[4933]: I0122 06:44:15.758254 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/075cf213-0a91-4352-b3d5-9073e6b5ccba-kube-api-access-nw4bv" (OuterVolumeSpecName: "kube-api-access-nw4bv") pod "075cf213-0a91-4352-b3d5-9073e6b5ccba" (UID: "075cf213-0a91-4352-b3d5-9073e6b5ccba"). InnerVolumeSpecName "kube-api-access-nw4bv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:44:15 crc kubenswrapper[4933]: I0122 06:44:15.787410 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/075cf213-0a91-4352-b3d5-9073e6b5ccba-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "075cf213-0a91-4352-b3d5-9073e6b5ccba" (UID: "075cf213-0a91-4352-b3d5-9073e6b5ccba"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:44:15 crc kubenswrapper[4933]: I0122 06:44:15.847344 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/075cf213-0a91-4352-b3d5-9073e6b5ccba-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 06:44:15 crc kubenswrapper[4933]: I0122 06:44:15.847416 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nw4bv\" (UniqueName: \"kubernetes.io/projected/075cf213-0a91-4352-b3d5-9073e6b5ccba-kube-api-access-nw4bv\") on node \"crc\" DevicePath \"\"" Jan 22 06:44:15 crc kubenswrapper[4933]: I0122 06:44:15.847449 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/075cf213-0a91-4352-b3d5-9073e6b5ccba-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 06:44:16 crc kubenswrapper[4933]: I0122 06:44:16.282283 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n4qlq" event={"ID":"52ddc498-f3be-412b-91d2-645e1f4b4835","Type":"ContainerDied","Data":"2f0997825e684d33736543311d5538d15a8bd04fbfa569c7fa21bbe0222018f4"} Jan 22 06:44:16 crc kubenswrapper[4933]: I0122 06:44:16.282340 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-n4qlq" Jan 22 06:44:16 crc kubenswrapper[4933]: I0122 06:44:16.282349 4933 scope.go:117] "RemoveContainer" containerID="3d40b018c01c9d16b31d199e1adb50777bdefe5fc9ff66ea732f0ba6e713a507" Jan 22 06:44:16 crc kubenswrapper[4933]: I0122 06:44:16.285569 4933 generic.go:334] "Generic (PLEG): container finished" podID="075cf213-0a91-4352-b3d5-9073e6b5ccba" containerID="0ef4c04eb1f9234d2197b7260bdc338bfa97e9a1436eb00a830188941652fc5a" exitCode=0 Jan 22 06:44:16 crc kubenswrapper[4933]: I0122 06:44:16.285830 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-qnsgx" podUID="2a182749-dece-407f-8fd0-2ce9fe079303" containerName="registry-server" containerID="cri-o://6a6ef3c202a177b5a5c5a758ebf2a1c8ba0a2d87df00e3cbe4d5c4a23afca80b" gracePeriod=2 Jan 22 06:44:16 crc kubenswrapper[4933]: I0122 06:44:16.286219 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d62d5" event={"ID":"075cf213-0a91-4352-b3d5-9073e6b5ccba","Type":"ContainerDied","Data":"0ef4c04eb1f9234d2197b7260bdc338bfa97e9a1436eb00a830188941652fc5a"} Jan 22 06:44:16 crc kubenswrapper[4933]: I0122 06:44:16.286281 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d62d5" Jan 22 06:44:16 crc kubenswrapper[4933]: I0122 06:44:16.286299 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d62d5" event={"ID":"075cf213-0a91-4352-b3d5-9073e6b5ccba","Type":"ContainerDied","Data":"a2db2be85285ce62ff197115bb23291aaa90da0e0b2d3808e4c308436e3e4949"} Jan 22 06:44:16 crc kubenswrapper[4933]: I0122 06:44:16.325387 4933 scope.go:117] "RemoveContainer" containerID="b7d0092ff43d88862b3313cfed1572cc298aeb2ddccacfeaa7fcd14879fda77d" Jan 22 06:44:16 crc kubenswrapper[4933]: I0122 06:44:16.348105 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-n4qlq"] Jan 22 06:44:16 crc kubenswrapper[4933]: I0122 06:44:16.358170 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-n4qlq"] Jan 22 06:44:16 crc kubenswrapper[4933]: I0122 06:44:16.364350 4933 scope.go:117] "RemoveContainer" containerID="e5bd783623a8bf72fc1c56facc067673d23c95784388f06ebd019b4b6b39a88d" Jan 22 06:44:16 crc kubenswrapper[4933]: I0122 06:44:16.371362 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-d62d5"] Jan 22 06:44:16 crc kubenswrapper[4933]: I0122 06:44:16.376467 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-d62d5"] Jan 22 06:44:16 crc kubenswrapper[4933]: I0122 06:44:16.388272 4933 scope.go:117] "RemoveContainer" containerID="0ef4c04eb1f9234d2197b7260bdc338bfa97e9a1436eb00a830188941652fc5a" Jan 22 06:44:16 crc kubenswrapper[4933]: I0122 06:44:16.432362 4933 scope.go:117] "RemoveContainer" containerID="f6855d9f59e75bf1bc7b1365337d2c04e7329c987e9321ab7b356814402c9be3" Jan 22 06:44:16 crc kubenswrapper[4933]: I0122 06:44:16.449795 4933 scope.go:117] "RemoveContainer" containerID="a46bc2b2b2d9a080f47fefe1ccd992ced9d196c8a06b72d00f2ba5749c162e93" Jan 22 06:44:16 crc kubenswrapper[4933]: I0122 06:44:16.503675 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="075cf213-0a91-4352-b3d5-9073e6b5ccba" path="/var/lib/kubelet/pods/075cf213-0a91-4352-b3d5-9073e6b5ccba/volumes" Jan 22 06:44:16 crc kubenswrapper[4933]: I0122 06:44:16.504801 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="52ddc498-f3be-412b-91d2-645e1f4b4835" path="/var/lib/kubelet/pods/52ddc498-f3be-412b-91d2-645e1f4b4835/volumes" Jan 22 06:44:16 crc kubenswrapper[4933]: I0122 06:44:16.840826 4933 scope.go:117] "RemoveContainer" containerID="0ef4c04eb1f9234d2197b7260bdc338bfa97e9a1436eb00a830188941652fc5a" Jan 22 06:44:16 crc kubenswrapper[4933]: E0122 06:44:16.841632 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0ef4c04eb1f9234d2197b7260bdc338bfa97e9a1436eb00a830188941652fc5a\": container with ID starting with 0ef4c04eb1f9234d2197b7260bdc338bfa97e9a1436eb00a830188941652fc5a not found: ID does not exist" containerID="0ef4c04eb1f9234d2197b7260bdc338bfa97e9a1436eb00a830188941652fc5a" Jan 22 06:44:16 crc kubenswrapper[4933]: I0122 06:44:16.841680 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ef4c04eb1f9234d2197b7260bdc338bfa97e9a1436eb00a830188941652fc5a"} err="failed to get container status \"0ef4c04eb1f9234d2197b7260bdc338bfa97e9a1436eb00a830188941652fc5a\": rpc error: code = NotFound desc = could not find container 
\"0ef4c04eb1f9234d2197b7260bdc338bfa97e9a1436eb00a830188941652fc5a\": container with ID starting with 0ef4c04eb1f9234d2197b7260bdc338bfa97e9a1436eb00a830188941652fc5a not found: ID does not exist" Jan 22 06:44:16 crc kubenswrapper[4933]: I0122 06:44:16.841709 4933 scope.go:117] "RemoveContainer" containerID="f6855d9f59e75bf1bc7b1365337d2c04e7329c987e9321ab7b356814402c9be3" Jan 22 06:44:16 crc kubenswrapper[4933]: E0122 06:44:16.842218 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f6855d9f59e75bf1bc7b1365337d2c04e7329c987e9321ab7b356814402c9be3\": container with ID starting with f6855d9f59e75bf1bc7b1365337d2c04e7329c987e9321ab7b356814402c9be3 not found: ID does not exist" containerID="f6855d9f59e75bf1bc7b1365337d2c04e7329c987e9321ab7b356814402c9be3" Jan 22 06:44:16 crc kubenswrapper[4933]: I0122 06:44:16.842248 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f6855d9f59e75bf1bc7b1365337d2c04e7329c987e9321ab7b356814402c9be3"} err="failed to get container status \"f6855d9f59e75bf1bc7b1365337d2c04e7329c987e9321ab7b356814402c9be3\": rpc error: code = NotFound desc = could not find container \"f6855d9f59e75bf1bc7b1365337d2c04e7329c987e9321ab7b356814402c9be3\": container with ID starting with f6855d9f59e75bf1bc7b1365337d2c04e7329c987e9321ab7b356814402c9be3 not found: ID does not exist" Jan 22 06:44:16 crc kubenswrapper[4933]: I0122 06:44:16.842264 4933 scope.go:117] "RemoveContainer" containerID="a46bc2b2b2d9a080f47fefe1ccd992ced9d196c8a06b72d00f2ba5749c162e93" Jan 22 06:44:16 crc kubenswrapper[4933]: E0122 06:44:16.842667 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a46bc2b2b2d9a080f47fefe1ccd992ced9d196c8a06b72d00f2ba5749c162e93\": container with ID starting with a46bc2b2b2d9a080f47fefe1ccd992ced9d196c8a06b72d00f2ba5749c162e93 not found: ID does not exist" containerID="a46bc2b2b2d9a080f47fefe1ccd992ced9d196c8a06b72d00f2ba5749c162e93" Jan 22 06:44:16 crc kubenswrapper[4933]: I0122 06:44:16.842707 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a46bc2b2b2d9a080f47fefe1ccd992ced9d196c8a06b72d00f2ba5749c162e93"} err="failed to get container status \"a46bc2b2b2d9a080f47fefe1ccd992ced9d196c8a06b72d00f2ba5749c162e93\": rpc error: code = NotFound desc = could not find container \"a46bc2b2b2d9a080f47fefe1ccd992ced9d196c8a06b72d00f2ba5749c162e93\": container with ID starting with a46bc2b2b2d9a080f47fefe1ccd992ced9d196c8a06b72d00f2ba5749c162e93 not found: ID does not exist" Jan 22 06:44:17 crc kubenswrapper[4933]: I0122 06:44:17.237472 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-qnsgx" Jan 22 06:44:17 crc kubenswrapper[4933]: I0122 06:44:17.270585 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vrvh5\" (UniqueName: \"kubernetes.io/projected/2a182749-dece-407f-8fd0-2ce9fe079303-kube-api-access-vrvh5\") pod \"2a182749-dece-407f-8fd0-2ce9fe079303\" (UID: \"2a182749-dece-407f-8fd0-2ce9fe079303\") " Jan 22 06:44:17 crc kubenswrapper[4933]: I0122 06:44:17.270691 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a182749-dece-407f-8fd0-2ce9fe079303-utilities\") pod \"2a182749-dece-407f-8fd0-2ce9fe079303\" (UID: \"2a182749-dece-407f-8fd0-2ce9fe079303\") " Jan 22 06:44:17 crc kubenswrapper[4933]: I0122 06:44:17.270735 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a182749-dece-407f-8fd0-2ce9fe079303-catalog-content\") pod \"2a182749-dece-407f-8fd0-2ce9fe079303\" (UID: \"2a182749-dece-407f-8fd0-2ce9fe079303\") " Jan 22 06:44:17 crc kubenswrapper[4933]: I0122 06:44:17.272634 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2a182749-dece-407f-8fd0-2ce9fe079303-utilities" (OuterVolumeSpecName: "utilities") pod "2a182749-dece-407f-8fd0-2ce9fe079303" (UID: "2a182749-dece-407f-8fd0-2ce9fe079303"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:44:17 crc kubenswrapper[4933]: I0122 06:44:17.275287 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a182749-dece-407f-8fd0-2ce9fe079303-kube-api-access-vrvh5" (OuterVolumeSpecName: "kube-api-access-vrvh5") pod "2a182749-dece-407f-8fd0-2ce9fe079303" (UID: "2a182749-dece-407f-8fd0-2ce9fe079303"). InnerVolumeSpecName "kube-api-access-vrvh5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:44:17 crc kubenswrapper[4933]: I0122 06:44:17.299427 4933 generic.go:334] "Generic (PLEG): container finished" podID="2a182749-dece-407f-8fd0-2ce9fe079303" containerID="6a6ef3c202a177b5a5c5a758ebf2a1c8ba0a2d87df00e3cbe4d5c4a23afca80b" exitCode=0 Jan 22 06:44:17 crc kubenswrapper[4933]: I0122 06:44:17.299472 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qnsgx" event={"ID":"2a182749-dece-407f-8fd0-2ce9fe079303","Type":"ContainerDied","Data":"6a6ef3c202a177b5a5c5a758ebf2a1c8ba0a2d87df00e3cbe4d5c4a23afca80b"} Jan 22 06:44:17 crc kubenswrapper[4933]: I0122 06:44:17.299502 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qnsgx" event={"ID":"2a182749-dece-407f-8fd0-2ce9fe079303","Type":"ContainerDied","Data":"3304cbcdfd217275528a532953c396e25dc3ce853e29ab0c1789192d317dd740"} Jan 22 06:44:17 crc kubenswrapper[4933]: I0122 06:44:17.299524 4933 scope.go:117] "RemoveContainer" containerID="6a6ef3c202a177b5a5c5a758ebf2a1c8ba0a2d87df00e3cbe4d5c4a23afca80b" Jan 22 06:44:17 crc kubenswrapper[4933]: I0122 06:44:17.299583 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-qnsgx" Jan 22 06:44:17 crc kubenswrapper[4933]: I0122 06:44:17.323227 4933 scope.go:117] "RemoveContainer" containerID="69773cd35bd2cb7ed85a7097f9d516b69b9586d48e115a657b4a350fcb10caaa" Jan 22 06:44:17 crc kubenswrapper[4933]: I0122 06:44:17.372871 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vrvh5\" (UniqueName: \"kubernetes.io/projected/2a182749-dece-407f-8fd0-2ce9fe079303-kube-api-access-vrvh5\") on node \"crc\" DevicePath \"\"" Jan 22 06:44:17 crc kubenswrapper[4933]: I0122 06:44:17.372926 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a182749-dece-407f-8fd0-2ce9fe079303-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 06:44:17 crc kubenswrapper[4933]: I0122 06:44:17.416737 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2a182749-dece-407f-8fd0-2ce9fe079303-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2a182749-dece-407f-8fd0-2ce9fe079303" (UID: "2a182749-dece-407f-8fd0-2ce9fe079303"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:44:17 crc kubenswrapper[4933]: I0122 06:44:17.474132 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a182749-dece-407f-8fd0-2ce9fe079303-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 06:44:17 crc kubenswrapper[4933]: I0122 06:44:17.661626 4933 scope.go:117] "RemoveContainer" containerID="b674556209247b36bb3affb06d8e8f02030518de4bb95f4253ead57c13c5eea3" Jan 22 06:44:17 crc kubenswrapper[4933]: I0122 06:44:17.684676 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qnsgx"] Jan 22 06:44:17 crc kubenswrapper[4933]: I0122 06:44:17.692125 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-qnsgx"] Jan 22 06:44:17 crc kubenswrapper[4933]: I0122 06:44:17.714032 4933 scope.go:117] "RemoveContainer" containerID="6a6ef3c202a177b5a5c5a758ebf2a1c8ba0a2d87df00e3cbe4d5c4a23afca80b" Jan 22 06:44:17 crc kubenswrapper[4933]: E0122 06:44:17.714674 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6a6ef3c202a177b5a5c5a758ebf2a1c8ba0a2d87df00e3cbe4d5c4a23afca80b\": container with ID starting with 6a6ef3c202a177b5a5c5a758ebf2a1c8ba0a2d87df00e3cbe4d5c4a23afca80b not found: ID does not exist" containerID="6a6ef3c202a177b5a5c5a758ebf2a1c8ba0a2d87df00e3cbe4d5c4a23afca80b" Jan 22 06:44:17 crc kubenswrapper[4933]: I0122 06:44:17.714777 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6a6ef3c202a177b5a5c5a758ebf2a1c8ba0a2d87df00e3cbe4d5c4a23afca80b"} err="failed to get container status \"6a6ef3c202a177b5a5c5a758ebf2a1c8ba0a2d87df00e3cbe4d5c4a23afca80b\": rpc error: code = NotFound desc = could not find container \"6a6ef3c202a177b5a5c5a758ebf2a1c8ba0a2d87df00e3cbe4d5c4a23afca80b\": container with ID starting with 6a6ef3c202a177b5a5c5a758ebf2a1c8ba0a2d87df00e3cbe4d5c4a23afca80b not found: ID does not exist" Jan 22 06:44:17 crc kubenswrapper[4933]: I0122 06:44:17.714850 4933 scope.go:117] "RemoveContainer" containerID="69773cd35bd2cb7ed85a7097f9d516b69b9586d48e115a657b4a350fcb10caaa" Jan 22 06:44:17 crc kubenswrapper[4933]: E0122 06:44:17.715463 4933 log.go:32] "ContainerStatus from runtime 
service failed" err="rpc error: code = NotFound desc = could not find container \"69773cd35bd2cb7ed85a7097f9d516b69b9586d48e115a657b4a350fcb10caaa\": container with ID starting with 69773cd35bd2cb7ed85a7097f9d516b69b9586d48e115a657b4a350fcb10caaa not found: ID does not exist" containerID="69773cd35bd2cb7ed85a7097f9d516b69b9586d48e115a657b4a350fcb10caaa" Jan 22 06:44:17 crc kubenswrapper[4933]: I0122 06:44:17.715521 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"69773cd35bd2cb7ed85a7097f9d516b69b9586d48e115a657b4a350fcb10caaa"} err="failed to get container status \"69773cd35bd2cb7ed85a7097f9d516b69b9586d48e115a657b4a350fcb10caaa\": rpc error: code = NotFound desc = could not find container \"69773cd35bd2cb7ed85a7097f9d516b69b9586d48e115a657b4a350fcb10caaa\": container with ID starting with 69773cd35bd2cb7ed85a7097f9d516b69b9586d48e115a657b4a350fcb10caaa not found: ID does not exist" Jan 22 06:44:17 crc kubenswrapper[4933]: I0122 06:44:17.715558 4933 scope.go:117] "RemoveContainer" containerID="b674556209247b36bb3affb06d8e8f02030518de4bb95f4253ead57c13c5eea3" Jan 22 06:44:17 crc kubenswrapper[4933]: E0122 06:44:17.715945 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b674556209247b36bb3affb06d8e8f02030518de4bb95f4253ead57c13c5eea3\": container with ID starting with b674556209247b36bb3affb06d8e8f02030518de4bb95f4253ead57c13c5eea3 not found: ID does not exist" containerID="b674556209247b36bb3affb06d8e8f02030518de4bb95f4253ead57c13c5eea3" Jan 22 06:44:17 crc kubenswrapper[4933]: I0122 06:44:17.716212 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b674556209247b36bb3affb06d8e8f02030518de4bb95f4253ead57c13c5eea3"} err="failed to get container status \"b674556209247b36bb3affb06d8e8f02030518de4bb95f4253ead57c13c5eea3\": rpc error: code = NotFound desc = could not find container \"b674556209247b36bb3affb06d8e8f02030518de4bb95f4253ead57c13c5eea3\": container with ID starting with b674556209247b36bb3affb06d8e8f02030518de4bb95f4253ead57c13c5eea3 not found: ID does not exist" Jan 22 06:44:17 crc kubenswrapper[4933]: E0122 06:44:17.789799 4933 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2a182749_dece_407f_8fd0_2ce9fe079303.slice\": RecentStats: unable to find data in memory cache]" Jan 22 06:44:18 crc kubenswrapper[4933]: I0122 06:44:18.508148 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2a182749-dece-407f-8fd0-2ce9fe079303" path="/var/lib/kubelet/pods/2a182749-dece-407f-8fd0-2ce9fe079303/volumes" Jan 22 06:45:00 crc kubenswrapper[4933]: I0122 06:45:00.188793 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484405-bvspt"] Jan 22 06:45:00 crc kubenswrapper[4933]: E0122 06:45:00.191116 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52ddc498-f3be-412b-91d2-645e1f4b4835" containerName="extract-utilities" Jan 22 06:45:00 crc kubenswrapper[4933]: I0122 06:45:00.191137 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="52ddc498-f3be-412b-91d2-645e1f4b4835" containerName="extract-utilities" Jan 22 06:45:00 crc kubenswrapper[4933]: E0122 06:45:00.191150 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="075cf213-0a91-4352-b3d5-9073e6b5ccba" 
containerName="registry-server" Jan 22 06:45:00 crc kubenswrapper[4933]: I0122 06:45:00.191157 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="075cf213-0a91-4352-b3d5-9073e6b5ccba" containerName="registry-server" Jan 22 06:45:00 crc kubenswrapper[4933]: E0122 06:45:00.191181 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a182749-dece-407f-8fd0-2ce9fe079303" containerName="extract-content" Jan 22 06:45:00 crc kubenswrapper[4933]: I0122 06:45:00.191190 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a182749-dece-407f-8fd0-2ce9fe079303" containerName="extract-content" Jan 22 06:45:00 crc kubenswrapper[4933]: E0122 06:45:00.191204 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="075cf213-0a91-4352-b3d5-9073e6b5ccba" containerName="extract-content" Jan 22 06:45:00 crc kubenswrapper[4933]: I0122 06:45:00.191211 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="075cf213-0a91-4352-b3d5-9073e6b5ccba" containerName="extract-content" Jan 22 06:45:00 crc kubenswrapper[4933]: E0122 06:45:00.191224 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a182749-dece-407f-8fd0-2ce9fe079303" containerName="extract-utilities" Jan 22 06:45:00 crc kubenswrapper[4933]: I0122 06:45:00.191231 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a182749-dece-407f-8fd0-2ce9fe079303" containerName="extract-utilities" Jan 22 06:45:00 crc kubenswrapper[4933]: E0122 06:45:00.191243 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a182749-dece-407f-8fd0-2ce9fe079303" containerName="registry-server" Jan 22 06:45:00 crc kubenswrapper[4933]: I0122 06:45:00.191250 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a182749-dece-407f-8fd0-2ce9fe079303" containerName="registry-server" Jan 22 06:45:00 crc kubenswrapper[4933]: E0122 06:45:00.191261 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52ddc498-f3be-412b-91d2-645e1f4b4835" containerName="registry-server" Jan 22 06:45:00 crc kubenswrapper[4933]: I0122 06:45:00.191269 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="52ddc498-f3be-412b-91d2-645e1f4b4835" containerName="registry-server" Jan 22 06:45:00 crc kubenswrapper[4933]: E0122 06:45:00.191280 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52ddc498-f3be-412b-91d2-645e1f4b4835" containerName="extract-content" Jan 22 06:45:00 crc kubenswrapper[4933]: I0122 06:45:00.191287 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="52ddc498-f3be-412b-91d2-645e1f4b4835" containerName="extract-content" Jan 22 06:45:00 crc kubenswrapper[4933]: E0122 06:45:00.191308 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="075cf213-0a91-4352-b3d5-9073e6b5ccba" containerName="extract-utilities" Jan 22 06:45:00 crc kubenswrapper[4933]: I0122 06:45:00.191315 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="075cf213-0a91-4352-b3d5-9073e6b5ccba" containerName="extract-utilities" Jan 22 06:45:00 crc kubenswrapper[4933]: I0122 06:45:00.191483 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a182749-dece-407f-8fd0-2ce9fe079303" containerName="registry-server" Jan 22 06:45:00 crc kubenswrapper[4933]: I0122 06:45:00.191500 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="52ddc498-f3be-412b-91d2-645e1f4b4835" containerName="registry-server" Jan 22 06:45:00 crc kubenswrapper[4933]: I0122 06:45:00.191512 4933 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="075cf213-0a91-4352-b3d5-9073e6b5ccba" containerName="registry-server" Jan 22 06:45:00 crc kubenswrapper[4933]: I0122 06:45:00.192095 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484405-bvspt" Jan 22 06:45:00 crc kubenswrapper[4933]: I0122 06:45:00.194691 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 22 06:45:00 crc kubenswrapper[4933]: I0122 06:45:00.198564 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 22 06:45:00 crc kubenswrapper[4933]: I0122 06:45:00.207301 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484405-bvspt"] Jan 22 06:45:00 crc kubenswrapper[4933]: I0122 06:45:00.292366 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8cc8f64f-7f52-4255-b849-b2bad1f4dd09-secret-volume\") pod \"collect-profiles-29484405-bvspt\" (UID: \"8cc8f64f-7f52-4255-b849-b2bad1f4dd09\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484405-bvspt" Jan 22 06:45:00 crc kubenswrapper[4933]: I0122 06:45:00.292459 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8cc8f64f-7f52-4255-b849-b2bad1f4dd09-config-volume\") pod \"collect-profiles-29484405-bvspt\" (UID: \"8cc8f64f-7f52-4255-b849-b2bad1f4dd09\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484405-bvspt" Jan 22 06:45:00 crc kubenswrapper[4933]: I0122 06:45:00.292488 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g8mv9\" (UniqueName: \"kubernetes.io/projected/8cc8f64f-7f52-4255-b849-b2bad1f4dd09-kube-api-access-g8mv9\") pod \"collect-profiles-29484405-bvspt\" (UID: \"8cc8f64f-7f52-4255-b849-b2bad1f4dd09\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484405-bvspt" Jan 22 06:45:00 crc kubenswrapper[4933]: I0122 06:45:00.393577 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8cc8f64f-7f52-4255-b849-b2bad1f4dd09-config-volume\") pod \"collect-profiles-29484405-bvspt\" (UID: \"8cc8f64f-7f52-4255-b849-b2bad1f4dd09\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484405-bvspt" Jan 22 06:45:00 crc kubenswrapper[4933]: I0122 06:45:00.393629 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g8mv9\" (UniqueName: \"kubernetes.io/projected/8cc8f64f-7f52-4255-b849-b2bad1f4dd09-kube-api-access-g8mv9\") pod \"collect-profiles-29484405-bvspt\" (UID: \"8cc8f64f-7f52-4255-b849-b2bad1f4dd09\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484405-bvspt" Jan 22 06:45:00 crc kubenswrapper[4933]: I0122 06:45:00.393687 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8cc8f64f-7f52-4255-b849-b2bad1f4dd09-secret-volume\") pod \"collect-profiles-29484405-bvspt\" (UID: \"8cc8f64f-7f52-4255-b849-b2bad1f4dd09\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484405-bvspt" Jan 22 06:45:00 crc kubenswrapper[4933]: I0122 06:45:00.395288 
4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8cc8f64f-7f52-4255-b849-b2bad1f4dd09-config-volume\") pod \"collect-profiles-29484405-bvspt\" (UID: \"8cc8f64f-7f52-4255-b849-b2bad1f4dd09\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484405-bvspt" Jan 22 06:45:00 crc kubenswrapper[4933]: I0122 06:45:00.400009 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8cc8f64f-7f52-4255-b849-b2bad1f4dd09-secret-volume\") pod \"collect-profiles-29484405-bvspt\" (UID: \"8cc8f64f-7f52-4255-b849-b2bad1f4dd09\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484405-bvspt" Jan 22 06:45:00 crc kubenswrapper[4933]: I0122 06:45:00.410943 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g8mv9\" (UniqueName: \"kubernetes.io/projected/8cc8f64f-7f52-4255-b849-b2bad1f4dd09-kube-api-access-g8mv9\") pod \"collect-profiles-29484405-bvspt\" (UID: \"8cc8f64f-7f52-4255-b849-b2bad1f4dd09\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484405-bvspt" Jan 22 06:45:00 crc kubenswrapper[4933]: I0122 06:45:00.512499 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484405-bvspt" Jan 22 06:45:00 crc kubenswrapper[4933]: I0122 06:45:00.792989 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484405-bvspt"] Jan 22 06:45:01 crc kubenswrapper[4933]: I0122 06:45:01.688786 4933 generic.go:334] "Generic (PLEG): container finished" podID="8cc8f64f-7f52-4255-b849-b2bad1f4dd09" containerID="d7881c8672504e8aabb87d5ab3f0a9d504b4f1eeff09bf0cd46c45e5d454eb11" exitCode=0 Jan 22 06:45:01 crc kubenswrapper[4933]: I0122 06:45:01.688831 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484405-bvspt" event={"ID":"8cc8f64f-7f52-4255-b849-b2bad1f4dd09","Type":"ContainerDied","Data":"d7881c8672504e8aabb87d5ab3f0a9d504b4f1eeff09bf0cd46c45e5d454eb11"} Jan 22 06:45:01 crc kubenswrapper[4933]: I0122 06:45:01.688861 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484405-bvspt" event={"ID":"8cc8f64f-7f52-4255-b849-b2bad1f4dd09","Type":"ContainerStarted","Data":"df0ace85984993e656235dda1a46df38e114285542ce5f981aeaeade0182860c"} Jan 22 06:45:02 crc kubenswrapper[4933]: I0122 06:45:02.993978 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484405-bvspt" Jan 22 06:45:03 crc kubenswrapper[4933]: I0122 06:45:03.135718 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g8mv9\" (UniqueName: \"kubernetes.io/projected/8cc8f64f-7f52-4255-b849-b2bad1f4dd09-kube-api-access-g8mv9\") pod \"8cc8f64f-7f52-4255-b849-b2bad1f4dd09\" (UID: \"8cc8f64f-7f52-4255-b849-b2bad1f4dd09\") " Jan 22 06:45:03 crc kubenswrapper[4933]: I0122 06:45:03.135881 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8cc8f64f-7f52-4255-b849-b2bad1f4dd09-secret-volume\") pod \"8cc8f64f-7f52-4255-b849-b2bad1f4dd09\" (UID: \"8cc8f64f-7f52-4255-b849-b2bad1f4dd09\") " Jan 22 06:45:03 crc kubenswrapper[4933]: I0122 06:45:03.135911 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8cc8f64f-7f52-4255-b849-b2bad1f4dd09-config-volume\") pod \"8cc8f64f-7f52-4255-b849-b2bad1f4dd09\" (UID: \"8cc8f64f-7f52-4255-b849-b2bad1f4dd09\") " Jan 22 06:45:03 crc kubenswrapper[4933]: I0122 06:45:03.136882 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cc8f64f-7f52-4255-b849-b2bad1f4dd09-config-volume" (OuterVolumeSpecName: "config-volume") pod "8cc8f64f-7f52-4255-b849-b2bad1f4dd09" (UID: "8cc8f64f-7f52-4255-b849-b2bad1f4dd09"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:45:03 crc kubenswrapper[4933]: I0122 06:45:03.141571 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cc8f64f-7f52-4255-b849-b2bad1f4dd09-kube-api-access-g8mv9" (OuterVolumeSpecName: "kube-api-access-g8mv9") pod "8cc8f64f-7f52-4255-b849-b2bad1f4dd09" (UID: "8cc8f64f-7f52-4255-b849-b2bad1f4dd09"). InnerVolumeSpecName "kube-api-access-g8mv9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:45:03 crc kubenswrapper[4933]: I0122 06:45:03.142641 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cc8f64f-7f52-4255-b849-b2bad1f4dd09-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "8cc8f64f-7f52-4255-b849-b2bad1f4dd09" (UID: "8cc8f64f-7f52-4255-b849-b2bad1f4dd09"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:45:03 crc kubenswrapper[4933]: I0122 06:45:03.237156 4933 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8cc8f64f-7f52-4255-b849-b2bad1f4dd09-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 22 06:45:03 crc kubenswrapper[4933]: I0122 06:45:03.237183 4933 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8cc8f64f-7f52-4255-b849-b2bad1f4dd09-config-volume\") on node \"crc\" DevicePath \"\"" Jan 22 06:45:03 crc kubenswrapper[4933]: I0122 06:45:03.237193 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g8mv9\" (UniqueName: \"kubernetes.io/projected/8cc8f64f-7f52-4255-b849-b2bad1f4dd09-kube-api-access-g8mv9\") on node \"crc\" DevicePath \"\"" Jan 22 06:45:03 crc kubenswrapper[4933]: I0122 06:45:03.711134 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484405-bvspt" event={"ID":"8cc8f64f-7f52-4255-b849-b2bad1f4dd09","Type":"ContainerDied","Data":"df0ace85984993e656235dda1a46df38e114285542ce5f981aeaeade0182860c"} Jan 22 06:45:03 crc kubenswrapper[4933]: I0122 06:45:03.711392 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="df0ace85984993e656235dda1a46df38e114285542ce5f981aeaeade0182860c" Jan 22 06:45:03 crc kubenswrapper[4933]: I0122 06:45:03.711280 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484405-bvspt" Jan 22 06:45:04 crc kubenswrapper[4933]: I0122 06:45:04.091235 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484360-dkfb4"] Jan 22 06:45:04 crc kubenswrapper[4933]: I0122 06:45:04.101194 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484360-dkfb4"] Jan 22 06:45:04 crc kubenswrapper[4933]: I0122 06:45:04.507424 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="267706ea-2ceb-4ba1-a923-9b82f27a8ddf" path="/var/lib/kubelet/pods/267706ea-2ceb-4ba1-a923-9b82f27a8ddf/volumes" Jan 22 06:45:40 crc kubenswrapper[4933]: I0122 06:45:40.942920 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:45:40 crc kubenswrapper[4933]: I0122 06:45:40.943551 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:46:03 crc kubenswrapper[4933]: I0122 06:46:03.274408 4933 scope.go:117] "RemoveContainer" containerID="759867e7333ddc7383957cb9e7346c3d5215bbb832c39084d30e0885d2a20918" Jan 22 06:46:10 crc kubenswrapper[4933]: I0122 06:46:10.943573 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" 
Jan 22 06:46:10 crc kubenswrapper[4933]: I0122 06:46:10.943573 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 06:46:10 crc kubenswrapper[4933]: I0122 06:46:10.944327 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 06:46:40 crc kubenswrapper[4933]: I0122 06:46:40.943547 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 06:46:40 crc kubenswrapper[4933]: I0122 06:46:40.944197 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 06:46:40 crc kubenswrapper[4933]: I0122 06:46:40.944255 4933 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx"
Jan 22 06:46:40 crc kubenswrapper[4933]: I0122 06:46:40.944980 4933 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"79ef21632b14ac76fe171d79c1f273dc38302cc41d7397ad9ad0c62b8ec99f30"} pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 22 06:46:40 crc kubenswrapper[4933]: I0122 06:46:40.945041 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" containerID="cri-o://79ef21632b14ac76fe171d79c1f273dc38302cc41d7397ad9ad0c62b8ec99f30" gracePeriod=600
Jan 22 06:46:41 crc kubenswrapper[4933]: I0122 06:46:41.560596 4933 generic.go:334] "Generic (PLEG): container finished" podID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerID="79ef21632b14ac76fe171d79c1f273dc38302cc41d7397ad9ad0c62b8ec99f30" exitCode=0
Jan 22 06:46:41 crc kubenswrapper[4933]: I0122 06:46:41.560659 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerDied","Data":"79ef21632b14ac76fe171d79c1f273dc38302cc41d7397ad9ad0c62b8ec99f30"}
Jan 22 06:46:41 crc kubenswrapper[4933]: I0122 06:46:41.560960 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerStarted","Data":"70acd110ff58947ee03c9d55a10cfccd0f2ffcaa0ff8dbffe4645cb8592c27a8"}
Jan 22 06:46:41 crc kubenswrapper[4933]: I0122 06:46:41.560978 4933 scope.go:117] "RemoveContainer" containerID="23ddd73fbcac7612e5d13f424d3f1cd3924020e592e3e838e891b8ea94bbc718"
Jan 22 06:49:10 crc kubenswrapper[4933]: I0122 06:49:10.942856 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:49:10 crc kubenswrapper[4933]: I0122 06:49:10.943506 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:49:40 crc kubenswrapper[4933]: I0122 06:49:40.943264 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:49:40 crc kubenswrapper[4933]: I0122 06:49:40.943990 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:50:10 crc kubenswrapper[4933]: I0122 06:50:10.943226 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:50:10 crc kubenswrapper[4933]: I0122 06:50:10.945305 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:50:10 crc kubenswrapper[4933]: I0122 06:50:10.945505 4933 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" Jan 22 06:50:10 crc kubenswrapper[4933]: I0122 06:50:10.946365 4933 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"70acd110ff58947ee03c9d55a10cfccd0f2ffcaa0ff8dbffe4645cb8592c27a8"} pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 06:50:10 crc kubenswrapper[4933]: I0122 06:50:10.946549 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" containerID="cri-o://70acd110ff58947ee03c9d55a10cfccd0f2ffcaa0ff8dbffe4645cb8592c27a8" gracePeriod=600 Jan 22 06:50:11 crc kubenswrapper[4933]: E0122 06:50:11.069146 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:50:11 crc 
kubenswrapper[4933]: I0122 06:50:11.474241 4933 generic.go:334] "Generic (PLEG): container finished" podID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerID="70acd110ff58947ee03c9d55a10cfccd0f2ffcaa0ff8dbffe4645cb8592c27a8" exitCode=0 Jan 22 06:50:11 crc kubenswrapper[4933]: I0122 06:50:11.474308 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerDied","Data":"70acd110ff58947ee03c9d55a10cfccd0f2ffcaa0ff8dbffe4645cb8592c27a8"} Jan 22 06:50:11 crc kubenswrapper[4933]: I0122 06:50:11.474354 4933 scope.go:117] "RemoveContainer" containerID="79ef21632b14ac76fe171d79c1f273dc38302cc41d7397ad9ad0c62b8ec99f30" Jan 22 06:50:11 crc kubenswrapper[4933]: I0122 06:50:11.475212 4933 scope.go:117] "RemoveContainer" containerID="70acd110ff58947ee03c9d55a10cfccd0f2ffcaa0ff8dbffe4645cb8592c27a8" Jan 22 06:50:11 crc kubenswrapper[4933]: E0122 06:50:11.475733 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:50:22 crc kubenswrapper[4933]: I0122 06:50:22.498181 4933 scope.go:117] "RemoveContainer" containerID="70acd110ff58947ee03c9d55a10cfccd0f2ffcaa0ff8dbffe4645cb8592c27a8" Jan 22 06:50:22 crc kubenswrapper[4933]: E0122 06:50:22.499161 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:50:37 crc kubenswrapper[4933]: I0122 06:50:37.490902 4933 scope.go:117] "RemoveContainer" containerID="70acd110ff58947ee03c9d55a10cfccd0f2ffcaa0ff8dbffe4645cb8592c27a8" Jan 22 06:50:37 crc kubenswrapper[4933]: E0122 06:50:37.491957 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:50:49 crc kubenswrapper[4933]: I0122 06:50:49.490982 4933 scope.go:117] "RemoveContainer" containerID="70acd110ff58947ee03c9d55a10cfccd0f2ffcaa0ff8dbffe4645cb8592c27a8" Jan 22 06:50:49 crc kubenswrapper[4933]: E0122 06:50:49.491971 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:51:02 crc kubenswrapper[4933]: I0122 06:51:02.498010 4933 scope.go:117] "RemoveContainer" 
containerID="70acd110ff58947ee03c9d55a10cfccd0f2ffcaa0ff8dbffe4645cb8592c27a8" Jan 22 06:51:02 crc kubenswrapper[4933]: E0122 06:51:02.499357 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:51:14 crc kubenswrapper[4933]: I0122 06:51:14.490542 4933 scope.go:117] "RemoveContainer" containerID="70acd110ff58947ee03c9d55a10cfccd0f2ffcaa0ff8dbffe4645cb8592c27a8" Jan 22 06:51:14 crc kubenswrapper[4933]: E0122 06:51:14.491138 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:51:26 crc kubenswrapper[4933]: I0122 06:51:26.490885 4933 scope.go:117] "RemoveContainer" containerID="70acd110ff58947ee03c9d55a10cfccd0f2ffcaa0ff8dbffe4645cb8592c27a8" Jan 22 06:51:26 crc kubenswrapper[4933]: E0122 06:51:26.491790 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:51:41 crc kubenswrapper[4933]: I0122 06:51:41.491432 4933 scope.go:117] "RemoveContainer" containerID="70acd110ff58947ee03c9d55a10cfccd0f2ffcaa0ff8dbffe4645cb8592c27a8" Jan 22 06:51:41 crc kubenswrapper[4933]: E0122 06:51:41.492553 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:51:56 crc kubenswrapper[4933]: I0122 06:51:56.491117 4933 scope.go:117] "RemoveContainer" containerID="70acd110ff58947ee03c9d55a10cfccd0f2ffcaa0ff8dbffe4645cb8592c27a8" Jan 22 06:51:56 crc kubenswrapper[4933]: E0122 06:51:56.491959 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:52:11 crc kubenswrapper[4933]: I0122 06:52:11.491242 4933 scope.go:117] "RemoveContainer" containerID="70acd110ff58947ee03c9d55a10cfccd0f2ffcaa0ff8dbffe4645cb8592c27a8" Jan 22 06:52:11 crc kubenswrapper[4933]: E0122 06:52:11.492109 4933 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:52:23 crc kubenswrapper[4933]: I0122 06:52:23.490370 4933 scope.go:117] "RemoveContainer" containerID="70acd110ff58947ee03c9d55a10cfccd0f2ffcaa0ff8dbffe4645cb8592c27a8" Jan 22 06:52:23 crc kubenswrapper[4933]: E0122 06:52:23.491385 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:52:34 crc kubenswrapper[4933]: I0122 06:52:34.491021 4933 scope.go:117] "RemoveContainer" containerID="70acd110ff58947ee03c9d55a10cfccd0f2ffcaa0ff8dbffe4645cb8592c27a8" Jan 22 06:52:34 crc kubenswrapper[4933]: E0122 06:52:34.493506 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:52:46 crc kubenswrapper[4933]: I0122 06:52:46.491706 4933 scope.go:117] "RemoveContainer" containerID="70acd110ff58947ee03c9d55a10cfccd0f2ffcaa0ff8dbffe4645cb8592c27a8" Jan 22 06:52:46 crc kubenswrapper[4933]: E0122 06:52:46.492809 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:53:01 crc kubenswrapper[4933]: I0122 06:53:01.491260 4933 scope.go:117] "RemoveContainer" containerID="70acd110ff58947ee03c9d55a10cfccd0f2ffcaa0ff8dbffe4645cb8592c27a8" Jan 22 06:53:01 crc kubenswrapper[4933]: E0122 06:53:01.492231 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:53:13 crc kubenswrapper[4933]: I0122 06:53:13.492194 4933 scope.go:117] "RemoveContainer" containerID="70acd110ff58947ee03c9d55a10cfccd0f2ffcaa0ff8dbffe4645cb8592c27a8" Jan 22 06:53:13 crc kubenswrapper[4933]: E0122 06:53:13.493107 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:53:28 crc kubenswrapper[4933]: I0122 06:53:28.492306 4933 scope.go:117] "RemoveContainer" containerID="70acd110ff58947ee03c9d55a10cfccd0f2ffcaa0ff8dbffe4645cb8592c27a8" Jan 22 06:53:28 crc kubenswrapper[4933]: E0122 06:53:28.495067 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:53:40 crc kubenswrapper[4933]: I0122 06:53:40.491264 4933 scope.go:117] "RemoveContainer" containerID="70acd110ff58947ee03c9d55a10cfccd0f2ffcaa0ff8dbffe4645cb8592c27a8" Jan 22 06:53:40 crc kubenswrapper[4933]: E0122 06:53:40.493874 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:53:54 crc kubenswrapper[4933]: I0122 06:53:54.490907 4933 scope.go:117] "RemoveContainer" containerID="70acd110ff58947ee03c9d55a10cfccd0f2ffcaa0ff8dbffe4645cb8592c27a8" Jan 22 06:53:54 crc kubenswrapper[4933]: E0122 06:53:54.492068 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:53:56 crc kubenswrapper[4933]: I0122 06:53:56.428346 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-rjpmf"] Jan 22 06:53:56 crc kubenswrapper[4933]: E0122 06:53:56.429452 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8cc8f64f-7f52-4255-b849-b2bad1f4dd09" containerName="collect-profiles" Jan 22 06:53:56 crc kubenswrapper[4933]: I0122 06:53:56.429500 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="8cc8f64f-7f52-4255-b849-b2bad1f4dd09" containerName="collect-profiles" Jan 22 06:53:56 crc kubenswrapper[4933]: I0122 06:53:56.429827 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="8cc8f64f-7f52-4255-b849-b2bad1f4dd09" containerName="collect-profiles" Jan 22 06:53:56 crc kubenswrapper[4933]: I0122 06:53:56.431752 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-rjpmf" Jan 22 06:53:56 crc kubenswrapper[4933]: I0122 06:53:56.437376 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rjpmf"] Jan 22 06:53:56 crc kubenswrapper[4933]: I0122 06:53:56.510359 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5-utilities\") pod \"community-operators-rjpmf\" (UID: \"4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5\") " pod="openshift-marketplace/community-operators-rjpmf" Jan 22 06:53:56 crc kubenswrapper[4933]: I0122 06:53:56.510410 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6pk99\" (UniqueName: \"kubernetes.io/projected/4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5-kube-api-access-6pk99\") pod \"community-operators-rjpmf\" (UID: \"4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5\") " pod="openshift-marketplace/community-operators-rjpmf" Jan 22 06:53:56 crc kubenswrapper[4933]: I0122 06:53:56.510436 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5-catalog-content\") pod \"community-operators-rjpmf\" (UID: \"4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5\") " pod="openshift-marketplace/community-operators-rjpmf" Jan 22 06:53:56 crc kubenswrapper[4933]: I0122 06:53:56.611595 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5-utilities\") pod \"community-operators-rjpmf\" (UID: \"4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5\") " pod="openshift-marketplace/community-operators-rjpmf" Jan 22 06:53:56 crc kubenswrapper[4933]: I0122 06:53:56.611673 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6pk99\" (UniqueName: \"kubernetes.io/projected/4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5-kube-api-access-6pk99\") pod \"community-operators-rjpmf\" (UID: \"4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5\") " pod="openshift-marketplace/community-operators-rjpmf" Jan 22 06:53:56 crc kubenswrapper[4933]: I0122 06:53:56.611708 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5-catalog-content\") pod \"community-operators-rjpmf\" (UID: \"4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5\") " pod="openshift-marketplace/community-operators-rjpmf" Jan 22 06:53:56 crc kubenswrapper[4933]: I0122 06:53:56.612222 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5-utilities\") pod \"community-operators-rjpmf\" (UID: \"4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5\") " pod="openshift-marketplace/community-operators-rjpmf" Jan 22 06:53:56 crc kubenswrapper[4933]: I0122 06:53:56.612306 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5-catalog-content\") pod \"community-operators-rjpmf\" (UID: \"4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5\") " pod="openshift-marketplace/community-operators-rjpmf" Jan 22 06:53:56 crc kubenswrapper[4933]: I0122 06:53:56.637152 4933 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-6pk99\" (UniqueName: \"kubernetes.io/projected/4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5-kube-api-access-6pk99\") pod \"community-operators-rjpmf\" (UID: \"4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5\") " pod="openshift-marketplace/community-operators-rjpmf" Jan 22 06:53:56 crc kubenswrapper[4933]: I0122 06:53:56.813748 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rjpmf" Jan 22 06:53:57 crc kubenswrapper[4933]: I0122 06:53:57.269930 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rjpmf"] Jan 22 06:53:57 crc kubenswrapper[4933]: W0122 06:53:57.287434 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4ea2db51_8bf0_44ca_9f17_6bb2fef3cbb5.slice/crio-69574079243eb4100c9ee0116e21e160b05155c56819a6cba0876ce9b89a8b92 WatchSource:0}: Error finding container 69574079243eb4100c9ee0116e21e160b05155c56819a6cba0876ce9b89a8b92: Status 404 returned error can't find the container with id 69574079243eb4100c9ee0116e21e160b05155c56819a6cba0876ce9b89a8b92 Jan 22 06:53:57 crc kubenswrapper[4933]: I0122 06:53:57.437921 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rjpmf" event={"ID":"4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5","Type":"ContainerStarted","Data":"69574079243eb4100c9ee0116e21e160b05155c56819a6cba0876ce9b89a8b92"} Jan 22 06:53:58 crc kubenswrapper[4933]: I0122 06:53:58.448749 4933 generic.go:334] "Generic (PLEG): container finished" podID="4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5" containerID="95827192ac5e21d7fd699ce05d24b8a882085b16008c8c4e05b7e0a015ce4193" exitCode=0 Jan 22 06:53:58 crc kubenswrapper[4933]: I0122 06:53:58.448842 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rjpmf" event={"ID":"4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5","Type":"ContainerDied","Data":"95827192ac5e21d7fd699ce05d24b8a882085b16008c8c4e05b7e0a015ce4193"} Jan 22 06:53:58 crc kubenswrapper[4933]: I0122 06:53:58.451856 4933 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 06:54:02 crc kubenswrapper[4933]: I0122 06:54:02.476617 4933 generic.go:334] "Generic (PLEG): container finished" podID="4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5" containerID="c7357cd665ad54998be4b28d0f6baef02e78862766ef793107ca722870f0bcec" exitCode=0 Jan 22 06:54:02 crc kubenswrapper[4933]: I0122 06:54:02.476679 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rjpmf" event={"ID":"4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5","Type":"ContainerDied","Data":"c7357cd665ad54998be4b28d0f6baef02e78862766ef793107ca722870f0bcec"} Jan 22 06:54:03 crc kubenswrapper[4933]: I0122 06:54:03.487733 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rjpmf" event={"ID":"4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5","Type":"ContainerStarted","Data":"be140cd4b37c2c9b24a2cd46714214d1fea8a2bb6a24eae56d0b90e9f8f20892"} Jan 22 06:54:03 crc kubenswrapper[4933]: I0122 06:54:03.508337 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-rjpmf" podStartSLOduration=3.025705254 podStartE2EDuration="7.508314505s" podCreationTimestamp="2026-01-22 06:53:56 +0000 UTC" firstStartedPulling="2026-01-22 06:53:58.451443894 +0000 UTC 
m=+4086.288569267" lastFinishedPulling="2026-01-22 06:54:02.934053125 +0000 UTC m=+4090.771178518" observedRunningTime="2026-01-22 06:54:03.506155173 +0000 UTC m=+4091.343280586" watchObservedRunningTime="2026-01-22 06:54:03.508314505 +0000 UTC m=+4091.345439878" Jan 22 06:54:06 crc kubenswrapper[4933]: I0122 06:54:06.814646 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-rjpmf" Jan 22 06:54:06 crc kubenswrapper[4933]: I0122 06:54:06.815013 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-rjpmf" Jan 22 06:54:06 crc kubenswrapper[4933]: I0122 06:54:06.871433 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-rjpmf" Jan 22 06:54:08 crc kubenswrapper[4933]: I0122 06:54:08.625240 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-rjpmf" Jan 22 06:54:09 crc kubenswrapper[4933]: I0122 06:54:09.490935 4933 scope.go:117] "RemoveContainer" containerID="70acd110ff58947ee03c9d55a10cfccd0f2ffcaa0ff8dbffe4645cb8592c27a8" Jan 22 06:54:09 crc kubenswrapper[4933]: E0122 06:54:09.491252 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 06:54:10 crc kubenswrapper[4933]: I0122 06:54:10.234251 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rjpmf"] Jan 22 06:54:10 crc kubenswrapper[4933]: I0122 06:54:10.399355 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-m4tv2"] Jan 22 06:54:10 crc kubenswrapper[4933]: I0122 06:54:10.400375 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-m4tv2" podUID="59c8d87a-51df-446d-8e17-197464398b18" containerName="registry-server" containerID="cri-o://561ed4d1b26241a759040b1b289e03620b01a40d5d6b3ea4b150e5263a0cb246" gracePeriod=2 Jan 22 06:54:10 crc kubenswrapper[4933]: E0122 06:54:10.575772 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 561ed4d1b26241a759040b1b289e03620b01a40d5d6b3ea4b150e5263a0cb246 is running failed: container process not found" containerID="561ed4d1b26241a759040b1b289e03620b01a40d5d6b3ea4b150e5263a0cb246" cmd=["grpc_health_probe","-addr=:50051"] Jan 22 06:54:10 crc kubenswrapper[4933]: E0122 06:54:10.576794 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 561ed4d1b26241a759040b1b289e03620b01a40d5d6b3ea4b150e5263a0cb246 is running failed: container process not found" containerID="561ed4d1b26241a759040b1b289e03620b01a40d5d6b3ea4b150e5263a0cb246" cmd=["grpc_health_probe","-addr=:50051"] Jan 22 06:54:10 crc kubenswrapper[4933]: E0122 06:54:10.577509 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 
Jan 22 06:54:10 crc kubenswrapper[4933]: E0122 06:54:10.577509 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 561ed4d1b26241a759040b1b289e03620b01a40d5d6b3ea4b150e5263a0cb246 is running failed: container process not found" containerID="561ed4d1b26241a759040b1b289e03620b01a40d5d6b3ea4b150e5263a0cb246" cmd=["grpc_health_probe","-addr=:50051"]
Jan 22 06:54:10 crc kubenswrapper[4933]: E0122 06:54:10.577563 4933 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 561ed4d1b26241a759040b1b289e03620b01a40d5d6b3ea4b150e5263a0cb246 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/community-operators-m4tv2" podUID="59c8d87a-51df-446d-8e17-197464398b18" containerName="registry-server"
Jan 22 06:54:11 crc kubenswrapper[4933]: I0122 06:54:11.460423 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-m4tv2"
Jan 22 06:54:11 crc kubenswrapper[4933]: I0122 06:54:11.555482 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/59c8d87a-51df-446d-8e17-197464398b18-utilities\") pod \"59c8d87a-51df-446d-8e17-197464398b18\" (UID: \"59c8d87a-51df-446d-8e17-197464398b18\") "
Jan 22 06:54:11 crc kubenswrapper[4933]: I0122 06:54:11.555559 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/59c8d87a-51df-446d-8e17-197464398b18-catalog-content\") pod \"59c8d87a-51df-446d-8e17-197464398b18\" (UID: \"59c8d87a-51df-446d-8e17-197464398b18\") "
Jan 22 06:54:11 crc kubenswrapper[4933]: I0122 06:54:11.555685 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r85g8\" (UniqueName: \"kubernetes.io/projected/59c8d87a-51df-446d-8e17-197464398b18-kube-api-access-r85g8\") pod \"59c8d87a-51df-446d-8e17-197464398b18\" (UID: \"59c8d87a-51df-446d-8e17-197464398b18\") "
Jan 22 06:54:11 crc kubenswrapper[4933]: I0122 06:54:11.556413 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/59c8d87a-51df-446d-8e17-197464398b18-utilities" (OuterVolumeSpecName: "utilities") pod "59c8d87a-51df-446d-8e17-197464398b18" (UID: "59c8d87a-51df-446d-8e17-197464398b18"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 06:54:11 crc kubenswrapper[4933]: I0122 06:54:11.573936 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/59c8d87a-51df-446d-8e17-197464398b18-kube-api-access-r85g8" (OuterVolumeSpecName: "kube-api-access-r85g8") pod "59c8d87a-51df-446d-8e17-197464398b18" (UID: "59c8d87a-51df-446d-8e17-197464398b18"). InnerVolumeSpecName "kube-api-access-r85g8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 06:54:11 crc kubenswrapper[4933]: I0122 06:54:11.588742 4933 generic.go:334] "Generic (PLEG): container finished" podID="59c8d87a-51df-446d-8e17-197464398b18" containerID="561ed4d1b26241a759040b1b289e03620b01a40d5d6b3ea4b150e5263a0cb246" exitCode=0
Jan 22 06:54:11 crc kubenswrapper[4933]: I0122 06:54:11.588795 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m4tv2" event={"ID":"59c8d87a-51df-446d-8e17-197464398b18","Type":"ContainerDied","Data":"561ed4d1b26241a759040b1b289e03620b01a40d5d6b3ea4b150e5263a0cb246"}
Jan 22 06:54:11 crc kubenswrapper[4933]: I0122 06:54:11.588835 4933 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openshift-marketplace/community-operators-m4tv2" Jan 22 06:54:11 crc kubenswrapper[4933]: I0122 06:54:11.588853 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m4tv2" event={"ID":"59c8d87a-51df-446d-8e17-197464398b18","Type":"ContainerDied","Data":"44df16837340ce55fb81ce76c9d7c55bd0adbcb6204926ebf655bfdbb972046d"} Jan 22 06:54:11 crc kubenswrapper[4933]: I0122 06:54:11.588873 4933 scope.go:117] "RemoveContainer" containerID="561ed4d1b26241a759040b1b289e03620b01a40d5d6b3ea4b150e5263a0cb246" Jan 22 06:54:11 crc kubenswrapper[4933]: I0122 06:54:11.604828 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/59c8d87a-51df-446d-8e17-197464398b18-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "59c8d87a-51df-446d-8e17-197464398b18" (UID: "59c8d87a-51df-446d-8e17-197464398b18"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:54:11 crc kubenswrapper[4933]: I0122 06:54:11.611288 4933 scope.go:117] "RemoveContainer" containerID="c4b2603445e41ae580ddd7f20c1c7e65104977f0a65c94839e7e805bade17074" Jan 22 06:54:11 crc kubenswrapper[4933]: I0122 06:54:11.635886 4933 scope.go:117] "RemoveContainer" containerID="8823da99e4c204aa572d288ff99812e47162df808e4f479016887f72dccd5155" Jan 22 06:54:11 crc kubenswrapper[4933]: I0122 06:54:11.657169 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r85g8\" (UniqueName: \"kubernetes.io/projected/59c8d87a-51df-446d-8e17-197464398b18-kube-api-access-r85g8\") on node \"crc\" DevicePath \"\"" Jan 22 06:54:11 crc kubenswrapper[4933]: I0122 06:54:11.657201 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/59c8d87a-51df-446d-8e17-197464398b18-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 06:54:11 crc kubenswrapper[4933]: I0122 06:54:11.657212 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/59c8d87a-51df-446d-8e17-197464398b18-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 06:54:11 crc kubenswrapper[4933]: I0122 06:54:11.663530 4933 scope.go:117] "RemoveContainer" containerID="561ed4d1b26241a759040b1b289e03620b01a40d5d6b3ea4b150e5263a0cb246" Jan 22 06:54:11 crc kubenswrapper[4933]: E0122 06:54:11.664015 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"561ed4d1b26241a759040b1b289e03620b01a40d5d6b3ea4b150e5263a0cb246\": container with ID starting with 561ed4d1b26241a759040b1b289e03620b01a40d5d6b3ea4b150e5263a0cb246 not found: ID does not exist" containerID="561ed4d1b26241a759040b1b289e03620b01a40d5d6b3ea4b150e5263a0cb246" Jan 22 06:54:11 crc kubenswrapper[4933]: I0122 06:54:11.664065 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"561ed4d1b26241a759040b1b289e03620b01a40d5d6b3ea4b150e5263a0cb246"} err="failed to get container status \"561ed4d1b26241a759040b1b289e03620b01a40d5d6b3ea4b150e5263a0cb246\": rpc error: code = NotFound desc = could not find container \"561ed4d1b26241a759040b1b289e03620b01a40d5d6b3ea4b150e5263a0cb246\": container with ID starting with 561ed4d1b26241a759040b1b289e03620b01a40d5d6b3ea4b150e5263a0cb246 not found: ID does not exist" Jan 22 06:54:11 crc kubenswrapper[4933]: I0122 06:54:11.664135 4933 scope.go:117] "RemoveContainer" 
containerID="c4b2603445e41ae580ddd7f20c1c7e65104977f0a65c94839e7e805bade17074" Jan 22 06:54:11 crc kubenswrapper[4933]: E0122 06:54:11.664502 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c4b2603445e41ae580ddd7f20c1c7e65104977f0a65c94839e7e805bade17074\": container with ID starting with c4b2603445e41ae580ddd7f20c1c7e65104977f0a65c94839e7e805bade17074 not found: ID does not exist" containerID="c4b2603445e41ae580ddd7f20c1c7e65104977f0a65c94839e7e805bade17074" Jan 22 06:54:11 crc kubenswrapper[4933]: I0122 06:54:11.664545 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c4b2603445e41ae580ddd7f20c1c7e65104977f0a65c94839e7e805bade17074"} err="failed to get container status \"c4b2603445e41ae580ddd7f20c1c7e65104977f0a65c94839e7e805bade17074\": rpc error: code = NotFound desc = could not find container \"c4b2603445e41ae580ddd7f20c1c7e65104977f0a65c94839e7e805bade17074\": container with ID starting with c4b2603445e41ae580ddd7f20c1c7e65104977f0a65c94839e7e805bade17074 not found: ID does not exist" Jan 22 06:54:11 crc kubenswrapper[4933]: I0122 06:54:11.664574 4933 scope.go:117] "RemoveContainer" containerID="8823da99e4c204aa572d288ff99812e47162df808e4f479016887f72dccd5155" Jan 22 06:54:11 crc kubenswrapper[4933]: E0122 06:54:11.664808 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8823da99e4c204aa572d288ff99812e47162df808e4f479016887f72dccd5155\": container with ID starting with 8823da99e4c204aa572d288ff99812e47162df808e4f479016887f72dccd5155 not found: ID does not exist" containerID="8823da99e4c204aa572d288ff99812e47162df808e4f479016887f72dccd5155" Jan 22 06:54:11 crc kubenswrapper[4933]: I0122 06:54:11.664840 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8823da99e4c204aa572d288ff99812e47162df808e4f479016887f72dccd5155"} err="failed to get container status \"8823da99e4c204aa572d288ff99812e47162df808e4f479016887f72dccd5155\": rpc error: code = NotFound desc = could not find container \"8823da99e4c204aa572d288ff99812e47162df808e4f479016887f72dccd5155\": container with ID starting with 8823da99e4c204aa572d288ff99812e47162df808e4f479016887f72dccd5155 not found: ID does not exist" Jan 22 06:54:11 crc kubenswrapper[4933]: I0122 06:54:11.920513 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-m4tv2"] Jan 22 06:54:11 crc kubenswrapper[4933]: I0122 06:54:11.925494 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-m4tv2"] Jan 22 06:54:12 crc kubenswrapper[4933]: I0122 06:54:12.509864 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="59c8d87a-51df-446d-8e17-197464398b18" path="/var/lib/kubelet/pods/59c8d87a-51df-446d-8e17-197464398b18/volumes" Jan 22 06:54:24 crc kubenswrapper[4933]: I0122 06:54:24.490360 4933 scope.go:117] "RemoveContainer" containerID="70acd110ff58947ee03c9d55a10cfccd0f2ffcaa0ff8dbffe4645cb8592c27a8" Jan 22 06:54:24 crc kubenswrapper[4933]: E0122 06:54:24.491058 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" 
Jan 22 06:54:37 crc kubenswrapper[4933]: I0122 06:54:37.490936 4933 scope.go:117] "RemoveContainer" containerID="70acd110ff58947ee03c9d55a10cfccd0f2ffcaa0ff8dbffe4645cb8592c27a8"
Jan 22 06:54:37 crc kubenswrapper[4933]: E0122 06:54:37.492429 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:54:48 crc kubenswrapper[4933]: I0122 06:54:48.282207 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-mbkft"]
Jan 22 06:54:48 crc kubenswrapper[4933]: E0122 06:54:48.283065 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59c8d87a-51df-446d-8e17-197464398b18" containerName="extract-utilities"
Jan 22 06:54:48 crc kubenswrapper[4933]: I0122 06:54:48.283095 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="59c8d87a-51df-446d-8e17-197464398b18" containerName="extract-utilities"
Jan 22 06:54:48 crc kubenswrapper[4933]: E0122 06:54:48.283121 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59c8d87a-51df-446d-8e17-197464398b18" containerName="registry-server"
Jan 22 06:54:48 crc kubenswrapper[4933]: I0122 06:54:48.283129 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="59c8d87a-51df-446d-8e17-197464398b18" containerName="registry-server"
Jan 22 06:54:48 crc kubenswrapper[4933]: E0122 06:54:48.283154 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59c8d87a-51df-446d-8e17-197464398b18" containerName="extract-content"
Jan 22 06:54:48 crc kubenswrapper[4933]: I0122 06:54:48.283162 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="59c8d87a-51df-446d-8e17-197464398b18" containerName="extract-content"
Jan 22 06:54:48 crc kubenswrapper[4933]: I0122 06:54:48.283318 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="59c8d87a-51df-446d-8e17-197464398b18" containerName="registry-server"
Jan 22 06:54:48 crc kubenswrapper[4933]: I0122 06:54:48.287174 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mbkft"
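The two "CrashLoopBackOff ... back-off 5m0s" entries above show the kubelet's crash-loop restart back-off already at its cap for machine-config-daemon. As a hedged illustration of the back-off shape (only the 5m cap appears in the log; the 10s initial delay and doubling factor are assumed defaults here):

```go
// Illustrative sketch of crash-loop restart back-off: exponential
// doubling up to a cap. The log above shows the 5m0s cap in effect.
package main

import (
	"fmt"
	"time"
)

// restartDelay doubles an initial delay per prior restart, capped at max.
func restartDelay(initial, max time.Duration, restarts int) time.Duration {
	d := initial
	for i := 0; i < restarts; i++ {
		d *= 2
		if d >= max {
			return max
		}
	}
	return d
}

func main() {
	for r := 0; r <= 6; r++ {
		// Assumed defaults: 10s initial delay, 5m cap.
		fmt.Printf("restart %d: wait %v\n", r, restartDelay(10*time.Second, 5*time.Minute, r))
	}
}
```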
Jan 22 06:54:48 crc kubenswrapper[4933]: I0122 06:54:48.297445 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mbkft"]
Jan 22 06:54:48 crc kubenswrapper[4933]: I0122 06:54:48.455494 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mhfjx\" (UniqueName: \"kubernetes.io/projected/083bbfdf-9e1f-44c2-a472-ddcae3659afe-kube-api-access-mhfjx\") pod \"redhat-marketplace-mbkft\" (UID: \"083bbfdf-9e1f-44c2-a472-ddcae3659afe\") " pod="openshift-marketplace/redhat-marketplace-mbkft"
Jan 22 06:54:48 crc kubenswrapper[4933]: I0122 06:54:48.455597 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/083bbfdf-9e1f-44c2-a472-ddcae3659afe-utilities\") pod \"redhat-marketplace-mbkft\" (UID: \"083bbfdf-9e1f-44c2-a472-ddcae3659afe\") " pod="openshift-marketplace/redhat-marketplace-mbkft"
Jan 22 06:54:48 crc kubenswrapper[4933]: I0122 06:54:48.455623 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/083bbfdf-9e1f-44c2-a472-ddcae3659afe-catalog-content\") pod \"redhat-marketplace-mbkft\" (UID: \"083bbfdf-9e1f-44c2-a472-ddcae3659afe\") " pod="openshift-marketplace/redhat-marketplace-mbkft"
Jan 22 06:54:48 crc kubenswrapper[4933]: I0122 06:54:48.557501 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/083bbfdf-9e1f-44c2-a472-ddcae3659afe-catalog-content\") pod \"redhat-marketplace-mbkft\" (UID: \"083bbfdf-9e1f-44c2-a472-ddcae3659afe\") " pod="openshift-marketplace/redhat-marketplace-mbkft"
Jan 22 06:54:48 crc kubenswrapper[4933]: I0122 06:54:48.557609 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mhfjx\" (UniqueName: \"kubernetes.io/projected/083bbfdf-9e1f-44c2-a472-ddcae3659afe-kube-api-access-mhfjx\") pod \"redhat-marketplace-mbkft\" (UID: \"083bbfdf-9e1f-44c2-a472-ddcae3659afe\") " pod="openshift-marketplace/redhat-marketplace-mbkft"
Jan 22 06:54:48 crc kubenswrapper[4933]: I0122 06:54:48.557686 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/083bbfdf-9e1f-44c2-a472-ddcae3659afe-utilities\") pod \"redhat-marketplace-mbkft\" (UID: \"083bbfdf-9e1f-44c2-a472-ddcae3659afe\") " pod="openshift-marketplace/redhat-marketplace-mbkft"
Jan 22 06:54:48 crc kubenswrapper[4933]: I0122 06:54:48.558215 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/083bbfdf-9e1f-44c2-a472-ddcae3659afe-utilities\") pod \"redhat-marketplace-mbkft\" (UID: \"083bbfdf-9e1f-44c2-a472-ddcae3659afe\") " pod="openshift-marketplace/redhat-marketplace-mbkft"
Jan 22 06:54:48 crc kubenswrapper[4933]: I0122 06:54:48.558478 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/083bbfdf-9e1f-44c2-a472-ddcae3659afe-catalog-content\") pod \"redhat-marketplace-mbkft\" (UID: \"083bbfdf-9e1f-44c2-a472-ddcae3659afe\") " pod="openshift-marketplace/redhat-marketplace-mbkft"
Jan 22 06:54:48 crc kubenswrapper[4933]: I0122 06:54:48.585887 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mhfjx\" (UniqueName: \"kubernetes.io/projected/083bbfdf-9e1f-44c2-a472-ddcae3659afe-kube-api-access-mhfjx\") pod \"redhat-marketplace-mbkft\" (UID: \"083bbfdf-9e1f-44c2-a472-ddcae3659afe\") " pod="openshift-marketplace/redhat-marketplace-mbkft"
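The emptyDir volumes being attached and mounted above ("utilities", "catalog-content") are backed by directories under the kubelet's pods directory; the "Cleaned up orphaned pod volumes dir" entries elsewhere in this log show the same layout being removed. A sketch of the path construction, using the standard kubelet layout and the pod UID from the entries above:

```go
// Sketch: on-disk location of the emptyDir volumes mounted above.
// /var/lib/kubelet/pods/<podUID>/volumes/kubernetes.io~empty-dir/<name>
package main

import (
	"fmt"
	"path/filepath"
)

func emptyDirPath(podUID, volume string) string {
	return filepath.Join("/var/lib/kubelet/pods", podUID,
		"volumes", "kubernetes.io~empty-dir", volume)
}

func main() {
	uid := "083bbfdf-9e1f-44c2-a472-ddcae3659afe" // redhat-marketplace-mbkft, from the log
	for _, v := range []string{"utilities", "catalog-content"} {
		fmt.Println(emptyDirPath(uid, v))
	}
}
```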
Jan 22 06:54:48 crc kubenswrapper[4933]: I0122 06:54:48.611903 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mbkft"
Jan 22 06:54:48 crc kubenswrapper[4933]: I0122 06:54:48.873869 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mbkft"]
Jan 22 06:54:48 crc kubenswrapper[4933]: I0122 06:54:48.921984 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mbkft" event={"ID":"083bbfdf-9e1f-44c2-a472-ddcae3659afe","Type":"ContainerStarted","Data":"2287e64a6c2604214b440962f94309ce13d82a9a41bf785c945620a8c64f5a32"}
Jan 22 06:54:49 crc kubenswrapper[4933]: I0122 06:54:49.929868 4933 generic.go:334] "Generic (PLEG): container finished" podID="083bbfdf-9e1f-44c2-a472-ddcae3659afe" containerID="f68b13b92ba5f82e967f86e6e5044d473bc029a569c1fa29c79dff6aed82ffdf" exitCode=0
Jan 22 06:54:49 crc kubenswrapper[4933]: I0122 06:54:49.929931 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mbkft" event={"ID":"083bbfdf-9e1f-44c2-a472-ddcae3659afe","Type":"ContainerDied","Data":"f68b13b92ba5f82e967f86e6e5044d473bc029a569c1fa29c79dff6aed82ffdf"}
Jan 22 06:54:50 crc kubenswrapper[4933]: I0122 06:54:50.941698 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mbkft" event={"ID":"083bbfdf-9e1f-44c2-a472-ddcae3659afe","Type":"ContainerStarted","Data":"595b8051533a006dc6b207691ff3a24916465bc6eb22f2183da844019dec2f71"}
Jan 22 06:54:51 crc kubenswrapper[4933]: I0122 06:54:51.062512 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-z78dp"]
Jan 22 06:54:51 crc kubenswrapper[4933]: I0122 06:54:51.066608 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z78dp"
Jan 22 06:54:51 crc kubenswrapper[4933]: I0122 06:54:51.078405 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-z78dp"]
Jan 22 06:54:51 crc kubenswrapper[4933]: I0122 06:54:51.195874 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zwpgx\" (UniqueName: \"kubernetes.io/projected/5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba-kube-api-access-zwpgx\") pod \"certified-operators-z78dp\" (UID: \"5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba\") " pod="openshift-marketplace/certified-operators-z78dp"
Jan 22 06:54:51 crc kubenswrapper[4933]: I0122 06:54:51.195994 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba-catalog-content\") pod \"certified-operators-z78dp\" (UID: \"5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba\") " pod="openshift-marketplace/certified-operators-z78dp"
Jan 22 06:54:51 crc kubenswrapper[4933]: I0122 06:54:51.196109 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba-utilities\") pod \"certified-operators-z78dp\" (UID: \"5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba\") " pod="openshift-marketplace/certified-operators-z78dp"
Jan 22 06:54:51 crc kubenswrapper[4933]: I0122 06:54:51.297935 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba-utilities\") pod \"certified-operators-z78dp\" (UID: \"5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba\") " pod="openshift-marketplace/certified-operators-z78dp"
Jan 22 06:54:51 crc kubenswrapper[4933]: I0122 06:54:51.298016 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zwpgx\" (UniqueName: \"kubernetes.io/projected/5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba-kube-api-access-zwpgx\") pod \"certified-operators-z78dp\" (UID: \"5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba\") " pod="openshift-marketplace/certified-operators-z78dp"
Jan 22 06:54:51 crc kubenswrapper[4933]: I0122 06:54:51.298091 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba-catalog-content\") pod \"certified-operators-z78dp\" (UID: \"5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba\") " pod="openshift-marketplace/certified-operators-z78dp"
Jan 22 06:54:51 crc kubenswrapper[4933]: I0122 06:54:51.298521 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba-utilities\") pod \"certified-operators-z78dp\" (UID: \"5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba\") " pod="openshift-marketplace/certified-operators-z78dp"
Jan 22 06:54:51 crc kubenswrapper[4933]: I0122 06:54:51.298627 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba-catalog-content\") pod \"certified-operators-z78dp\" (UID: \"5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba\") " pod="openshift-marketplace/certified-operators-z78dp"
Jan 22 06:54:51 crc kubenswrapper[4933]: I0122 06:54:51.322119 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zwpgx\" (UniqueName: \"kubernetes.io/projected/5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba-kube-api-access-zwpgx\") pod \"certified-operators-z78dp\" (UID: \"5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba\") " pod="openshift-marketplace/certified-operators-z78dp"
Jan 22 06:54:51 crc kubenswrapper[4933]: I0122 06:54:51.420114 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z78dp"
Jan 22 06:54:51 crc kubenswrapper[4933]: I0122 06:54:51.949788 4933 generic.go:334] "Generic (PLEG): container finished" podID="083bbfdf-9e1f-44c2-a472-ddcae3659afe" containerID="595b8051533a006dc6b207691ff3a24916465bc6eb22f2183da844019dec2f71" exitCode=0
Jan 22 06:54:51 crc kubenswrapper[4933]: I0122 06:54:51.949894 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mbkft" event={"ID":"083bbfdf-9e1f-44c2-a472-ddcae3659afe","Type":"ContainerDied","Data":"595b8051533a006dc6b207691ff3a24916465bc6eb22f2183da844019dec2f71"}
Jan 22 06:54:52 crc kubenswrapper[4933]: I0122 06:54:52.116574 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-z78dp"]
Jan 22 06:54:52 crc kubenswrapper[4933]: I0122 06:54:52.503820 4933 scope.go:117] "RemoveContainer" containerID="70acd110ff58947ee03c9d55a10cfccd0f2ffcaa0ff8dbffe4645cb8592c27a8"
Jan 22 06:54:52 crc kubenswrapper[4933]: E0122 06:54:52.504339 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:54:52 crc kubenswrapper[4933]: I0122 06:54:52.959914 4933 generic.go:334] "Generic (PLEG): container finished" podID="5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba" containerID="9619dae4158b5c872697387bfaf789be2987413af4fed6835ab2c60ee0d63212" exitCode=0
Jan 22 06:54:52 crc kubenswrapper[4933]: I0122 06:54:52.960007 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z78dp" event={"ID":"5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba","Type":"ContainerDied","Data":"9619dae4158b5c872697387bfaf789be2987413af4fed6835ab2c60ee0d63212"}
Jan 22 06:54:52 crc kubenswrapper[4933]: I0122 06:54:52.960293 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z78dp" event={"ID":"5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba","Type":"ContainerStarted","Data":"6904930abb258c7ac518633352a3f2fa749928fba02c6042ec85c919345ea33f"}
Jan 22 06:54:52 crc kubenswrapper[4933]: I0122 06:54:52.965834 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mbkft" event={"ID":"083bbfdf-9e1f-44c2-a472-ddcae3659afe","Type":"ContainerStarted","Data":"b833ed4a51067197c6250b5dd5c692892ea7a6ba614f8f23111761d2df78e1be"}
Jan 22 06:54:53 crc kubenswrapper[4933]: I0122 06:54:53.979279 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z78dp" event={"ID":"5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba","Type":"ContainerStarted","Data":"fa0de77e47848786d8510ed408bf876febfeab573fb82dc679cd80b40c366cc6"}
Jan 22 06:54:54 crc kubenswrapper[4933]: I0122 06:54:54.005034 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-mbkft" podStartSLOduration=3.6153487 podStartE2EDuration="6.005017664s" podCreationTimestamp="2026-01-22 06:54:48 +0000 UTC" firstStartedPulling="2026-01-22 06:54:49.931853429 +0000 UTC m=+4137.768978782" lastFinishedPulling="2026-01-22 06:54:52.321522393 +0000 UTC m=+4140.158647746" observedRunningTime="2026-01-22 06:54:53.011129569 +0000 UTC m=+4140.848254942" watchObservedRunningTime="2026-01-22 06:54:54.005017664 +0000 UTC m=+4141.842143017"
Jan 22 06:54:54 crc kubenswrapper[4933]: I0122 06:54:54.990226 4933 generic.go:334] "Generic (PLEG): container finished" podID="5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba" containerID="fa0de77e47848786d8510ed408bf876febfeab573fb82dc679cd80b40c366cc6" exitCode=0
Jan 22 06:54:54 crc kubenswrapper[4933]: I0122 06:54:54.990281 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z78dp" event={"ID":"5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba","Type":"ContainerDied","Data":"fa0de77e47848786d8510ed408bf876febfeab573fb82dc679cd80b40c366cc6"}
Jan 22 06:54:56 crc kubenswrapper[4933]: I0122 06:54:56.000110 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z78dp" event={"ID":"5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba","Type":"ContainerStarted","Data":"b88561a3add6e27f055fa1c1897ee711b17bd31f5fe0992ac271ac2f20cc6e62"}
Jan 22 06:54:56 crc kubenswrapper[4933]: I0122 06:54:56.023947 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-z78dp" podStartSLOduration=2.512284925 podStartE2EDuration="5.023913555s" podCreationTimestamp="2026-01-22 06:54:51 +0000 UTC" firstStartedPulling="2026-01-22 06:54:52.962733817 +0000 UTC m=+4140.799859210" lastFinishedPulling="2026-01-22 06:54:55.474362457 +0000 UTC m=+4143.311487840" observedRunningTime="2026-01-22 06:54:56.01882904 +0000 UTC m=+4143.855954483" watchObservedRunningTime="2026-01-22 06:54:56.023913555 +0000 UTC m=+4143.861038958"
Jan 22 06:54:58 crc kubenswrapper[4933]: I0122 06:54:58.612728 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-mbkft"
Jan 22 06:54:58 crc kubenswrapper[4933]: I0122 06:54:58.613366 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-mbkft"
Jan 22 06:54:58 crc kubenswrapper[4933]: I0122 06:54:58.687208 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-mbkft"
Jan 22 06:54:59 crc kubenswrapper[4933]: I0122 06:54:59.104966 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-mbkft"
Jan 22 06:54:59 crc kubenswrapper[4933]: I0122 06:54:59.859430 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mbkft"]
Jan 22 06:55:01 crc kubenswrapper[4933]: I0122 06:55:01.051337 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-mbkft" podUID="083bbfdf-9e1f-44c2-a472-ddcae3659afe" containerName="registry-server" containerID="cri-o://b833ed4a51067197c6250b5dd5c692892ea7a6ba614f8f23111761d2df78e1be" gracePeriod=2
Jan 22 06:55:01 crc kubenswrapper[4933]: I0122 06:55:01.421044 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-z78dp"
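The two "Observed pod startup duration" entries above encode a simple relation: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration subtracts the image-pull window (lastFinishedPulling minus firstStartedPulling). A quick check with the timestamps from the redhat-marketplace-mbkft entry:

```go
// Verifies the arithmetic in the pod_startup_latency_tracker entries above:
// E2E = watchObservedRunningTime - creation; SLO = E2E - image pull time.
package main

import (
	"fmt"
	"time"
)

const layout = "2006-01-02 15:04:05.999999999 -0700 MST"

func mustParse(s string) time.Time {
	t, err := time.Parse(layout, s)
	if err != nil {
		panic(err)
	}
	return t
}

func main() {
	created := mustParse("2026-01-22 06:54:48 +0000 UTC")
	running := mustParse("2026-01-22 06:54:54.005017664 +0000 UTC")
	pullStart := mustParse("2026-01-22 06:54:49.931853429 +0000 UTC")
	pullEnd := mustParse("2026-01-22 06:54:52.321522393 +0000 UTC")

	e2e := running.Sub(created)
	slo := e2e - pullEnd.Sub(pullStart)
	fmt.Println(e2e) // 6.005017664s (podStartE2EDuration)
	fmt.Println(slo) // 3.6153487s   (podStartSLOduration)
}
```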
Jan 22 06:55:01 crc kubenswrapper[4933]: I0122 06:55:01.421397 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-z78dp"
Jan 22 06:55:01 crc kubenswrapper[4933]: I0122 06:55:01.497004 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-z78dp"
Jan 22 06:55:02 crc kubenswrapper[4933]: I0122 06:55:02.061321 4933 generic.go:334] "Generic (PLEG): container finished" podID="083bbfdf-9e1f-44c2-a472-ddcae3659afe" containerID="b833ed4a51067197c6250b5dd5c692892ea7a6ba614f8f23111761d2df78e1be" exitCode=0
Jan 22 06:55:02 crc kubenswrapper[4933]: I0122 06:55:02.061597 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mbkft" event={"ID":"083bbfdf-9e1f-44c2-a472-ddcae3659afe","Type":"ContainerDied","Data":"b833ed4a51067197c6250b5dd5c692892ea7a6ba614f8f23111761d2df78e1be"}
Jan 22 06:55:02 crc kubenswrapper[4933]: I0122 06:55:02.127631 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-z78dp"
Jan 22 06:55:02 crc kubenswrapper[4933]: I0122 06:55:02.624534 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mbkft"
Jan 22 06:55:02 crc kubenswrapper[4933]: I0122 06:55:02.763401 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mhfjx\" (UniqueName: \"kubernetes.io/projected/083bbfdf-9e1f-44c2-a472-ddcae3659afe-kube-api-access-mhfjx\") pod \"083bbfdf-9e1f-44c2-a472-ddcae3659afe\" (UID: \"083bbfdf-9e1f-44c2-a472-ddcae3659afe\") "
Jan 22 06:55:02 crc kubenswrapper[4933]: I0122 06:55:02.763498 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/083bbfdf-9e1f-44c2-a472-ddcae3659afe-catalog-content\") pod \"083bbfdf-9e1f-44c2-a472-ddcae3659afe\" (UID: \"083bbfdf-9e1f-44c2-a472-ddcae3659afe\") "
Jan 22 06:55:02 crc kubenswrapper[4933]: I0122 06:55:02.763575 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/083bbfdf-9e1f-44c2-a472-ddcae3659afe-utilities\") pod \"083bbfdf-9e1f-44c2-a472-ddcae3659afe\" (UID: \"083bbfdf-9e1f-44c2-a472-ddcae3659afe\") "
Jan 22 06:55:02 crc kubenswrapper[4933]: I0122 06:55:02.764268 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/083bbfdf-9e1f-44c2-a472-ddcae3659afe-utilities" (OuterVolumeSpecName: "utilities") pod "083bbfdf-9e1f-44c2-a472-ddcae3659afe" (UID: "083bbfdf-9e1f-44c2-a472-ddcae3659afe"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 06:55:02 crc kubenswrapper[4933]: I0122 06:55:02.771171 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/083bbfdf-9e1f-44c2-a472-ddcae3659afe-kube-api-access-mhfjx" (OuterVolumeSpecName: "kube-api-access-mhfjx") pod "083bbfdf-9e1f-44c2-a472-ddcae3659afe" (UID: "083bbfdf-9e1f-44c2-a472-ddcae3659afe"). InnerVolumeSpecName "kube-api-access-mhfjx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 06:55:02 crc kubenswrapper[4933]: I0122 06:55:02.785769 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/083bbfdf-9e1f-44c2-a472-ddcae3659afe-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "083bbfdf-9e1f-44c2-a472-ddcae3659afe" (UID: "083bbfdf-9e1f-44c2-a472-ddcae3659afe"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 06:55:02 crc kubenswrapper[4933]: I0122 06:55:02.864919 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/083bbfdf-9e1f-44c2-a472-ddcae3659afe-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 22 06:55:02 crc kubenswrapper[4933]: I0122 06:55:02.864954 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/083bbfdf-9e1f-44c2-a472-ddcae3659afe-utilities\") on node \"crc\" DevicePath \"\""
Jan 22 06:55:02 crc kubenswrapper[4933]: I0122 06:55:02.864967 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mhfjx\" (UniqueName: \"kubernetes.io/projected/083bbfdf-9e1f-44c2-a472-ddcae3659afe-kube-api-access-mhfjx\") on node \"crc\" DevicePath \"\""
Jan 22 06:55:03 crc kubenswrapper[4933]: I0122 06:55:03.072056 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mbkft"
Jan 22 06:55:03 crc kubenswrapper[4933]: I0122 06:55:03.072491 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mbkft" event={"ID":"083bbfdf-9e1f-44c2-a472-ddcae3659afe","Type":"ContainerDied","Data":"2287e64a6c2604214b440962f94309ce13d82a9a41bf785c945620a8c64f5a32"}
Jan 22 06:55:03 crc kubenswrapper[4933]: I0122 06:55:03.072529 4933 scope.go:117] "RemoveContainer" containerID="b833ed4a51067197c6250b5dd5c692892ea7a6ba614f8f23111761d2df78e1be"
Jan 22 06:55:03 crc kubenswrapper[4933]: I0122 06:55:03.093363 4933 scope.go:117] "RemoveContainer" containerID="595b8051533a006dc6b207691ff3a24916465bc6eb22f2183da844019dec2f71"
Jan 22 06:55:03 crc kubenswrapper[4933]: I0122 06:55:03.102715 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mbkft"]
Jan 22 06:55:03 crc kubenswrapper[4933]: I0122 06:55:03.107998 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-mbkft"]
Jan 22 06:55:03 crc kubenswrapper[4933]: I0122 06:55:03.142909 4933 scope.go:117] "RemoveContainer" containerID="f68b13b92ba5f82e967f86e6e5044d473bc029a569c1fa29c79dff6aed82ffdf"
Jan 22 06:55:03 crc kubenswrapper[4933]: I0122 06:55:03.661282 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-z78dp"]
Jan 22 06:55:04 crc kubenswrapper[4933]: I0122 06:55:04.077549 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-z78dp" podUID="5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba" containerName="registry-server" containerID="cri-o://b88561a3add6e27f055fa1c1897ee711b17bd31f5fe0992ac271ac2f20cc6e62" gracePeriod=2
Jan 22 06:55:04 crc kubenswrapper[4933]: I0122 06:55:04.491977 4933 scope.go:117] "RemoveContainer" containerID="70acd110ff58947ee03c9d55a10cfccd0f2ffcaa0ff8dbffe4645cb8592c27a8"
Jan 22 06:55:04 crc kubenswrapper[4933]: E0122 06:55:04.492414 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 06:55:04 crc kubenswrapper[4933]: I0122 06:55:04.500644 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="083bbfdf-9e1f-44c2-a472-ddcae3659afe" path="/var/lib/kubelet/pods/083bbfdf-9e1f-44c2-a472-ddcae3659afe/volumes"
Jan 22 06:55:04 crc kubenswrapper[4933]: I0122 06:55:04.864779 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z78dp"
Jan 22 06:55:04 crc kubenswrapper[4933]: I0122 06:55:04.993159 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zwpgx\" (UniqueName: \"kubernetes.io/projected/5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba-kube-api-access-zwpgx\") pod \"5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba\" (UID: \"5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba\") "
Jan 22 06:55:04 crc kubenswrapper[4933]: I0122 06:55:04.993227 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba-utilities\") pod \"5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba\" (UID: \"5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba\") "
Jan 22 06:55:04 crc kubenswrapper[4933]: I0122 06:55:04.993364 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba-catalog-content\") pod \"5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba\" (UID: \"5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba\") "
Jan 22 06:55:04 crc kubenswrapper[4933]: I0122 06:55:04.994192 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba-utilities" (OuterVolumeSpecName: "utilities") pod "5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba" (UID: "5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 06:55:05 crc kubenswrapper[4933]: I0122 06:55:04.998701 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba-kube-api-access-zwpgx" (OuterVolumeSpecName: "kube-api-access-zwpgx") pod "5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba" (UID: "5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba"). InnerVolumeSpecName "kube-api-access-zwpgx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 06:55:05 crc kubenswrapper[4933]: I0122 06:55:05.084520 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba" (UID: "5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 06:55:05 crc kubenswrapper[4933]: I0122 06:55:05.091832 4933 generic.go:334] "Generic (PLEG): container finished" podID="5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba" containerID="b88561a3add6e27f055fa1c1897ee711b17bd31f5fe0992ac271ac2f20cc6e62" exitCode=0
Jan 22 06:55:05 crc kubenswrapper[4933]: I0122 06:55:05.091875 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z78dp" event={"ID":"5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba","Type":"ContainerDied","Data":"b88561a3add6e27f055fa1c1897ee711b17bd31f5fe0992ac271ac2f20cc6e62"}
Jan 22 06:55:05 crc kubenswrapper[4933]: I0122 06:55:05.091899 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z78dp" event={"ID":"5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba","Type":"ContainerDied","Data":"6904930abb258c7ac518633352a3f2fa749928fba02c6042ec85c919345ea33f"}
Jan 22 06:55:05 crc kubenswrapper[4933]: I0122 06:55:05.091916 4933 scope.go:117] "RemoveContainer" containerID="b88561a3add6e27f055fa1c1897ee711b17bd31f5fe0992ac271ac2f20cc6e62"
Jan 22 06:55:05 crc kubenswrapper[4933]: I0122 06:55:05.091966 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z78dp"
Jan 22 06:55:05 crc kubenswrapper[4933]: I0122 06:55:05.094948 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zwpgx\" (UniqueName: \"kubernetes.io/projected/5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba-kube-api-access-zwpgx\") on node \"crc\" DevicePath \"\""
Jan 22 06:55:05 crc kubenswrapper[4933]: I0122 06:55:05.094978 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba-utilities\") on node \"crc\" DevicePath \"\""
Jan 22 06:55:05 crc kubenswrapper[4933]: I0122 06:55:05.095004 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 22 06:55:05 crc kubenswrapper[4933]: I0122 06:55:05.556396 4933 scope.go:117] "RemoveContainer" containerID="fa0de77e47848786d8510ed408bf876febfeab573fb82dc679cd80b40c366cc6"
Jan 22 06:55:05 crc kubenswrapper[4933]: I0122 06:55:05.569585 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-z78dp"]
Jan 22 06:55:05 crc kubenswrapper[4933]: I0122 06:55:05.588091 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-z78dp"]
Jan 22 06:55:05 crc kubenswrapper[4933]: I0122 06:55:05.590795 4933 scope.go:117] "RemoveContainer" containerID="9619dae4158b5c872697387bfaf789be2987413af4fed6835ab2c60ee0d63212"
Jan 22 06:55:05 crc kubenswrapper[4933]: I0122 06:55:05.632072 4933 scope.go:117] "RemoveContainer" containerID="b88561a3add6e27f055fa1c1897ee711b17bd31f5fe0992ac271ac2f20cc6e62"
Jan 22 06:55:05 crc kubenswrapper[4933]: E0122 06:55:05.632573 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b88561a3add6e27f055fa1c1897ee711b17bd31f5fe0992ac271ac2f20cc6e62\": container with ID starting with b88561a3add6e27f055fa1c1897ee711b17bd31f5fe0992ac271ac2f20cc6e62 not found: ID does not exist" containerID="b88561a3add6e27f055fa1c1897ee711b17bd31f5fe0992ac271ac2f20cc6e62"
Jan 22 06:55:05 crc kubenswrapper[4933]: I0122 06:55:05.632604 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b88561a3add6e27f055fa1c1897ee711b17bd31f5fe0992ac271ac2f20cc6e62"} err="failed to get container status \"b88561a3add6e27f055fa1c1897ee711b17bd31f5fe0992ac271ac2f20cc6e62\": rpc error: code = NotFound desc = could not find container \"b88561a3add6e27f055fa1c1897ee711b17bd31f5fe0992ac271ac2f20cc6e62\": container with ID starting with b88561a3add6e27f055fa1c1897ee711b17bd31f5fe0992ac271ac2f20cc6e62 not found: ID does not exist"
Jan 22 06:55:05 crc kubenswrapper[4933]: I0122 06:55:05.632624 4933 scope.go:117] "RemoveContainer" containerID="fa0de77e47848786d8510ed408bf876febfeab573fb82dc679cd80b40c366cc6"
Jan 22 06:55:05 crc kubenswrapper[4933]: E0122 06:55:05.632992 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fa0de77e47848786d8510ed408bf876febfeab573fb82dc679cd80b40c366cc6\": container with ID starting with fa0de77e47848786d8510ed408bf876febfeab573fb82dc679cd80b40c366cc6 not found: ID does not exist" containerID="fa0de77e47848786d8510ed408bf876febfeab573fb82dc679cd80b40c366cc6"
Jan 22 06:55:05 crc kubenswrapper[4933]: I0122 06:55:05.633036 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fa0de77e47848786d8510ed408bf876febfeab573fb82dc679cd80b40c366cc6"} err="failed to get container status \"fa0de77e47848786d8510ed408bf876febfeab573fb82dc679cd80b40c366cc6\": rpc error: code = NotFound desc = could not find container \"fa0de77e47848786d8510ed408bf876febfeab573fb82dc679cd80b40c366cc6\": container with ID starting with fa0de77e47848786d8510ed408bf876febfeab573fb82dc679cd80b40c366cc6 not found: ID does not exist"
Jan 22 06:55:05 crc kubenswrapper[4933]: I0122 06:55:05.633067 4933 scope.go:117] "RemoveContainer" containerID="9619dae4158b5c872697387bfaf789be2987413af4fed6835ab2c60ee0d63212"
Jan 22 06:55:05 crc kubenswrapper[4933]: E0122 06:55:05.633590 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9619dae4158b5c872697387bfaf789be2987413af4fed6835ab2c60ee0d63212\": container with ID starting with 9619dae4158b5c872697387bfaf789be2987413af4fed6835ab2c60ee0d63212 not found: ID does not exist" containerID="9619dae4158b5c872697387bfaf789be2987413af4fed6835ab2c60ee0d63212"
Jan 22 06:55:05 crc kubenswrapper[4933]: I0122 06:55:05.633665 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9619dae4158b5c872697387bfaf789be2987413af4fed6835ab2c60ee0d63212"} err="failed to get container status \"9619dae4158b5c872697387bfaf789be2987413af4fed6835ab2c60ee0d63212\": rpc error: code = NotFound desc = could not find container \"9619dae4158b5c872697387bfaf789be2987413af4fed6835ab2c60ee0d63212\": container with ID starting with 9619dae4158b5c872697387bfaf789be2987413af4fed6835ab2c60ee0d63212 not found: ID does not exist"
Jan 22 06:55:06 crc kubenswrapper[4933]: I0122 06:55:06.500533 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba" path="/var/lib/kubelet/pods/5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba/volumes"
Jan 22 06:55:19 crc kubenswrapper[4933]: I0122 06:55:19.490551 4933 scope.go:117] "RemoveContainer" containerID="70acd110ff58947ee03c9d55a10cfccd0f2ffcaa0ff8dbffe4645cb8592c27a8"
Jan 22 06:55:20 crc kubenswrapper[4933]: I0122 06:55:20.207700 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerStarted","Data":"f62d8efce28d309e2776793632a3f44dfad69a688d95e8e9960d541814171bfc"}
Jan 22 06:55:32 crc kubenswrapper[4933]: I0122 06:55:32.814637 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-c6r69"]
Jan 22 06:55:32 crc kubenswrapper[4933]: E0122 06:55:32.815389 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="083bbfdf-9e1f-44c2-a472-ddcae3659afe" containerName="registry-server"
Jan 22 06:55:32 crc kubenswrapper[4933]: I0122 06:55:32.815402 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="083bbfdf-9e1f-44c2-a472-ddcae3659afe" containerName="registry-server"
Jan 22 06:55:32 crc kubenswrapper[4933]: E0122 06:55:32.815420 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="083bbfdf-9e1f-44c2-a472-ddcae3659afe" containerName="extract-utilities"
Jan 22 06:55:32 crc kubenswrapper[4933]: I0122 06:55:32.815428 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="083bbfdf-9e1f-44c2-a472-ddcae3659afe" containerName="extract-utilities"
Jan 22 06:55:32 crc kubenswrapper[4933]: E0122 06:55:32.815451 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba" containerName="extract-content"
Jan 22 06:55:32 crc kubenswrapper[4933]: I0122 06:55:32.815460 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba" containerName="extract-content"
Jan 22 06:55:32 crc kubenswrapper[4933]: E0122 06:55:32.815473 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba" containerName="extract-utilities"
Jan 22 06:55:32 crc kubenswrapper[4933]: I0122 06:55:32.815481 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba" containerName="extract-utilities"
Jan 22 06:55:32 crc kubenswrapper[4933]: E0122 06:55:32.815496 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="083bbfdf-9e1f-44c2-a472-ddcae3659afe" containerName="extract-content"
Jan 22 06:55:32 crc kubenswrapper[4933]: I0122 06:55:32.815504 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="083bbfdf-9e1f-44c2-a472-ddcae3659afe" containerName="extract-content"
Jan 22 06:55:32 crc kubenswrapper[4933]: E0122 06:55:32.815515 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba" containerName="registry-server"
Jan 22 06:55:32 crc kubenswrapper[4933]: I0122 06:55:32.815522 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba" containerName="registry-server"
Jan 22 06:55:32 crc kubenswrapper[4933]: I0122 06:55:32.815685 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="083bbfdf-9e1f-44c2-a472-ddcae3659afe" containerName="registry-server"
Jan 22 06:55:32 crc kubenswrapper[4933]: I0122 06:55:32.815716 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f34e9c4-26db-4e4a-8e83-8e5f7633b6ba" containerName="registry-server"
Jan 22 06:55:32 crc kubenswrapper[4933]: I0122 06:55:32.816904 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-c6r69"
Jan 22 06:55:32 crc kubenswrapper[4933]: I0122 06:55:32.835663 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-c6r69"]
Jan 22 06:55:32 crc kubenswrapper[4933]: I0122 06:55:32.924956 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc9d2214-278d-471e-a122-be30b9782ae6-utilities\") pod \"redhat-operators-c6r69\" (UID: \"fc9d2214-278d-471e-a122-be30b9782ae6\") " pod="openshift-marketplace/redhat-operators-c6r69"
Jan 22 06:55:32 crc kubenswrapper[4933]: I0122 06:55:32.925040 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dsgb7\" (UniqueName: \"kubernetes.io/projected/fc9d2214-278d-471e-a122-be30b9782ae6-kube-api-access-dsgb7\") pod \"redhat-operators-c6r69\" (UID: \"fc9d2214-278d-471e-a122-be30b9782ae6\") " pod="openshift-marketplace/redhat-operators-c6r69"
Jan 22 06:55:32 crc kubenswrapper[4933]: I0122 06:55:32.925146 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc9d2214-278d-471e-a122-be30b9782ae6-catalog-content\") pod \"redhat-operators-c6r69\" (UID: \"fc9d2214-278d-471e-a122-be30b9782ae6\") " pod="openshift-marketplace/redhat-operators-c6r69"
Jan 22 06:55:33 crc kubenswrapper[4933]: I0122 06:55:33.026220 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc9d2214-278d-471e-a122-be30b9782ae6-utilities\") pod \"redhat-operators-c6r69\" (UID: \"fc9d2214-278d-471e-a122-be30b9782ae6\") " pod="openshift-marketplace/redhat-operators-c6r69"
Jan 22 06:55:33 crc kubenswrapper[4933]: I0122 06:55:33.026314 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dsgb7\" (UniqueName: \"kubernetes.io/projected/fc9d2214-278d-471e-a122-be30b9782ae6-kube-api-access-dsgb7\") pod \"redhat-operators-c6r69\" (UID: \"fc9d2214-278d-471e-a122-be30b9782ae6\") " pod="openshift-marketplace/redhat-operators-c6r69"
Jan 22 06:55:33 crc kubenswrapper[4933]: I0122 06:55:33.026372 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc9d2214-278d-471e-a122-be30b9782ae6-catalog-content\") pod \"redhat-operators-c6r69\" (UID: \"fc9d2214-278d-471e-a122-be30b9782ae6\") " pod="openshift-marketplace/redhat-operators-c6r69"
Jan 22 06:55:33 crc kubenswrapper[4933]: I0122 06:55:33.026749 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc9d2214-278d-471e-a122-be30b9782ae6-utilities\") pod \"redhat-operators-c6r69\" (UID: \"fc9d2214-278d-471e-a122-be30b9782ae6\") " pod="openshift-marketplace/redhat-operators-c6r69"
Jan 22 06:55:33 crc kubenswrapper[4933]: I0122 06:55:33.026819 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc9d2214-278d-471e-a122-be30b9782ae6-catalog-content\") pod \"redhat-operators-c6r69\" (UID: \"fc9d2214-278d-471e-a122-be30b9782ae6\") " pod="openshift-marketplace/redhat-operators-c6r69"
Jan 22 06:55:33 crc kubenswrapper[4933]: I0122 06:55:33.046180 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dsgb7\" (UniqueName: \"kubernetes.io/projected/fc9d2214-278d-471e-a122-be30b9782ae6-kube-api-access-dsgb7\") pod \"redhat-operators-c6r69\" (UID: \"fc9d2214-278d-471e-a122-be30b9782ae6\") " pod="openshift-marketplace/redhat-operators-c6r69"
Jan 22 06:55:33 crc kubenswrapper[4933]: I0122 06:55:33.142320 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-c6r69"
Jan 22 06:55:33 crc kubenswrapper[4933]: I0122 06:55:33.584417 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-c6r69"]
Jan 22 06:55:34 crc kubenswrapper[4933]: I0122 06:55:34.301787 4933 generic.go:334] "Generic (PLEG): container finished" podID="fc9d2214-278d-471e-a122-be30b9782ae6" containerID="bac31453262de1023afe3908a39dc6a571d3fa07c7b07dc35cfd8d1f5480045b" exitCode=0
Jan 22 06:55:34 crc kubenswrapper[4933]: I0122 06:55:34.301940 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c6r69" event={"ID":"fc9d2214-278d-471e-a122-be30b9782ae6","Type":"ContainerDied","Data":"bac31453262de1023afe3908a39dc6a571d3fa07c7b07dc35cfd8d1f5480045b"}
Jan 22 06:55:34 crc kubenswrapper[4933]: I0122 06:55:34.302071 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c6r69" event={"ID":"fc9d2214-278d-471e-a122-be30b9782ae6","Type":"ContainerStarted","Data":"91ff994acc059f3e756c949266eef67a5809607713a8d73924ad27a4239a80c1"}
Jan 22 06:55:36 crc kubenswrapper[4933]: I0122 06:55:36.318009 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c6r69" event={"ID":"fc9d2214-278d-471e-a122-be30b9782ae6","Type":"ContainerStarted","Data":"670abc1cd0421a1c56b3b4e06415ac15cb4ca679c277f07ac4627bf530f0b38c"}
Jan 22 06:55:37 crc kubenswrapper[4933]: I0122 06:55:37.329180 4933 generic.go:334] "Generic (PLEG): container finished" podID="fc9d2214-278d-471e-a122-be30b9782ae6" containerID="670abc1cd0421a1c56b3b4e06415ac15cb4ca679c277f07ac4627bf530f0b38c" exitCode=0
Jan 22 06:55:37 crc kubenswrapper[4933]: I0122 06:55:37.329307 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c6r69" event={"ID":"fc9d2214-278d-471e-a122-be30b9782ae6","Type":"ContainerDied","Data":"670abc1cd0421a1c56b3b4e06415ac15cb4ca679c277f07ac4627bf530f0b38c"}
Jan 22 06:55:38 crc kubenswrapper[4933]: I0122 06:55:38.338867 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c6r69" event={"ID":"fc9d2214-278d-471e-a122-be30b9782ae6","Type":"ContainerStarted","Data":"7d70b44c8fdb851bd10762d464b50baf268ccd40095dfe3c681ec7ab266fce53"}
Jan 22 06:55:38 crc kubenswrapper[4933]: I0122 06:55:38.363272 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-c6r69" podStartSLOduration=2.921909008 podStartE2EDuration="6.363253007s" podCreationTimestamp="2026-01-22 06:55:32 +0000 UTC" firstStartedPulling="2026-01-22 06:55:34.304998527 +0000 UTC m=+4182.142123880" lastFinishedPulling="2026-01-22 06:55:37.746342486 +0000 UTC m=+4185.583467879" observedRunningTime="2026-01-22 06:55:38.358913721 +0000 UTC m=+4186.196039114" watchObservedRunningTime="2026-01-22 06:55:38.363253007 +0000 UTC m=+4186.200378360"
Jan 22 06:55:43 crc kubenswrapper[4933]: I0122 06:55:43.142673 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-c6r69"
Jan 22 06:55:43 crc kubenswrapper[4933]: I0122 06:55:43.142954 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-c6r69"
Jan 22 06:55:44 crc kubenswrapper[4933]: I0122 06:55:44.198885 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-c6r69" podUID="fc9d2214-278d-471e-a122-be30b9782ae6" containerName="registry-server" probeResult="failure" output=<
Jan 22 06:55:44 crc kubenswrapper[4933]: timeout: failed to connect service ":50051" within 1s
Jan 22 06:55:44 crc kubenswrapper[4933]: >
Jan 22 06:55:53 crc kubenswrapper[4933]: I0122 06:55:53.202692 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-c6r69"
Jan 22 06:55:53 crc kubenswrapper[4933]: I0122 06:55:53.248011 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-c6r69"
Jan 22 06:55:53 crc kubenswrapper[4933]: I0122 06:55:53.447840 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-c6r69"]
Jan 22 06:55:54 crc kubenswrapper[4933]: I0122 06:55:54.455259 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-c6r69" podUID="fc9d2214-278d-471e-a122-be30b9782ae6" containerName="registry-server" containerID="cri-o://7d70b44c8fdb851bd10762d464b50baf268ccd40095dfe3c681ec7ab266fce53" gracePeriod=2
Jan 22 06:55:55 crc kubenswrapper[4933]: I0122 06:55:55.479040 4933 generic.go:334] "Generic (PLEG): container finished" podID="fc9d2214-278d-471e-a122-be30b9782ae6" containerID="7d70b44c8fdb851bd10762d464b50baf268ccd40095dfe3c681ec7ab266fce53" exitCode=0
Jan 22 06:55:55 crc kubenswrapper[4933]: I0122 06:55:55.479127 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c6r69" event={"ID":"fc9d2214-278d-471e-a122-be30b9782ae6","Type":"ContainerDied","Data":"7d70b44c8fdb851bd10762d464b50baf268ccd40095dfe3c681ec7ab266fce53"}
Jan 22 06:55:55 crc kubenswrapper[4933]: I0122 06:55:55.891891 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-c6r69"
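The startup-probe failure above ("timeout: failed to connect service \":50051\" within 1s") is a gRPC connectivity check against the registry-server port. The sketch below approximates that check with the standard gRPC health API and the same 1s budget; it is an approximation, not the pod's actual probe binary:

```go
// Approximation of the startup probe failing above: a gRPC health
// check against the registry-server endpoint on :50051 with a 1s
// timeout. Not the actual probe binary used by the pod.
package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	conn, err := grpc.DialContext(ctx, "localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithBlock()) // block so the 1s budget is enforced here
	if err != nil {
		fmt.Println("probe failed:", err) // analogous to the timeout in the log
		return
	}
	defer conn.Close()

	resp, err := healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{})
	fmt.Println(resp.GetStatus(), err)
}
```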
Jan 22 06:55:55 crc kubenswrapper[4933]: I0122 06:55:55.965634 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc9d2214-278d-471e-a122-be30b9782ae6-utilities\") pod \"fc9d2214-278d-471e-a122-be30b9782ae6\" (UID: \"fc9d2214-278d-471e-a122-be30b9782ae6\") "
Jan 22 06:55:55 crc kubenswrapper[4933]: I0122 06:55:55.965706 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dsgb7\" (UniqueName: \"kubernetes.io/projected/fc9d2214-278d-471e-a122-be30b9782ae6-kube-api-access-dsgb7\") pod \"fc9d2214-278d-471e-a122-be30b9782ae6\" (UID: \"fc9d2214-278d-471e-a122-be30b9782ae6\") "
Jan 22 06:55:55 crc kubenswrapper[4933]: I0122 06:55:55.965877 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc9d2214-278d-471e-a122-be30b9782ae6-catalog-content\") pod \"fc9d2214-278d-471e-a122-be30b9782ae6\" (UID: \"fc9d2214-278d-471e-a122-be30b9782ae6\") "
Jan 22 06:55:55 crc kubenswrapper[4933]: I0122 06:55:55.967436 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fc9d2214-278d-471e-a122-be30b9782ae6-utilities" (OuterVolumeSpecName: "utilities") pod "fc9d2214-278d-471e-a122-be30b9782ae6" (UID: "fc9d2214-278d-471e-a122-be30b9782ae6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 06:55:55 crc kubenswrapper[4933]: I0122 06:55:55.975412 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc9d2214-278d-471e-a122-be30b9782ae6-kube-api-access-dsgb7" (OuterVolumeSpecName: "kube-api-access-dsgb7") pod "fc9d2214-278d-471e-a122-be30b9782ae6" (UID: "fc9d2214-278d-471e-a122-be30b9782ae6"). InnerVolumeSpecName "kube-api-access-dsgb7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 06:55:56 crc kubenswrapper[4933]: I0122 06:55:56.068122 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc9d2214-278d-471e-a122-be30b9782ae6-utilities\") on node \"crc\" DevicePath \"\""
Jan 22 06:55:56 crc kubenswrapper[4933]: I0122 06:55:56.068148 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dsgb7\" (UniqueName: \"kubernetes.io/projected/fc9d2214-278d-471e-a122-be30b9782ae6-kube-api-access-dsgb7\") on node \"crc\" DevicePath \"\""
Jan 22 06:55:56 crc kubenswrapper[4933]: I0122 06:55:56.115656 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fc9d2214-278d-471e-a122-be30b9782ae6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fc9d2214-278d-471e-a122-be30b9782ae6" (UID: "fc9d2214-278d-471e-a122-be30b9782ae6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 06:55:56 crc kubenswrapper[4933]: I0122 06:55:56.169427 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc9d2214-278d-471e-a122-be30b9782ae6-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 22 06:55:56 crc kubenswrapper[4933]: I0122 06:55:56.493869 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-c6r69"
Jan 22 06:55:56 crc kubenswrapper[4933]: I0122 06:55:56.498271 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c6r69" event={"ID":"fc9d2214-278d-471e-a122-be30b9782ae6","Type":"ContainerDied","Data":"91ff994acc059f3e756c949266eef67a5809607713a8d73924ad27a4239a80c1"}
Jan 22 06:55:56 crc kubenswrapper[4933]: I0122 06:55:56.498323 4933 scope.go:117] "RemoveContainer" containerID="7d70b44c8fdb851bd10762d464b50baf268ccd40095dfe3c681ec7ab266fce53"
Jan 22 06:55:56 crc kubenswrapper[4933]: I0122 06:55:56.516131 4933 scope.go:117] "RemoveContainer" containerID="670abc1cd0421a1c56b3b4e06415ac15cb4ca679c277f07ac4627bf530f0b38c"
Jan 22 06:55:56 crc kubenswrapper[4933]: I0122 06:55:56.535277 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-c6r69"]
Jan 22 06:55:56 crc kubenswrapper[4933]: I0122 06:55:56.544996 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-c6r69"]
Jan 22 06:55:56 crc kubenswrapper[4933]: I0122 06:55:56.552512 4933 scope.go:117] "RemoveContainer" containerID="bac31453262de1023afe3908a39dc6a571d3fa07c7b07dc35cfd8d1f5480045b"
Jan 22 06:55:58 crc kubenswrapper[4933]: I0122 06:55:58.507786 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fc9d2214-278d-471e-a122-be30b9782ae6" path="/var/lib/kubelet/pods/fc9d2214-278d-471e-a122-be30b9782ae6/volumes"
Jan 22 06:57:40 crc kubenswrapper[4933]: I0122 06:57:40.943875 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 06:57:40 crc kubenswrapper[4933]: I0122 06:57:40.944753 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 06:58:10 crc kubenswrapper[4933]: I0122 06:58:10.943053 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 06:58:10 crc kubenswrapper[4933]: I0122 06:58:10.943648 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 06:58:40 crc kubenswrapper[4933]: I0122 06:58:40.943575 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 06:58:40 crc kubenswrapper[4933]: I0122 06:58:40.944152 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
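The liveness failures above are plain HTTP GETs against the machine-config-daemon health endpoint; "connection refused" means nothing is listening on 127.0.0.1:8798 at probe time. An equivalent check in miniature:

```go
// Sketch of the liveness check failing above: an HTTP GET to the
// machine-config-daemon health endpoint named in the log.
package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{Timeout: time.Second}
	resp, err := client.Get("http://127.0.0.1:8798/health")
	if err != nil {
		// With nothing listening, this mirrors the log's
		// "dial tcp 127.0.0.1:8798: connect: connection refused".
		fmt.Println("Probe failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("Probe OK:", resp.Status)
}
```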
pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:58:40 crc kubenswrapper[4933]: I0122 06:58:40.944214 4933 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" Jan 22 06:58:40 crc kubenswrapper[4933]: I0122 06:58:40.945034 4933 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f62d8efce28d309e2776793632a3f44dfad69a688d95e8e9960d541814171bfc"} pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 06:58:40 crc kubenswrapper[4933]: I0122 06:58:40.945166 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" containerID="cri-o://f62d8efce28d309e2776793632a3f44dfad69a688d95e8e9960d541814171bfc" gracePeriod=600 Jan 22 06:58:41 crc kubenswrapper[4933]: I0122 06:58:41.896371 4933 generic.go:334] "Generic (PLEG): container finished" podID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerID="f62d8efce28d309e2776793632a3f44dfad69a688d95e8e9960d541814171bfc" exitCode=0 Jan 22 06:58:41 crc kubenswrapper[4933]: I0122 06:58:41.896480 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerDied","Data":"f62d8efce28d309e2776793632a3f44dfad69a688d95e8e9960d541814171bfc"} Jan 22 06:58:41 crc kubenswrapper[4933]: I0122 06:58:41.897059 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerStarted","Data":"d50102539d4e07b60779f4528e926809a62f0f1c3d74b4eadbe38466d0573cb2"} Jan 22 06:58:41 crc kubenswrapper[4933]: I0122 06:58:41.897110 4933 scope.go:117] "RemoveContainer" containerID="70acd110ff58947ee03c9d55a10cfccd0f2ffcaa0ff8dbffe4645cb8592c27a8" Jan 22 07:00:00 crc kubenswrapper[4933]: I0122 07:00:00.194432 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484420-7l26m"] Jan 22 07:00:00 crc kubenswrapper[4933]: E0122 07:00:00.195636 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc9d2214-278d-471e-a122-be30b9782ae6" containerName="extract-utilities" Jan 22 07:00:00 crc kubenswrapper[4933]: I0122 07:00:00.195656 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc9d2214-278d-471e-a122-be30b9782ae6" containerName="extract-utilities" Jan 22 07:00:00 crc kubenswrapper[4933]: E0122 07:00:00.195677 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc9d2214-278d-471e-a122-be30b9782ae6" containerName="extract-content" Jan 22 07:00:00 crc kubenswrapper[4933]: I0122 07:00:00.195686 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc9d2214-278d-471e-a122-be30b9782ae6" containerName="extract-content" Jan 22 07:00:00 crc kubenswrapper[4933]: E0122 07:00:00.195707 4933 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="fc9d2214-278d-471e-a122-be30b9782ae6" containerName="registry-server" Jan 22 07:00:00 crc kubenswrapper[4933]: I0122 07:00:00.195716 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc9d2214-278d-471e-a122-be30b9782ae6" containerName="registry-server" Jan 22 07:00:00 crc kubenswrapper[4933]: I0122 07:00:00.195878 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc9d2214-278d-471e-a122-be30b9782ae6" containerName="registry-server" Jan 22 07:00:00 crc kubenswrapper[4933]: I0122 07:00:00.196466 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484420-7l26m" Jan 22 07:00:00 crc kubenswrapper[4933]: I0122 07:00:00.200632 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 22 07:00:00 crc kubenswrapper[4933]: I0122 07:00:00.200894 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 22 07:00:00 crc kubenswrapper[4933]: I0122 07:00:00.219229 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484420-7l26m"] Jan 22 07:00:00 crc kubenswrapper[4933]: I0122 07:00:00.308658 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/91ecac75-7375-46f4-ab03-d11965c60ca7-secret-volume\") pod \"collect-profiles-29484420-7l26m\" (UID: \"91ecac75-7375-46f4-ab03-d11965c60ca7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484420-7l26m" Jan 22 07:00:00 crc kubenswrapper[4933]: I0122 07:00:00.308726 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pwpzl\" (UniqueName: \"kubernetes.io/projected/91ecac75-7375-46f4-ab03-d11965c60ca7-kube-api-access-pwpzl\") pod \"collect-profiles-29484420-7l26m\" (UID: \"91ecac75-7375-46f4-ab03-d11965c60ca7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484420-7l26m" Jan 22 07:00:00 crc kubenswrapper[4933]: I0122 07:00:00.308904 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/91ecac75-7375-46f4-ab03-d11965c60ca7-config-volume\") pod \"collect-profiles-29484420-7l26m\" (UID: \"91ecac75-7375-46f4-ab03-d11965c60ca7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484420-7l26m" Jan 22 07:00:00 crc kubenswrapper[4933]: I0122 07:00:00.410727 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/91ecac75-7375-46f4-ab03-d11965c60ca7-config-volume\") pod \"collect-profiles-29484420-7l26m\" (UID: \"91ecac75-7375-46f4-ab03-d11965c60ca7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484420-7l26m" Jan 22 07:00:00 crc kubenswrapper[4933]: I0122 07:00:00.410868 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/91ecac75-7375-46f4-ab03-d11965c60ca7-secret-volume\") pod \"collect-profiles-29484420-7l26m\" (UID: \"91ecac75-7375-46f4-ab03-d11965c60ca7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484420-7l26m" Jan 22 07:00:00 crc kubenswrapper[4933]: I0122 07:00:00.410904 4933 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-pwpzl\" (UniqueName: \"kubernetes.io/projected/91ecac75-7375-46f4-ab03-d11965c60ca7-kube-api-access-pwpzl\") pod \"collect-profiles-29484420-7l26m\" (UID: \"91ecac75-7375-46f4-ab03-d11965c60ca7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484420-7l26m" Jan 22 07:00:00 crc kubenswrapper[4933]: I0122 07:00:00.411718 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/91ecac75-7375-46f4-ab03-d11965c60ca7-config-volume\") pod \"collect-profiles-29484420-7l26m\" (UID: \"91ecac75-7375-46f4-ab03-d11965c60ca7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484420-7l26m" Jan 22 07:00:00 crc kubenswrapper[4933]: I0122 07:00:00.421236 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/91ecac75-7375-46f4-ab03-d11965c60ca7-secret-volume\") pod \"collect-profiles-29484420-7l26m\" (UID: \"91ecac75-7375-46f4-ab03-d11965c60ca7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484420-7l26m" Jan 22 07:00:00 crc kubenswrapper[4933]: I0122 07:00:00.428424 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pwpzl\" (UniqueName: \"kubernetes.io/projected/91ecac75-7375-46f4-ab03-d11965c60ca7-kube-api-access-pwpzl\") pod \"collect-profiles-29484420-7l26m\" (UID: \"91ecac75-7375-46f4-ab03-d11965c60ca7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484420-7l26m" Jan 22 07:00:00 crc kubenswrapper[4933]: I0122 07:00:00.525971 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484420-7l26m" Jan 22 07:00:00 crc kubenswrapper[4933]: I0122 07:00:00.960195 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484420-7l26m"] Jan 22 07:00:01 crc kubenswrapper[4933]: I0122 07:00:01.627477 4933 generic.go:334] "Generic (PLEG): container finished" podID="91ecac75-7375-46f4-ab03-d11965c60ca7" containerID="383a573a21c2b648d11ce18eff2704d0db9cffb9be1e9d1d6125ea59d687e759" exitCode=0 Jan 22 07:00:01 crc kubenswrapper[4933]: I0122 07:00:01.627532 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484420-7l26m" event={"ID":"91ecac75-7375-46f4-ab03-d11965c60ca7","Type":"ContainerDied","Data":"383a573a21c2b648d11ce18eff2704d0db9cffb9be1e9d1d6125ea59d687e759"} Jan 22 07:00:01 crc kubenswrapper[4933]: I0122 07:00:01.627750 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484420-7l26m" event={"ID":"91ecac75-7375-46f4-ab03-d11965c60ca7","Type":"ContainerStarted","Data":"e0bd220b19c7e95e11e2882b15f94b612b10f2710ef6b9d577cad623d5ae2de9"} Jan 22 07:00:02 crc kubenswrapper[4933]: I0122 07:00:02.879131 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484420-7l26m" Jan 22 07:00:02 crc kubenswrapper[4933]: I0122 07:00:02.942737 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pwpzl\" (UniqueName: \"kubernetes.io/projected/91ecac75-7375-46f4-ab03-d11965c60ca7-kube-api-access-pwpzl\") pod \"91ecac75-7375-46f4-ab03-d11965c60ca7\" (UID: \"91ecac75-7375-46f4-ab03-d11965c60ca7\") " Jan 22 07:00:02 crc kubenswrapper[4933]: I0122 07:00:02.942822 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/91ecac75-7375-46f4-ab03-d11965c60ca7-secret-volume\") pod \"91ecac75-7375-46f4-ab03-d11965c60ca7\" (UID: \"91ecac75-7375-46f4-ab03-d11965c60ca7\") " Jan 22 07:00:02 crc kubenswrapper[4933]: I0122 07:00:02.942855 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/91ecac75-7375-46f4-ab03-d11965c60ca7-config-volume\") pod \"91ecac75-7375-46f4-ab03-d11965c60ca7\" (UID: \"91ecac75-7375-46f4-ab03-d11965c60ca7\") " Jan 22 07:00:02 crc kubenswrapper[4933]: I0122 07:00:02.943788 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/91ecac75-7375-46f4-ab03-d11965c60ca7-config-volume" (OuterVolumeSpecName: "config-volume") pod "91ecac75-7375-46f4-ab03-d11965c60ca7" (UID: "91ecac75-7375-46f4-ab03-d11965c60ca7"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:00:02 crc kubenswrapper[4933]: I0122 07:00:02.948194 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91ecac75-7375-46f4-ab03-d11965c60ca7-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "91ecac75-7375-46f4-ab03-d11965c60ca7" (UID: "91ecac75-7375-46f4-ab03-d11965c60ca7"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:00:02 crc kubenswrapper[4933]: I0122 07:00:02.949141 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/91ecac75-7375-46f4-ab03-d11965c60ca7-kube-api-access-pwpzl" (OuterVolumeSpecName: "kube-api-access-pwpzl") pod "91ecac75-7375-46f4-ab03-d11965c60ca7" (UID: "91ecac75-7375-46f4-ab03-d11965c60ca7"). InnerVolumeSpecName "kube-api-access-pwpzl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:00:03 crc kubenswrapper[4933]: I0122 07:00:03.044747 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pwpzl\" (UniqueName: \"kubernetes.io/projected/91ecac75-7375-46f4-ab03-d11965c60ca7-kube-api-access-pwpzl\") on node \"crc\" DevicePath \"\"" Jan 22 07:00:03 crc kubenswrapper[4933]: I0122 07:00:03.045234 4933 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/91ecac75-7375-46f4-ab03-d11965c60ca7-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 22 07:00:03 crc kubenswrapper[4933]: I0122 07:00:03.045359 4933 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/91ecac75-7375-46f4-ab03-d11965c60ca7-config-volume\") on node \"crc\" DevicePath \"\"" Jan 22 07:00:03 crc kubenswrapper[4933]: I0122 07:00:03.642503 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484420-7l26m" event={"ID":"91ecac75-7375-46f4-ab03-d11965c60ca7","Type":"ContainerDied","Data":"e0bd220b19c7e95e11e2882b15f94b612b10f2710ef6b9d577cad623d5ae2de9"} Jan 22 07:00:03 crc kubenswrapper[4933]: I0122 07:00:03.642555 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e0bd220b19c7e95e11e2882b15f94b612b10f2710ef6b9d577cad623d5ae2de9" Jan 22 07:00:03 crc kubenswrapper[4933]: I0122 07:00:03.642618 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484420-7l26m" Jan 22 07:00:03 crc kubenswrapper[4933]: I0122 07:00:03.952160 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484375-lhpj4"] Jan 22 07:00:03 crc kubenswrapper[4933]: I0122 07:00:03.957481 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484375-lhpj4"] Jan 22 07:00:04 crc kubenswrapper[4933]: I0122 07:00:04.506929 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1" path="/var/lib/kubelet/pods/3a6cf41a-1b9a-4d31-86ee-63b22a54c2a1/volumes" Jan 22 07:01:03 crc kubenswrapper[4933]: I0122 07:01:03.584664 4933 scope.go:117] "RemoveContainer" containerID="d448ee6ab88e31963d507d2b89e8f43643f2d6bdf6a926e176b9b5fd1cf4bca9" Jan 22 07:01:10 crc kubenswrapper[4933]: I0122 07:01:10.943824 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 07:01:10 crc kubenswrapper[4933]: I0122 07:01:10.945485 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 07:01:40 crc kubenswrapper[4933]: I0122 07:01:40.948401 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" 
start-of-body= Jan 22 07:01:40 crc kubenswrapper[4933]: I0122 07:01:40.948886 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 07:02:10 crc kubenswrapper[4933]: I0122 07:02:10.942670 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 07:02:10 crc kubenswrapper[4933]: I0122 07:02:10.943378 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 07:02:10 crc kubenswrapper[4933]: I0122 07:02:10.943444 4933 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" Jan 22 07:02:10 crc kubenswrapper[4933]: I0122 07:02:10.944207 4933 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d50102539d4e07b60779f4528e926809a62f0f1c3d74b4eadbe38466d0573cb2"} pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 07:02:10 crc kubenswrapper[4933]: I0122 07:02:10.944267 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" containerID="cri-o://d50102539d4e07b60779f4528e926809a62f0f1c3d74b4eadbe38466d0573cb2" gracePeriod=600 Jan 22 07:02:11 crc kubenswrapper[4933]: E0122 07:02:11.581383 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:02:11 crc kubenswrapper[4933]: I0122 07:02:11.616167 4933 generic.go:334] "Generic (PLEG): container finished" podID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerID="d50102539d4e07b60779f4528e926809a62f0f1c3d74b4eadbe38466d0573cb2" exitCode=0 Jan 22 07:02:11 crc kubenswrapper[4933]: I0122 07:02:11.616209 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerDied","Data":"d50102539d4e07b60779f4528e926809a62f0f1c3d74b4eadbe38466d0573cb2"} Jan 22 07:02:11 crc kubenswrapper[4933]: I0122 07:02:11.616242 4933 scope.go:117] "RemoveContainer" containerID="f62d8efce28d309e2776793632a3f44dfad69a688d95e8e9960d541814171bfc" Jan 22 07:02:11 crc kubenswrapper[4933]: I0122 07:02:11.617368 4933 scope.go:117] "RemoveContainer" 
containerID="d50102539d4e07b60779f4528e926809a62f0f1c3d74b4eadbe38466d0573cb2" Jan 22 07:02:11 crc kubenswrapper[4933]: E0122 07:02:11.618229 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:02:23 crc kubenswrapper[4933]: I0122 07:02:23.491199 4933 scope.go:117] "RemoveContainer" containerID="d50102539d4e07b60779f4528e926809a62f0f1c3d74b4eadbe38466d0573cb2" Jan 22 07:02:23 crc kubenswrapper[4933]: E0122 07:02:23.491899 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:02:35 crc kubenswrapper[4933]: I0122 07:02:35.491145 4933 scope.go:117] "RemoveContainer" containerID="d50102539d4e07b60779f4528e926809a62f0f1c3d74b4eadbe38466d0573cb2" Jan 22 07:02:35 crc kubenswrapper[4933]: E0122 07:02:35.492052 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:02:46 crc kubenswrapper[4933]: I0122 07:02:46.491251 4933 scope.go:117] "RemoveContainer" containerID="d50102539d4e07b60779f4528e926809a62f0f1c3d74b4eadbe38466d0573cb2" Jan 22 07:02:46 crc kubenswrapper[4933]: E0122 07:02:46.494283 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:02:57 crc kubenswrapper[4933]: I0122 07:02:57.490639 4933 scope.go:117] "RemoveContainer" containerID="d50102539d4e07b60779f4528e926809a62f0f1c3d74b4eadbe38466d0573cb2" Jan 22 07:02:57 crc kubenswrapper[4933]: E0122 07:02:57.491841 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:03:10 crc kubenswrapper[4933]: I0122 07:03:10.490809 4933 scope.go:117] "RemoveContainer" containerID="d50102539d4e07b60779f4528e926809a62f0f1c3d74b4eadbe38466d0573cb2" Jan 22 07:03:10 crc kubenswrapper[4933]: E0122 07:03:10.491517 4933 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:03:24 crc kubenswrapper[4933]: I0122 07:03:24.490389 4933 scope.go:117] "RemoveContainer" containerID="d50102539d4e07b60779f4528e926809a62f0f1c3d74b4eadbe38466d0573cb2" Jan 22 07:03:24 crc kubenswrapper[4933]: E0122 07:03:24.491226 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:03:39 crc kubenswrapper[4933]: I0122 07:03:39.490391 4933 scope.go:117] "RemoveContainer" containerID="d50102539d4e07b60779f4528e926809a62f0f1c3d74b4eadbe38466d0573cb2" Jan 22 07:03:39 crc kubenswrapper[4933]: E0122 07:03:39.491128 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:03:54 crc kubenswrapper[4933]: I0122 07:03:54.491365 4933 scope.go:117] "RemoveContainer" containerID="d50102539d4e07b60779f4528e926809a62f0f1c3d74b4eadbe38466d0573cb2" Jan 22 07:03:54 crc kubenswrapper[4933]: E0122 07:03:54.492187 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:04:06 crc kubenswrapper[4933]: I0122 07:04:06.491771 4933 scope.go:117] "RemoveContainer" containerID="d50102539d4e07b60779f4528e926809a62f0f1c3d74b4eadbe38466d0573cb2" Jan 22 07:04:06 crc kubenswrapper[4933]: E0122 07:04:06.494688 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:04:20 crc kubenswrapper[4933]: I0122 07:04:20.490826 4933 scope.go:117] "RemoveContainer" containerID="d50102539d4e07b60779f4528e926809a62f0f1c3d74b4eadbe38466d0573cb2" Jan 22 07:04:20 crc kubenswrapper[4933]: E0122 07:04:20.491864 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:04:33 crc kubenswrapper[4933]: I0122 07:04:33.490696 4933 scope.go:117] "RemoveContainer" containerID="d50102539d4e07b60779f4528e926809a62f0f1c3d74b4eadbe38466d0573cb2" Jan 22 07:04:33 crc kubenswrapper[4933]: E0122 07:04:33.491425 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:04:35 crc kubenswrapper[4933]: I0122 07:04:35.298610 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["crc-storage/crc-storage-crc-qktzs"] Jan 22 07:04:35 crc kubenswrapper[4933]: I0122 07:04:35.305050 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["crc-storage/crc-storage-crc-qktzs"] Jan 22 07:04:35 crc kubenswrapper[4933]: I0122 07:04:35.432851 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-fsf6b"] Jan 22 07:04:35 crc kubenswrapper[4933]: E0122 07:04:35.433155 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91ecac75-7375-46f4-ab03-d11965c60ca7" containerName="collect-profiles" Jan 22 07:04:35 crc kubenswrapper[4933]: I0122 07:04:35.433172 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="91ecac75-7375-46f4-ab03-d11965c60ca7" containerName="collect-profiles" Jan 22 07:04:35 crc kubenswrapper[4933]: I0122 07:04:35.433329 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="91ecac75-7375-46f4-ab03-d11965c60ca7" containerName="collect-profiles" Jan 22 07:04:35 crc kubenswrapper[4933]: I0122 07:04:35.433872 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-fsf6b" Jan 22 07:04:35 crc kubenswrapper[4933]: I0122 07:04:35.436091 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Jan 22 07:04:35 crc kubenswrapper[4933]: I0122 07:04:35.436181 4933 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-ctsm4" Jan 22 07:04:35 crc kubenswrapper[4933]: I0122 07:04:35.436185 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Jan 22 07:04:35 crc kubenswrapper[4933]: I0122 07:04:35.436423 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Jan 22 07:04:35 crc kubenswrapper[4933]: I0122 07:04:35.444210 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-fsf6b"] Jan 22 07:04:35 crc kubenswrapper[4933]: I0122 07:04:35.605822 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/4edb75c2-59a9-4b69-85b8-de9f004c04e2-node-mnt\") pod \"crc-storage-crc-fsf6b\" (UID: \"4edb75c2-59a9-4b69-85b8-de9f004c04e2\") " pod="crc-storage/crc-storage-crc-fsf6b" Jan 22 07:04:35 crc kubenswrapper[4933]: I0122 07:04:35.605888 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xk7nf\" (UniqueName: \"kubernetes.io/projected/4edb75c2-59a9-4b69-85b8-de9f004c04e2-kube-api-access-xk7nf\") pod \"crc-storage-crc-fsf6b\" (UID: \"4edb75c2-59a9-4b69-85b8-de9f004c04e2\") " pod="crc-storage/crc-storage-crc-fsf6b" Jan 22 07:04:35 crc kubenswrapper[4933]: I0122 07:04:35.606013 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/4edb75c2-59a9-4b69-85b8-de9f004c04e2-crc-storage\") pod \"crc-storage-crc-fsf6b\" (UID: \"4edb75c2-59a9-4b69-85b8-de9f004c04e2\") " pod="crc-storage/crc-storage-crc-fsf6b" Jan 22 07:04:35 crc kubenswrapper[4933]: I0122 07:04:35.707561 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/4edb75c2-59a9-4b69-85b8-de9f004c04e2-crc-storage\") pod \"crc-storage-crc-fsf6b\" (UID: \"4edb75c2-59a9-4b69-85b8-de9f004c04e2\") " pod="crc-storage/crc-storage-crc-fsf6b" Jan 22 07:04:35 crc kubenswrapper[4933]: I0122 07:04:35.707652 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/4edb75c2-59a9-4b69-85b8-de9f004c04e2-node-mnt\") pod \"crc-storage-crc-fsf6b\" (UID: \"4edb75c2-59a9-4b69-85b8-de9f004c04e2\") " pod="crc-storage/crc-storage-crc-fsf6b" Jan 22 07:04:35 crc kubenswrapper[4933]: I0122 07:04:35.707681 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xk7nf\" (UniqueName: \"kubernetes.io/projected/4edb75c2-59a9-4b69-85b8-de9f004c04e2-kube-api-access-xk7nf\") pod \"crc-storage-crc-fsf6b\" (UID: \"4edb75c2-59a9-4b69-85b8-de9f004c04e2\") " pod="crc-storage/crc-storage-crc-fsf6b" Jan 22 07:04:35 crc kubenswrapper[4933]: I0122 07:04:35.708260 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/4edb75c2-59a9-4b69-85b8-de9f004c04e2-node-mnt\") pod \"crc-storage-crc-fsf6b\" (UID: \"4edb75c2-59a9-4b69-85b8-de9f004c04e2\") " 
pod="crc-storage/crc-storage-crc-fsf6b" Jan 22 07:04:35 crc kubenswrapper[4933]: I0122 07:04:35.708559 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/4edb75c2-59a9-4b69-85b8-de9f004c04e2-crc-storage\") pod \"crc-storage-crc-fsf6b\" (UID: \"4edb75c2-59a9-4b69-85b8-de9f004c04e2\") " pod="crc-storage/crc-storage-crc-fsf6b" Jan 22 07:04:35 crc kubenswrapper[4933]: I0122 07:04:35.731626 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xk7nf\" (UniqueName: \"kubernetes.io/projected/4edb75c2-59a9-4b69-85b8-de9f004c04e2-kube-api-access-xk7nf\") pod \"crc-storage-crc-fsf6b\" (UID: \"4edb75c2-59a9-4b69-85b8-de9f004c04e2\") " pod="crc-storage/crc-storage-crc-fsf6b" Jan 22 07:04:35 crc kubenswrapper[4933]: I0122 07:04:35.757041 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-fsf6b" Jan 22 07:04:36 crc kubenswrapper[4933]: I0122 07:04:36.208554 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-fsf6b"] Jan 22 07:04:36 crc kubenswrapper[4933]: I0122 07:04:36.217702 4933 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 07:04:36 crc kubenswrapper[4933]: I0122 07:04:36.500690 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="283136c3-ccc7-42bd-82a7-d079877057ba" path="/var/lib/kubelet/pods/283136c3-ccc7-42bd-82a7-d079877057ba/volumes" Jan 22 07:04:37 crc kubenswrapper[4933]: I0122 07:04:37.049275 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-fsf6b" event={"ID":"4edb75c2-59a9-4b69-85b8-de9f004c04e2","Type":"ContainerStarted","Data":"b7d391b51c9490040c297945037d870c065bdf14f2854a7dcfef5d30c4f9e8f3"} Jan 22 07:04:40 crc kubenswrapper[4933]: I0122 07:04:40.070251 4933 generic.go:334] "Generic (PLEG): container finished" podID="4edb75c2-59a9-4b69-85b8-de9f004c04e2" containerID="0972bb802bd4712043d9bca6bc6af6f2bfc714aed6fedc420d92641019e3dc0d" exitCode=0 Jan 22 07:04:40 crc kubenswrapper[4933]: I0122 07:04:40.070302 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-fsf6b" event={"ID":"4edb75c2-59a9-4b69-85b8-de9f004c04e2","Type":"ContainerDied","Data":"0972bb802bd4712043d9bca6bc6af6f2bfc714aed6fedc420d92641019e3dc0d"} Jan 22 07:04:41 crc kubenswrapper[4933]: I0122 07:04:41.373882 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-fsf6b" Jan 22 07:04:41 crc kubenswrapper[4933]: I0122 07:04:41.495750 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/4edb75c2-59a9-4b69-85b8-de9f004c04e2-node-mnt\") pod \"4edb75c2-59a9-4b69-85b8-de9f004c04e2\" (UID: \"4edb75c2-59a9-4b69-85b8-de9f004c04e2\") " Jan 22 07:04:41 crc kubenswrapper[4933]: I0122 07:04:41.495810 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xk7nf\" (UniqueName: \"kubernetes.io/projected/4edb75c2-59a9-4b69-85b8-de9f004c04e2-kube-api-access-xk7nf\") pod \"4edb75c2-59a9-4b69-85b8-de9f004c04e2\" (UID: \"4edb75c2-59a9-4b69-85b8-de9f004c04e2\") " Jan 22 07:04:41 crc kubenswrapper[4933]: I0122 07:04:41.495844 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/4edb75c2-59a9-4b69-85b8-de9f004c04e2-crc-storage\") pod \"4edb75c2-59a9-4b69-85b8-de9f004c04e2\" (UID: \"4edb75c2-59a9-4b69-85b8-de9f004c04e2\") " Jan 22 07:04:41 crc kubenswrapper[4933]: I0122 07:04:41.495924 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4edb75c2-59a9-4b69-85b8-de9f004c04e2-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "4edb75c2-59a9-4b69-85b8-de9f004c04e2" (UID: "4edb75c2-59a9-4b69-85b8-de9f004c04e2"). InnerVolumeSpecName "node-mnt". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 07:04:41 crc kubenswrapper[4933]: I0122 07:04:41.496374 4933 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/4edb75c2-59a9-4b69-85b8-de9f004c04e2-node-mnt\") on node \"crc\" DevicePath \"\"" Jan 22 07:04:41 crc kubenswrapper[4933]: I0122 07:04:41.501035 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4edb75c2-59a9-4b69-85b8-de9f004c04e2-kube-api-access-xk7nf" (OuterVolumeSpecName: "kube-api-access-xk7nf") pod "4edb75c2-59a9-4b69-85b8-de9f004c04e2" (UID: "4edb75c2-59a9-4b69-85b8-de9f004c04e2"). InnerVolumeSpecName "kube-api-access-xk7nf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:04:41 crc kubenswrapper[4933]: I0122 07:04:41.517514 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4edb75c2-59a9-4b69-85b8-de9f004c04e2-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "4edb75c2-59a9-4b69-85b8-de9f004c04e2" (UID: "4edb75c2-59a9-4b69-85b8-de9f004c04e2"). InnerVolumeSpecName "crc-storage". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:04:41 crc kubenswrapper[4933]: I0122 07:04:41.598267 4933 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/4edb75c2-59a9-4b69-85b8-de9f004c04e2-crc-storage\") on node \"crc\" DevicePath \"\"" Jan 22 07:04:41 crc kubenswrapper[4933]: I0122 07:04:41.598305 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xk7nf\" (UniqueName: \"kubernetes.io/projected/4edb75c2-59a9-4b69-85b8-de9f004c04e2-kube-api-access-xk7nf\") on node \"crc\" DevicePath \"\"" Jan 22 07:04:42 crc kubenswrapper[4933]: I0122 07:04:42.087099 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-fsf6b" event={"ID":"4edb75c2-59a9-4b69-85b8-de9f004c04e2","Type":"ContainerDied","Data":"b7d391b51c9490040c297945037d870c065bdf14f2854a7dcfef5d30c4f9e8f3"} Jan 22 07:04:42 crc kubenswrapper[4933]: I0122 07:04:42.087510 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b7d391b51c9490040c297945037d870c065bdf14f2854a7dcfef5d30c4f9e8f3" Jan 22 07:04:42 crc kubenswrapper[4933]: I0122 07:04:42.087189 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-fsf6b" Jan 22 07:04:43 crc kubenswrapper[4933]: I0122 07:04:43.722204 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["crc-storage/crc-storage-crc-fsf6b"] Jan 22 07:04:43 crc kubenswrapper[4933]: I0122 07:04:43.727669 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["crc-storage/crc-storage-crc-fsf6b"] Jan 22 07:04:43 crc kubenswrapper[4933]: I0122 07:04:43.861636 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-8dtwd"] Jan 22 07:04:43 crc kubenswrapper[4933]: E0122 07:04:43.861977 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4edb75c2-59a9-4b69-85b8-de9f004c04e2" containerName="storage" Jan 22 07:04:43 crc kubenswrapper[4933]: I0122 07:04:43.861995 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="4edb75c2-59a9-4b69-85b8-de9f004c04e2" containerName="storage" Jan 22 07:04:43 crc kubenswrapper[4933]: I0122 07:04:43.862159 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="4edb75c2-59a9-4b69-85b8-de9f004c04e2" containerName="storage" Jan 22 07:04:43 crc kubenswrapper[4933]: I0122 07:04:43.862732 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-8dtwd" Jan 22 07:04:43 crc kubenswrapper[4933]: I0122 07:04:43.865206 4933 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-ctsm4" Jan 22 07:04:43 crc kubenswrapper[4933]: I0122 07:04:43.865251 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Jan 22 07:04:43 crc kubenswrapper[4933]: I0122 07:04:43.867482 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Jan 22 07:04:43 crc kubenswrapper[4933]: I0122 07:04:43.867534 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-8dtwd"] Jan 22 07:04:43 crc kubenswrapper[4933]: I0122 07:04:43.868046 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Jan 22 07:04:44 crc kubenswrapper[4933]: I0122 07:04:44.038600 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wtj9g\" (UniqueName: \"kubernetes.io/projected/158b6927-7025-49b8-accf-208e75c7d542-kube-api-access-wtj9g\") pod \"crc-storage-crc-8dtwd\" (UID: \"158b6927-7025-49b8-accf-208e75c7d542\") " pod="crc-storage/crc-storage-crc-8dtwd" Jan 22 07:04:44 crc kubenswrapper[4933]: I0122 07:04:44.038654 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/158b6927-7025-49b8-accf-208e75c7d542-node-mnt\") pod \"crc-storage-crc-8dtwd\" (UID: \"158b6927-7025-49b8-accf-208e75c7d542\") " pod="crc-storage/crc-storage-crc-8dtwd" Jan 22 07:04:44 crc kubenswrapper[4933]: I0122 07:04:44.038787 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/158b6927-7025-49b8-accf-208e75c7d542-crc-storage\") pod \"crc-storage-crc-8dtwd\" (UID: \"158b6927-7025-49b8-accf-208e75c7d542\") " pod="crc-storage/crc-storage-crc-8dtwd" Jan 22 07:04:44 crc kubenswrapper[4933]: I0122 07:04:44.140583 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wtj9g\" (UniqueName: \"kubernetes.io/projected/158b6927-7025-49b8-accf-208e75c7d542-kube-api-access-wtj9g\") pod \"crc-storage-crc-8dtwd\" (UID: \"158b6927-7025-49b8-accf-208e75c7d542\") " pod="crc-storage/crc-storage-crc-8dtwd" Jan 22 07:04:44 crc kubenswrapper[4933]: I0122 07:04:44.140631 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/158b6927-7025-49b8-accf-208e75c7d542-node-mnt\") pod \"crc-storage-crc-8dtwd\" (UID: \"158b6927-7025-49b8-accf-208e75c7d542\") " pod="crc-storage/crc-storage-crc-8dtwd" Jan 22 07:04:44 crc kubenswrapper[4933]: I0122 07:04:44.140668 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/158b6927-7025-49b8-accf-208e75c7d542-crc-storage\") pod \"crc-storage-crc-8dtwd\" (UID: \"158b6927-7025-49b8-accf-208e75c7d542\") " pod="crc-storage/crc-storage-crc-8dtwd" Jan 22 07:04:44 crc kubenswrapper[4933]: I0122 07:04:44.140970 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/158b6927-7025-49b8-accf-208e75c7d542-node-mnt\") pod \"crc-storage-crc-8dtwd\" (UID: \"158b6927-7025-49b8-accf-208e75c7d542\") " 
pod="crc-storage/crc-storage-crc-8dtwd" Jan 22 07:04:44 crc kubenswrapper[4933]: I0122 07:04:44.141389 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/158b6927-7025-49b8-accf-208e75c7d542-crc-storage\") pod \"crc-storage-crc-8dtwd\" (UID: \"158b6927-7025-49b8-accf-208e75c7d542\") " pod="crc-storage/crc-storage-crc-8dtwd" Jan 22 07:04:44 crc kubenswrapper[4933]: I0122 07:04:44.161855 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wtj9g\" (UniqueName: \"kubernetes.io/projected/158b6927-7025-49b8-accf-208e75c7d542-kube-api-access-wtj9g\") pod \"crc-storage-crc-8dtwd\" (UID: \"158b6927-7025-49b8-accf-208e75c7d542\") " pod="crc-storage/crc-storage-crc-8dtwd" Jan 22 07:04:44 crc kubenswrapper[4933]: I0122 07:04:44.189555 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-8dtwd" Jan 22 07:04:44 crc kubenswrapper[4933]: I0122 07:04:44.405781 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-8dtwd"] Jan 22 07:04:44 crc kubenswrapper[4933]: I0122 07:04:44.502701 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4edb75c2-59a9-4b69-85b8-de9f004c04e2" path="/var/lib/kubelet/pods/4edb75c2-59a9-4b69-85b8-de9f004c04e2/volumes" Jan 22 07:04:45 crc kubenswrapper[4933]: I0122 07:04:45.106875 4933 generic.go:334] "Generic (PLEG): container finished" podID="158b6927-7025-49b8-accf-208e75c7d542" containerID="48cf5a3512cfbdbd2e30de864315cb034df187de5101c340587136ba670d3cc3" exitCode=0 Jan 22 07:04:45 crc kubenswrapper[4933]: I0122 07:04:45.106921 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-8dtwd" event={"ID":"158b6927-7025-49b8-accf-208e75c7d542","Type":"ContainerDied","Data":"48cf5a3512cfbdbd2e30de864315cb034df187de5101c340587136ba670d3cc3"} Jan 22 07:04:45 crc kubenswrapper[4933]: I0122 07:04:45.107160 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-8dtwd" event={"ID":"158b6927-7025-49b8-accf-208e75c7d542","Type":"ContainerStarted","Data":"bf678e936a92acd547385931c9a32a8a2dae21453a404f3c655a5178fe424428"} Jan 22 07:04:45 crc kubenswrapper[4933]: I0122 07:04:45.491006 4933 scope.go:117] "RemoveContainer" containerID="d50102539d4e07b60779f4528e926809a62f0f1c3d74b4eadbe38466d0573cb2" Jan 22 07:04:45 crc kubenswrapper[4933]: E0122 07:04:45.491221 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:04:46 crc kubenswrapper[4933]: I0122 07:04:46.419771 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-8dtwd" Jan 22 07:04:46 crc kubenswrapper[4933]: I0122 07:04:46.574841 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wtj9g\" (UniqueName: \"kubernetes.io/projected/158b6927-7025-49b8-accf-208e75c7d542-kube-api-access-wtj9g\") pod \"158b6927-7025-49b8-accf-208e75c7d542\" (UID: \"158b6927-7025-49b8-accf-208e75c7d542\") " Jan 22 07:04:46 crc kubenswrapper[4933]: I0122 07:04:46.575068 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/158b6927-7025-49b8-accf-208e75c7d542-node-mnt\") pod \"158b6927-7025-49b8-accf-208e75c7d542\" (UID: \"158b6927-7025-49b8-accf-208e75c7d542\") " Jan 22 07:04:46 crc kubenswrapper[4933]: I0122 07:04:46.575113 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/158b6927-7025-49b8-accf-208e75c7d542-crc-storage\") pod \"158b6927-7025-49b8-accf-208e75c7d542\" (UID: \"158b6927-7025-49b8-accf-208e75c7d542\") " Jan 22 07:04:46 crc kubenswrapper[4933]: I0122 07:04:46.575284 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/158b6927-7025-49b8-accf-208e75c7d542-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "158b6927-7025-49b8-accf-208e75c7d542" (UID: "158b6927-7025-49b8-accf-208e75c7d542"). InnerVolumeSpecName "node-mnt". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 07:04:46 crc kubenswrapper[4933]: I0122 07:04:46.575531 4933 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/158b6927-7025-49b8-accf-208e75c7d542-node-mnt\") on node \"crc\" DevicePath \"\"" Jan 22 07:04:46 crc kubenswrapper[4933]: I0122 07:04:46.581365 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/158b6927-7025-49b8-accf-208e75c7d542-kube-api-access-wtj9g" (OuterVolumeSpecName: "kube-api-access-wtj9g") pod "158b6927-7025-49b8-accf-208e75c7d542" (UID: "158b6927-7025-49b8-accf-208e75c7d542"). InnerVolumeSpecName "kube-api-access-wtj9g". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:04:46 crc kubenswrapper[4933]: I0122 07:04:46.604752 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/158b6927-7025-49b8-accf-208e75c7d542-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "158b6927-7025-49b8-accf-208e75c7d542" (UID: "158b6927-7025-49b8-accf-208e75c7d542"). InnerVolumeSpecName "crc-storage". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:04:46 crc kubenswrapper[4933]: I0122 07:04:46.676665 4933 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/158b6927-7025-49b8-accf-208e75c7d542-crc-storage\") on node \"crc\" DevicePath \"\"" Jan 22 07:04:46 crc kubenswrapper[4933]: I0122 07:04:46.676709 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wtj9g\" (UniqueName: \"kubernetes.io/projected/158b6927-7025-49b8-accf-208e75c7d542-kube-api-access-wtj9g\") on node \"crc\" DevicePath \"\"" Jan 22 07:04:47 crc kubenswrapper[4933]: I0122 07:04:47.124296 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-8dtwd" event={"ID":"158b6927-7025-49b8-accf-208e75c7d542","Type":"ContainerDied","Data":"bf678e936a92acd547385931c9a32a8a2dae21453a404f3c655a5178fe424428"} Jan 22 07:04:47 crc kubenswrapper[4933]: I0122 07:04:47.124335 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bf678e936a92acd547385931c9a32a8a2dae21453a404f3c655a5178fe424428" Jan 22 07:04:47 crc kubenswrapper[4933]: I0122 07:04:47.124371 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-8dtwd" Jan 22 07:04:58 crc kubenswrapper[4933]: I0122 07:04:58.490643 4933 scope.go:117] "RemoveContainer" containerID="d50102539d4e07b60779f4528e926809a62f0f1c3d74b4eadbe38466d0573cb2" Jan 22 07:04:58 crc kubenswrapper[4933]: E0122 07:04:58.491449 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:05:03 crc kubenswrapper[4933]: I0122 07:05:03.679697 4933 scope.go:117] "RemoveContainer" containerID="63c807d0dd0c3714a46805051cc6edfeb6def19be4f76aedf63d38371adbb050" Jan 22 07:05:11 crc kubenswrapper[4933]: I0122 07:05:11.490469 4933 scope.go:117] "RemoveContainer" containerID="d50102539d4e07b60779f4528e926809a62f0f1c3d74b4eadbe38466d0573cb2" Jan 22 07:05:11 crc kubenswrapper[4933]: E0122 07:05:11.491045 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:05:22 crc kubenswrapper[4933]: I0122 07:05:22.496983 4933 scope.go:117] "RemoveContainer" containerID="d50102539d4e07b60779f4528e926809a62f0f1c3d74b4eadbe38466d0573cb2" Jan 22 07:05:22 crc kubenswrapper[4933]: E0122 07:05:22.497940 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:05:33 crc kubenswrapper[4933]: I0122 
07:05:33.490929 4933 scope.go:117] "RemoveContainer" containerID="d50102539d4e07b60779f4528e926809a62f0f1c3d74b4eadbe38466d0573cb2" Jan 22 07:05:33 crc kubenswrapper[4933]: E0122 07:05:33.491546 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:05:46 crc kubenswrapper[4933]: I0122 07:05:46.491557 4933 scope.go:117] "RemoveContainer" containerID="d50102539d4e07b60779f4528e926809a62f0f1c3d74b4eadbe38466d0573cb2" Jan 22 07:05:46 crc kubenswrapper[4933]: E0122 07:05:46.492196 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:05:57 crc kubenswrapper[4933]: I0122 07:05:57.817405 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-kvtfw"] Jan 22 07:05:57 crc kubenswrapper[4933]: E0122 07:05:57.818411 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="158b6927-7025-49b8-accf-208e75c7d542" containerName="storage" Jan 22 07:05:57 crc kubenswrapper[4933]: I0122 07:05:57.818430 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="158b6927-7025-49b8-accf-208e75c7d542" containerName="storage" Jan 22 07:05:57 crc kubenswrapper[4933]: I0122 07:05:57.818674 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="158b6927-7025-49b8-accf-208e75c7d542" containerName="storage" Jan 22 07:05:57 crc kubenswrapper[4933]: I0122 07:05:57.820164 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-kvtfw" Jan 22 07:05:57 crc kubenswrapper[4933]: I0122 07:05:57.835207 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-kvtfw"] Jan 22 07:05:57 crc kubenswrapper[4933]: I0122 07:05:57.972686 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7d194729-1060-4ca8-b0ac-2fb696cafb27-utilities\") pod \"certified-operators-kvtfw\" (UID: \"7d194729-1060-4ca8-b0ac-2fb696cafb27\") " pod="openshift-marketplace/certified-operators-kvtfw" Jan 22 07:05:57 crc kubenswrapper[4933]: I0122 07:05:57.972749 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7d194729-1060-4ca8-b0ac-2fb696cafb27-catalog-content\") pod \"certified-operators-kvtfw\" (UID: \"7d194729-1060-4ca8-b0ac-2fb696cafb27\") " pod="openshift-marketplace/certified-operators-kvtfw" Jan 22 07:05:57 crc kubenswrapper[4933]: I0122 07:05:57.972792 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bb4hz\" (UniqueName: \"kubernetes.io/projected/7d194729-1060-4ca8-b0ac-2fb696cafb27-kube-api-access-bb4hz\") pod \"certified-operators-kvtfw\" (UID: \"7d194729-1060-4ca8-b0ac-2fb696cafb27\") " pod="openshift-marketplace/certified-operators-kvtfw" Jan 22 07:05:58 crc kubenswrapper[4933]: I0122 07:05:58.074360 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7d194729-1060-4ca8-b0ac-2fb696cafb27-utilities\") pod \"certified-operators-kvtfw\" (UID: \"7d194729-1060-4ca8-b0ac-2fb696cafb27\") " pod="openshift-marketplace/certified-operators-kvtfw" Jan 22 07:05:58 crc kubenswrapper[4933]: I0122 07:05:58.074412 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7d194729-1060-4ca8-b0ac-2fb696cafb27-catalog-content\") pod \"certified-operators-kvtfw\" (UID: \"7d194729-1060-4ca8-b0ac-2fb696cafb27\") " pod="openshift-marketplace/certified-operators-kvtfw" Jan 22 07:05:58 crc kubenswrapper[4933]: I0122 07:05:58.074455 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bb4hz\" (UniqueName: \"kubernetes.io/projected/7d194729-1060-4ca8-b0ac-2fb696cafb27-kube-api-access-bb4hz\") pod \"certified-operators-kvtfw\" (UID: \"7d194729-1060-4ca8-b0ac-2fb696cafb27\") " pod="openshift-marketplace/certified-operators-kvtfw" Jan 22 07:05:58 crc kubenswrapper[4933]: I0122 07:05:58.074945 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7d194729-1060-4ca8-b0ac-2fb696cafb27-utilities\") pod \"certified-operators-kvtfw\" (UID: \"7d194729-1060-4ca8-b0ac-2fb696cafb27\") " pod="openshift-marketplace/certified-operators-kvtfw" Jan 22 07:05:58 crc kubenswrapper[4933]: I0122 07:05:58.074966 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7d194729-1060-4ca8-b0ac-2fb696cafb27-catalog-content\") pod \"certified-operators-kvtfw\" (UID: \"7d194729-1060-4ca8-b0ac-2fb696cafb27\") " pod="openshift-marketplace/certified-operators-kvtfw" Jan 22 07:05:58 crc kubenswrapper[4933]: I0122 07:05:58.104247 4933 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-bb4hz\" (UniqueName: \"kubernetes.io/projected/7d194729-1060-4ca8-b0ac-2fb696cafb27-kube-api-access-bb4hz\") pod \"certified-operators-kvtfw\" (UID: \"7d194729-1060-4ca8-b0ac-2fb696cafb27\") " pod="openshift-marketplace/certified-operators-kvtfw" Jan 22 07:05:58 crc kubenswrapper[4933]: I0122 07:05:58.145260 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kvtfw" Jan 22 07:05:58 crc kubenswrapper[4933]: I0122 07:05:58.635123 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-kvtfw"] Jan 22 07:05:58 crc kubenswrapper[4933]: I0122 07:05:58.684558 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kvtfw" event={"ID":"7d194729-1060-4ca8-b0ac-2fb696cafb27","Type":"ContainerStarted","Data":"5564dfec04bfb01cd007a99ad4e546a39db1e5c11df64cdf1333c5f4393f6fd7"} Jan 22 07:05:59 crc kubenswrapper[4933]: I0122 07:05:59.693852 4933 generic.go:334] "Generic (PLEG): container finished" podID="7d194729-1060-4ca8-b0ac-2fb696cafb27" containerID="45a16ac0827e16c6d71d4fb15d1a08d9bb6d85837bbe69c26855090c07430000" exitCode=0 Jan 22 07:05:59 crc kubenswrapper[4933]: I0122 07:05:59.693897 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kvtfw" event={"ID":"7d194729-1060-4ca8-b0ac-2fb696cafb27","Type":"ContainerDied","Data":"45a16ac0827e16c6d71d4fb15d1a08d9bb6d85837bbe69c26855090c07430000"} Jan 22 07:06:00 crc kubenswrapper[4933]: I0122 07:06:00.490969 4933 scope.go:117] "RemoveContainer" containerID="d50102539d4e07b60779f4528e926809a62f0f1c3d74b4eadbe38466d0573cb2" Jan 22 07:06:00 crc kubenswrapper[4933]: E0122 07:06:00.491677 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:06:00 crc kubenswrapper[4933]: I0122 07:06:00.723550 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kvtfw" event={"ID":"7d194729-1060-4ca8-b0ac-2fb696cafb27","Type":"ContainerStarted","Data":"83c627dcafbc1e25217dee6b30e15823e016b55890bf440807ae6424e7cd7817"} Jan 22 07:06:01 crc kubenswrapper[4933]: I0122 07:06:01.734686 4933 generic.go:334] "Generic (PLEG): container finished" podID="7d194729-1060-4ca8-b0ac-2fb696cafb27" containerID="83c627dcafbc1e25217dee6b30e15823e016b55890bf440807ae6424e7cd7817" exitCode=0 Jan 22 07:06:01 crc kubenswrapper[4933]: I0122 07:06:01.734772 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kvtfw" event={"ID":"7d194729-1060-4ca8-b0ac-2fb696cafb27","Type":"ContainerDied","Data":"83c627dcafbc1e25217dee6b30e15823e016b55890bf440807ae6424e7cd7817"} Jan 22 07:06:02 crc kubenswrapper[4933]: I0122 07:06:02.744616 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kvtfw" event={"ID":"7d194729-1060-4ca8-b0ac-2fb696cafb27","Type":"ContainerStarted","Data":"1a9fdb9da78c6e872930263fc48ebd661f2e5ec47c3a135ee24c5b9bf13e9723"} Jan 22 07:06:02 crc kubenswrapper[4933]: I0122 07:06:02.767227 4933 
Jan 22 07:06:08 crc kubenswrapper[4933]: I0122 07:06:08.146596 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-kvtfw"
Jan 22 07:06:08 crc kubenswrapper[4933]: I0122 07:06:08.147130 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-kvtfw"
Jan 22 07:06:08 crc kubenswrapper[4933]: I0122 07:06:08.195430 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-kvtfw"
Jan 22 07:06:08 crc kubenswrapper[4933]: I0122 07:06:08.858299 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-kvtfw"
Jan 22 07:06:08 crc kubenswrapper[4933]: I0122 07:06:08.914470 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-kvtfw"]
Jan 22 07:06:10 crc kubenswrapper[4933]: I0122 07:06:10.814860 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-kvtfw" podUID="7d194729-1060-4ca8-b0ac-2fb696cafb27" containerName="registry-server" containerID="cri-o://1a9fdb9da78c6e872930263fc48ebd661f2e5ec47c3a135ee24c5b9bf13e9723" gracePeriod=2
Jan 22 07:06:11 crc kubenswrapper[4933]: I0122 07:06:11.828932 4933 generic.go:334] "Generic (PLEG): container finished" podID="7d194729-1060-4ca8-b0ac-2fb696cafb27" containerID="1a9fdb9da78c6e872930263fc48ebd661f2e5ec47c3a135ee24c5b9bf13e9723" exitCode=0
Jan 22 07:06:11 crc kubenswrapper[4933]: I0122 07:06:11.829346 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kvtfw" event={"ID":"7d194729-1060-4ca8-b0ac-2fb696cafb27","Type":"ContainerDied","Data":"1a9fdb9da78c6e872930263fc48ebd661f2e5ec47c3a135ee24c5b9bf13e9723"}
Jan 22 07:06:12 crc kubenswrapper[4933]: I0122 07:06:12.761239 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kvtfw"
Jan 22 07:06:12 crc kubenswrapper[4933]: I0122 07:06:12.837512 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kvtfw" event={"ID":"7d194729-1060-4ca8-b0ac-2fb696cafb27","Type":"ContainerDied","Data":"5564dfec04bfb01cd007a99ad4e546a39db1e5c11df64cdf1333c5f4393f6fd7"}
Jan 22 07:06:12 crc kubenswrapper[4933]: I0122 07:06:12.837561 4933 scope.go:117] "RemoveContainer" containerID="1a9fdb9da78c6e872930263fc48ebd661f2e5ec47c3a135ee24c5b9bf13e9723"
Jan 22 07:06:12 crc kubenswrapper[4933]: I0122 07:06:12.837618 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kvtfw"
Jan 22 07:06:12 crc kubenswrapper[4933]: I0122 07:06:12.855396 4933 scope.go:117] "RemoveContainer" containerID="83c627dcafbc1e25217dee6b30e15823e016b55890bf440807ae6424e7cd7817"
Jan 22 07:06:12 crc kubenswrapper[4933]: I0122 07:06:12.881940 4933 scope.go:117] "RemoveContainer" containerID="45a16ac0827e16c6d71d4fb15d1a08d9bb6d85837bbe69c26855090c07430000"
Jan 22 07:06:12 crc kubenswrapper[4933]: I0122 07:06:12.893673 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7d194729-1060-4ca8-b0ac-2fb696cafb27-utilities\") pod \"7d194729-1060-4ca8-b0ac-2fb696cafb27\" (UID: \"7d194729-1060-4ca8-b0ac-2fb696cafb27\") "
Jan 22 07:06:12 crc kubenswrapper[4933]: I0122 07:06:12.893778 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7d194729-1060-4ca8-b0ac-2fb696cafb27-catalog-content\") pod \"7d194729-1060-4ca8-b0ac-2fb696cafb27\" (UID: \"7d194729-1060-4ca8-b0ac-2fb696cafb27\") "
Jan 22 07:06:12 crc kubenswrapper[4933]: I0122 07:06:12.893831 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bb4hz\" (UniqueName: \"kubernetes.io/projected/7d194729-1060-4ca8-b0ac-2fb696cafb27-kube-api-access-bb4hz\") pod \"7d194729-1060-4ca8-b0ac-2fb696cafb27\" (UID: \"7d194729-1060-4ca8-b0ac-2fb696cafb27\") "
Jan 22 07:06:12 crc kubenswrapper[4933]: I0122 07:06:12.896300 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7d194729-1060-4ca8-b0ac-2fb696cafb27-utilities" (OuterVolumeSpecName: "utilities") pod "7d194729-1060-4ca8-b0ac-2fb696cafb27" (UID: "7d194729-1060-4ca8-b0ac-2fb696cafb27"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 07:06:12 crc kubenswrapper[4933]: I0122 07:06:12.901787 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7d194729-1060-4ca8-b0ac-2fb696cafb27-kube-api-access-bb4hz" (OuterVolumeSpecName: "kube-api-access-bb4hz") pod "7d194729-1060-4ca8-b0ac-2fb696cafb27" (UID: "7d194729-1060-4ca8-b0ac-2fb696cafb27"). InnerVolumeSpecName "kube-api-access-bb4hz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 07:06:12 crc kubenswrapper[4933]: I0122 07:06:12.943380 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7d194729-1060-4ca8-b0ac-2fb696cafb27-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7d194729-1060-4ca8-b0ac-2fb696cafb27" (UID: "7d194729-1060-4ca8-b0ac-2fb696cafb27"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:06:12 crc kubenswrapper[4933]: I0122 07:06:12.995686 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7d194729-1060-4ca8-b0ac-2fb696cafb27-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 07:06:12 crc kubenswrapper[4933]: I0122 07:06:12.995728 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7d194729-1060-4ca8-b0ac-2fb696cafb27-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 07:06:12 crc kubenswrapper[4933]: I0122 07:06:12.995740 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bb4hz\" (UniqueName: \"kubernetes.io/projected/7d194729-1060-4ca8-b0ac-2fb696cafb27-kube-api-access-bb4hz\") on node \"crc\" DevicePath \"\"" Jan 22 07:06:13 crc kubenswrapper[4933]: I0122 07:06:13.188146 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-kvtfw"] Jan 22 07:06:13 crc kubenswrapper[4933]: I0122 07:06:13.200565 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-kvtfw"] Jan 22 07:06:13 crc kubenswrapper[4933]: I0122 07:06:13.490963 4933 scope.go:117] "RemoveContainer" containerID="d50102539d4e07b60779f4528e926809a62f0f1c3d74b4eadbe38466d0573cb2" Jan 22 07:06:13 crc kubenswrapper[4933]: E0122 07:06:13.491294 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:06:14 crc kubenswrapper[4933]: I0122 07:06:14.503670 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7d194729-1060-4ca8-b0ac-2fb696cafb27" path="/var/lib/kubelet/pods/7d194729-1060-4ca8-b0ac-2fb696cafb27/volumes" Jan 22 07:06:24 crc kubenswrapper[4933]: I0122 07:06:24.491062 4933 scope.go:117] "RemoveContainer" containerID="d50102539d4e07b60779f4528e926809a62f0f1c3d74b4eadbe38466d0573cb2" Jan 22 07:06:24 crc kubenswrapper[4933]: E0122 07:06:24.491824 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:06:36 crc kubenswrapper[4933]: I0122 07:06:36.490871 4933 scope.go:117] "RemoveContainer" containerID="d50102539d4e07b60779f4528e926809a62f0f1c3d74b4eadbe38466d0573cb2" Jan 22 07:06:36 crc kubenswrapper[4933]: E0122 07:06:36.491793 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.202486 4933 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-56bbd59dc5-6xmgk"] Jan 22 07:06:47 crc kubenswrapper[4933]: E0122 07:06:47.203388 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d194729-1060-4ca8-b0ac-2fb696cafb27" containerName="extract-content" Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.203404 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d194729-1060-4ca8-b0ac-2fb696cafb27" containerName="extract-content" Jan 22 07:06:47 crc kubenswrapper[4933]: E0122 07:06:47.203423 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d194729-1060-4ca8-b0ac-2fb696cafb27" containerName="extract-utilities" Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.203431 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d194729-1060-4ca8-b0ac-2fb696cafb27" containerName="extract-utilities" Jan 22 07:06:47 crc kubenswrapper[4933]: E0122 07:06:47.203452 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d194729-1060-4ca8-b0ac-2fb696cafb27" containerName="registry-server" Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.203460 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d194729-1060-4ca8-b0ac-2fb696cafb27" containerName="registry-server" Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.203634 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d194729-1060-4ca8-b0ac-2fb696cafb27" containerName="registry-server" Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.204530 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56bbd59dc5-6xmgk" Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.207724 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-t59lc" Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.207957 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.208183 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.211205 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.211221 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.214620 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5986db9b4f-xzbm5"] Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.216415 4933 util.go:30] "No sandbox for pod can be found. 
Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.230145 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56bbd59dc5-6xmgk"]
Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.242803 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5986db9b4f-xzbm5"]
Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.281201 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pc4r5\" (UniqueName: \"kubernetes.io/projected/c790bfe9-d3ba-48b9-9afa-b22398bac778-kube-api-access-pc4r5\") pod \"dnsmasq-dns-5986db9b4f-xzbm5\" (UID: \"c790bfe9-d3ba-48b9-9afa-b22398bac778\") " pod="openstack/dnsmasq-dns-5986db9b4f-xzbm5"
Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.281254 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tb75v\" (UniqueName: \"kubernetes.io/projected/a42a0f99-db04-4785-81b9-79f395134e77-kube-api-access-tb75v\") pod \"dnsmasq-dns-56bbd59dc5-6xmgk\" (UID: \"a42a0f99-db04-4785-81b9-79f395134e77\") " pod="openstack/dnsmasq-dns-56bbd59dc5-6xmgk"
Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.281332 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a42a0f99-db04-4785-81b9-79f395134e77-dns-svc\") pod \"dnsmasq-dns-56bbd59dc5-6xmgk\" (UID: \"a42a0f99-db04-4785-81b9-79f395134e77\") " pod="openstack/dnsmasq-dns-56bbd59dc5-6xmgk"
Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.281372 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a42a0f99-db04-4785-81b9-79f395134e77-config\") pod \"dnsmasq-dns-56bbd59dc5-6xmgk\" (UID: \"a42a0f99-db04-4785-81b9-79f395134e77\") " pod="openstack/dnsmasq-dns-56bbd59dc5-6xmgk"
Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.281402 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c790bfe9-d3ba-48b9-9afa-b22398bac778-config\") pod \"dnsmasq-dns-5986db9b4f-xzbm5\" (UID: \"c790bfe9-d3ba-48b9-9afa-b22398bac778\") " pod="openstack/dnsmasq-dns-5986db9b4f-xzbm5"
Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.379769 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56bbd59dc5-6xmgk"]
Jan 22 07:06:47 crc kubenswrapper[4933]: E0122 07:06:47.380329 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[config dns-svc kube-api-access-tb75v], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/dnsmasq-dns-56bbd59dc5-6xmgk" podUID="a42a0f99-db04-4785-81b9-79f395134e77"
Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.383771 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tb75v\" (UniqueName: \"kubernetes.io/projected/a42a0f99-db04-4785-81b9-79f395134e77-kube-api-access-tb75v\") pod \"dnsmasq-dns-56bbd59dc5-6xmgk\" (UID: \"a42a0f99-db04-4785-81b9-79f395134e77\") " pod="openstack/dnsmasq-dns-56bbd59dc5-6xmgk"
Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.383821 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pc4r5\" (UniqueName: \"kubernetes.io/projected/c790bfe9-d3ba-48b9-9afa-b22398bac778-kube-api-access-pc4r5\") pod \"dnsmasq-dns-5986db9b4f-xzbm5\" (UID: \"c790bfe9-d3ba-48b9-9afa-b22398bac778\") " pod="openstack/dnsmasq-dns-5986db9b4f-xzbm5"
\"kubernetes.io/projected/c790bfe9-d3ba-48b9-9afa-b22398bac778-kube-api-access-pc4r5\") pod \"dnsmasq-dns-5986db9b4f-xzbm5\" (UID: \"c790bfe9-d3ba-48b9-9afa-b22398bac778\") " pod="openstack/dnsmasq-dns-5986db9b4f-xzbm5" Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.383894 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a42a0f99-db04-4785-81b9-79f395134e77-dns-svc\") pod \"dnsmasq-dns-56bbd59dc5-6xmgk\" (UID: \"a42a0f99-db04-4785-81b9-79f395134e77\") " pod="openstack/dnsmasq-dns-56bbd59dc5-6xmgk" Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.383940 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a42a0f99-db04-4785-81b9-79f395134e77-config\") pod \"dnsmasq-dns-56bbd59dc5-6xmgk\" (UID: \"a42a0f99-db04-4785-81b9-79f395134e77\") " pod="openstack/dnsmasq-dns-56bbd59dc5-6xmgk" Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.383972 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c790bfe9-d3ba-48b9-9afa-b22398bac778-config\") pod \"dnsmasq-dns-5986db9b4f-xzbm5\" (UID: \"c790bfe9-d3ba-48b9-9afa-b22398bac778\") " pod="openstack/dnsmasq-dns-5986db9b4f-xzbm5" Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.384812 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c790bfe9-d3ba-48b9-9afa-b22398bac778-config\") pod \"dnsmasq-dns-5986db9b4f-xzbm5\" (UID: \"c790bfe9-d3ba-48b9-9afa-b22398bac778\") " pod="openstack/dnsmasq-dns-5986db9b4f-xzbm5" Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.385756 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a42a0f99-db04-4785-81b9-79f395134e77-dns-svc\") pod \"dnsmasq-dns-56bbd59dc5-6xmgk\" (UID: \"a42a0f99-db04-4785-81b9-79f395134e77\") " pod="openstack/dnsmasq-dns-56bbd59dc5-6xmgk" Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.386233 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a42a0f99-db04-4785-81b9-79f395134e77-config\") pod \"dnsmasq-dns-56bbd59dc5-6xmgk\" (UID: \"a42a0f99-db04-4785-81b9-79f395134e77\") " pod="openstack/dnsmasq-dns-56bbd59dc5-6xmgk" Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.414754 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tb75v\" (UniqueName: \"kubernetes.io/projected/a42a0f99-db04-4785-81b9-79f395134e77-kube-api-access-tb75v\") pod \"dnsmasq-dns-56bbd59dc5-6xmgk\" (UID: \"a42a0f99-db04-4785-81b9-79f395134e77\") " pod="openstack/dnsmasq-dns-56bbd59dc5-6xmgk" Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.422601 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-95587bc99-h476n"] Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.423861 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pc4r5\" (UniqueName: \"kubernetes.io/projected/c790bfe9-d3ba-48b9-9afa-b22398bac778-kube-api-access-pc4r5\") pod \"dnsmasq-dns-5986db9b4f-xzbm5\" (UID: \"c790bfe9-d3ba-48b9-9afa-b22398bac778\") " pod="openstack/dnsmasq-dns-5986db9b4f-xzbm5" Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.424200 4933 util.go:30] "No sandbox for pod can be found. 
Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.434185 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-95587bc99-h476n"]
Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.485531 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fgqdv\" (UniqueName: \"kubernetes.io/projected/3d7721a2-7381-4d86-b65c-a068329996ac-kube-api-access-fgqdv\") pod \"dnsmasq-dns-95587bc99-h476n\" (UID: \"3d7721a2-7381-4d86-b65c-a068329996ac\") " pod="openstack/dnsmasq-dns-95587bc99-h476n"
Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.485607 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d7721a2-7381-4d86-b65c-a068329996ac-config\") pod \"dnsmasq-dns-95587bc99-h476n\" (UID: \"3d7721a2-7381-4d86-b65c-a068329996ac\") " pod="openstack/dnsmasq-dns-95587bc99-h476n"
Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.485631 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3d7721a2-7381-4d86-b65c-a068329996ac-dns-svc\") pod \"dnsmasq-dns-95587bc99-h476n\" (UID: \"3d7721a2-7381-4d86-b65c-a068329996ac\") " pod="openstack/dnsmasq-dns-95587bc99-h476n"
Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.537876 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5986db9b4f-xzbm5"
Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.587437 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fgqdv\" (UniqueName: \"kubernetes.io/projected/3d7721a2-7381-4d86-b65c-a068329996ac-kube-api-access-fgqdv\") pod \"dnsmasq-dns-95587bc99-h476n\" (UID: \"3d7721a2-7381-4d86-b65c-a068329996ac\") " pod="openstack/dnsmasq-dns-95587bc99-h476n"
Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.587576 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d7721a2-7381-4d86-b65c-a068329996ac-config\") pod \"dnsmasq-dns-95587bc99-h476n\" (UID: \"3d7721a2-7381-4d86-b65c-a068329996ac\") " pod="openstack/dnsmasq-dns-95587bc99-h476n"
Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.587603 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3d7721a2-7381-4d86-b65c-a068329996ac-dns-svc\") pod \"dnsmasq-dns-95587bc99-h476n\" (UID: \"3d7721a2-7381-4d86-b65c-a068329996ac\") " pod="openstack/dnsmasq-dns-95587bc99-h476n"
Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.588680 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d7721a2-7381-4d86-b65c-a068329996ac-config\") pod \"dnsmasq-dns-95587bc99-h476n\" (UID: \"3d7721a2-7381-4d86-b65c-a068329996ac\") " pod="openstack/dnsmasq-dns-95587bc99-h476n"
Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.588693 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3d7721a2-7381-4d86-b65c-a068329996ac-dns-svc\") pod \"dnsmasq-dns-95587bc99-h476n\" (UID: \"3d7721a2-7381-4d86-b65c-a068329996ac\") " pod="openstack/dnsmasq-dns-95587bc99-h476n"
Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.612672 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fgqdv\" (UniqueName: \"kubernetes.io/projected/3d7721a2-7381-4d86-b65c-a068329996ac-kube-api-access-fgqdv\") pod \"dnsmasq-dns-95587bc99-h476n\" (UID: \"3d7721a2-7381-4d86-b65c-a068329996ac\") " pod="openstack/dnsmasq-dns-95587bc99-h476n"
Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.771510 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-95587bc99-h476n"
Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.871226 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5986db9b4f-xzbm5"]
Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.928874 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5986db9b4f-xzbm5"]
Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.960765 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5d79f765b5-2fl57"]
Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.961899 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d79f765b5-2fl57"
Jan 22 07:06:47 crc kubenswrapper[4933]: I0122 07:06:47.981702 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5d79f765b5-2fl57"]
Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.093736 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5235dda8-909b-415c-b226-b0376d439555-dns-svc\") pod \"dnsmasq-dns-5d79f765b5-2fl57\" (UID: \"5235dda8-909b-415c-b226-b0376d439555\") " pod="openstack/dnsmasq-dns-5d79f765b5-2fl57"
Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.094088 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nqwqq\" (UniqueName: \"kubernetes.io/projected/5235dda8-909b-415c-b226-b0376d439555-kube-api-access-nqwqq\") pod \"dnsmasq-dns-5d79f765b5-2fl57\" (UID: \"5235dda8-909b-415c-b226-b0376d439555\") " pod="openstack/dnsmasq-dns-5d79f765b5-2fl57"
Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.094152 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5235dda8-909b-415c-b226-b0376d439555-config\") pod \"dnsmasq-dns-5d79f765b5-2fl57\" (UID: \"5235dda8-909b-415c-b226-b0376d439555\") " pod="openstack/dnsmasq-dns-5d79f765b5-2fl57"
Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.134364 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56bbd59dc5-6xmgk"
Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.134960 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5986db9b4f-xzbm5" event={"ID":"c790bfe9-d3ba-48b9-9afa-b22398bac778","Type":"ContainerStarted","Data":"82c8973b8d5ba676cd92234e6cb21a561190e1925da311500c707b4a83c3a00c"}
Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.152155 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56bbd59dc5-6xmgk"
Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.197485 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a42a0f99-db04-4785-81b9-79f395134e77-dns-svc\") pod \"a42a0f99-db04-4785-81b9-79f395134e77\" (UID: \"a42a0f99-db04-4785-81b9-79f395134e77\") "
Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.197567 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a42a0f99-db04-4785-81b9-79f395134e77-config\") pod \"a42a0f99-db04-4785-81b9-79f395134e77\" (UID: \"a42a0f99-db04-4785-81b9-79f395134e77\") "
Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.197672 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tb75v\" (UniqueName: \"kubernetes.io/projected/a42a0f99-db04-4785-81b9-79f395134e77-kube-api-access-tb75v\") pod \"a42a0f99-db04-4785-81b9-79f395134e77\" (UID: \"a42a0f99-db04-4785-81b9-79f395134e77\") "
Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.197822 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nqwqq\" (UniqueName: \"kubernetes.io/projected/5235dda8-909b-415c-b226-b0376d439555-kube-api-access-nqwqq\") pod \"dnsmasq-dns-5d79f765b5-2fl57\" (UID: \"5235dda8-909b-415c-b226-b0376d439555\") " pod="openstack/dnsmasq-dns-5d79f765b5-2fl57"
Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.197898 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5235dda8-909b-415c-b226-b0376d439555-config\") pod \"dnsmasq-dns-5d79f765b5-2fl57\" (UID: \"5235dda8-909b-415c-b226-b0376d439555\") " pod="openstack/dnsmasq-dns-5d79f765b5-2fl57"
Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.197944 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5235dda8-909b-415c-b226-b0376d439555-dns-svc\") pod \"dnsmasq-dns-5d79f765b5-2fl57\" (UID: \"5235dda8-909b-415c-b226-b0376d439555\") " pod="openstack/dnsmasq-dns-5d79f765b5-2fl57"
Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.198714 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5235dda8-909b-415c-b226-b0376d439555-dns-svc\") pod \"dnsmasq-dns-5d79f765b5-2fl57\" (UID: \"5235dda8-909b-415c-b226-b0376d439555\") " pod="openstack/dnsmasq-dns-5d79f765b5-2fl57"
Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.198744 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a42a0f99-db04-4785-81b9-79f395134e77-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a42a0f99-db04-4785-81b9-79f395134e77" (UID: "a42a0f99-db04-4785-81b9-79f395134e77"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.199465 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a42a0f99-db04-4785-81b9-79f395134e77-config" (OuterVolumeSpecName: "config") pod "a42a0f99-db04-4785-81b9-79f395134e77" (UID: "a42a0f99-db04-4785-81b9-79f395134e77"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.200201 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5235dda8-909b-415c-b226-b0376d439555-config\") pod \"dnsmasq-dns-5d79f765b5-2fl57\" (UID: \"5235dda8-909b-415c-b226-b0376d439555\") " pod="openstack/dnsmasq-dns-5d79f765b5-2fl57" Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.207707 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a42a0f99-db04-4785-81b9-79f395134e77-kube-api-access-tb75v" (OuterVolumeSpecName: "kube-api-access-tb75v") pod "a42a0f99-db04-4785-81b9-79f395134e77" (UID: "a42a0f99-db04-4785-81b9-79f395134e77"). InnerVolumeSpecName "kube-api-access-tb75v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.230581 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-95587bc99-h476n"] Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.231192 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nqwqq\" (UniqueName: \"kubernetes.io/projected/5235dda8-909b-415c-b226-b0376d439555-kube-api-access-nqwqq\") pod \"dnsmasq-dns-5d79f765b5-2fl57\" (UID: \"5235dda8-909b-415c-b226-b0376d439555\") " pod="openstack/dnsmasq-dns-5d79f765b5-2fl57" Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.289508 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d79f765b5-2fl57" Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.299194 4933 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a42a0f99-db04-4785-81b9-79f395134e77-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.299229 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a42a0f99-db04-4785-81b9-79f395134e77-config\") on node \"crc\" DevicePath \"\"" Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.299242 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tb75v\" (UniqueName: \"kubernetes.io/projected/a42a0f99-db04-4785-81b9-79f395134e77-kube-api-access-tb75v\") on node \"crc\" DevicePath \"\"" Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.490742 4933 scope.go:117] "RemoveContainer" containerID="d50102539d4e07b60779f4528e926809a62f0f1c3d74b4eadbe38466d0573cb2" Jan 22 07:06:48 crc kubenswrapper[4933]: E0122 07:06:48.491160 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.549837 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.551681 4933 util.go:30] "No sandbox for pod can be found. 
Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.553765 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf"
Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.554640 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-gk2f5"
Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.554730 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user"
Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.558789 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc"
Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.559270 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf"
Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.559996 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data"
Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.560800 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie"
Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.571064 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.607726 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-pod-info\") pod \"rabbitmq-server-0\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " pod="openstack/rabbitmq-server-0"
Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.607780 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " pod="openstack/rabbitmq-server-0"
Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.607803 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " pod="openstack/rabbitmq-server-0"
Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.607827 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-25d25dfd-e27f-4f7a-bc88-ad1a44590293\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-25d25dfd-e27f-4f7a-bc88-ad1a44590293\") pod \"rabbitmq-server-0\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " pod="openstack/rabbitmq-server-0"
Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.607871 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " pod="openstack/rabbitmq-server-0"
Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.607961 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-config-data\") pod \"rabbitmq-server-0\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " pod="openstack/rabbitmq-server-0"
(UniqueName: \"kubernetes.io/configmap/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-config-data\") pod \"rabbitmq-server-0\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " pod="openstack/rabbitmq-server-0" Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.608014 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " pod="openstack/rabbitmq-server-0" Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.608041 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " pod="openstack/rabbitmq-server-0" Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.608097 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " pod="openstack/rabbitmq-server-0" Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.608165 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-server-conf\") pod \"rabbitmq-server-0\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " pod="openstack/rabbitmq-server-0" Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.608222 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8n9xj\" (UniqueName: \"kubernetes.io/projected/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-kube-api-access-8n9xj\") pod \"rabbitmq-server-0\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " pod="openstack/rabbitmq-server-0" Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.709390 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " pod="openstack/rabbitmq-server-0" Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.709467 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-config-data\") pod \"rabbitmq-server-0\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " pod="openstack/rabbitmq-server-0" Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.709501 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " pod="openstack/rabbitmq-server-0" Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.709517 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: 
\"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " pod="openstack/rabbitmq-server-0" Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.709551 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " pod="openstack/rabbitmq-server-0" Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.709580 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-server-conf\") pod \"rabbitmq-server-0\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " pod="openstack/rabbitmq-server-0" Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.709605 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8n9xj\" (UniqueName: \"kubernetes.io/projected/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-kube-api-access-8n9xj\") pod \"rabbitmq-server-0\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " pod="openstack/rabbitmq-server-0" Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.709623 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-pod-info\") pod \"rabbitmq-server-0\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " pod="openstack/rabbitmq-server-0" Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.709637 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " pod="openstack/rabbitmq-server-0" Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.709654 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " pod="openstack/rabbitmq-server-0" Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.709672 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-25d25dfd-e27f-4f7a-bc88-ad1a44590293\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-25d25dfd-e27f-4f7a-bc88-ad1a44590293\") pod \"rabbitmq-server-0\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " pod="openstack/rabbitmq-server-0" Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.710617 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5d79f765b5-2fl57"] Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.711201 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-config-data\") pod \"rabbitmq-server-0\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " pod="openstack/rabbitmq-server-0" Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.711463 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: 
\"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " pod="openstack/rabbitmq-server-0" Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.711800 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " pod="openstack/rabbitmq-server-0" Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.713474 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " pod="openstack/rabbitmq-server-0" Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.714176 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " pod="openstack/rabbitmq-server-0" Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.715554 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-pod-info\") pod \"rabbitmq-server-0\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " pod="openstack/rabbitmq-server-0" Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.715718 4933 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.715752 4933 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-25d25dfd-e27f-4f7a-bc88-ad1a44590293\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-25d25dfd-e27f-4f7a-bc88-ad1a44590293\") pod \"rabbitmq-server-0\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/40a731fe5b6e4e94ff1971f7fab1d8154f20c057332612743110a6ff8d1c2b32/globalmount\"" pod="openstack/rabbitmq-server-0" Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.716043 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " pod="openstack/rabbitmq-server-0" Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.716256 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-server-conf\") pod \"rabbitmq-server-0\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " pod="openstack/rabbitmq-server-0" Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.716709 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " pod="openstack/rabbitmq-server-0" Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.731539 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8n9xj\" (UniqueName: 
\"kubernetes.io/projected/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-kube-api-access-8n9xj\") pod \"rabbitmq-server-0\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " pod="openstack/rabbitmq-server-0" Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.750301 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-25d25dfd-e27f-4f7a-bc88-ad1a44590293\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-25d25dfd-e27f-4f7a-bc88-ad1a44590293\") pod \"rabbitmq-server-0\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " pod="openstack/rabbitmq-server-0" Jan 22 07:06:48 crc kubenswrapper[4933]: I0122 07:06:48.902818 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.067814 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.073807 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.076343 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.076376 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-8tlnl" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.076510 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.077710 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.078342 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.078959 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.082857 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.086822 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.114797 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.114870 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.114938 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-server-conf\") 
pod \"rabbitmq-cell1-server-0\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.114958 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.115021 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.115128 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-5659b60f-f93b-4d5a-a69b-b389e1d2b161\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5659b60f-f93b-4d5a-a69b-b389e1d2b161\") pod \"rabbitmq-cell1-server-0\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.115162 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.115188 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hk7qq\" (UniqueName: \"kubernetes.io/projected/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-kube-api-access-hk7qq\") pod \"rabbitmq-cell1-server-0\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.115250 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.115293 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.115314 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.135306 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 22 07:06:49 crc 
Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.151008 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5986db9b4f-xzbm5" event={"ID":"c790bfe9-d3ba-48b9-9afa-b22398bac778","Type":"ContainerDied","Data":"6decac47190b8fb502ae29d7ded367809876e84a4dd7a348e8b6cf8555fc04e8"}
Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.154217 4933 generic.go:334] "Generic (PLEG): container finished" podID="3d7721a2-7381-4d86-b65c-a068329996ac" containerID="8785bf24cffc0148e40baa8a855c842fee0bb6e7e106055b1441cf0375ebc3ef" exitCode=0
Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.154301 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95587bc99-h476n" event={"ID":"3d7721a2-7381-4d86-b65c-a068329996ac","Type":"ContainerDied","Data":"8785bf24cffc0148e40baa8a855c842fee0bb6e7e106055b1441cf0375ebc3ef"}
Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.154338 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95587bc99-h476n" event={"ID":"3d7721a2-7381-4d86-b65c-a068329996ac","Type":"ContainerStarted","Data":"db1e3d53ef8f806c37706cbb0cae9768d4ac3e3dcdc119fc05238e7fb83d2cc7"}
Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.157632 4933 generic.go:334] "Generic (PLEG): container finished" podID="5235dda8-909b-415c-b226-b0376d439555" containerID="a58a7452f96be96db72f32cfed0cfe8a581186cc350e4b0fe45ff219ff3442a1" exitCode=0
Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.157723 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56bbd59dc5-6xmgk"
Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.157711 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d79f765b5-2fl57" event={"ID":"5235dda8-909b-415c-b226-b0376d439555","Type":"ContainerDied","Data":"a58a7452f96be96db72f32cfed0cfe8a581186cc350e4b0fe45ff219ff3442a1"}
Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.157844 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d79f765b5-2fl57" event={"ID":"5235dda8-909b-415c-b226-b0376d439555","Type":"ContainerStarted","Data":"b8eb7395daabe677dab67cbc945f1e59763fba22c854a63e0487ddd72bd0b683"}
Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.216487 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.216822 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.216847 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.216884 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.216979 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-5659b60f-f93b-4d5a-a69b-b389e1d2b161\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5659b60f-f93b-4d5a-a69b-b389e1d2b161\") pod \"rabbitmq-cell1-server-0\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.217006 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.217025 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hk7qq\" (UniqueName: \"kubernetes.io/projected/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-kube-api-access-hk7qq\") pod \"rabbitmq-cell1-server-0\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.217044 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " pod="openstack/rabbitmq-cell1-server-0"
for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.217120 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.217146 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.217173 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.219043 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.219908 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.220338 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.220375 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.220911 4933 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.220955 4933 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-5659b60f-f93b-4d5a-a69b-b389e1d2b161\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5659b60f-f93b-4d5a-a69b-b389e1d2b161\") pod \"rabbitmq-cell1-server-0\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1aff8d075433c95ba6037a96dcf0081d327d095bb9ffbb2ab80a9de2848fd3e3/globalmount\"" pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.226653 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.248959 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56bbd59dc5-6xmgk"] Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.254713 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-56bbd59dc5-6xmgk"] Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.503853 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.503857 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.504964 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.505169 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.506365 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hk7qq\" (UniqueName: \"kubernetes.io/projected/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-kube-api-access-hk7qq\") pod \"rabbitmq-cell1-server-0\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.619944 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-5659b60f-f93b-4d5a-a69b-b389e1d2b161\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5659b60f-f93b-4d5a-a69b-b389e1d2b161\") pod \"rabbitmq-cell1-server-0\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " 
pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.674608 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5986db9b4f-xzbm5" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.695575 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.724190 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pc4r5\" (UniqueName: \"kubernetes.io/projected/c790bfe9-d3ba-48b9-9afa-b22398bac778-kube-api-access-pc4r5\") pod \"c790bfe9-d3ba-48b9-9afa-b22398bac778\" (UID: \"c790bfe9-d3ba-48b9-9afa-b22398bac778\") " Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.724338 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c790bfe9-d3ba-48b9-9afa-b22398bac778-config\") pod \"c790bfe9-d3ba-48b9-9afa-b22398bac778\" (UID: \"c790bfe9-d3ba-48b9-9afa-b22398bac778\") " Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.805363 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c790bfe9-d3ba-48b9-9afa-b22398bac778-kube-api-access-pc4r5" (OuterVolumeSpecName: "kube-api-access-pc4r5") pod "c790bfe9-d3ba-48b9-9afa-b22398bac778" (UID: "c790bfe9-d3ba-48b9-9afa-b22398bac778"). InnerVolumeSpecName "kube-api-access-pc4r5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.826221 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pc4r5\" (UniqueName: \"kubernetes.io/projected/c790bfe9-d3ba-48b9-9afa-b22398bac778-kube-api-access-pc4r5\") on node \"crc\" DevicePath \"\"" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.836350 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c790bfe9-d3ba-48b9-9afa-b22398bac778-config" (OuterVolumeSpecName: "config") pod "c790bfe9-d3ba-48b9-9afa-b22398bac778" (UID: "c790bfe9-d3ba-48b9-9afa-b22398bac778"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.864488 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Jan 22 07:06:49 crc kubenswrapper[4933]: E0122 07:06:49.865319 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c790bfe9-d3ba-48b9-9afa-b22398bac778" containerName="init" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.865359 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="c790bfe9-d3ba-48b9-9afa-b22398bac778" containerName="init" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.865666 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="c790bfe9-d3ba-48b9-9afa-b22398bac778" containerName="init" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.867070 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.872013 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.872046 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-mfzd5" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.872569 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.872663 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.881598 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.882537 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.931142 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d7f26cd-e0e4-414c-96a2-55ce3f6495af-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"7d7f26cd-e0e4-414c-96a2-55ce3f6495af\") " pod="openstack/openstack-galera-0" Jan 22 07:06:49 crc kubenswrapper[4933]: E0122 07:06:49.931235 4933 log.go:32] "CreateContainer in sandbox from runtime service failed" err=< Jan 22 07:06:49 crc kubenswrapper[4933]: rpc error: code = Unknown desc = container create failed: mount `/var/lib/kubelet/pods/3d7721a2-7381-4d86-b65c-a068329996ac/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Jan 22 07:06:49 crc kubenswrapper[4933]: > podSandboxID="db1e3d53ef8f806c37706cbb0cae9768d4ac3e3dcdc119fc05238e7fb83d2cc7" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.931298 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7d7f26cd-e0e4-414c-96a2-55ce3f6495af-operator-scripts\") pod \"openstack-galera-0\" (UID: \"7d7f26cd-e0e4-414c-96a2-55ce3f6495af\") " pod="openstack/openstack-galera-0" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.931391 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/7d7f26cd-e0e4-414c-96a2-55ce3f6495af-kolla-config\") pod \"openstack-galera-0\" (UID: \"7d7f26cd-e0e4-414c-96a2-55ce3f6495af\") " pod="openstack/openstack-galera-0" Jan 22 07:06:49 crc kubenswrapper[4933]: E0122 07:06:49.931411 4933 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 22 07:06:49 crc kubenswrapper[4933]: container &Container{Name:dnsmasq-dns,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv 
--log-queries],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n8chc6h5bh56fh546hb7hc8h67h5bchffh577h697h5b5h5bdh59bhf6hf4h558hb5h578h595h5cchfbh644h59ch7fh654h547h587h5cbh5d5h8fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fgqdv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-95587bc99-h476n_openstack(3d7721a2-7381-4d86-b65c-a068329996ac): CreateContainerError: container create failed: mount `/var/lib/kubelet/pods/3d7721a2-7381-4d86-b65c-a068329996ac/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Jan 22 07:06:49 crc kubenswrapper[4933]: > logger="UnhandledError" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.931433 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hd4h7\" (UniqueName: \"kubernetes.io/projected/7d7f26cd-e0e4-414c-96a2-55ce3f6495af-kube-api-access-hd4h7\") pod \"openstack-galera-0\" (UID: \"7d7f26cd-e0e4-414c-96a2-55ce3f6495af\") " pod="openstack/openstack-galera-0" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.931493 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/7d7f26cd-e0e4-414c-96a2-55ce3f6495af-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"7d7f26cd-e0e4-414c-96a2-55ce3f6495af\") " pod="openstack/openstack-galera-0" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.931581 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: 
\"kubernetes.io/empty-dir/7d7f26cd-e0e4-414c-96a2-55ce3f6495af-config-data-generated\") pod \"openstack-galera-0\" (UID: \"7d7f26cd-e0e4-414c-96a2-55ce3f6495af\") " pod="openstack/openstack-galera-0" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.931645 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-7d9c7f58-5bf9-40df-9446-eaff29ecd5e9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7d9c7f58-5bf9-40df-9446-eaff29ecd5e9\") pod \"openstack-galera-0\" (UID: \"7d7f26cd-e0e4-414c-96a2-55ce3f6495af\") " pod="openstack/openstack-galera-0" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.931682 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/7d7f26cd-e0e4-414c-96a2-55ce3f6495af-config-data-default\") pod \"openstack-galera-0\" (UID: \"7d7f26cd-e0e4-414c-96a2-55ce3f6495af\") " pod="openstack/openstack-galera-0" Jan 22 07:06:49 crc kubenswrapper[4933]: I0122 07:06:49.931910 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c790bfe9-d3ba-48b9-9afa-b22398bac778-config\") on node \"crc\" DevicePath \"\"" Jan 22 07:06:49 crc kubenswrapper[4933]: E0122 07:06:49.933144 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dnsmasq-dns\" with CreateContainerError: \"container create failed: mount `/var/lib/kubelet/pods/3d7721a2-7381-4d86-b65c-a068329996ac/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory\\n\"" pod="openstack/dnsmasq-dns-95587bc99-h476n" podUID="3d7721a2-7381-4d86-b65c-a068329996ac" Jan 22 07:06:50 crc kubenswrapper[4933]: I0122 07:06:50.032873 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/7d7f26cd-e0e4-414c-96a2-55ce3f6495af-config-data-default\") pod \"openstack-galera-0\" (UID: \"7d7f26cd-e0e4-414c-96a2-55ce3f6495af\") " pod="openstack/openstack-galera-0" Jan 22 07:06:50 crc kubenswrapper[4933]: I0122 07:06:50.032943 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-7d9c7f58-5bf9-40df-9446-eaff29ecd5e9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7d9c7f58-5bf9-40df-9446-eaff29ecd5e9\") pod \"openstack-galera-0\" (UID: \"7d7f26cd-e0e4-414c-96a2-55ce3f6495af\") " pod="openstack/openstack-galera-0" Jan 22 07:06:50 crc kubenswrapper[4933]: I0122 07:06:50.033015 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d7f26cd-e0e4-414c-96a2-55ce3f6495af-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"7d7f26cd-e0e4-414c-96a2-55ce3f6495af\") " pod="openstack/openstack-galera-0" Jan 22 07:06:50 crc kubenswrapper[4933]: I0122 07:06:50.033192 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7d7f26cd-e0e4-414c-96a2-55ce3f6495af-operator-scripts\") pod \"openstack-galera-0\" (UID: \"7d7f26cd-e0e4-414c-96a2-55ce3f6495af\") " pod="openstack/openstack-galera-0" Jan 22 07:06:50 crc kubenswrapper[4933]: I0122 07:06:50.033265 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: 
\"kubernetes.io/configmap/7d7f26cd-e0e4-414c-96a2-55ce3f6495af-kolla-config\") pod \"openstack-galera-0\" (UID: \"7d7f26cd-e0e4-414c-96a2-55ce3f6495af\") " pod="openstack/openstack-galera-0" Jan 22 07:06:50 crc kubenswrapper[4933]: I0122 07:06:50.033299 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hd4h7\" (UniqueName: \"kubernetes.io/projected/7d7f26cd-e0e4-414c-96a2-55ce3f6495af-kube-api-access-hd4h7\") pod \"openstack-galera-0\" (UID: \"7d7f26cd-e0e4-414c-96a2-55ce3f6495af\") " pod="openstack/openstack-galera-0" Jan 22 07:06:50 crc kubenswrapper[4933]: I0122 07:06:50.033347 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/7d7f26cd-e0e4-414c-96a2-55ce3f6495af-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"7d7f26cd-e0e4-414c-96a2-55ce3f6495af\") " pod="openstack/openstack-galera-0" Jan 22 07:06:50 crc kubenswrapper[4933]: I0122 07:06:50.033412 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/7d7f26cd-e0e4-414c-96a2-55ce3f6495af-config-data-generated\") pod \"openstack-galera-0\" (UID: \"7d7f26cd-e0e4-414c-96a2-55ce3f6495af\") " pod="openstack/openstack-galera-0" Jan 22 07:06:50 crc kubenswrapper[4933]: I0122 07:06:50.035129 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/7d7f26cd-e0e4-414c-96a2-55ce3f6495af-kolla-config\") pod \"openstack-galera-0\" (UID: \"7d7f26cd-e0e4-414c-96a2-55ce3f6495af\") " pod="openstack/openstack-galera-0" Jan 22 07:06:50 crc kubenswrapper[4933]: I0122 07:06:50.035361 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/7d7f26cd-e0e4-414c-96a2-55ce3f6495af-config-data-generated\") pod \"openstack-galera-0\" (UID: \"7d7f26cd-e0e4-414c-96a2-55ce3f6495af\") " pod="openstack/openstack-galera-0" Jan 22 07:06:50 crc kubenswrapper[4933]: I0122 07:06:50.036679 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7d7f26cd-e0e4-414c-96a2-55ce3f6495af-operator-scripts\") pod \"openstack-galera-0\" (UID: \"7d7f26cd-e0e4-414c-96a2-55ce3f6495af\") " pod="openstack/openstack-galera-0" Jan 22 07:06:50 crc kubenswrapper[4933]: I0122 07:06:50.037353 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/7d7f26cd-e0e4-414c-96a2-55ce3f6495af-config-data-default\") pod \"openstack-galera-0\" (UID: \"7d7f26cd-e0e4-414c-96a2-55ce3f6495af\") " pod="openstack/openstack-galera-0" Jan 22 07:06:50 crc kubenswrapper[4933]: I0122 07:06:50.043644 4933 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 22 07:06:50 crc kubenswrapper[4933]: I0122 07:06:50.043678 4933 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-7d9c7f58-5bf9-40df-9446-eaff29ecd5e9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7d9c7f58-5bf9-40df-9446-eaff29ecd5e9\") pod \"openstack-galera-0\" (UID: \"7d7f26cd-e0e4-414c-96a2-55ce3f6495af\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/0a2a77633cb7d5f6d80de2208e05f81ab95c3998f6824f05123d2a5280fd3977/globalmount\"" pod="openstack/openstack-galera-0" Jan 22 07:06:50 crc kubenswrapper[4933]: I0122 07:06:50.047706 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d7f26cd-e0e4-414c-96a2-55ce3f6495af-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"7d7f26cd-e0e4-414c-96a2-55ce3f6495af\") " pod="openstack/openstack-galera-0" Jan 22 07:06:50 crc kubenswrapper[4933]: I0122 07:06:50.049817 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/7d7f26cd-e0e4-414c-96a2-55ce3f6495af-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"7d7f26cd-e0e4-414c-96a2-55ce3f6495af\") " pod="openstack/openstack-galera-0" Jan 22 07:06:50 crc kubenswrapper[4933]: I0122 07:06:50.056289 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hd4h7\" (UniqueName: \"kubernetes.io/projected/7d7f26cd-e0e4-414c-96a2-55ce3f6495af-kube-api-access-hd4h7\") pod \"openstack-galera-0\" (UID: \"7d7f26cd-e0e4-414c-96a2-55ce3f6495af\") " pod="openstack/openstack-galera-0" Jan 22 07:06:50 crc kubenswrapper[4933]: I0122 07:06:50.074127 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-7d9c7f58-5bf9-40df-9446-eaff29ecd5e9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7d9c7f58-5bf9-40df-9446-eaff29ecd5e9\") pod \"openstack-galera-0\" (UID: \"7d7f26cd-e0e4-414c-96a2-55ce3f6495af\") " pod="openstack/openstack-galera-0" Jan 22 07:06:50 crc kubenswrapper[4933]: I0122 07:06:50.169355 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5986db9b4f-xzbm5" event={"ID":"c790bfe9-d3ba-48b9-9afa-b22398bac778","Type":"ContainerDied","Data":"82c8973b8d5ba676cd92234e6cb21a561190e1925da311500c707b4a83c3a00c"} Jan 22 07:06:50 crc kubenswrapper[4933]: I0122 07:06:50.169410 4933 scope.go:117] "RemoveContainer" containerID="6decac47190b8fb502ae29d7ded367809876e84a4dd7a348e8b6cf8555fc04e8" Jan 22 07:06:50 crc kubenswrapper[4933]: I0122 07:06:50.169419 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5986db9b4f-xzbm5" Jan 22 07:06:50 crc kubenswrapper[4933]: I0122 07:06:50.175020 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d79f765b5-2fl57" event={"ID":"5235dda8-909b-415c-b226-b0376d439555","Type":"ContainerStarted","Data":"fe82a5213f67e01e95def3bb6d552cc7cd565448c238f9cdacc3041d48404c95"} Jan 22 07:06:50 crc kubenswrapper[4933]: I0122 07:06:50.175463 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5d79f765b5-2fl57" Jan 22 07:06:50 crc kubenswrapper[4933]: I0122 07:06:50.177601 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806","Type":"ContainerStarted","Data":"371542dfb79fc90dee567a599c8bbc2d8e943c92c2986e3818927494c7be78a3"} Jan 22 07:06:50 crc kubenswrapper[4933]: I0122 07:06:50.208467 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5d79f765b5-2fl57" podStartSLOduration=3.208447229 podStartE2EDuration="3.208447229s" podCreationTimestamp="2026-01-22 07:06:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:06:50.197857653 +0000 UTC m=+4858.034983016" watchObservedRunningTime="2026-01-22 07:06:50.208447229 +0000 UTC m=+4858.045572582" Jan 22 07:06:50 crc kubenswrapper[4933]: I0122 07:06:50.283235 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5986db9b4f-xzbm5"] Jan 22 07:06:50 crc kubenswrapper[4933]: I0122 07:06:50.296984 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5986db9b4f-xzbm5"] Jan 22 07:06:50 crc kubenswrapper[4933]: I0122 07:06:50.318897 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 22 07:06:50 crc kubenswrapper[4933]: I0122 07:06:50.368582 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Jan 22 07:06:50 crc kubenswrapper[4933]: I0122 07:06:50.514739 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a42a0f99-db04-4785-81b9-79f395134e77" path="/var/lib/kubelet/pods/a42a0f99-db04-4785-81b9-79f395134e77/volumes" Jan 22 07:06:50 crc kubenswrapper[4933]: I0122 07:06:50.515068 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c790bfe9-d3ba-48b9-9afa-b22398bac778" path="/var/lib/kubelet/pods/c790bfe9-d3ba-48b9-9afa-b22398bac778/volumes" Jan 22 07:06:50 crc kubenswrapper[4933]: I0122 07:06:50.857146 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 22 07:06:51 crc kubenswrapper[4933]: W0122 07:06:51.107319 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7d7f26cd_e0e4_414c_96a2_55ce3f6495af.slice/crio-64a3902c491ee3ed9c77394e8e75a1ea13c26f086360b863d486216037f4d128 WatchSource:0}: Error finding container 64a3902c491ee3ed9c77394e8e75a1ea13c26f086360b863d486216037f4d128: Status 404 returned error can't find the container with id 64a3902c491ee3ed9c77394e8e75a1ea13c26f086360b863d486216037f4d128 Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.185639 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28","Type":"ContainerStarted","Data":"98c7ae00f5d21e73b35646042184c5ebb1fa29996e763be15042ea933e314a24"} Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.190152 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806","Type":"ContainerStarted","Data":"6bcb2eb32e3438f433faa55236d648ae8199e7f276cc4eb1a18b21f08a68455b"} Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.191556 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"7d7f26cd-e0e4-414c-96a2-55ce3f6495af","Type":"ContainerStarted","Data":"64a3902c491ee3ed9c77394e8e75a1ea13c26f086360b863d486216037f4d128"} Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.196177 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95587bc99-h476n" event={"ID":"3d7721a2-7381-4d86-b65c-a068329996ac","Type":"ContainerStarted","Data":"0220a6ad7266c2ac2ddab7d915f1b31cbe6fd360871920e001422f83ff2a20fb"} Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.196422 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-95587bc99-h476n" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.223882 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-95587bc99-h476n" podStartSLOduration=4.223855294 podStartE2EDuration="4.223855294s" podCreationTimestamp="2026-01-22 07:06:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:06:51.214317523 +0000 UTC m=+4859.051442896" watchObservedRunningTime="2026-01-22 07:06:51.223855294 +0000 UTC m=+4859.060980667" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.470726 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.472259 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.474775 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.475156 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.475280 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-wdhtv" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.475304 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.487620 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.661111 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/cb145d5a-1e4b-43f9-8a29-009a5a89ea2f-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"cb145d5a-1e4b-43f9-8a29-009a5a89ea2f\") " pod="openstack/openstack-cell1-galera-0" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.661236 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/cb145d5a-1e4b-43f9-8a29-009a5a89ea2f-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"cb145d5a-1e4b-43f9-8a29-009a5a89ea2f\") " pod="openstack/openstack-cell1-galera-0" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.661314 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb145d5a-1e4b-43f9-8a29-009a5a89ea2f-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"cb145d5a-1e4b-43f9-8a29-009a5a89ea2f\") " pod="openstack/openstack-cell1-galera-0" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.661382 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/cb145d5a-1e4b-43f9-8a29-009a5a89ea2f-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"cb145d5a-1e4b-43f9-8a29-009a5a89ea2f\") " pod="openstack/openstack-cell1-galera-0" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.661523 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-78435ec5-4bb8-4ed9-831a-0a5bd3b0aa28\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-78435ec5-4bb8-4ed9-831a-0a5bd3b0aa28\") pod \"openstack-cell1-galera-0\" (UID: \"cb145d5a-1e4b-43f9-8a29-009a5a89ea2f\") " pod="openstack/openstack-cell1-galera-0" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.661645 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cb145d5a-1e4b-43f9-8a29-009a5a89ea2f-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"cb145d5a-1e4b-43f9-8a29-009a5a89ea2f\") " pod="openstack/openstack-cell1-galera-0" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.661680 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-p5z82\" (UniqueName: \"kubernetes.io/projected/cb145d5a-1e4b-43f9-8a29-009a5a89ea2f-kube-api-access-p5z82\") pod \"openstack-cell1-galera-0\" (UID: \"cb145d5a-1e4b-43f9-8a29-009a5a89ea2f\") " pod="openstack/openstack-cell1-galera-0" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.661804 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/cb145d5a-1e4b-43f9-8a29-009a5a89ea2f-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"cb145d5a-1e4b-43f9-8a29-009a5a89ea2f\") " pod="openstack/openstack-cell1-galera-0" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.737917 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.739572 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.742167 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-hwhm5" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.742340 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.745246 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.758728 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.765203 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-78435ec5-4bb8-4ed9-831a-0a5bd3b0aa28\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-78435ec5-4bb8-4ed9-831a-0a5bd3b0aa28\") pod \"openstack-cell1-galera-0\" (UID: \"cb145d5a-1e4b-43f9-8a29-009a5a89ea2f\") " pod="openstack/openstack-cell1-galera-0" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.765689 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cb145d5a-1e4b-43f9-8a29-009a5a89ea2f-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"cb145d5a-1e4b-43f9-8a29-009a5a89ea2f\") " pod="openstack/openstack-cell1-galera-0" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.765724 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p5z82\" (UniqueName: \"kubernetes.io/projected/cb145d5a-1e4b-43f9-8a29-009a5a89ea2f-kube-api-access-p5z82\") pod \"openstack-cell1-galera-0\" (UID: \"cb145d5a-1e4b-43f9-8a29-009a5a89ea2f\") " pod="openstack/openstack-cell1-galera-0" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.766111 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/cb145d5a-1e4b-43f9-8a29-009a5a89ea2f-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"cb145d5a-1e4b-43f9-8a29-009a5a89ea2f\") " pod="openstack/openstack-cell1-galera-0" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.766210 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/cb145d5a-1e4b-43f9-8a29-009a5a89ea2f-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: 
\"cb145d5a-1e4b-43f9-8a29-009a5a89ea2f\") " pod="openstack/openstack-cell1-galera-0" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.766800 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/cb145d5a-1e4b-43f9-8a29-009a5a89ea2f-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"cb145d5a-1e4b-43f9-8a29-009a5a89ea2f\") " pod="openstack/openstack-cell1-galera-0" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.766980 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/cb145d5a-1e4b-43f9-8a29-009a5a89ea2f-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"cb145d5a-1e4b-43f9-8a29-009a5a89ea2f\") " pod="openstack/openstack-cell1-galera-0" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.767024 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb145d5a-1e4b-43f9-8a29-009a5a89ea2f-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"cb145d5a-1e4b-43f9-8a29-009a5a89ea2f\") " pod="openstack/openstack-cell1-galera-0" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.767156 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/cb145d5a-1e4b-43f9-8a29-009a5a89ea2f-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"cb145d5a-1e4b-43f9-8a29-009a5a89ea2f\") " pod="openstack/openstack-cell1-galera-0" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.767730 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cb145d5a-1e4b-43f9-8a29-009a5a89ea2f-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"cb145d5a-1e4b-43f9-8a29-009a5a89ea2f\") " pod="openstack/openstack-cell1-galera-0" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.767804 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/cb145d5a-1e4b-43f9-8a29-009a5a89ea2f-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"cb145d5a-1e4b-43f9-8a29-009a5a89ea2f\") " pod="openstack/openstack-cell1-galera-0" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.768137 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/cb145d5a-1e4b-43f9-8a29-009a5a89ea2f-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"cb145d5a-1e4b-43f9-8a29-009a5a89ea2f\") " pod="openstack/openstack-cell1-galera-0" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.772278 4933 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.772329 4933 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-78435ec5-4bb8-4ed9-831a-0a5bd3b0aa28\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-78435ec5-4bb8-4ed9-831a-0a5bd3b0aa28\") pod \"openstack-cell1-galera-0\" (UID: \"cb145d5a-1e4b-43f9-8a29-009a5a89ea2f\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/f3c2d68724d1c08831810a4432e1edec90fd94951d0c706d460bbe1d367fad92/globalmount\"" pod="openstack/openstack-cell1-galera-0" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.776141 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb145d5a-1e4b-43f9-8a29-009a5a89ea2f-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"cb145d5a-1e4b-43f9-8a29-009a5a89ea2f\") " pod="openstack/openstack-cell1-galera-0" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.783995 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/cb145d5a-1e4b-43f9-8a29-009a5a89ea2f-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"cb145d5a-1e4b-43f9-8a29-009a5a89ea2f\") " pod="openstack/openstack-cell1-galera-0" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.787470 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p5z82\" (UniqueName: \"kubernetes.io/projected/cb145d5a-1e4b-43f9-8a29-009a5a89ea2f-kube-api-access-p5z82\") pod \"openstack-cell1-galera-0\" (UID: \"cb145d5a-1e4b-43f9-8a29-009a5a89ea2f\") " pod="openstack/openstack-cell1-galera-0" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.813006 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-78435ec5-4bb8-4ed9-831a-0a5bd3b0aa28\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-78435ec5-4bb8-4ed9-831a-0a5bd3b0aa28\") pod \"openstack-cell1-galera-0\" (UID: \"cb145d5a-1e4b-43f9-8a29-009a5a89ea2f\") " pod="openstack/openstack-cell1-galera-0" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.868242 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/74eb606e-de69-4502-b07d-e11628d32afc-kolla-config\") pod \"memcached-0\" (UID: \"74eb606e-de69-4502-b07d-e11628d32afc\") " pod="openstack/memcached-0" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.868308 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/74eb606e-de69-4502-b07d-e11628d32afc-memcached-tls-certs\") pod \"memcached-0\" (UID: \"74eb606e-de69-4502-b07d-e11628d32afc\") " pod="openstack/memcached-0" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.868373 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qkgfj\" (UniqueName: \"kubernetes.io/projected/74eb606e-de69-4502-b07d-e11628d32afc-kube-api-access-qkgfj\") pod \"memcached-0\" (UID: \"74eb606e-de69-4502-b07d-e11628d32afc\") " pod="openstack/memcached-0" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.868449 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/74eb606e-de69-4502-b07d-e11628d32afc-config-data\") pod \"memcached-0\" (UID: \"74eb606e-de69-4502-b07d-e11628d32afc\") " pod="openstack/memcached-0" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.868506 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74eb606e-de69-4502-b07d-e11628d32afc-combined-ca-bundle\") pod \"memcached-0\" (UID: \"74eb606e-de69-4502-b07d-e11628d32afc\") " pod="openstack/memcached-0" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.969988 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/74eb606e-de69-4502-b07d-e11628d32afc-kolla-config\") pod \"memcached-0\" (UID: \"74eb606e-de69-4502-b07d-e11628d32afc\") " pod="openstack/memcached-0" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.970439 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/74eb606e-de69-4502-b07d-e11628d32afc-memcached-tls-certs\") pod \"memcached-0\" (UID: \"74eb606e-de69-4502-b07d-e11628d32afc\") " pod="openstack/memcached-0" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.970466 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qkgfj\" (UniqueName: \"kubernetes.io/projected/74eb606e-de69-4502-b07d-e11628d32afc-kube-api-access-qkgfj\") pod \"memcached-0\" (UID: \"74eb606e-de69-4502-b07d-e11628d32afc\") " pod="openstack/memcached-0" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.970520 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/74eb606e-de69-4502-b07d-e11628d32afc-config-data\") pod \"memcached-0\" (UID: \"74eb606e-de69-4502-b07d-e11628d32afc\") " pod="openstack/memcached-0" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.970588 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74eb606e-de69-4502-b07d-e11628d32afc-combined-ca-bundle\") pod \"memcached-0\" (UID: \"74eb606e-de69-4502-b07d-e11628d32afc\") " pod="openstack/memcached-0" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.970788 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/74eb606e-de69-4502-b07d-e11628d32afc-kolla-config\") pod \"memcached-0\" (UID: \"74eb606e-de69-4502-b07d-e11628d32afc\") " pod="openstack/memcached-0" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.972713 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/74eb606e-de69-4502-b07d-e11628d32afc-config-data\") pod \"memcached-0\" (UID: \"74eb606e-de69-4502-b07d-e11628d32afc\") " pod="openstack/memcached-0" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.975484 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74eb606e-de69-4502-b07d-e11628d32afc-combined-ca-bundle\") pod \"memcached-0\" (UID: \"74eb606e-de69-4502-b07d-e11628d32afc\") " pod="openstack/memcached-0" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.984611 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/74eb606e-de69-4502-b07d-e11628d32afc-memcached-tls-certs\") pod \"memcached-0\" (UID: \"74eb606e-de69-4502-b07d-e11628d32afc\") " pod="openstack/memcached-0" Jan 22 07:06:51 crc kubenswrapper[4933]: I0122 07:06:51.991845 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qkgfj\" (UniqueName: \"kubernetes.io/projected/74eb606e-de69-4502-b07d-e11628d32afc-kube-api-access-qkgfj\") pod \"memcached-0\" (UID: \"74eb606e-de69-4502-b07d-e11628d32afc\") " pod="openstack/memcached-0" Jan 22 07:06:52 crc kubenswrapper[4933]: I0122 07:06:52.065156 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Jan 22 07:06:52 crc kubenswrapper[4933]: I0122 07:06:52.089155 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 22 07:06:52 crc kubenswrapper[4933]: I0122 07:06:52.213340 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28","Type":"ContainerStarted","Data":"1429dd7777095f7bbc1180ea270b0ac7d5de196f925d714efce8f9139d4829f1"} Jan 22 07:06:52 crc kubenswrapper[4933]: I0122 07:06:52.216107 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"7d7f26cd-e0e4-414c-96a2-55ce3f6495af","Type":"ContainerStarted","Data":"2266607e24f05202f51a3f27a6e0f6baddc251331a709fadffe17827154263a7"} Jan 22 07:06:52 crc kubenswrapper[4933]: I0122 07:06:52.553390 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 22 07:06:52 crc kubenswrapper[4933]: W0122 07:06:52.553434 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod74eb606e_de69_4502_b07d_e11628d32afc.slice/crio-89e9ba991a5cd506f75ce4062dc4397d14abd760bb4cfe8c72a9986d06678420 WatchSource:0}: Error finding container 89e9ba991a5cd506f75ce4062dc4397d14abd760bb4cfe8c72a9986d06678420: Status 404 returned error can't find the container with id 89e9ba991a5cd506f75ce4062dc4397d14abd760bb4cfe8c72a9986d06678420 Jan 22 07:06:52 crc kubenswrapper[4933]: I0122 07:06:52.620984 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 22 07:06:52 crc kubenswrapper[4933]: W0122 07:06:52.623958 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcb145d5a_1e4b_43f9_8a29_009a5a89ea2f.slice/crio-341e62eb4c6a49a58898b73fd79c75c768e2a26e98a88622e57b5d19ca218d35 WatchSource:0}: Error finding container 341e62eb4c6a49a58898b73fd79c75c768e2a26e98a88622e57b5d19ca218d35: Status 404 returned error can't find the container with id 341e62eb4c6a49a58898b73fd79c75c768e2a26e98a88622e57b5d19ca218d35 Jan 22 07:06:53 crc kubenswrapper[4933]: I0122 07:06:53.223457 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"74eb606e-de69-4502-b07d-e11628d32afc","Type":"ContainerStarted","Data":"6e5a2e92373ec297cba85b1c3c0d25e274e955444e3c80ce4dc7eeacd844187a"} Jan 22 07:06:53 crc kubenswrapper[4933]: I0122 07:06:53.223802 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Jan 22 07:06:53 crc kubenswrapper[4933]: I0122 07:06:53.223815 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" 
event={"ID":"74eb606e-de69-4502-b07d-e11628d32afc","Type":"ContainerStarted","Data":"89e9ba991a5cd506f75ce4062dc4397d14abd760bb4cfe8c72a9986d06678420"} Jan 22 07:06:53 crc kubenswrapper[4933]: I0122 07:06:53.225668 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"cb145d5a-1e4b-43f9-8a29-009a5a89ea2f","Type":"ContainerStarted","Data":"4c3a4868c91419b5c0465e9d917771d2e18602aa113e8896aed2f424f09276c7"} Jan 22 07:06:53 crc kubenswrapper[4933]: I0122 07:06:53.225751 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"cb145d5a-1e4b-43f9-8a29-009a5a89ea2f","Type":"ContainerStarted","Data":"341e62eb4c6a49a58898b73fd79c75c768e2a26e98a88622e57b5d19ca218d35"} Jan 22 07:06:53 crc kubenswrapper[4933]: I0122 07:06:53.263782 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=2.263758773 podStartE2EDuration="2.263758773s" podCreationTimestamp="2026-01-22 07:06:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:06:53.243681056 +0000 UTC m=+4861.080806419" watchObservedRunningTime="2026-01-22 07:06:53.263758773 +0000 UTC m=+4861.100884136" Jan 22 07:06:56 crc kubenswrapper[4933]: I0122 07:06:56.253596 4933 generic.go:334] "Generic (PLEG): container finished" podID="7d7f26cd-e0e4-414c-96a2-55ce3f6495af" containerID="2266607e24f05202f51a3f27a6e0f6baddc251331a709fadffe17827154263a7" exitCode=0 Jan 22 07:06:56 crc kubenswrapper[4933]: I0122 07:06:56.253725 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"7d7f26cd-e0e4-414c-96a2-55ce3f6495af","Type":"ContainerDied","Data":"2266607e24f05202f51a3f27a6e0f6baddc251331a709fadffe17827154263a7"} Jan 22 07:06:57 crc kubenswrapper[4933]: I0122 07:06:57.069749 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Jan 22 07:06:57 crc kubenswrapper[4933]: I0122 07:06:57.272195 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"7d7f26cd-e0e4-414c-96a2-55ce3f6495af","Type":"ContainerStarted","Data":"2c7427bed9256ceb7fc38092de7603b06fccfc6f11e16771fa6b7007843abb19"} Jan 22 07:06:57 crc kubenswrapper[4933]: I0122 07:06:57.275716 4933 generic.go:334] "Generic (PLEG): container finished" podID="cb145d5a-1e4b-43f9-8a29-009a5a89ea2f" containerID="4c3a4868c91419b5c0465e9d917771d2e18602aa113e8896aed2f424f09276c7" exitCode=0 Jan 22 07:06:57 crc kubenswrapper[4933]: I0122 07:06:57.275754 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"cb145d5a-1e4b-43f9-8a29-009a5a89ea2f","Type":"ContainerDied","Data":"4c3a4868c91419b5c0465e9d917771d2e18602aa113e8896aed2f424f09276c7"} Jan 22 07:06:57 crc kubenswrapper[4933]: I0122 07:06:57.300225 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=9.30020504 podStartE2EDuration="9.30020504s" podCreationTimestamp="2026-01-22 07:06:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:06:57.295606758 +0000 UTC m=+4865.132732121" watchObservedRunningTime="2026-01-22 07:06:57.30020504 +0000 UTC m=+4865.137330393" Jan 22 07:06:57 crc kubenswrapper[4933]: I0122 07:06:57.773368 4933 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-95587bc99-h476n" Jan 22 07:06:58 crc kubenswrapper[4933]: I0122 07:06:58.284862 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"cb145d5a-1e4b-43f9-8a29-009a5a89ea2f","Type":"ContainerStarted","Data":"0fad2697640a46572e46680d0d1debbefc0247ddab232a481993089cbf95683d"} Jan 22 07:06:58 crc kubenswrapper[4933]: I0122 07:06:58.292206 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5d79f765b5-2fl57" Jan 22 07:06:58 crc kubenswrapper[4933]: I0122 07:06:58.305696 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=8.305680794 podStartE2EDuration="8.305680794s" podCreationTimestamp="2026-01-22 07:06:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:06:58.302058195 +0000 UTC m=+4866.139183568" watchObservedRunningTime="2026-01-22 07:06:58.305680794 +0000 UTC m=+4866.142806147" Jan 22 07:06:58 crc kubenswrapper[4933]: I0122 07:06:58.355153 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-95587bc99-h476n"] Jan 22 07:06:58 crc kubenswrapper[4933]: I0122 07:06:58.355822 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-95587bc99-h476n" podUID="3d7721a2-7381-4d86-b65c-a068329996ac" containerName="dnsmasq-dns" containerID="cri-o://0220a6ad7266c2ac2ddab7d915f1b31cbe6fd360871920e001422f83ff2a20fb" gracePeriod=10 Jan 22 07:06:58 crc kubenswrapper[4933]: I0122 07:06:58.962824 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-95587bc99-h476n" Jan 22 07:06:59 crc kubenswrapper[4933]: I0122 07:06:59.083368 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fgqdv\" (UniqueName: \"kubernetes.io/projected/3d7721a2-7381-4d86-b65c-a068329996ac-kube-api-access-fgqdv\") pod \"3d7721a2-7381-4d86-b65c-a068329996ac\" (UID: \"3d7721a2-7381-4d86-b65c-a068329996ac\") " Jan 22 07:06:59 crc kubenswrapper[4933]: I0122 07:06:59.083523 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d7721a2-7381-4d86-b65c-a068329996ac-config\") pod \"3d7721a2-7381-4d86-b65c-a068329996ac\" (UID: \"3d7721a2-7381-4d86-b65c-a068329996ac\") " Jan 22 07:06:59 crc kubenswrapper[4933]: I0122 07:06:59.083600 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3d7721a2-7381-4d86-b65c-a068329996ac-dns-svc\") pod \"3d7721a2-7381-4d86-b65c-a068329996ac\" (UID: \"3d7721a2-7381-4d86-b65c-a068329996ac\") " Jan 22 07:06:59 crc kubenswrapper[4933]: I0122 07:06:59.090241 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d7721a2-7381-4d86-b65c-a068329996ac-kube-api-access-fgqdv" (OuterVolumeSpecName: "kube-api-access-fgqdv") pod "3d7721a2-7381-4d86-b65c-a068329996ac" (UID: "3d7721a2-7381-4d86-b65c-a068329996ac"). InnerVolumeSpecName "kube-api-access-fgqdv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:06:59 crc kubenswrapper[4933]: I0122 07:06:59.117247 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d7721a2-7381-4d86-b65c-a068329996ac-config" (OuterVolumeSpecName: "config") pod "3d7721a2-7381-4d86-b65c-a068329996ac" (UID: "3d7721a2-7381-4d86-b65c-a068329996ac"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:06:59 crc kubenswrapper[4933]: I0122 07:06:59.118423 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d7721a2-7381-4d86-b65c-a068329996ac-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3d7721a2-7381-4d86-b65c-a068329996ac" (UID: "3d7721a2-7381-4d86-b65c-a068329996ac"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:06:59 crc kubenswrapper[4933]: I0122 07:06:59.185717 4933 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3d7721a2-7381-4d86-b65c-a068329996ac-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 07:06:59 crc kubenswrapper[4933]: I0122 07:06:59.185758 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fgqdv\" (UniqueName: \"kubernetes.io/projected/3d7721a2-7381-4d86-b65c-a068329996ac-kube-api-access-fgqdv\") on node \"crc\" DevicePath \"\"" Jan 22 07:06:59 crc kubenswrapper[4933]: I0122 07:06:59.185769 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d7721a2-7381-4d86-b65c-a068329996ac-config\") on node \"crc\" DevicePath \"\"" Jan 22 07:06:59 crc kubenswrapper[4933]: I0122 07:06:59.293479 4933 generic.go:334] "Generic (PLEG): container finished" podID="3d7721a2-7381-4d86-b65c-a068329996ac" containerID="0220a6ad7266c2ac2ddab7d915f1b31cbe6fd360871920e001422f83ff2a20fb" exitCode=0 Jan 22 07:06:59 crc kubenswrapper[4933]: I0122 07:06:59.293538 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95587bc99-h476n" event={"ID":"3d7721a2-7381-4d86-b65c-a068329996ac","Type":"ContainerDied","Data":"0220a6ad7266c2ac2ddab7d915f1b31cbe6fd360871920e001422f83ff2a20fb"} Jan 22 07:06:59 crc kubenswrapper[4933]: I0122 07:06:59.293562 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95587bc99-h476n" event={"ID":"3d7721a2-7381-4d86-b65c-a068329996ac","Type":"ContainerDied","Data":"db1e3d53ef8f806c37706cbb0cae9768d4ac3e3dcdc119fc05238e7fb83d2cc7"} Jan 22 07:06:59 crc kubenswrapper[4933]: I0122 07:06:59.293579 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-95587bc99-h476n" Jan 22 07:06:59 crc kubenswrapper[4933]: I0122 07:06:59.293598 4933 scope.go:117] "RemoveContainer" containerID="0220a6ad7266c2ac2ddab7d915f1b31cbe6fd360871920e001422f83ff2a20fb" Jan 22 07:06:59 crc kubenswrapper[4933]: I0122 07:06:59.314508 4933 scope.go:117] "RemoveContainer" containerID="8785bf24cffc0148e40baa8a855c842fee0bb6e7e106055b1441cf0375ebc3ef" Jan 22 07:06:59 crc kubenswrapper[4933]: I0122 07:06:59.331748 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-95587bc99-h476n"] Jan 22 07:06:59 crc kubenswrapper[4933]: I0122 07:06:59.335992 4933 scope.go:117] "RemoveContainer" containerID="0220a6ad7266c2ac2ddab7d915f1b31cbe6fd360871920e001422f83ff2a20fb" Jan 22 07:06:59 crc kubenswrapper[4933]: E0122 07:06:59.336485 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0220a6ad7266c2ac2ddab7d915f1b31cbe6fd360871920e001422f83ff2a20fb\": container with ID starting with 0220a6ad7266c2ac2ddab7d915f1b31cbe6fd360871920e001422f83ff2a20fb not found: ID does not exist" containerID="0220a6ad7266c2ac2ddab7d915f1b31cbe6fd360871920e001422f83ff2a20fb" Jan 22 07:06:59 crc kubenswrapper[4933]: I0122 07:06:59.336526 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0220a6ad7266c2ac2ddab7d915f1b31cbe6fd360871920e001422f83ff2a20fb"} err="failed to get container status \"0220a6ad7266c2ac2ddab7d915f1b31cbe6fd360871920e001422f83ff2a20fb\": rpc error: code = NotFound desc = could not find container \"0220a6ad7266c2ac2ddab7d915f1b31cbe6fd360871920e001422f83ff2a20fb\": container with ID starting with 0220a6ad7266c2ac2ddab7d915f1b31cbe6fd360871920e001422f83ff2a20fb not found: ID does not exist" Jan 22 07:06:59 crc kubenswrapper[4933]: I0122 07:06:59.336551 4933 scope.go:117] "RemoveContainer" containerID="8785bf24cffc0148e40baa8a855c842fee0bb6e7e106055b1441cf0375ebc3ef" Jan 22 07:06:59 crc kubenswrapper[4933]: E0122 07:06:59.337109 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8785bf24cffc0148e40baa8a855c842fee0bb6e7e106055b1441cf0375ebc3ef\": container with ID starting with 8785bf24cffc0148e40baa8a855c842fee0bb6e7e106055b1441cf0375ebc3ef not found: ID does not exist" containerID="8785bf24cffc0148e40baa8a855c842fee0bb6e7e106055b1441cf0375ebc3ef" Jan 22 07:06:59 crc kubenswrapper[4933]: I0122 07:06:59.337148 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8785bf24cffc0148e40baa8a855c842fee0bb6e7e106055b1441cf0375ebc3ef"} err="failed to get container status \"8785bf24cffc0148e40baa8a855c842fee0bb6e7e106055b1441cf0375ebc3ef\": rpc error: code = NotFound desc = could not find container \"8785bf24cffc0148e40baa8a855c842fee0bb6e7e106055b1441cf0375ebc3ef\": container with ID starting with 8785bf24cffc0148e40baa8a855c842fee0bb6e7e106055b1441cf0375ebc3ef not found: ID does not exist" Jan 22 07:06:59 crc kubenswrapper[4933]: I0122 07:06:59.338314 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-95587bc99-h476n"] Jan 22 07:07:00 crc kubenswrapper[4933]: I0122 07:07:00.368699 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Jan 22 07:07:00 crc kubenswrapper[4933]: I0122 07:07:00.369067 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openstack/openstack-galera-0" Jan 22 07:07:00 crc kubenswrapper[4933]: I0122 07:07:00.500774 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3d7721a2-7381-4d86-b65c-a068329996ac" path="/var/lib/kubelet/pods/3d7721a2-7381-4d86-b65c-a068329996ac/volumes" Jan 22 07:07:02 crc kubenswrapper[4933]: I0122 07:07:02.089883 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Jan 22 07:07:02 crc kubenswrapper[4933]: I0122 07:07:02.089940 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Jan 22 07:07:02 crc kubenswrapper[4933]: I0122 07:07:02.495931 4933 scope.go:117] "RemoveContainer" containerID="d50102539d4e07b60779f4528e926809a62f0f1c3d74b4eadbe38466d0573cb2" Jan 22 07:07:02 crc kubenswrapper[4933]: E0122 07:07:02.496228 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:07:02 crc kubenswrapper[4933]: I0122 07:07:02.704176 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Jan 22 07:07:02 crc kubenswrapper[4933]: I0122 07:07:02.777018 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Jan 22 07:07:03 crc kubenswrapper[4933]: I0122 07:07:03.051619 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Jan 22 07:07:03 crc kubenswrapper[4933]: I0122 07:07:03.132651 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Jan 22 07:07:08 crc kubenswrapper[4933]: I0122 07:07:08.834018 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-zlqqp"] Jan 22 07:07:08 crc kubenswrapper[4933]: E0122 07:07:08.835205 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d7721a2-7381-4d86-b65c-a068329996ac" containerName="init" Jan 22 07:07:08 crc kubenswrapper[4933]: I0122 07:07:08.835231 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d7721a2-7381-4d86-b65c-a068329996ac" containerName="init" Jan 22 07:07:08 crc kubenswrapper[4933]: E0122 07:07:08.835249 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d7721a2-7381-4d86-b65c-a068329996ac" containerName="dnsmasq-dns" Jan 22 07:07:08 crc kubenswrapper[4933]: I0122 07:07:08.835261 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d7721a2-7381-4d86-b65c-a068329996ac" containerName="dnsmasq-dns" Jan 22 07:07:08 crc kubenswrapper[4933]: I0122 07:07:08.835576 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d7721a2-7381-4d86-b65c-a068329996ac" containerName="dnsmasq-dns" Jan 22 07:07:08 crc kubenswrapper[4933]: I0122 07:07:08.836407 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-zlqqp" Jan 22 07:07:08 crc kubenswrapper[4933]: I0122 07:07:08.840039 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-zlqqp"] Jan 22 07:07:08 crc kubenswrapper[4933]: I0122 07:07:08.895480 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret" Jan 22 07:07:08 crc kubenswrapper[4933]: I0122 07:07:08.931009 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qnw48\" (UniqueName: \"kubernetes.io/projected/d84517d7-8000-4674-854f-7adb800aeb94-kube-api-access-qnw48\") pod \"root-account-create-update-zlqqp\" (UID: \"d84517d7-8000-4674-854f-7adb800aeb94\") " pod="openstack/root-account-create-update-zlqqp" Jan 22 07:07:08 crc kubenswrapper[4933]: I0122 07:07:08.931234 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d84517d7-8000-4674-854f-7adb800aeb94-operator-scripts\") pod \"root-account-create-update-zlqqp\" (UID: \"d84517d7-8000-4674-854f-7adb800aeb94\") " pod="openstack/root-account-create-update-zlqqp" Jan 22 07:07:09 crc kubenswrapper[4933]: I0122 07:07:09.032159 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d84517d7-8000-4674-854f-7adb800aeb94-operator-scripts\") pod \"root-account-create-update-zlqqp\" (UID: \"d84517d7-8000-4674-854f-7adb800aeb94\") " pod="openstack/root-account-create-update-zlqqp" Jan 22 07:07:09 crc kubenswrapper[4933]: I0122 07:07:09.032218 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qnw48\" (UniqueName: \"kubernetes.io/projected/d84517d7-8000-4674-854f-7adb800aeb94-kube-api-access-qnw48\") pod \"root-account-create-update-zlqqp\" (UID: \"d84517d7-8000-4674-854f-7adb800aeb94\") " pod="openstack/root-account-create-update-zlqqp" Jan 22 07:07:09 crc kubenswrapper[4933]: I0122 07:07:09.033054 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d84517d7-8000-4674-854f-7adb800aeb94-operator-scripts\") pod \"root-account-create-update-zlqqp\" (UID: \"d84517d7-8000-4674-854f-7adb800aeb94\") " pod="openstack/root-account-create-update-zlqqp" Jan 22 07:07:09 crc kubenswrapper[4933]: I0122 07:07:09.056919 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qnw48\" (UniqueName: \"kubernetes.io/projected/d84517d7-8000-4674-854f-7adb800aeb94-kube-api-access-qnw48\") pod \"root-account-create-update-zlqqp\" (UID: \"d84517d7-8000-4674-854f-7adb800aeb94\") " pod="openstack/root-account-create-update-zlqqp" Jan 22 07:07:09 crc kubenswrapper[4933]: I0122 07:07:09.227578 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-zlqqp" Jan 22 07:07:09 crc kubenswrapper[4933]: I0122 07:07:09.703802 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-zlqqp"] Jan 22 07:07:10 crc kubenswrapper[4933]: I0122 07:07:10.388151 4933 generic.go:334] "Generic (PLEG): container finished" podID="d84517d7-8000-4674-854f-7adb800aeb94" containerID="5381aaf25fa5616528f037f82beab498d79d2050b61e79dc53b5e8b38779d6b0" exitCode=0 Jan 22 07:07:10 crc kubenswrapper[4933]: I0122 07:07:10.388238 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-zlqqp" event={"ID":"d84517d7-8000-4674-854f-7adb800aeb94","Type":"ContainerDied","Data":"5381aaf25fa5616528f037f82beab498d79d2050b61e79dc53b5e8b38779d6b0"} Jan 22 07:07:10 crc kubenswrapper[4933]: I0122 07:07:10.388482 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-zlqqp" event={"ID":"d84517d7-8000-4674-854f-7adb800aeb94","Type":"ContainerStarted","Data":"d4ad8f50ae7c54ff5957805458e60a1f454a92985c01630990a7fad5a3e8fa44"} Jan 22 07:07:11 crc kubenswrapper[4933]: I0122 07:07:11.750676 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-zlqqp" Jan 22 07:07:11 crc kubenswrapper[4933]: I0122 07:07:11.875335 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qnw48\" (UniqueName: \"kubernetes.io/projected/d84517d7-8000-4674-854f-7adb800aeb94-kube-api-access-qnw48\") pod \"d84517d7-8000-4674-854f-7adb800aeb94\" (UID: \"d84517d7-8000-4674-854f-7adb800aeb94\") " Jan 22 07:07:11 crc kubenswrapper[4933]: I0122 07:07:11.875551 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d84517d7-8000-4674-854f-7adb800aeb94-operator-scripts\") pod \"d84517d7-8000-4674-854f-7adb800aeb94\" (UID: \"d84517d7-8000-4674-854f-7adb800aeb94\") " Jan 22 07:07:11 crc kubenswrapper[4933]: I0122 07:07:11.876104 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d84517d7-8000-4674-854f-7adb800aeb94-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d84517d7-8000-4674-854f-7adb800aeb94" (UID: "d84517d7-8000-4674-854f-7adb800aeb94"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:07:11 crc kubenswrapper[4933]: I0122 07:07:11.880744 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d84517d7-8000-4674-854f-7adb800aeb94-kube-api-access-qnw48" (OuterVolumeSpecName: "kube-api-access-qnw48") pod "d84517d7-8000-4674-854f-7adb800aeb94" (UID: "d84517d7-8000-4674-854f-7adb800aeb94"). InnerVolumeSpecName "kube-api-access-qnw48". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:07:11 crc kubenswrapper[4933]: I0122 07:07:11.977475 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qnw48\" (UniqueName: \"kubernetes.io/projected/d84517d7-8000-4674-854f-7adb800aeb94-kube-api-access-qnw48\") on node \"crc\" DevicePath \"\"" Jan 22 07:07:11 crc kubenswrapper[4933]: I0122 07:07:11.977524 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d84517d7-8000-4674-854f-7adb800aeb94-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:07:12 crc kubenswrapper[4933]: I0122 07:07:12.406371 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-zlqqp" event={"ID":"d84517d7-8000-4674-854f-7adb800aeb94","Type":"ContainerDied","Data":"d4ad8f50ae7c54ff5957805458e60a1f454a92985c01630990a7fad5a3e8fa44"} Jan 22 07:07:12 crc kubenswrapper[4933]: I0122 07:07:12.406417 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d4ad8f50ae7c54ff5957805458e60a1f454a92985c01630990a7fad5a3e8fa44" Jan 22 07:07:12 crc kubenswrapper[4933]: I0122 07:07:12.406475 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-zlqqp" Jan 22 07:07:15 crc kubenswrapper[4933]: I0122 07:07:15.451041 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-zlqqp"] Jan 22 07:07:15 crc kubenswrapper[4933]: I0122 07:07:15.458455 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-zlqqp"] Jan 22 07:07:16 crc kubenswrapper[4933]: I0122 07:07:16.506626 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d84517d7-8000-4674-854f-7adb800aeb94" path="/var/lib/kubelet/pods/d84517d7-8000-4674-854f-7adb800aeb94/volumes" Jan 22 07:07:17 crc kubenswrapper[4933]: I0122 07:07:17.491291 4933 scope.go:117] "RemoveContainer" containerID="d50102539d4e07b60779f4528e926809a62f0f1c3d74b4eadbe38466d0573cb2" Jan 22 07:07:18 crc kubenswrapper[4933]: I0122 07:07:18.456818 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerStarted","Data":"95f0ca789178e77053405c316da5ad73a9ba931191b3cf740ca8ae2078616f25"} Jan 22 07:07:20 crc kubenswrapper[4933]: I0122 07:07:20.457642 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-8wnkm"] Jan 22 07:07:20 crc kubenswrapper[4933]: E0122 07:07:20.458349 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d84517d7-8000-4674-854f-7adb800aeb94" containerName="mariadb-account-create-update" Jan 22 07:07:20 crc kubenswrapper[4933]: I0122 07:07:20.458364 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="d84517d7-8000-4674-854f-7adb800aeb94" containerName="mariadb-account-create-update" Jan 22 07:07:20 crc kubenswrapper[4933]: I0122 07:07:20.458544 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="d84517d7-8000-4674-854f-7adb800aeb94" containerName="mariadb-account-create-update" Jan 22 07:07:20 crc kubenswrapper[4933]: I0122 07:07:20.459165 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-8wnkm" Jan 22 07:07:20 crc kubenswrapper[4933]: I0122 07:07:20.461400 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-mariadb-root-db-secret" Jan 22 07:07:20 crc kubenswrapper[4933]: I0122 07:07:20.464345 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-8wnkm"] Jan 22 07:07:20 crc kubenswrapper[4933]: I0122 07:07:20.630946 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f5b934c-9efe-4baa-8eb9-993e1539d601-operator-scripts\") pod \"root-account-create-update-8wnkm\" (UID: \"6f5b934c-9efe-4baa-8eb9-993e1539d601\") " pod="openstack/root-account-create-update-8wnkm" Jan 22 07:07:20 crc kubenswrapper[4933]: I0122 07:07:20.631406 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rnwlx\" (UniqueName: \"kubernetes.io/projected/6f5b934c-9efe-4baa-8eb9-993e1539d601-kube-api-access-rnwlx\") pod \"root-account-create-update-8wnkm\" (UID: \"6f5b934c-9efe-4baa-8eb9-993e1539d601\") " pod="openstack/root-account-create-update-8wnkm" Jan 22 07:07:20 crc kubenswrapper[4933]: I0122 07:07:20.732286 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f5b934c-9efe-4baa-8eb9-993e1539d601-operator-scripts\") pod \"root-account-create-update-8wnkm\" (UID: \"6f5b934c-9efe-4baa-8eb9-993e1539d601\") " pod="openstack/root-account-create-update-8wnkm" Jan 22 07:07:20 crc kubenswrapper[4933]: I0122 07:07:20.732332 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rnwlx\" (UniqueName: \"kubernetes.io/projected/6f5b934c-9efe-4baa-8eb9-993e1539d601-kube-api-access-rnwlx\") pod \"root-account-create-update-8wnkm\" (UID: \"6f5b934c-9efe-4baa-8eb9-993e1539d601\") " pod="openstack/root-account-create-update-8wnkm" Jan 22 07:07:20 crc kubenswrapper[4933]: I0122 07:07:20.733257 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f5b934c-9efe-4baa-8eb9-993e1539d601-operator-scripts\") pod \"root-account-create-update-8wnkm\" (UID: \"6f5b934c-9efe-4baa-8eb9-993e1539d601\") " pod="openstack/root-account-create-update-8wnkm" Jan 22 07:07:20 crc kubenswrapper[4933]: I0122 07:07:20.762575 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rnwlx\" (UniqueName: \"kubernetes.io/projected/6f5b934c-9efe-4baa-8eb9-993e1539d601-kube-api-access-rnwlx\") pod \"root-account-create-update-8wnkm\" (UID: \"6f5b934c-9efe-4baa-8eb9-993e1539d601\") " pod="openstack/root-account-create-update-8wnkm" Jan 22 07:07:20 crc kubenswrapper[4933]: I0122 07:07:20.774242 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-8wnkm" Jan 22 07:07:21 crc kubenswrapper[4933]: I0122 07:07:21.229960 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-8wnkm"] Jan 22 07:07:21 crc kubenswrapper[4933]: I0122 07:07:21.476907 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-8wnkm" event={"ID":"6f5b934c-9efe-4baa-8eb9-993e1539d601","Type":"ContainerStarted","Data":"e6ce589238bebb9b42d774a4092b46b24aa2d1bab9aec7ef5f3f7ebb4c1bd621"} Jan 22 07:07:21 crc kubenswrapper[4933]: I0122 07:07:21.476952 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-8wnkm" event={"ID":"6f5b934c-9efe-4baa-8eb9-993e1539d601","Type":"ContainerStarted","Data":"1508bc6aac75e98ab378319f64ca633583e5d20fd642e14d73d183d5b3299c30"} Jan 22 07:07:21 crc kubenswrapper[4933]: I0122 07:07:21.497850 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/root-account-create-update-8wnkm" podStartSLOduration=1.497829718 podStartE2EDuration="1.497829718s" podCreationTimestamp="2026-01-22 07:07:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:07:21.492484348 +0000 UTC m=+4889.329609721" watchObservedRunningTime="2026-01-22 07:07:21.497829718 +0000 UTC m=+4889.334955071" Jan 22 07:07:22 crc kubenswrapper[4933]: I0122 07:07:22.484093 4933 generic.go:334] "Generic (PLEG): container finished" podID="6f5b934c-9efe-4baa-8eb9-993e1539d601" containerID="e6ce589238bebb9b42d774a4092b46b24aa2d1bab9aec7ef5f3f7ebb4c1bd621" exitCode=0 Jan 22 07:07:22 crc kubenswrapper[4933]: I0122 07:07:22.484161 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-8wnkm" event={"ID":"6f5b934c-9efe-4baa-8eb9-993e1539d601","Type":"ContainerDied","Data":"e6ce589238bebb9b42d774a4092b46b24aa2d1bab9aec7ef5f3f7ebb4c1bd621"} Jan 22 07:07:23 crc kubenswrapper[4933]: I0122 07:07:23.492622 4933 generic.go:334] "Generic (PLEG): container finished" podID="e6f8ecee-ccdc-44d8-a781-2d8e8cb16806" containerID="6bcb2eb32e3438f433faa55236d648ae8199e7f276cc4eb1a18b21f08a68455b" exitCode=0 Jan 22 07:07:23 crc kubenswrapper[4933]: I0122 07:07:23.492762 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806","Type":"ContainerDied","Data":"6bcb2eb32e3438f433faa55236d648ae8199e7f276cc4eb1a18b21f08a68455b"} Jan 22 07:07:23 crc kubenswrapper[4933]: I0122 07:07:23.494106 4933 generic.go:334] "Generic (PLEG): container finished" podID="5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28" containerID="1429dd7777095f7bbc1180ea270b0ac7d5de196f925d714efce8f9139d4829f1" exitCode=0 Jan 22 07:07:23 crc kubenswrapper[4933]: I0122 07:07:23.494129 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28","Type":"ContainerDied","Data":"1429dd7777095f7bbc1180ea270b0ac7d5de196f925d714efce8f9139d4829f1"} Jan 22 07:07:23 crc kubenswrapper[4933]: I0122 07:07:23.778437 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-8wnkm" Jan 22 07:07:23 crc kubenswrapper[4933]: I0122 07:07:23.879038 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f5b934c-9efe-4baa-8eb9-993e1539d601-operator-scripts\") pod \"6f5b934c-9efe-4baa-8eb9-993e1539d601\" (UID: \"6f5b934c-9efe-4baa-8eb9-993e1539d601\") " Jan 22 07:07:23 crc kubenswrapper[4933]: I0122 07:07:23.879146 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnwlx\" (UniqueName: \"kubernetes.io/projected/6f5b934c-9efe-4baa-8eb9-993e1539d601-kube-api-access-rnwlx\") pod \"6f5b934c-9efe-4baa-8eb9-993e1539d601\" (UID: \"6f5b934c-9efe-4baa-8eb9-993e1539d601\") " Jan 22 07:07:23 crc kubenswrapper[4933]: I0122 07:07:23.879584 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6f5b934c-9efe-4baa-8eb9-993e1539d601-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6f5b934c-9efe-4baa-8eb9-993e1539d601" (UID: "6f5b934c-9efe-4baa-8eb9-993e1539d601"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:07:23 crc kubenswrapper[4933]: I0122 07:07:23.882860 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f5b934c-9efe-4baa-8eb9-993e1539d601-kube-api-access-rnwlx" (OuterVolumeSpecName: "kube-api-access-rnwlx") pod "6f5b934c-9efe-4baa-8eb9-993e1539d601" (UID: "6f5b934c-9efe-4baa-8eb9-993e1539d601"). InnerVolumeSpecName "kube-api-access-rnwlx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:07:23 crc kubenswrapper[4933]: I0122 07:07:23.981244 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f5b934c-9efe-4baa-8eb9-993e1539d601-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:07:23 crc kubenswrapper[4933]: I0122 07:07:23.981278 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnwlx\" (UniqueName: \"kubernetes.io/projected/6f5b934c-9efe-4baa-8eb9-993e1539d601-kube-api-access-rnwlx\") on node \"crc\" DevicePath \"\"" Jan 22 07:07:24 crc kubenswrapper[4933]: I0122 07:07:24.512702 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-8wnkm" Jan 22 07:07:24 crc kubenswrapper[4933]: I0122 07:07:24.518823 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-8wnkm" event={"ID":"6f5b934c-9efe-4baa-8eb9-993e1539d601","Type":"ContainerDied","Data":"1508bc6aac75e98ab378319f64ca633583e5d20fd642e14d73d183d5b3299c30"} Jan 22 07:07:24 crc kubenswrapper[4933]: I0122 07:07:24.518913 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1508bc6aac75e98ab378319f64ca633583e5d20fd642e14d73d183d5b3299c30" Jan 22 07:07:24 crc kubenswrapper[4933]: I0122 07:07:24.538907 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28","Type":"ContainerStarted","Data":"a74a8d6129db9e83ba8ae182876ac5c545055b8271f7ddce2db6f0b1fa8883d8"} Jan 22 07:07:24 crc kubenswrapper[4933]: I0122 07:07:24.540543 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:07:24 crc kubenswrapper[4933]: I0122 07:07:24.548896 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806","Type":"ContainerStarted","Data":"bfe85a79e7c4ffb3129fa161e7232b3c7fa180a757708baa4fa72f7ccce387af"} Jan 22 07:07:24 crc kubenswrapper[4933]: I0122 07:07:24.549790 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Jan 22 07:07:24 crc kubenswrapper[4933]: I0122 07:07:24.574728 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=36.574704835 podStartE2EDuration="36.574704835s" podCreationTimestamp="2026-01-22 07:06:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:07:24.571777844 +0000 UTC m=+4892.408903217" watchObservedRunningTime="2026-01-22 07:07:24.574704835 +0000 UTC m=+4892.411830188" Jan 22 07:07:38 crc kubenswrapper[4933]: I0122 07:07:38.907308 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Jan 22 07:07:38 crc kubenswrapper[4933]: I0122 07:07:38.945875 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=51.9458569 podStartE2EDuration="51.9458569s" podCreationTimestamp="2026-01-22 07:06:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:07:24.600540663 +0000 UTC m=+4892.437666026" watchObservedRunningTime="2026-01-22 07:07:38.9458569 +0000 UTC m=+4906.782982243" Jan 22 07:07:39 crc kubenswrapper[4933]: I0122 07:07:39.699311 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:07:43 crc kubenswrapper[4933]: I0122 07:07:43.917842 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-699964fbc-sf4ct"] Jan 22 07:07:43 crc kubenswrapper[4933]: E0122 07:07:43.918623 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f5b934c-9efe-4baa-8eb9-993e1539d601" containerName="mariadb-account-create-update" Jan 22 07:07:43 crc kubenswrapper[4933]: I0122 07:07:43.918636 4933 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="6f5b934c-9efe-4baa-8eb9-993e1539d601" containerName="mariadb-account-create-update" Jan 22 07:07:43 crc kubenswrapper[4933]: I0122 07:07:43.918782 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f5b934c-9efe-4baa-8eb9-993e1539d601" containerName="mariadb-account-create-update" Jan 22 07:07:43 crc kubenswrapper[4933]: I0122 07:07:43.919485 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-699964fbc-sf4ct" Jan 22 07:07:43 crc kubenswrapper[4933]: I0122 07:07:43.930322 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-699964fbc-sf4ct"] Jan 22 07:07:43 crc kubenswrapper[4933]: I0122 07:07:43.990640 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n2kfs\" (UniqueName: \"kubernetes.io/projected/31f1a4e0-2ca2-41ee-8c42-48a7144e1e58-kube-api-access-n2kfs\") pod \"dnsmasq-dns-699964fbc-sf4ct\" (UID: \"31f1a4e0-2ca2-41ee-8c42-48a7144e1e58\") " pod="openstack/dnsmasq-dns-699964fbc-sf4ct" Jan 22 07:07:43 crc kubenswrapper[4933]: I0122 07:07:43.990697 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/31f1a4e0-2ca2-41ee-8c42-48a7144e1e58-config\") pod \"dnsmasq-dns-699964fbc-sf4ct\" (UID: \"31f1a4e0-2ca2-41ee-8c42-48a7144e1e58\") " pod="openstack/dnsmasq-dns-699964fbc-sf4ct" Jan 22 07:07:43 crc kubenswrapper[4933]: I0122 07:07:43.990773 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/31f1a4e0-2ca2-41ee-8c42-48a7144e1e58-dns-svc\") pod \"dnsmasq-dns-699964fbc-sf4ct\" (UID: \"31f1a4e0-2ca2-41ee-8c42-48a7144e1e58\") " pod="openstack/dnsmasq-dns-699964fbc-sf4ct" Jan 22 07:07:44 crc kubenswrapper[4933]: I0122 07:07:44.092362 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/31f1a4e0-2ca2-41ee-8c42-48a7144e1e58-dns-svc\") pod \"dnsmasq-dns-699964fbc-sf4ct\" (UID: \"31f1a4e0-2ca2-41ee-8c42-48a7144e1e58\") " pod="openstack/dnsmasq-dns-699964fbc-sf4ct" Jan 22 07:07:44 crc kubenswrapper[4933]: I0122 07:07:44.092465 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n2kfs\" (UniqueName: \"kubernetes.io/projected/31f1a4e0-2ca2-41ee-8c42-48a7144e1e58-kube-api-access-n2kfs\") pod \"dnsmasq-dns-699964fbc-sf4ct\" (UID: \"31f1a4e0-2ca2-41ee-8c42-48a7144e1e58\") " pod="openstack/dnsmasq-dns-699964fbc-sf4ct" Jan 22 07:07:44 crc kubenswrapper[4933]: I0122 07:07:44.092497 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/31f1a4e0-2ca2-41ee-8c42-48a7144e1e58-config\") pod \"dnsmasq-dns-699964fbc-sf4ct\" (UID: \"31f1a4e0-2ca2-41ee-8c42-48a7144e1e58\") " pod="openstack/dnsmasq-dns-699964fbc-sf4ct" Jan 22 07:07:44 crc kubenswrapper[4933]: I0122 07:07:44.093357 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/31f1a4e0-2ca2-41ee-8c42-48a7144e1e58-config\") pod \"dnsmasq-dns-699964fbc-sf4ct\" (UID: \"31f1a4e0-2ca2-41ee-8c42-48a7144e1e58\") " pod="openstack/dnsmasq-dns-699964fbc-sf4ct" Jan 22 07:07:44 crc kubenswrapper[4933]: I0122 07:07:44.093391 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/31f1a4e0-2ca2-41ee-8c42-48a7144e1e58-dns-svc\") pod \"dnsmasq-dns-699964fbc-sf4ct\" (UID: \"31f1a4e0-2ca2-41ee-8c42-48a7144e1e58\") " pod="openstack/dnsmasq-dns-699964fbc-sf4ct" Jan 22 07:07:44 crc kubenswrapper[4933]: I0122 07:07:44.111305 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n2kfs\" (UniqueName: \"kubernetes.io/projected/31f1a4e0-2ca2-41ee-8c42-48a7144e1e58-kube-api-access-n2kfs\") pod \"dnsmasq-dns-699964fbc-sf4ct\" (UID: \"31f1a4e0-2ca2-41ee-8c42-48a7144e1e58\") " pod="openstack/dnsmasq-dns-699964fbc-sf4ct" Jan 22 07:07:44 crc kubenswrapper[4933]: I0122 07:07:44.261748 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-699964fbc-sf4ct" Jan 22 07:07:44 crc kubenswrapper[4933]: I0122 07:07:44.698779 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-699964fbc-sf4ct"] Jan 22 07:07:44 crc kubenswrapper[4933]: W0122 07:07:44.711829 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod31f1a4e0_2ca2_41ee_8c42_48a7144e1e58.slice/crio-0c028689b1970118dc2cd2530070d8f8d79d501216231c20d5f1d8424f7fbbda WatchSource:0}: Error finding container 0c028689b1970118dc2cd2530070d8f8d79d501216231c20d5f1d8424f7fbbda: Status 404 returned error can't find the container with id 0c028689b1970118dc2cd2530070d8f8d79d501216231c20d5f1d8424f7fbbda Jan 22 07:07:44 crc kubenswrapper[4933]: I0122 07:07:44.764980 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 22 07:07:45 crc kubenswrapper[4933]: I0122 07:07:45.452000 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 22 07:07:45 crc kubenswrapper[4933]: I0122 07:07:45.694971 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-699964fbc-sf4ct" event={"ID":"31f1a4e0-2ca2-41ee-8c42-48a7144e1e58","Type":"ContainerStarted","Data":"0c028689b1970118dc2cd2530070d8f8d79d501216231c20d5f1d8424f7fbbda"} Jan 22 07:07:46 crc kubenswrapper[4933]: I0122 07:07:46.701725 4933 generic.go:334] "Generic (PLEG): container finished" podID="31f1a4e0-2ca2-41ee-8c42-48a7144e1e58" containerID="6b8a00340b9a4cd0e1a3542db801f746b4ae971d4e66511d861615eaff3b4ea5" exitCode=0 Jan 22 07:07:46 crc kubenswrapper[4933]: I0122 07:07:46.702074 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-699964fbc-sf4ct" event={"ID":"31f1a4e0-2ca2-41ee-8c42-48a7144e1e58","Type":"ContainerDied","Data":"6b8a00340b9a4cd0e1a3542db801f746b4ae971d4e66511d861615eaff3b4ea5"} Jan 22 07:07:47 crc kubenswrapper[4933]: I0122 07:07:47.718600 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-699964fbc-sf4ct" event={"ID":"31f1a4e0-2ca2-41ee-8c42-48a7144e1e58","Type":"ContainerStarted","Data":"1c718a4131b53ab8a1b84949a51ecaa237e7f4c4fba0d82248229b7b4a686274"} Jan 22 07:07:47 crc kubenswrapper[4933]: I0122 07:07:47.719986 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-699964fbc-sf4ct" Jan 22 07:07:47 crc kubenswrapper[4933]: I0122 07:07:47.739016 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-699964fbc-sf4ct" podStartSLOduration=4.73899394 podStartE2EDuration="4.73899394s" podCreationTimestamp="2026-01-22 07:07:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 
00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:07:47.735345342 +0000 UTC m=+4915.572470695" watchObservedRunningTime="2026-01-22 07:07:47.73899394 +0000 UTC m=+4915.576119293" Jan 22 07:07:49 crc kubenswrapper[4933]: I0122 07:07:49.000333 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="e6f8ecee-ccdc-44d8-a781-2d8e8cb16806" containerName="rabbitmq" containerID="cri-o://bfe85a79e7c4ffb3129fa161e7232b3c7fa180a757708baa4fa72f7ccce387af" gracePeriod=604796 Jan 22 07:07:49 crc kubenswrapper[4933]: I0122 07:07:49.874118 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28" containerName="rabbitmq" containerID="cri-o://a74a8d6129db9e83ba8ae182876ac5c545055b8271f7ddce2db6f0b1fa8883d8" gracePeriod=604796 Jan 22 07:07:54 crc kubenswrapper[4933]: I0122 07:07:54.264265 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-699964fbc-sf4ct" Jan 22 07:07:54 crc kubenswrapper[4933]: I0122 07:07:54.320833 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5d79f765b5-2fl57"] Jan 22 07:07:54 crc kubenswrapper[4933]: I0122 07:07:54.321455 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5d79f765b5-2fl57" podUID="5235dda8-909b-415c-b226-b0376d439555" containerName="dnsmasq-dns" containerID="cri-o://fe82a5213f67e01e95def3bb6d552cc7cd565448c238f9cdacc3041d48404c95" gracePeriod=10 Jan 22 07:07:54 crc kubenswrapper[4933]: I0122 07:07:54.769889 4933 generic.go:334] "Generic (PLEG): container finished" podID="5235dda8-909b-415c-b226-b0376d439555" containerID="fe82a5213f67e01e95def3bb6d552cc7cd565448c238f9cdacc3041d48404c95" exitCode=0 Jan 22 07:07:54 crc kubenswrapper[4933]: I0122 07:07:54.769940 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d79f765b5-2fl57" event={"ID":"5235dda8-909b-415c-b226-b0376d439555","Type":"ContainerDied","Data":"fe82a5213f67e01e95def3bb6d552cc7cd565448c238f9cdacc3041d48404c95"} Jan 22 07:07:54 crc kubenswrapper[4933]: I0122 07:07:54.769968 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d79f765b5-2fl57" event={"ID":"5235dda8-909b-415c-b226-b0376d439555","Type":"ContainerDied","Data":"b8eb7395daabe677dab67cbc945f1e59763fba22c854a63e0487ddd72bd0b683"} Jan 22 07:07:54 crc kubenswrapper[4933]: I0122 07:07:54.769981 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b8eb7395daabe677dab67cbc945f1e59763fba22c854a63e0487ddd72bd0b683" Jan 22 07:07:54 crc kubenswrapper[4933]: I0122 07:07:54.782939 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5d79f765b5-2fl57" Jan 22 07:07:54 crc kubenswrapper[4933]: I0122 07:07:54.865726 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nqwqq\" (UniqueName: \"kubernetes.io/projected/5235dda8-909b-415c-b226-b0376d439555-kube-api-access-nqwqq\") pod \"5235dda8-909b-415c-b226-b0376d439555\" (UID: \"5235dda8-909b-415c-b226-b0376d439555\") " Jan 22 07:07:54 crc kubenswrapper[4933]: I0122 07:07:54.865793 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5235dda8-909b-415c-b226-b0376d439555-dns-svc\") pod \"5235dda8-909b-415c-b226-b0376d439555\" (UID: \"5235dda8-909b-415c-b226-b0376d439555\") " Jan 22 07:07:54 crc kubenswrapper[4933]: I0122 07:07:54.865847 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5235dda8-909b-415c-b226-b0376d439555-config\") pod \"5235dda8-909b-415c-b226-b0376d439555\" (UID: \"5235dda8-909b-415c-b226-b0376d439555\") " Jan 22 07:07:54 crc kubenswrapper[4933]: I0122 07:07:54.870764 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5235dda8-909b-415c-b226-b0376d439555-kube-api-access-nqwqq" (OuterVolumeSpecName: "kube-api-access-nqwqq") pod "5235dda8-909b-415c-b226-b0376d439555" (UID: "5235dda8-909b-415c-b226-b0376d439555"). InnerVolumeSpecName "kube-api-access-nqwqq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:07:54 crc kubenswrapper[4933]: I0122 07:07:54.896821 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5235dda8-909b-415c-b226-b0376d439555-config" (OuterVolumeSpecName: "config") pod "5235dda8-909b-415c-b226-b0376d439555" (UID: "5235dda8-909b-415c-b226-b0376d439555"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:07:54 crc kubenswrapper[4933]: I0122 07:07:54.896904 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5235dda8-909b-415c-b226-b0376d439555-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "5235dda8-909b-415c-b226-b0376d439555" (UID: "5235dda8-909b-415c-b226-b0376d439555"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:07:54 crc kubenswrapper[4933]: I0122 07:07:54.967021 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5235dda8-909b-415c-b226-b0376d439555-config\") on node \"crc\" DevicePath \"\"" Jan 22 07:07:54 crc kubenswrapper[4933]: I0122 07:07:54.967052 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nqwqq\" (UniqueName: \"kubernetes.io/projected/5235dda8-909b-415c-b226-b0376d439555-kube-api-access-nqwqq\") on node \"crc\" DevicePath \"\"" Jan 22 07:07:54 crc kubenswrapper[4933]: I0122 07:07:54.967063 4933 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5235dda8-909b-415c-b226-b0376d439555-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 07:07:55 crc kubenswrapper[4933]: I0122 07:07:55.779064 4933 generic.go:334] "Generic (PLEG): container finished" podID="e6f8ecee-ccdc-44d8-a781-2d8e8cb16806" containerID="bfe85a79e7c4ffb3129fa161e7232b3c7fa180a757708baa4fa72f7ccce387af" exitCode=0 Jan 22 07:07:55 crc kubenswrapper[4933]: I0122 07:07:55.779117 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806","Type":"ContainerDied","Data":"bfe85a79e7c4ffb3129fa161e7232b3c7fa180a757708baa4fa72f7ccce387af"} Jan 22 07:07:55 crc kubenswrapper[4933]: I0122 07:07:55.779461 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d79f765b5-2fl57" Jan 22 07:07:55 crc kubenswrapper[4933]: I0122 07:07:55.815129 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5d79f765b5-2fl57"] Jan 22 07:07:55 crc kubenswrapper[4933]: I0122 07:07:55.820111 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5d79f765b5-2fl57"] Jan 22 07:07:55 crc kubenswrapper[4933]: E0122 07:07:55.827555 4933 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5235dda8_909b_415c_b226_b0376d439555.slice\": RecentStats: unable to find data in memory cache]" Jan 22 07:07:55 crc kubenswrapper[4933]: I0122 07:07:55.884272 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 22 07:07:55 crc kubenswrapper[4933]: I0122 07:07:55.981720 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-25d25dfd-e27f-4f7a-bc88-ad1a44590293\") pod \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " Jan 22 07:07:55 crc kubenswrapper[4933]: I0122 07:07:55.982049 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-rabbitmq-plugins\") pod \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " Jan 22 07:07:55 crc kubenswrapper[4933]: I0122 07:07:55.982103 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-rabbitmq-confd\") pod \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " Jan 22 07:07:55 crc kubenswrapper[4933]: I0122 07:07:55.982139 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-erlang-cookie-secret\") pod \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " Jan 22 07:07:55 crc kubenswrapper[4933]: I0122 07:07:55.982191 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8n9xj\" (UniqueName: \"kubernetes.io/projected/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-kube-api-access-8n9xj\") pod \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " Jan 22 07:07:55 crc kubenswrapper[4933]: I0122 07:07:55.982209 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-config-data\") pod \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " Jan 22 07:07:55 crc kubenswrapper[4933]: I0122 07:07:55.982231 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-plugins-conf\") pod \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " Jan 22 07:07:55 crc kubenswrapper[4933]: I0122 07:07:55.982254 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-rabbitmq-tls\") pod \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " Jan 22 07:07:55 crc kubenswrapper[4933]: I0122 07:07:55.982304 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-pod-info\") pod \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " Jan 22 07:07:55 crc kubenswrapper[4933]: I0122 07:07:55.982357 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-rabbitmq-erlang-cookie\") pod 
\"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " Jan 22 07:07:55 crc kubenswrapper[4933]: I0122 07:07:55.982388 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-server-conf\") pod \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " Jan 22 07:07:55 crc kubenswrapper[4933]: I0122 07:07:55.982984 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "e6f8ecee-ccdc-44d8-a781-2d8e8cb16806" (UID: "e6f8ecee-ccdc-44d8-a781-2d8e8cb16806"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:07:55 crc kubenswrapper[4933]: I0122 07:07:55.983484 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "e6f8ecee-ccdc-44d8-a781-2d8e8cb16806" (UID: "e6f8ecee-ccdc-44d8-a781-2d8e8cb16806"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:07:55 crc kubenswrapper[4933]: I0122 07:07:55.983536 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "e6f8ecee-ccdc-44d8-a781-2d8e8cb16806" (UID: "e6f8ecee-ccdc-44d8-a781-2d8e8cb16806"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:07:55 crc kubenswrapper[4933]: I0122 07:07:55.987573 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "e6f8ecee-ccdc-44d8-a781-2d8e8cb16806" (UID: "e6f8ecee-ccdc-44d8-a781-2d8e8cb16806"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:07:55 crc kubenswrapper[4933]: I0122 07:07:55.987930 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-kube-api-access-8n9xj" (OuterVolumeSpecName: "kube-api-access-8n9xj") pod "e6f8ecee-ccdc-44d8-a781-2d8e8cb16806" (UID: "e6f8ecee-ccdc-44d8-a781-2d8e8cb16806"). InnerVolumeSpecName "kube-api-access-8n9xj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:07:55 crc kubenswrapper[4933]: I0122 07:07:55.987989 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "e6f8ecee-ccdc-44d8-a781-2d8e8cb16806" (UID: "e6f8ecee-ccdc-44d8-a781-2d8e8cb16806"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:07:55 crc kubenswrapper[4933]: I0122 07:07:55.996252 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-pod-info" (OuterVolumeSpecName: "pod-info") pod "e6f8ecee-ccdc-44d8-a781-2d8e8cb16806" (UID: "e6f8ecee-ccdc-44d8-a781-2d8e8cb16806"). InnerVolumeSpecName "pod-info". 
PluginName "kubernetes.io/downward-api", VolumeGidValue "" Jan 22 07:07:56 crc kubenswrapper[4933]: E0122 07:07:56.003621 4933 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-25d25dfd-e27f-4f7a-bc88-ad1a44590293 podName:e6f8ecee-ccdc-44d8-a781-2d8e8cb16806 nodeName:}" failed. No retries permitted until 2026-01-22 07:07:56.503599798 +0000 UTC m=+4924.340725151 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "persistence" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-25d25dfd-e27f-4f7a-bc88-ad1a44590293") pod "e6f8ecee-ccdc-44d8-a781-2d8e8cb16806" (UID: "e6f8ecee-ccdc-44d8-a781-2d8e8cb16806") : kubernetes.io/csi: Unmounter.TearDownAt failed: rpc error: code = Unknown desc = check target path: could not get consistent content of /proc/mounts after 3 attempts Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.003954 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-config-data" (OuterVolumeSpecName: "config-data") pod "e6f8ecee-ccdc-44d8-a781-2d8e8cb16806" (UID: "e6f8ecee-ccdc-44d8-a781-2d8e8cb16806"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.022680 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-server-conf" (OuterVolumeSpecName: "server-conf") pod "e6f8ecee-ccdc-44d8-a781-2d8e8cb16806" (UID: "e6f8ecee-ccdc-44d8-a781-2d8e8cb16806"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.054293 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "e6f8ecee-ccdc-44d8-a781-2d8e8cb16806" (UID: "e6f8ecee-ccdc-44d8-a781-2d8e8cb16806"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.084366 4933 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.084405 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8n9xj\" (UniqueName: \"kubernetes.io/projected/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-kube-api-access-8n9xj\") on node \"crc\" DevicePath \"\"" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.084418 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.084430 4933 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-plugins-conf\") on node \"crc\" DevicePath \"\"" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.084442 4933 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.084453 4933 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-pod-info\") on node \"crc\" DevicePath \"\"" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.084465 4933 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.084477 4933 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-server-conf\") on node \"crc\" DevicePath \"\"" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.084488 4933 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.084499 4933 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.449168 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.489679 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-pod-info\") pod \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.489875 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-server-conf\") pod \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.489909 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-rabbitmq-plugins\") pod \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.489975 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-rabbitmq-tls\") pod \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.490020 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-erlang-cookie-secret\") pod \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.490045 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-config-data\") pod \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.490068 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-rabbitmq-confd\") pod \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.490190 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5659b60f-f93b-4d5a-a69b-b389e1d2b161\") pod \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.490266 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-plugins-conf\") pod \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.490300 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hk7qq\" (UniqueName: \"kubernetes.io/projected/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-kube-api-access-hk7qq\") pod 
\"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.490331 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-rabbitmq-erlang-cookie\") pod \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\" (UID: \"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28\") " Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.491102 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28" (UID: "5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.492589 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28" (UID: "5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.494945 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28" (UID: "5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.500197 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-pod-info" (OuterVolumeSpecName: "pod-info") pod "5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28" (UID: "5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.503391 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28" (UID: "5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.503580 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28" (UID: "5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.506484 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-kube-api-access-hk7qq" (OuterVolumeSpecName: "kube-api-access-hk7qq") pod "5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28" (UID: "5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28"). InnerVolumeSpecName "kube-api-access-hk7qq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.508301 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5235dda8-909b-415c-b226-b0376d439555" path="/var/lib/kubelet/pods/5235dda8-909b-415c-b226-b0376d439555/volumes" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.528940 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5659b60f-f93b-4d5a-a69b-b389e1d2b161" (OuterVolumeSpecName: "persistence") pod "5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28" (UID: "5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28"). InnerVolumeSpecName "pvc-5659b60f-f93b-4d5a-a69b-b389e1d2b161". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.538281 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-config-data" (OuterVolumeSpecName: "config-data") pod "5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28" (UID: "5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.558067 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-server-conf" (OuterVolumeSpecName: "server-conf") pod "5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28" (UID: "5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.588518 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28" (UID: "5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.592071 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-25d25dfd-e27f-4f7a-bc88-ad1a44590293\") pod \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\" (UID: \"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806\") " Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.592432 4933 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-server-conf\") on node \"crc\" DevicePath \"\"" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.592450 4933 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.592462 4933 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.592471 4933 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.592481 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.592489 4933 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.592513 4933 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-5659b60f-f93b-4d5a-a69b-b389e1d2b161\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5659b60f-f93b-4d5a-a69b-b389e1d2b161\") on node \"crc\" " Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.592522 4933 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-plugins-conf\") on node \"crc\" DevicePath \"\"" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.592533 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hk7qq\" (UniqueName: \"kubernetes.io/projected/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-kube-api-access-hk7qq\") on node \"crc\" DevicePath \"\"" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.592544 4933 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.592552 4933 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28-pod-info\") on node \"crc\" DevicePath \"\"" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.603330 4933 operation_generator.go:803] UnmountVolume.TearDown 
succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-25d25dfd-e27f-4f7a-bc88-ad1a44590293" (OuterVolumeSpecName: "persistence") pod "e6f8ecee-ccdc-44d8-a781-2d8e8cb16806" (UID: "e6f8ecee-ccdc-44d8-a781-2d8e8cb16806"). InnerVolumeSpecName "pvc-25d25dfd-e27f-4f7a-bc88-ad1a44590293". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.614504 4933 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.614832 4933 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-5659b60f-f93b-4d5a-a69b-b389e1d2b161" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5659b60f-f93b-4d5a-a69b-b389e1d2b161") on node "crc" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.693788 4933 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-25d25dfd-e27f-4f7a-bc88-ad1a44590293\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-25d25dfd-e27f-4f7a-bc88-ad1a44590293\") on node \"crc\" " Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.693829 4933 reconciler_common.go:293] "Volume detached for volume \"pvc-5659b60f-f93b-4d5a-a69b-b389e1d2b161\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5659b60f-f93b-4d5a-a69b-b389e1d2b161\") on node \"crc\" DevicePath \"\"" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.707785 4933 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.707935 4933 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-25d25dfd-e27f-4f7a-bc88-ad1a44590293" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-25d25dfd-e27f-4f7a-bc88-ad1a44590293") on node "crc" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.788645 4933 generic.go:334] "Generic (PLEG): container finished" podID="5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28" containerID="a74a8d6129db9e83ba8ae182876ac5c545055b8271f7ddce2db6f0b1fa8883d8" exitCode=0 Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.788731 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.788734 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28","Type":"ContainerDied","Data":"a74a8d6129db9e83ba8ae182876ac5c545055b8271f7ddce2db6f0b1fa8883d8"} Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.788854 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28","Type":"ContainerDied","Data":"98c7ae00f5d21e73b35646042184c5ebb1fa29996e763be15042ea933e314a24"} Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.788875 4933 scope.go:117] "RemoveContainer" containerID="a74a8d6129db9e83ba8ae182876ac5c545055b8271f7ddce2db6f0b1fa8883d8" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.791903 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"e6f8ecee-ccdc-44d8-a781-2d8e8cb16806","Type":"ContainerDied","Data":"371542dfb79fc90dee567a599c8bbc2d8e943c92c2986e3818927494c7be78a3"} Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.792139 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.795062 4933 reconciler_common.go:293] "Volume detached for volume \"pvc-25d25dfd-e27f-4f7a-bc88-ad1a44590293\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-25d25dfd-e27f-4f7a-bc88-ad1a44590293\") on node \"crc\" DevicePath \"\"" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.820618 4933 scope.go:117] "RemoveContainer" containerID="1429dd7777095f7bbc1180ea270b0ac7d5de196f925d714efce8f9139d4829f1" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.820616 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.839336 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.842826 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.849054 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.860840 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 22 07:07:56 crc kubenswrapper[4933]: E0122 07:07:56.861256 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5235dda8-909b-415c-b226-b0376d439555" containerName="dnsmasq-dns" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.861308 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="5235dda8-909b-415c-b226-b0376d439555" containerName="dnsmasq-dns" Jan 22 07:07:56 crc kubenswrapper[4933]: E0122 07:07:56.861349 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6f8ecee-ccdc-44d8-a781-2d8e8cb16806" containerName="setup-container" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.861358 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6f8ecee-ccdc-44d8-a781-2d8e8cb16806" containerName="setup-container" Jan 22 07:07:56 crc kubenswrapper[4933]: E0122 07:07:56.861369 4933 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28" containerName="rabbitmq" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.861377 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28" containerName="rabbitmq" Jan 22 07:07:56 crc kubenswrapper[4933]: E0122 07:07:56.861389 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5235dda8-909b-415c-b226-b0376d439555" containerName="init" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.861396 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="5235dda8-909b-415c-b226-b0376d439555" containerName="init" Jan 22 07:07:56 crc kubenswrapper[4933]: E0122 07:07:56.861410 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28" containerName="setup-container" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.861418 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28" containerName="setup-container" Jan 22 07:07:56 crc kubenswrapper[4933]: E0122 07:07:56.861429 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6f8ecee-ccdc-44d8-a781-2d8e8cb16806" containerName="rabbitmq" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.861436 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6f8ecee-ccdc-44d8-a781-2d8e8cb16806" containerName="rabbitmq" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.861645 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28" containerName="rabbitmq" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.861662 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="5235dda8-909b-415c-b226-b0376d439555" containerName="dnsmasq-dns" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.861672 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6f8ecee-ccdc-44d8-a781-2d8e8cb16806" containerName="rabbitmq" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.862698 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.867695 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.873134 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.873472 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-8tlnl" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.873536 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.873650 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.873214 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.873254 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.873311 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.880311 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.881520 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.881810 4933 scope.go:117] "RemoveContainer" containerID="a74a8d6129db9e83ba8ae182876ac5c545055b8271f7ddce2db6f0b1fa8883d8" Jan 22 07:07:56 crc kubenswrapper[4933]: E0122 07:07:56.882758 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a74a8d6129db9e83ba8ae182876ac5c545055b8271f7ddce2db6f0b1fa8883d8\": container with ID starting with a74a8d6129db9e83ba8ae182876ac5c545055b8271f7ddce2db6f0b1fa8883d8 not found: ID does not exist" containerID="a74a8d6129db9e83ba8ae182876ac5c545055b8271f7ddce2db6f0b1fa8883d8" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.882817 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a74a8d6129db9e83ba8ae182876ac5c545055b8271f7ddce2db6f0b1fa8883d8"} err="failed to get container status \"a74a8d6129db9e83ba8ae182876ac5c545055b8271f7ddce2db6f0b1fa8883d8\": rpc error: code = NotFound desc = could not find container \"a74a8d6129db9e83ba8ae182876ac5c545055b8271f7ddce2db6f0b1fa8883d8\": container with ID starting with a74a8d6129db9e83ba8ae182876ac5c545055b8271f7ddce2db6f0b1fa8883d8 not found: ID does not exist" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.882842 4933 scope.go:117] "RemoveContainer" containerID="1429dd7777095f7bbc1180ea270b0ac7d5de196f925d714efce8f9139d4829f1" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.883388 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.883695 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Jan 22 07:07:56 crc 
kubenswrapper[4933]: E0122 07:07:56.883858 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1429dd7777095f7bbc1180ea270b0ac7d5de196f925d714efce8f9139d4829f1\": container with ID starting with 1429dd7777095f7bbc1180ea270b0ac7d5de196f925d714efce8f9139d4829f1 not found: ID does not exist" containerID="1429dd7777095f7bbc1180ea270b0ac7d5de196f925d714efce8f9139d4829f1" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.883917 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1429dd7777095f7bbc1180ea270b0ac7d5de196f925d714efce8f9139d4829f1"} err="failed to get container status \"1429dd7777095f7bbc1180ea270b0ac7d5de196f925d714efce8f9139d4829f1\": rpc error: code = NotFound desc = could not find container \"1429dd7777095f7bbc1180ea270b0ac7d5de196f925d714efce8f9139d4829f1\": container with ID starting with 1429dd7777095f7bbc1180ea270b0ac7d5de196f925d714efce8f9139d4829f1 not found: ID does not exist" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.883952 4933 scope.go:117] "RemoveContainer" containerID="bfe85a79e7c4ffb3129fa161e7232b3c7fa180a757708baa4fa72f7ccce387af" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.883887 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.883919 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.883939 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.883987 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-gk2f5" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.884291 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.896280 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/200c4924-621c-4dbc-8b31-626f46b61d15-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"200c4924-621c-4dbc-8b31-626f46b61d15\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.896337 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-5659b60f-f93b-4d5a-a69b-b389e1d2b161\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5659b60f-f93b-4d5a-a69b-b389e1d2b161\") pod \"rabbitmq-cell1-server-0\" (UID: \"200c4924-621c-4dbc-8b31-626f46b61d15\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.896359 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/200c4924-621c-4dbc-8b31-626f46b61d15-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"200c4924-621c-4dbc-8b31-626f46b61d15\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.896388 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: 
\"kubernetes.io/empty-dir/200c4924-621c-4dbc-8b31-626f46b61d15-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"200c4924-621c-4dbc-8b31-626f46b61d15\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.896427 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/200c4924-621c-4dbc-8b31-626f46b61d15-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"200c4924-621c-4dbc-8b31-626f46b61d15\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.896538 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/200c4924-621c-4dbc-8b31-626f46b61d15-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"200c4924-621c-4dbc-8b31-626f46b61d15\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.896640 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/200c4924-621c-4dbc-8b31-626f46b61d15-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"200c4924-621c-4dbc-8b31-626f46b61d15\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.896694 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/200c4924-621c-4dbc-8b31-626f46b61d15-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"200c4924-621c-4dbc-8b31-626f46b61d15\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.896733 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/200c4924-621c-4dbc-8b31-626f46b61d15-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"200c4924-621c-4dbc-8b31-626f46b61d15\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.896780 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dvrcz\" (UniqueName: \"kubernetes.io/projected/200c4924-621c-4dbc-8b31-626f46b61d15-kube-api-access-dvrcz\") pod \"rabbitmq-cell1-server-0\" (UID: \"200c4924-621c-4dbc-8b31-626f46b61d15\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.896835 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/200c4924-621c-4dbc-8b31-626f46b61d15-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"200c4924-621c-4dbc-8b31-626f46b61d15\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.920319 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.923393 4933 scope.go:117] "RemoveContainer" containerID="6bcb2eb32e3438f433faa55236d648ae8199e7f276cc4eb1a18b21f08a68455b" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.997477 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: 
\"kubernetes.io/empty-dir/200c4924-621c-4dbc-8b31-626f46b61d15-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"200c4924-621c-4dbc-8b31-626f46b61d15\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.997529 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wfrjq\" (UniqueName: \"kubernetes.io/projected/c6e1501e-9b96-439d-8f12-26df5455b9d1-kube-api-access-wfrjq\") pod \"rabbitmq-server-0\" (UID: \"c6e1501e-9b96-439d-8f12-26df5455b9d1\") " pod="openstack/rabbitmq-server-0" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.997553 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/200c4924-621c-4dbc-8b31-626f46b61d15-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"200c4924-621c-4dbc-8b31-626f46b61d15\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.997575 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c6e1501e-9b96-439d-8f12-26df5455b9d1-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"c6e1501e-9b96-439d-8f12-26df5455b9d1\") " pod="openstack/rabbitmq-server-0" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.997596 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dvrcz\" (UniqueName: \"kubernetes.io/projected/200c4924-621c-4dbc-8b31-626f46b61d15-kube-api-access-dvrcz\") pod \"rabbitmq-cell1-server-0\" (UID: \"200c4924-621c-4dbc-8b31-626f46b61d15\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.997619 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/200c4924-621c-4dbc-8b31-626f46b61d15-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"200c4924-621c-4dbc-8b31-626f46b61d15\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.997636 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c6e1501e-9b96-439d-8f12-26df5455b9d1-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"c6e1501e-9b96-439d-8f12-26df5455b9d1\") " pod="openstack/rabbitmq-server-0" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.997655 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c6e1501e-9b96-439d-8f12-26df5455b9d1-server-conf\") pod \"rabbitmq-server-0\" (UID: \"c6e1501e-9b96-439d-8f12-26df5455b9d1\") " pod="openstack/rabbitmq-server-0" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.997674 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c6e1501e-9b96-439d-8f12-26df5455b9d1-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"c6e1501e-9b96-439d-8f12-26df5455b9d1\") " pod="openstack/rabbitmq-server-0" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.997688 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: 
\"kubernetes.io/projected/c6e1501e-9b96-439d-8f12-26df5455b9d1-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"c6e1501e-9b96-439d-8f12-26df5455b9d1\") " pod="openstack/rabbitmq-server-0" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.997712 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/200c4924-621c-4dbc-8b31-626f46b61d15-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"200c4924-621c-4dbc-8b31-626f46b61d15\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.997730 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c6e1501e-9b96-439d-8f12-26df5455b9d1-pod-info\") pod \"rabbitmq-server-0\" (UID: \"c6e1501e-9b96-439d-8f12-26df5455b9d1\") " pod="openstack/rabbitmq-server-0" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.997751 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-5659b60f-f93b-4d5a-a69b-b389e1d2b161\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5659b60f-f93b-4d5a-a69b-b389e1d2b161\") pod \"rabbitmq-cell1-server-0\" (UID: \"200c4924-621c-4dbc-8b31-626f46b61d15\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.997766 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/200c4924-621c-4dbc-8b31-626f46b61d15-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"200c4924-621c-4dbc-8b31-626f46b61d15\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.997789 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/200c4924-621c-4dbc-8b31-626f46b61d15-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"200c4924-621c-4dbc-8b31-626f46b61d15\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.997817 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/200c4924-621c-4dbc-8b31-626f46b61d15-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"200c4924-621c-4dbc-8b31-626f46b61d15\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.997838 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-25d25dfd-e27f-4f7a-bc88-ad1a44590293\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-25d25dfd-e27f-4f7a-bc88-ad1a44590293\") pod \"rabbitmq-server-0\" (UID: \"c6e1501e-9b96-439d-8f12-26df5455b9d1\") " pod="openstack/rabbitmq-server-0" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.997859 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c6e1501e-9b96-439d-8f12-26df5455b9d1-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"c6e1501e-9b96-439d-8f12-26df5455b9d1\") " pod="openstack/rabbitmq-server-0" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.997874 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: 
\"kubernetes.io/secret/c6e1501e-9b96-439d-8f12-26df5455b9d1-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"c6e1501e-9b96-439d-8f12-26df5455b9d1\") " pod="openstack/rabbitmq-server-0" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.997904 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/200c4924-621c-4dbc-8b31-626f46b61d15-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"200c4924-621c-4dbc-8b31-626f46b61d15\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.997919 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c6e1501e-9b96-439d-8f12-26df5455b9d1-config-data\") pod \"rabbitmq-server-0\" (UID: \"c6e1501e-9b96-439d-8f12-26df5455b9d1\") " pod="openstack/rabbitmq-server-0" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.997942 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/200c4924-621c-4dbc-8b31-626f46b61d15-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"200c4924-621c-4dbc-8b31-626f46b61d15\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.998011 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/200c4924-621c-4dbc-8b31-626f46b61d15-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"200c4924-621c-4dbc-8b31-626f46b61d15\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.998789 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/200c4924-621c-4dbc-8b31-626f46b61d15-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"200c4924-621c-4dbc-8b31-626f46b61d15\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:07:56 crc kubenswrapper[4933]: I0122 07:07:56.998828 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/200c4924-621c-4dbc-8b31-626f46b61d15-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"200c4924-621c-4dbc-8b31-626f46b61d15\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:07:57 crc kubenswrapper[4933]: I0122 07:07:56.999089 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/200c4924-621c-4dbc-8b31-626f46b61d15-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"200c4924-621c-4dbc-8b31-626f46b61d15\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:07:57 crc kubenswrapper[4933]: I0122 07:07:57.000542 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/200c4924-621c-4dbc-8b31-626f46b61d15-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"200c4924-621c-4dbc-8b31-626f46b61d15\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:07:57 crc kubenswrapper[4933]: I0122 07:07:57.001667 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/200c4924-621c-4dbc-8b31-626f46b61d15-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"200c4924-621c-4dbc-8b31-626f46b61d15\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:07:57 
crc kubenswrapper[4933]: I0122 07:07:57.002345 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/200c4924-621c-4dbc-8b31-626f46b61d15-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"200c4924-621c-4dbc-8b31-626f46b61d15\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:07:57 crc kubenswrapper[4933]: I0122 07:07:57.002706 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/200c4924-621c-4dbc-8b31-626f46b61d15-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"200c4924-621c-4dbc-8b31-626f46b61d15\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:07:57 crc kubenswrapper[4933]: I0122 07:07:57.004547 4933 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 22 07:07:57 crc kubenswrapper[4933]: I0122 07:07:57.004587 4933 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-5659b60f-f93b-4d5a-a69b-b389e1d2b161\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5659b60f-f93b-4d5a-a69b-b389e1d2b161\") pod \"rabbitmq-cell1-server-0\" (UID: \"200c4924-621c-4dbc-8b31-626f46b61d15\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1aff8d075433c95ba6037a96dcf0081d327d095bb9ffbb2ab80a9de2848fd3e3/globalmount\"" pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:07:57 crc kubenswrapper[4933]: I0122 07:07:57.005549 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/200c4924-621c-4dbc-8b31-626f46b61d15-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"200c4924-621c-4dbc-8b31-626f46b61d15\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:07:57 crc kubenswrapper[4933]: I0122 07:07:57.014328 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dvrcz\" (UniqueName: \"kubernetes.io/projected/200c4924-621c-4dbc-8b31-626f46b61d15-kube-api-access-dvrcz\") pod \"rabbitmq-cell1-server-0\" (UID: \"200c4924-621c-4dbc-8b31-626f46b61d15\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:07:57 crc kubenswrapper[4933]: I0122 07:07:57.031403 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-5659b60f-f93b-4d5a-a69b-b389e1d2b161\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5659b60f-f93b-4d5a-a69b-b389e1d2b161\") pod \"rabbitmq-cell1-server-0\" (UID: \"200c4924-621c-4dbc-8b31-626f46b61d15\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:07:57 crc kubenswrapper[4933]: I0122 07:07:57.099270 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c6e1501e-9b96-439d-8f12-26df5455b9d1-config-data\") pod \"rabbitmq-server-0\" (UID: \"c6e1501e-9b96-439d-8f12-26df5455b9d1\") " pod="openstack/rabbitmq-server-0" Jan 22 07:07:57 crc kubenswrapper[4933]: I0122 07:07:57.099359 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wfrjq\" (UniqueName: \"kubernetes.io/projected/c6e1501e-9b96-439d-8f12-26df5455b9d1-kube-api-access-wfrjq\") pod \"rabbitmq-server-0\" (UID: \"c6e1501e-9b96-439d-8f12-26df5455b9d1\") " pod="openstack/rabbitmq-server-0" Jan 22 07:07:57 crc kubenswrapper[4933]: I0122 07:07:57.099389 4933 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c6e1501e-9b96-439d-8f12-26df5455b9d1-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"c6e1501e-9b96-439d-8f12-26df5455b9d1\") " pod="openstack/rabbitmq-server-0" Jan 22 07:07:57 crc kubenswrapper[4933]: I0122 07:07:57.099422 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c6e1501e-9b96-439d-8f12-26df5455b9d1-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"c6e1501e-9b96-439d-8f12-26df5455b9d1\") " pod="openstack/rabbitmq-server-0" Jan 22 07:07:57 crc kubenswrapper[4933]: I0122 07:07:57.099446 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c6e1501e-9b96-439d-8f12-26df5455b9d1-server-conf\") pod \"rabbitmq-server-0\" (UID: \"c6e1501e-9b96-439d-8f12-26df5455b9d1\") " pod="openstack/rabbitmq-server-0" Jan 22 07:07:57 crc kubenswrapper[4933]: I0122 07:07:57.099466 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c6e1501e-9b96-439d-8f12-26df5455b9d1-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"c6e1501e-9b96-439d-8f12-26df5455b9d1\") " pod="openstack/rabbitmq-server-0" Jan 22 07:07:57 crc kubenswrapper[4933]: I0122 07:07:57.099485 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c6e1501e-9b96-439d-8f12-26df5455b9d1-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"c6e1501e-9b96-439d-8f12-26df5455b9d1\") " pod="openstack/rabbitmq-server-0" Jan 22 07:07:57 crc kubenswrapper[4933]: I0122 07:07:57.099515 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c6e1501e-9b96-439d-8f12-26df5455b9d1-pod-info\") pod \"rabbitmq-server-0\" (UID: \"c6e1501e-9b96-439d-8f12-26df5455b9d1\") " pod="openstack/rabbitmq-server-0" Jan 22 07:07:57 crc kubenswrapper[4933]: I0122 07:07:57.099559 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-25d25dfd-e27f-4f7a-bc88-ad1a44590293\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-25d25dfd-e27f-4f7a-bc88-ad1a44590293\") pod \"rabbitmq-server-0\" (UID: \"c6e1501e-9b96-439d-8f12-26df5455b9d1\") " pod="openstack/rabbitmq-server-0" Jan 22 07:07:57 crc kubenswrapper[4933]: I0122 07:07:57.099582 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c6e1501e-9b96-439d-8f12-26df5455b9d1-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"c6e1501e-9b96-439d-8f12-26df5455b9d1\") " pod="openstack/rabbitmq-server-0" Jan 22 07:07:57 crc kubenswrapper[4933]: I0122 07:07:57.099600 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c6e1501e-9b96-439d-8f12-26df5455b9d1-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"c6e1501e-9b96-439d-8f12-26df5455b9d1\") " pod="openstack/rabbitmq-server-0" Jan 22 07:07:57 crc kubenswrapper[4933]: I0122 07:07:57.100780 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c6e1501e-9b96-439d-8f12-26df5455b9d1-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: 
\"c6e1501e-9b96-439d-8f12-26df5455b9d1\") " pod="openstack/rabbitmq-server-0" Jan 22 07:07:57 crc kubenswrapper[4933]: I0122 07:07:57.101612 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c6e1501e-9b96-439d-8f12-26df5455b9d1-server-conf\") pod \"rabbitmq-server-0\" (UID: \"c6e1501e-9b96-439d-8f12-26df5455b9d1\") " pod="openstack/rabbitmq-server-0" Jan 22 07:07:57 crc kubenswrapper[4933]: I0122 07:07:57.101822 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c6e1501e-9b96-439d-8f12-26df5455b9d1-config-data\") pod \"rabbitmq-server-0\" (UID: \"c6e1501e-9b96-439d-8f12-26df5455b9d1\") " pod="openstack/rabbitmq-server-0" Jan 22 07:07:57 crc kubenswrapper[4933]: I0122 07:07:57.103292 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c6e1501e-9b96-439d-8f12-26df5455b9d1-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"c6e1501e-9b96-439d-8f12-26df5455b9d1\") " pod="openstack/rabbitmq-server-0" Jan 22 07:07:57 crc kubenswrapper[4933]: I0122 07:07:57.103437 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c6e1501e-9b96-439d-8f12-26df5455b9d1-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"c6e1501e-9b96-439d-8f12-26df5455b9d1\") " pod="openstack/rabbitmq-server-0" Jan 22 07:07:57 crc kubenswrapper[4933]: I0122 07:07:57.103701 4933 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 22 07:07:57 crc kubenswrapper[4933]: I0122 07:07:57.103739 4933 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-25d25dfd-e27f-4f7a-bc88-ad1a44590293\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-25d25dfd-e27f-4f7a-bc88-ad1a44590293\") pod \"rabbitmq-server-0\" (UID: \"c6e1501e-9b96-439d-8f12-26df5455b9d1\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/40a731fe5b6e4e94ff1971f7fab1d8154f20c057332612743110a6ff8d1c2b32/globalmount\"" pod="openstack/rabbitmq-server-0" Jan 22 07:07:57 crc kubenswrapper[4933]: I0122 07:07:57.104905 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c6e1501e-9b96-439d-8f12-26df5455b9d1-pod-info\") pod \"rabbitmq-server-0\" (UID: \"c6e1501e-9b96-439d-8f12-26df5455b9d1\") " pod="openstack/rabbitmq-server-0" Jan 22 07:07:57 crc kubenswrapper[4933]: I0122 07:07:57.105294 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c6e1501e-9b96-439d-8f12-26df5455b9d1-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"c6e1501e-9b96-439d-8f12-26df5455b9d1\") " pod="openstack/rabbitmq-server-0" Jan 22 07:07:57 crc kubenswrapper[4933]: I0122 07:07:57.106064 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c6e1501e-9b96-439d-8f12-26df5455b9d1-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"c6e1501e-9b96-439d-8f12-26df5455b9d1\") " pod="openstack/rabbitmq-server-0" Jan 22 07:07:57 crc kubenswrapper[4933]: I0122 07:07:57.108751 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: 
\"kubernetes.io/empty-dir/c6e1501e-9b96-439d-8f12-26df5455b9d1-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"c6e1501e-9b96-439d-8f12-26df5455b9d1\") " pod="openstack/rabbitmq-server-0" Jan 22 07:07:57 crc kubenswrapper[4933]: I0122 07:07:57.118961 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wfrjq\" (UniqueName: \"kubernetes.io/projected/c6e1501e-9b96-439d-8f12-26df5455b9d1-kube-api-access-wfrjq\") pod \"rabbitmq-server-0\" (UID: \"c6e1501e-9b96-439d-8f12-26df5455b9d1\") " pod="openstack/rabbitmq-server-0" Jan 22 07:07:57 crc kubenswrapper[4933]: I0122 07:07:57.134667 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-25d25dfd-e27f-4f7a-bc88-ad1a44590293\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-25d25dfd-e27f-4f7a-bc88-ad1a44590293\") pod \"rabbitmq-server-0\" (UID: \"c6e1501e-9b96-439d-8f12-26df5455b9d1\") " pod="openstack/rabbitmq-server-0" Jan 22 07:07:57 crc kubenswrapper[4933]: I0122 07:07:57.202237 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:07:57 crc kubenswrapper[4933]: I0122 07:07:57.215030 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 22 07:07:57 crc kubenswrapper[4933]: I0122 07:07:57.627527 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 22 07:07:57 crc kubenswrapper[4933]: I0122 07:07:57.682194 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 22 07:07:57 crc kubenswrapper[4933]: I0122 07:07:57.799852 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"200c4924-621c-4dbc-8b31-626f46b61d15","Type":"ContainerStarted","Data":"c6de173dbf53f4780a3b5e28e56c38dfd746a7eabea6f5e8f2eba3824d97ec7a"} Jan 22 07:07:57 crc kubenswrapper[4933]: I0122 07:07:57.801100 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"c6e1501e-9b96-439d-8f12-26df5455b9d1","Type":"ContainerStarted","Data":"335ec7cb90d21d7d48ff1ef343871154274ccddff57d7af4b47f0e3e16dd3b0c"} Jan 22 07:07:58 crc kubenswrapper[4933]: I0122 07:07:58.503522 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28" path="/var/lib/kubelet/pods/5eb3100b-df0f-4a1c-9e0e-1fd20cdedf28/volumes" Jan 22 07:07:58 crc kubenswrapper[4933]: I0122 07:07:58.504605 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e6f8ecee-ccdc-44d8-a781-2d8e8cb16806" path="/var/lib/kubelet/pods/e6f8ecee-ccdc-44d8-a781-2d8e8cb16806/volumes" Jan 22 07:07:59 crc kubenswrapper[4933]: I0122 07:07:59.823460 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"200c4924-621c-4dbc-8b31-626f46b61d15","Type":"ContainerStarted","Data":"0e56183fe60a5578c33c07f5ceb1534ef8790b21c7b105db2415d890b9fbd4fc"} Jan 22 07:07:59 crc kubenswrapper[4933]: I0122 07:07:59.826524 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"c6e1501e-9b96-439d-8f12-26df5455b9d1","Type":"ContainerStarted","Data":"9e40283437488bc72bf3a04f613627febb7e0beaccc1af120267aeb74e6762e4"} Jan 22 07:08:32 crc kubenswrapper[4933]: I0122 07:08:32.098553 4933 generic.go:334] "Generic (PLEG): container finished" podID="c6e1501e-9b96-439d-8f12-26df5455b9d1" 
containerID="9e40283437488bc72bf3a04f613627febb7e0beaccc1af120267aeb74e6762e4" exitCode=0 Jan 22 07:08:32 crc kubenswrapper[4933]: I0122 07:08:32.099203 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"c6e1501e-9b96-439d-8f12-26df5455b9d1","Type":"ContainerDied","Data":"9e40283437488bc72bf3a04f613627febb7e0beaccc1af120267aeb74e6762e4"} Jan 22 07:08:32 crc kubenswrapper[4933]: I0122 07:08:32.113673 4933 generic.go:334] "Generic (PLEG): container finished" podID="200c4924-621c-4dbc-8b31-626f46b61d15" containerID="0e56183fe60a5578c33c07f5ceb1534ef8790b21c7b105db2415d890b9fbd4fc" exitCode=0 Jan 22 07:08:32 crc kubenswrapper[4933]: I0122 07:08:32.113717 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"200c4924-621c-4dbc-8b31-626f46b61d15","Type":"ContainerDied","Data":"0e56183fe60a5578c33c07f5ceb1534ef8790b21c7b105db2415d890b9fbd4fc"} Jan 22 07:08:33 crc kubenswrapper[4933]: I0122 07:08:33.123383 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"200c4924-621c-4dbc-8b31-626f46b61d15","Type":"ContainerStarted","Data":"a604838603f49e2a60e9c71199062aff290752999aab235cca76b355d978cf70"} Jan 22 07:08:33 crc kubenswrapper[4933]: I0122 07:08:33.123926 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:08:33 crc kubenswrapper[4933]: I0122 07:08:33.126531 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"c6e1501e-9b96-439d-8f12-26df5455b9d1","Type":"ContainerStarted","Data":"02b663536eaf41c3112285c162300bebf98fc0f8fc7d80fd636a72c89b93dbed"} Jan 22 07:08:33 crc kubenswrapper[4933]: I0122 07:08:33.126774 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Jan 22 07:08:33 crc kubenswrapper[4933]: I0122 07:08:33.149683 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=37.149641052 podStartE2EDuration="37.149641052s" podCreationTimestamp="2026-01-22 07:07:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:08:33.146975948 +0000 UTC m=+4960.984101341" watchObservedRunningTime="2026-01-22 07:08:33.149641052 +0000 UTC m=+4960.986766395" Jan 22 07:08:33 crc kubenswrapper[4933]: I0122 07:08:33.173262 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=37.173232235 podStartE2EDuration="37.173232235s" podCreationTimestamp="2026-01-22 07:07:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:08:33.170802316 +0000 UTC m=+4961.007927669" watchObservedRunningTime="2026-01-22 07:08:33.173232235 +0000 UTC m=+4961.010357588" Jan 22 07:08:47 crc kubenswrapper[4933]: I0122 07:08:47.205237 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Jan 22 07:08:47 crc kubenswrapper[4933]: I0122 07:08:47.219313 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Jan 22 07:08:51 crc kubenswrapper[4933]: I0122 07:08:51.584596 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client"] Jan 22 07:08:51 crc 
kubenswrapper[4933]: I0122 07:08:51.587118 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Jan 22 07:08:51 crc kubenswrapper[4933]: I0122 07:08:51.594420 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-v4vkg" Jan 22 07:08:51 crc kubenswrapper[4933]: I0122 07:08:51.594973 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Jan 22 07:08:51 crc kubenswrapper[4933]: I0122 07:08:51.680325 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fv2zw\" (UniqueName: \"kubernetes.io/projected/98dc9044-4a59-4eca-bec1-3657ebe9aeb5-kube-api-access-fv2zw\") pod \"mariadb-client\" (UID: \"98dc9044-4a59-4eca-bec1-3657ebe9aeb5\") " pod="openstack/mariadb-client" Jan 22 07:08:51 crc kubenswrapper[4933]: I0122 07:08:51.782179 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fv2zw\" (UniqueName: \"kubernetes.io/projected/98dc9044-4a59-4eca-bec1-3657ebe9aeb5-kube-api-access-fv2zw\") pod \"mariadb-client\" (UID: \"98dc9044-4a59-4eca-bec1-3657ebe9aeb5\") " pod="openstack/mariadb-client" Jan 22 07:08:51 crc kubenswrapper[4933]: I0122 07:08:51.799009 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fv2zw\" (UniqueName: \"kubernetes.io/projected/98dc9044-4a59-4eca-bec1-3657ebe9aeb5-kube-api-access-fv2zw\") pod \"mariadb-client\" (UID: \"98dc9044-4a59-4eca-bec1-3657ebe9aeb5\") " pod="openstack/mariadb-client" Jan 22 07:08:51 crc kubenswrapper[4933]: I0122 07:08:51.917025 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Jan 22 07:08:52 crc kubenswrapper[4933]: I0122 07:08:52.395106 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Jan 22 07:08:53 crc kubenswrapper[4933]: I0122 07:08:53.289135 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"98dc9044-4a59-4eca-bec1-3657ebe9aeb5","Type":"ContainerStarted","Data":"640da6e0686bd95edaa263f2c551541ed441636af13b636f27c0d0f484cc92bf"} Jan 22 07:08:53 crc kubenswrapper[4933]: I0122 07:08:53.289676 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"98dc9044-4a59-4eca-bec1-3657ebe9aeb5","Type":"ContainerStarted","Data":"155db70936a8e4b33838020ffcf86554b911cd48ecf1a6259cc264254ee5ecaa"} Jan 22 07:08:53 crc kubenswrapper[4933]: I0122 07:08:53.328499 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mariadb-client" podStartSLOduration=1.84668195 podStartE2EDuration="2.328476288s" podCreationTimestamp="2026-01-22 07:08:51 +0000 UTC" firstStartedPulling="2026-01-22 07:08:52.391848968 +0000 UTC m=+4980.228974361" lastFinishedPulling="2026-01-22 07:08:52.873643346 +0000 UTC m=+4980.710768699" observedRunningTime="2026-01-22 07:08:53.304726702 +0000 UTC m=+4981.141852095" watchObservedRunningTime="2026-01-22 07:08:53.328476288 +0000 UTC m=+4981.165601631" Jan 22 07:09:01 crc kubenswrapper[4933]: E0122 07:09:01.758319 4933 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.163:36236->38.102.83.163:45627: write tcp 38.102.83.163:36236->38.102.83.163:45627: write: broken pipe Jan 22 07:09:06 crc kubenswrapper[4933]: I0122 07:09:06.203231 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client"] Jan 22 
07:09:06 crc kubenswrapper[4933]: I0122 07:09:06.203866 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/mariadb-client" podUID="98dc9044-4a59-4eca-bec1-3657ebe9aeb5" containerName="mariadb-client" containerID="cri-o://640da6e0686bd95edaa263f2c551541ed441636af13b636f27c0d0f484cc92bf" gracePeriod=30 Jan 22 07:09:06 crc kubenswrapper[4933]: I0122 07:09:06.418539 4933 generic.go:334] "Generic (PLEG): container finished" podID="98dc9044-4a59-4eca-bec1-3657ebe9aeb5" containerID="640da6e0686bd95edaa263f2c551541ed441636af13b636f27c0d0f484cc92bf" exitCode=143 Jan 22 07:09:06 crc kubenswrapper[4933]: I0122 07:09:06.418579 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"98dc9044-4a59-4eca-bec1-3657ebe9aeb5","Type":"ContainerDied","Data":"640da6e0686bd95edaa263f2c551541ed441636af13b636f27c0d0f484cc92bf"} Jan 22 07:09:06 crc kubenswrapper[4933]: I0122 07:09:06.739338 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Jan 22 07:09:06 crc kubenswrapper[4933]: I0122 07:09:06.937916 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fv2zw\" (UniqueName: \"kubernetes.io/projected/98dc9044-4a59-4eca-bec1-3657ebe9aeb5-kube-api-access-fv2zw\") pod \"98dc9044-4a59-4eca-bec1-3657ebe9aeb5\" (UID: \"98dc9044-4a59-4eca-bec1-3657ebe9aeb5\") " Jan 22 07:09:06 crc kubenswrapper[4933]: I0122 07:09:06.945751 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/98dc9044-4a59-4eca-bec1-3657ebe9aeb5-kube-api-access-fv2zw" (OuterVolumeSpecName: "kube-api-access-fv2zw") pod "98dc9044-4a59-4eca-bec1-3657ebe9aeb5" (UID: "98dc9044-4a59-4eca-bec1-3657ebe9aeb5"). InnerVolumeSpecName "kube-api-access-fv2zw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:09:07 crc kubenswrapper[4933]: I0122 07:09:07.039755 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fv2zw\" (UniqueName: \"kubernetes.io/projected/98dc9044-4a59-4eca-bec1-3657ebe9aeb5-kube-api-access-fv2zw\") on node \"crc\" DevicePath \"\"" Jan 22 07:09:07 crc kubenswrapper[4933]: I0122 07:09:07.430112 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"98dc9044-4a59-4eca-bec1-3657ebe9aeb5","Type":"ContainerDied","Data":"155db70936a8e4b33838020ffcf86554b911cd48ecf1a6259cc264254ee5ecaa"} Jan 22 07:09:07 crc kubenswrapper[4933]: I0122 07:09:07.430176 4933 scope.go:117] "RemoveContainer" containerID="640da6e0686bd95edaa263f2c551541ed441636af13b636f27c0d0f484cc92bf" Jan 22 07:09:07 crc kubenswrapper[4933]: I0122 07:09:07.430184 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client" Jan 22 07:09:07 crc kubenswrapper[4933]: I0122 07:09:07.464756 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client"] Jan 22 07:09:07 crc kubenswrapper[4933]: I0122 07:09:07.470186 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client"] Jan 22 07:09:08 crc kubenswrapper[4933]: I0122 07:09:08.500032 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="98dc9044-4a59-4eca-bec1-3657ebe9aeb5" path="/var/lib/kubelet/pods/98dc9044-4a59-4eca-bec1-3657ebe9aeb5/volumes" Jan 22 07:09:23 crc kubenswrapper[4933]: I0122 07:09:23.567895 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-t9847"] Jan 22 07:09:23 crc kubenswrapper[4933]: E0122 07:09:23.568678 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98dc9044-4a59-4eca-bec1-3657ebe9aeb5" containerName="mariadb-client" Jan 22 07:09:23 crc kubenswrapper[4933]: I0122 07:09:23.568691 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="98dc9044-4a59-4eca-bec1-3657ebe9aeb5" containerName="mariadb-client" Jan 22 07:09:23 crc kubenswrapper[4933]: I0122 07:09:23.568821 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="98dc9044-4a59-4eca-bec1-3657ebe9aeb5" containerName="mariadb-client" Jan 22 07:09:23 crc kubenswrapper[4933]: I0122 07:09:23.570911 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-t9847" Jan 22 07:09:23 crc kubenswrapper[4933]: I0122 07:09:23.584932 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-t9847"] Jan 22 07:09:23 crc kubenswrapper[4933]: I0122 07:09:23.712001 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63835767-bf4f-467e-858c-6c6709407ac6-catalog-content\") pod \"community-operators-t9847\" (UID: \"63835767-bf4f-467e-858c-6c6709407ac6\") " pod="openshift-marketplace/community-operators-t9847" Jan 22 07:09:23 crc kubenswrapper[4933]: I0122 07:09:23.712058 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9dfxc\" (UniqueName: \"kubernetes.io/projected/63835767-bf4f-467e-858c-6c6709407ac6-kube-api-access-9dfxc\") pod \"community-operators-t9847\" (UID: \"63835767-bf4f-467e-858c-6c6709407ac6\") " pod="openshift-marketplace/community-operators-t9847" Jan 22 07:09:23 crc kubenswrapper[4933]: I0122 07:09:23.712145 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63835767-bf4f-467e-858c-6c6709407ac6-utilities\") pod \"community-operators-t9847\" (UID: \"63835767-bf4f-467e-858c-6c6709407ac6\") " pod="openshift-marketplace/community-operators-t9847" Jan 22 07:09:23 crc kubenswrapper[4933]: I0122 07:09:23.761343 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-2blfn"] Jan 22 07:09:23 crc kubenswrapper[4933]: I0122 07:09:23.763186 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2blfn" Jan 22 07:09:23 crc kubenswrapper[4933]: I0122 07:09:23.774697 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2blfn"] Jan 22 07:09:23 crc kubenswrapper[4933]: I0122 07:09:23.813818 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9dfxc\" (UniqueName: \"kubernetes.io/projected/63835767-bf4f-467e-858c-6c6709407ac6-kube-api-access-9dfxc\") pod \"community-operators-t9847\" (UID: \"63835767-bf4f-467e-858c-6c6709407ac6\") " pod="openshift-marketplace/community-operators-t9847" Jan 22 07:09:23 crc kubenswrapper[4933]: I0122 07:09:23.814107 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63835767-bf4f-467e-858c-6c6709407ac6-utilities\") pod \"community-operators-t9847\" (UID: \"63835767-bf4f-467e-858c-6c6709407ac6\") " pod="openshift-marketplace/community-operators-t9847" Jan 22 07:09:23 crc kubenswrapper[4933]: I0122 07:09:23.814248 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63835767-bf4f-467e-858c-6c6709407ac6-catalog-content\") pod \"community-operators-t9847\" (UID: \"63835767-bf4f-467e-858c-6c6709407ac6\") " pod="openshift-marketplace/community-operators-t9847" Jan 22 07:09:23 crc kubenswrapper[4933]: I0122 07:09:23.814678 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63835767-bf4f-467e-858c-6c6709407ac6-catalog-content\") pod \"community-operators-t9847\" (UID: \"63835767-bf4f-467e-858c-6c6709407ac6\") " pod="openshift-marketplace/community-operators-t9847" Jan 22 07:09:23 crc kubenswrapper[4933]: I0122 07:09:23.814764 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63835767-bf4f-467e-858c-6c6709407ac6-utilities\") pod \"community-operators-t9847\" (UID: \"63835767-bf4f-467e-858c-6c6709407ac6\") " pod="openshift-marketplace/community-operators-t9847" Jan 22 07:09:23 crc kubenswrapper[4933]: I0122 07:09:23.834761 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9dfxc\" (UniqueName: \"kubernetes.io/projected/63835767-bf4f-467e-858c-6c6709407ac6-kube-api-access-9dfxc\") pod \"community-operators-t9847\" (UID: \"63835767-bf4f-467e-858c-6c6709407ac6\") " pod="openshift-marketplace/community-operators-t9847" Jan 22 07:09:23 crc kubenswrapper[4933]: I0122 07:09:23.889912 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-t9847" Jan 22 07:09:23 crc kubenswrapper[4933]: I0122 07:09:23.915620 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gzc4r\" (UniqueName: \"kubernetes.io/projected/8d06718f-3acc-4a7c-b279-9813d3997d8b-kube-api-access-gzc4r\") pod \"redhat-marketplace-2blfn\" (UID: \"8d06718f-3acc-4a7c-b279-9813d3997d8b\") " pod="openshift-marketplace/redhat-marketplace-2blfn" Jan 22 07:09:23 crc kubenswrapper[4933]: I0122 07:09:23.915852 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d06718f-3acc-4a7c-b279-9813d3997d8b-catalog-content\") pod \"redhat-marketplace-2blfn\" (UID: \"8d06718f-3acc-4a7c-b279-9813d3997d8b\") " pod="openshift-marketplace/redhat-marketplace-2blfn" Jan 22 07:09:23 crc kubenswrapper[4933]: I0122 07:09:23.915980 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d06718f-3acc-4a7c-b279-9813d3997d8b-utilities\") pod \"redhat-marketplace-2blfn\" (UID: \"8d06718f-3acc-4a7c-b279-9813d3997d8b\") " pod="openshift-marketplace/redhat-marketplace-2blfn" Jan 22 07:09:24 crc kubenswrapper[4933]: I0122 07:09:24.020922 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d06718f-3acc-4a7c-b279-9813d3997d8b-catalog-content\") pod \"redhat-marketplace-2blfn\" (UID: \"8d06718f-3acc-4a7c-b279-9813d3997d8b\") " pod="openshift-marketplace/redhat-marketplace-2blfn" Jan 22 07:09:24 crc kubenswrapper[4933]: I0122 07:09:24.021226 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d06718f-3acc-4a7c-b279-9813d3997d8b-utilities\") pod \"redhat-marketplace-2blfn\" (UID: \"8d06718f-3acc-4a7c-b279-9813d3997d8b\") " pod="openshift-marketplace/redhat-marketplace-2blfn" Jan 22 07:09:24 crc kubenswrapper[4933]: I0122 07:09:24.021294 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gzc4r\" (UniqueName: \"kubernetes.io/projected/8d06718f-3acc-4a7c-b279-9813d3997d8b-kube-api-access-gzc4r\") pod \"redhat-marketplace-2blfn\" (UID: \"8d06718f-3acc-4a7c-b279-9813d3997d8b\") " pod="openshift-marketplace/redhat-marketplace-2blfn" Jan 22 07:09:24 crc kubenswrapper[4933]: I0122 07:09:24.021447 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d06718f-3acc-4a7c-b279-9813d3997d8b-catalog-content\") pod \"redhat-marketplace-2blfn\" (UID: \"8d06718f-3acc-4a7c-b279-9813d3997d8b\") " pod="openshift-marketplace/redhat-marketplace-2blfn" Jan 22 07:09:24 crc kubenswrapper[4933]: I0122 07:09:24.022269 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d06718f-3acc-4a7c-b279-9813d3997d8b-utilities\") pod \"redhat-marketplace-2blfn\" (UID: \"8d06718f-3acc-4a7c-b279-9813d3997d8b\") " pod="openshift-marketplace/redhat-marketplace-2blfn" Jan 22 07:09:24 crc kubenswrapper[4933]: I0122 07:09:24.061241 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gzc4r\" (UniqueName: \"kubernetes.io/projected/8d06718f-3acc-4a7c-b279-9813d3997d8b-kube-api-access-gzc4r\") pod 
\"redhat-marketplace-2blfn\" (UID: \"8d06718f-3acc-4a7c-b279-9813d3997d8b\") " pod="openshift-marketplace/redhat-marketplace-2blfn" Jan 22 07:09:24 crc kubenswrapper[4933]: I0122 07:09:24.080887 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2blfn" Jan 22 07:09:24 crc kubenswrapper[4933]: I0122 07:09:24.373685 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-t9847"] Jan 22 07:09:24 crc kubenswrapper[4933]: I0122 07:09:24.569839 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t9847" event={"ID":"63835767-bf4f-467e-858c-6c6709407ac6","Type":"ContainerStarted","Data":"519e2601237770354a92ddd33e9eeb8e592ed81c3c8a45fd6640e296a38514cd"} Jan 22 07:09:24 crc kubenswrapper[4933]: I0122 07:09:24.569905 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t9847" event={"ID":"63835767-bf4f-467e-858c-6c6709407ac6","Type":"ContainerStarted","Data":"dad1cfc08201b8b42a55dfd8ce8dd66e71a91b055510ebebb397ccc9a6d3db28"} Jan 22 07:09:24 crc kubenswrapper[4933]: I0122 07:09:24.587638 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2blfn"] Jan 22 07:09:24 crc kubenswrapper[4933]: W0122 07:09:24.669194 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8d06718f_3acc_4a7c_b279_9813d3997d8b.slice/crio-c83bbd975301359eb514ac4ef5897c39c20ea892b1a21c79f3a3296f68092144 WatchSource:0}: Error finding container c83bbd975301359eb514ac4ef5897c39c20ea892b1a21c79f3a3296f68092144: Status 404 returned error can't find the container with id c83bbd975301359eb514ac4ef5897c39c20ea892b1a21c79f3a3296f68092144 Jan 22 07:09:25 crc kubenswrapper[4933]: I0122 07:09:25.578902 4933 generic.go:334] "Generic (PLEG): container finished" podID="63835767-bf4f-467e-858c-6c6709407ac6" containerID="519e2601237770354a92ddd33e9eeb8e592ed81c3c8a45fd6640e296a38514cd" exitCode=0 Jan 22 07:09:25 crc kubenswrapper[4933]: I0122 07:09:25.578965 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t9847" event={"ID":"63835767-bf4f-467e-858c-6c6709407ac6","Type":"ContainerDied","Data":"519e2601237770354a92ddd33e9eeb8e592ed81c3c8a45fd6640e296a38514cd"} Jan 22 07:09:25 crc kubenswrapper[4933]: I0122 07:09:25.580951 4933 generic.go:334] "Generic (PLEG): container finished" podID="8d06718f-3acc-4a7c-b279-9813d3997d8b" containerID="513321e9d15273ce0953dee5b60418600257161389f370e4a24c3795fecf8b56" exitCode=0 Jan 22 07:09:25 crc kubenswrapper[4933]: I0122 07:09:25.581032 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2blfn" event={"ID":"8d06718f-3acc-4a7c-b279-9813d3997d8b","Type":"ContainerDied","Data":"513321e9d15273ce0953dee5b60418600257161389f370e4a24c3795fecf8b56"} Jan 22 07:09:25 crc kubenswrapper[4933]: I0122 07:09:25.581051 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2blfn" event={"ID":"8d06718f-3acc-4a7c-b279-9813d3997d8b","Type":"ContainerStarted","Data":"c83bbd975301359eb514ac4ef5897c39c20ea892b1a21c79f3a3296f68092144"} Jan 22 07:09:26 crc kubenswrapper[4933]: I0122 07:09:26.563642 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-69x8c"] Jan 22 07:09:26 crc kubenswrapper[4933]: I0122 
07:09:26.567749 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-69x8c" Jan 22 07:09:26 crc kubenswrapper[4933]: I0122 07:09:26.576832 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-69x8c"] Jan 22 07:09:26 crc kubenswrapper[4933]: I0122 07:09:26.604855 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t9847" event={"ID":"63835767-bf4f-467e-858c-6c6709407ac6","Type":"ContainerStarted","Data":"ab98eba2cb9192a580a39042e1c7ad09e005ecaab2f74d692a27de8885a92a5d"} Jan 22 07:09:26 crc kubenswrapper[4933]: I0122 07:09:26.662066 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/99cadc0f-af5f-455a-8abe-07a207262b46-catalog-content\") pod \"redhat-operators-69x8c\" (UID: \"99cadc0f-af5f-455a-8abe-07a207262b46\") " pod="openshift-marketplace/redhat-operators-69x8c" Jan 22 07:09:26 crc kubenswrapper[4933]: I0122 07:09:26.662230 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hnmvl\" (UniqueName: \"kubernetes.io/projected/99cadc0f-af5f-455a-8abe-07a207262b46-kube-api-access-hnmvl\") pod \"redhat-operators-69x8c\" (UID: \"99cadc0f-af5f-455a-8abe-07a207262b46\") " pod="openshift-marketplace/redhat-operators-69x8c" Jan 22 07:09:26 crc kubenswrapper[4933]: I0122 07:09:26.662274 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/99cadc0f-af5f-455a-8abe-07a207262b46-utilities\") pod \"redhat-operators-69x8c\" (UID: \"99cadc0f-af5f-455a-8abe-07a207262b46\") " pod="openshift-marketplace/redhat-operators-69x8c" Jan 22 07:09:26 crc kubenswrapper[4933]: I0122 07:09:26.763344 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/99cadc0f-af5f-455a-8abe-07a207262b46-catalog-content\") pod \"redhat-operators-69x8c\" (UID: \"99cadc0f-af5f-455a-8abe-07a207262b46\") " pod="openshift-marketplace/redhat-operators-69x8c" Jan 22 07:09:26 crc kubenswrapper[4933]: I0122 07:09:26.763452 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hnmvl\" (UniqueName: \"kubernetes.io/projected/99cadc0f-af5f-455a-8abe-07a207262b46-kube-api-access-hnmvl\") pod \"redhat-operators-69x8c\" (UID: \"99cadc0f-af5f-455a-8abe-07a207262b46\") " pod="openshift-marketplace/redhat-operators-69x8c" Jan 22 07:09:26 crc kubenswrapper[4933]: I0122 07:09:26.763496 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/99cadc0f-af5f-455a-8abe-07a207262b46-utilities\") pod \"redhat-operators-69x8c\" (UID: \"99cadc0f-af5f-455a-8abe-07a207262b46\") " pod="openshift-marketplace/redhat-operators-69x8c" Jan 22 07:09:26 crc kubenswrapper[4933]: I0122 07:09:26.764118 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/99cadc0f-af5f-455a-8abe-07a207262b46-utilities\") pod \"redhat-operators-69x8c\" (UID: \"99cadc0f-af5f-455a-8abe-07a207262b46\") " pod="openshift-marketplace/redhat-operators-69x8c" Jan 22 07:09:26 crc kubenswrapper[4933]: I0122 07:09:26.764331 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/99cadc0f-af5f-455a-8abe-07a207262b46-catalog-content\") pod \"redhat-operators-69x8c\" (UID: \"99cadc0f-af5f-455a-8abe-07a207262b46\") " pod="openshift-marketplace/redhat-operators-69x8c" Jan 22 07:09:26 crc kubenswrapper[4933]: I0122 07:09:26.783144 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hnmvl\" (UniqueName: \"kubernetes.io/projected/99cadc0f-af5f-455a-8abe-07a207262b46-kube-api-access-hnmvl\") pod \"redhat-operators-69x8c\" (UID: \"99cadc0f-af5f-455a-8abe-07a207262b46\") " pod="openshift-marketplace/redhat-operators-69x8c" Jan 22 07:09:26 crc kubenswrapper[4933]: I0122 07:09:26.914136 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-69x8c" Jan 22 07:09:27 crc kubenswrapper[4933]: I0122 07:09:27.148935 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-69x8c"] Jan 22 07:09:27 crc kubenswrapper[4933]: I0122 07:09:27.616470 4933 generic.go:334] "Generic (PLEG): container finished" podID="8d06718f-3acc-4a7c-b279-9813d3997d8b" containerID="4df256d3c20409c999fcf6d8bdc1c264cf51f1e5f37c21720a192983c0a411eb" exitCode=0 Jan 22 07:09:27 crc kubenswrapper[4933]: I0122 07:09:27.616768 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2blfn" event={"ID":"8d06718f-3acc-4a7c-b279-9813d3997d8b","Type":"ContainerDied","Data":"4df256d3c20409c999fcf6d8bdc1c264cf51f1e5f37c21720a192983c0a411eb"} Jan 22 07:09:27 crc kubenswrapper[4933]: I0122 07:09:27.627187 4933 generic.go:334] "Generic (PLEG): container finished" podID="99cadc0f-af5f-455a-8abe-07a207262b46" containerID="fc41e095d04014944397f4e2e48c3c816fe2ee770bc4da4cb690ad2e541b1fe6" exitCode=0 Jan 22 07:09:27 crc kubenswrapper[4933]: I0122 07:09:27.627688 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-69x8c" event={"ID":"99cadc0f-af5f-455a-8abe-07a207262b46","Type":"ContainerDied","Data":"fc41e095d04014944397f4e2e48c3c816fe2ee770bc4da4cb690ad2e541b1fe6"} Jan 22 07:09:27 crc kubenswrapper[4933]: I0122 07:09:27.627772 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-69x8c" event={"ID":"99cadc0f-af5f-455a-8abe-07a207262b46","Type":"ContainerStarted","Data":"e569302fc53609d610fb881c24a03b32a7c369b49b9778160bdaa16209ce8366"} Jan 22 07:09:27 crc kubenswrapper[4933]: I0122 07:09:27.631975 4933 generic.go:334] "Generic (PLEG): container finished" podID="63835767-bf4f-467e-858c-6c6709407ac6" containerID="ab98eba2cb9192a580a39042e1c7ad09e005ecaab2f74d692a27de8885a92a5d" exitCode=0 Jan 22 07:09:27 crc kubenswrapper[4933]: I0122 07:09:27.632019 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t9847" event={"ID":"63835767-bf4f-467e-858c-6c6709407ac6","Type":"ContainerDied","Data":"ab98eba2cb9192a580a39042e1c7ad09e005ecaab2f74d692a27de8885a92a5d"} Jan 22 07:09:28 crc kubenswrapper[4933]: I0122 07:09:28.646288 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2blfn" event={"ID":"8d06718f-3acc-4a7c-b279-9813d3997d8b","Type":"ContainerStarted","Data":"dad7778f00b9a643f5884f2b6c379179c1b73394b8d47bae4fc1744bc8f14fd1"} Jan 22 07:09:28 crc kubenswrapper[4933]: I0122 07:09:28.656613 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t9847" 
event={"ID":"63835767-bf4f-467e-858c-6c6709407ac6","Type":"ContainerStarted","Data":"74e7d894f4d50b76e058dc85285c389af75f1ede3c4fe4c05f76f5f79c8e8739"} Jan 22 07:09:28 crc kubenswrapper[4933]: I0122 07:09:28.672250 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-2blfn" podStartSLOduration=3.193178911 podStartE2EDuration="5.672224844s" podCreationTimestamp="2026-01-22 07:09:23 +0000 UTC" firstStartedPulling="2026-01-22 07:09:25.583639191 +0000 UTC m=+5013.420764544" lastFinishedPulling="2026-01-22 07:09:28.062685124 +0000 UTC m=+5015.899810477" observedRunningTime="2026-01-22 07:09:28.671000885 +0000 UTC m=+5016.508126268" watchObservedRunningTime="2026-01-22 07:09:28.672224844 +0000 UTC m=+5016.509350207" Jan 22 07:09:28 crc kubenswrapper[4933]: I0122 07:09:28.704723 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-t9847" podStartSLOduration=3.126715627 podStartE2EDuration="5.704703883s" podCreationTimestamp="2026-01-22 07:09:23 +0000 UTC" firstStartedPulling="2026-01-22 07:09:25.580615468 +0000 UTC m=+5013.417740821" lastFinishedPulling="2026-01-22 07:09:28.158603724 +0000 UTC m=+5015.995729077" observedRunningTime="2026-01-22 07:09:28.699754423 +0000 UTC m=+5016.536879776" watchObservedRunningTime="2026-01-22 07:09:28.704703883 +0000 UTC m=+5016.541829236" Jan 22 07:09:29 crc kubenswrapper[4933]: I0122 07:09:29.667837 4933 generic.go:334] "Generic (PLEG): container finished" podID="99cadc0f-af5f-455a-8abe-07a207262b46" containerID="7bebf2e131ab955f66bbefb05b478d091c2300137f728a0f905630e8f6210260" exitCode=0 Jan 22 07:09:29 crc kubenswrapper[4933]: I0122 07:09:29.667926 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-69x8c" event={"ID":"99cadc0f-af5f-455a-8abe-07a207262b46","Type":"ContainerDied","Data":"7bebf2e131ab955f66bbefb05b478d091c2300137f728a0f905630e8f6210260"} Jan 22 07:09:30 crc kubenswrapper[4933]: I0122 07:09:30.677245 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-69x8c" event={"ID":"99cadc0f-af5f-455a-8abe-07a207262b46","Type":"ContainerStarted","Data":"0f2b11656b9f0056db42ec621c3a6d2674db1d085cc160a8985909845855d3a3"} Jan 22 07:09:30 crc kubenswrapper[4933]: I0122 07:09:30.695185 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-69x8c" podStartSLOduration=2.122216801 podStartE2EDuration="4.695156562s" podCreationTimestamp="2026-01-22 07:09:26 +0000 UTC" firstStartedPulling="2026-01-22 07:09:27.631167697 +0000 UTC m=+5015.468293050" lastFinishedPulling="2026-01-22 07:09:30.204107458 +0000 UTC m=+5018.041232811" observedRunningTime="2026-01-22 07:09:30.693274466 +0000 UTC m=+5018.530399839" watchObservedRunningTime="2026-01-22 07:09:30.695156562 +0000 UTC m=+5018.532281955" Jan 22 07:09:33 crc kubenswrapper[4933]: I0122 07:09:33.890951 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-t9847" Jan 22 07:09:33 crc kubenswrapper[4933]: I0122 07:09:33.891306 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-t9847" Jan 22 07:09:33 crc kubenswrapper[4933]: I0122 07:09:33.929228 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-t9847" Jan 22 07:09:34 crc 
kubenswrapper[4933]: I0122 07:09:34.081581 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-2blfn" Jan 22 07:09:34 crc kubenswrapper[4933]: I0122 07:09:34.081641 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-2blfn" Jan 22 07:09:34 crc kubenswrapper[4933]: I0122 07:09:34.126184 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-2blfn" Jan 22 07:09:34 crc kubenswrapper[4933]: I0122 07:09:34.807382 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-t9847" Jan 22 07:09:34 crc kubenswrapper[4933]: I0122 07:09:34.810753 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-2blfn" Jan 22 07:09:36 crc kubenswrapper[4933]: I0122 07:09:36.915153 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-69x8c" Jan 22 07:09:36 crc kubenswrapper[4933]: I0122 07:09:36.915619 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-69x8c" Jan 22 07:09:36 crc kubenswrapper[4933]: I0122 07:09:36.989670 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-69x8c" Jan 22 07:09:37 crc kubenswrapper[4933]: I0122 07:09:37.761863 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2blfn"] Jan 22 07:09:37 crc kubenswrapper[4933]: I0122 07:09:37.762140 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-2blfn" podUID="8d06718f-3acc-4a7c-b279-9813d3997d8b" containerName="registry-server" containerID="cri-o://dad7778f00b9a643f5884f2b6c379179c1b73394b8d47bae4fc1744bc8f14fd1" gracePeriod=2 Jan 22 07:09:37 crc kubenswrapper[4933]: I0122 07:09:37.815267 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-69x8c" Jan 22 07:09:40 crc kubenswrapper[4933]: I0122 07:09:39.158190 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-69x8c"] Jan 22 07:09:40 crc kubenswrapper[4933]: I0122 07:09:39.352415 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2blfn" Jan 22 07:09:40 crc kubenswrapper[4933]: I0122 07:09:39.462610 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d06718f-3acc-4a7c-b279-9813d3997d8b-catalog-content\") pod \"8d06718f-3acc-4a7c-b279-9813d3997d8b\" (UID: \"8d06718f-3acc-4a7c-b279-9813d3997d8b\") " Jan 22 07:09:40 crc kubenswrapper[4933]: I0122 07:09:39.463003 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d06718f-3acc-4a7c-b279-9813d3997d8b-utilities\") pod \"8d06718f-3acc-4a7c-b279-9813d3997d8b\" (UID: \"8d06718f-3acc-4a7c-b279-9813d3997d8b\") " Jan 22 07:09:40 crc kubenswrapper[4933]: I0122 07:09:39.463131 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gzc4r\" (UniqueName: \"kubernetes.io/projected/8d06718f-3acc-4a7c-b279-9813d3997d8b-kube-api-access-gzc4r\") pod \"8d06718f-3acc-4a7c-b279-9813d3997d8b\" (UID: \"8d06718f-3acc-4a7c-b279-9813d3997d8b\") " Jan 22 07:09:40 crc kubenswrapper[4933]: I0122 07:09:39.463811 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8d06718f-3acc-4a7c-b279-9813d3997d8b-utilities" (OuterVolumeSpecName: "utilities") pod "8d06718f-3acc-4a7c-b279-9813d3997d8b" (UID: "8d06718f-3acc-4a7c-b279-9813d3997d8b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:09:40 crc kubenswrapper[4933]: I0122 07:09:39.470273 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d06718f-3acc-4a7c-b279-9813d3997d8b-kube-api-access-gzc4r" (OuterVolumeSpecName: "kube-api-access-gzc4r") pod "8d06718f-3acc-4a7c-b279-9813d3997d8b" (UID: "8d06718f-3acc-4a7c-b279-9813d3997d8b"). InnerVolumeSpecName "kube-api-access-gzc4r". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:09:40 crc kubenswrapper[4933]: I0122 07:09:39.484465 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8d06718f-3acc-4a7c-b279-9813d3997d8b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8d06718f-3acc-4a7c-b279-9813d3997d8b" (UID: "8d06718f-3acc-4a7c-b279-9813d3997d8b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:09:40 crc kubenswrapper[4933]: I0122 07:09:39.564468 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d06718f-3acc-4a7c-b279-9813d3997d8b-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 07:09:40 crc kubenswrapper[4933]: I0122 07:09:39.564495 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gzc4r\" (UniqueName: \"kubernetes.io/projected/8d06718f-3acc-4a7c-b279-9813d3997d8b-kube-api-access-gzc4r\") on node \"crc\" DevicePath \"\"" Jan 22 07:09:40 crc kubenswrapper[4933]: I0122 07:09:39.564506 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d06718f-3acc-4a7c-b279-9813d3997d8b-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 07:09:40 crc kubenswrapper[4933]: I0122 07:09:39.790473 4933 generic.go:334] "Generic (PLEG): container finished" podID="8d06718f-3acc-4a7c-b279-9813d3997d8b" containerID="dad7778f00b9a643f5884f2b6c379179c1b73394b8d47bae4fc1744bc8f14fd1" exitCode=0 Jan 22 07:09:40 crc kubenswrapper[4933]: I0122 07:09:39.790566 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2blfn" event={"ID":"8d06718f-3acc-4a7c-b279-9813d3997d8b","Type":"ContainerDied","Data":"dad7778f00b9a643f5884f2b6c379179c1b73394b8d47bae4fc1744bc8f14fd1"} Jan 22 07:09:40 crc kubenswrapper[4933]: I0122 07:09:39.790628 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2blfn" Jan 22 07:09:40 crc kubenswrapper[4933]: I0122 07:09:39.790666 4933 scope.go:117] "RemoveContainer" containerID="dad7778f00b9a643f5884f2b6c379179c1b73394b8d47bae4fc1744bc8f14fd1" Jan 22 07:09:40 crc kubenswrapper[4933]: I0122 07:09:39.790646 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2blfn" event={"ID":"8d06718f-3acc-4a7c-b279-9813d3997d8b","Type":"ContainerDied","Data":"c83bbd975301359eb514ac4ef5897c39c20ea892b1a21c79f3a3296f68092144"} Jan 22 07:09:40 crc kubenswrapper[4933]: I0122 07:09:39.790884 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-69x8c" podUID="99cadc0f-af5f-455a-8abe-07a207262b46" containerName="registry-server" containerID="cri-o://0f2b11656b9f0056db42ec621c3a6d2674db1d085cc160a8985909845855d3a3" gracePeriod=2 Jan 22 07:09:40 crc kubenswrapper[4933]: I0122 07:09:39.838421 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2blfn"] Jan 22 07:09:40 crc kubenswrapper[4933]: I0122 07:09:39.838585 4933 scope.go:117] "RemoveContainer" containerID="4df256d3c20409c999fcf6d8bdc1c264cf51f1e5f37c21720a192983c0a411eb" Jan 22 07:09:40 crc kubenswrapper[4933]: I0122 07:09:39.851536 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-2blfn"] Jan 22 07:09:40 crc kubenswrapper[4933]: I0122 07:09:39.860408 4933 scope.go:117] "RemoveContainer" containerID="513321e9d15273ce0953dee5b60418600257161389f370e4a24c3795fecf8b56" Jan 22 07:09:40 crc kubenswrapper[4933]: I0122 07:09:39.902332 4933 scope.go:117] "RemoveContainer" containerID="dad7778f00b9a643f5884f2b6c379179c1b73394b8d47bae4fc1744bc8f14fd1" Jan 22 07:09:40 crc kubenswrapper[4933]: E0122 07:09:39.902955 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = 
could not find container \"dad7778f00b9a643f5884f2b6c379179c1b73394b8d47bae4fc1744bc8f14fd1\": container with ID starting with dad7778f00b9a643f5884f2b6c379179c1b73394b8d47bae4fc1744bc8f14fd1 not found: ID does not exist" containerID="dad7778f00b9a643f5884f2b6c379179c1b73394b8d47bae4fc1744bc8f14fd1" Jan 22 07:09:40 crc kubenswrapper[4933]: I0122 07:09:39.903007 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dad7778f00b9a643f5884f2b6c379179c1b73394b8d47bae4fc1744bc8f14fd1"} err="failed to get container status \"dad7778f00b9a643f5884f2b6c379179c1b73394b8d47bae4fc1744bc8f14fd1\": rpc error: code = NotFound desc = could not find container \"dad7778f00b9a643f5884f2b6c379179c1b73394b8d47bae4fc1744bc8f14fd1\": container with ID starting with dad7778f00b9a643f5884f2b6c379179c1b73394b8d47bae4fc1744bc8f14fd1 not found: ID does not exist" Jan 22 07:09:40 crc kubenswrapper[4933]: I0122 07:09:39.903041 4933 scope.go:117] "RemoveContainer" containerID="4df256d3c20409c999fcf6d8bdc1c264cf51f1e5f37c21720a192983c0a411eb" Jan 22 07:09:40 crc kubenswrapper[4933]: E0122 07:09:39.903660 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4df256d3c20409c999fcf6d8bdc1c264cf51f1e5f37c21720a192983c0a411eb\": container with ID starting with 4df256d3c20409c999fcf6d8bdc1c264cf51f1e5f37c21720a192983c0a411eb not found: ID does not exist" containerID="4df256d3c20409c999fcf6d8bdc1c264cf51f1e5f37c21720a192983c0a411eb" Jan 22 07:09:40 crc kubenswrapper[4933]: I0122 07:09:39.903690 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4df256d3c20409c999fcf6d8bdc1c264cf51f1e5f37c21720a192983c0a411eb"} err="failed to get container status \"4df256d3c20409c999fcf6d8bdc1c264cf51f1e5f37c21720a192983c0a411eb\": rpc error: code = NotFound desc = could not find container \"4df256d3c20409c999fcf6d8bdc1c264cf51f1e5f37c21720a192983c0a411eb\": container with ID starting with 4df256d3c20409c999fcf6d8bdc1c264cf51f1e5f37c21720a192983c0a411eb not found: ID does not exist" Jan 22 07:09:40 crc kubenswrapper[4933]: I0122 07:09:39.903734 4933 scope.go:117] "RemoveContainer" containerID="513321e9d15273ce0953dee5b60418600257161389f370e4a24c3795fecf8b56" Jan 22 07:09:40 crc kubenswrapper[4933]: E0122 07:09:39.904153 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"513321e9d15273ce0953dee5b60418600257161389f370e4a24c3795fecf8b56\": container with ID starting with 513321e9d15273ce0953dee5b60418600257161389f370e4a24c3795fecf8b56 not found: ID does not exist" containerID="513321e9d15273ce0953dee5b60418600257161389f370e4a24c3795fecf8b56" Jan 22 07:09:40 crc kubenswrapper[4933]: I0122 07:09:39.904173 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"513321e9d15273ce0953dee5b60418600257161389f370e4a24c3795fecf8b56"} err="failed to get container status \"513321e9d15273ce0953dee5b60418600257161389f370e4a24c3795fecf8b56\": rpc error: code = NotFound desc = could not find container \"513321e9d15273ce0953dee5b60418600257161389f370e4a24c3795fecf8b56\": container with ID starting with 513321e9d15273ce0953dee5b60418600257161389f370e4a24c3795fecf8b56 not found: ID does not exist" Jan 22 07:09:40 crc kubenswrapper[4933]: I0122 07:09:40.501094 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8d06718f-3acc-4a7c-b279-9813d3997d8b" 
path="/var/lib/kubelet/pods/8d06718f-3acc-4a7c-b279-9813d3997d8b/volumes" Jan 22 07:09:40 crc kubenswrapper[4933]: I0122 07:09:40.943648 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 07:09:40 crc kubenswrapper[4933]: I0122 07:09:40.944116 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 07:09:41 crc kubenswrapper[4933]: I0122 07:09:41.818768 4933 generic.go:334] "Generic (PLEG): container finished" podID="99cadc0f-af5f-455a-8abe-07a207262b46" containerID="0f2b11656b9f0056db42ec621c3a6d2674db1d085cc160a8985909845855d3a3" exitCode=0 Jan 22 07:09:41 crc kubenswrapper[4933]: I0122 07:09:41.818831 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-69x8c" event={"ID":"99cadc0f-af5f-455a-8abe-07a207262b46","Type":"ContainerDied","Data":"0f2b11656b9f0056db42ec621c3a6d2674db1d085cc160a8985909845855d3a3"} Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.127108 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-69x8c" Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.165696 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-t9847"] Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.165970 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-t9847" podUID="63835767-bf4f-467e-858c-6c6709407ac6" containerName="registry-server" containerID="cri-o://74e7d894f4d50b76e058dc85285c389af75f1ede3c4fe4c05f76f5f79c8e8739" gracePeriod=2 Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.313654 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/99cadc0f-af5f-455a-8abe-07a207262b46-utilities\") pod \"99cadc0f-af5f-455a-8abe-07a207262b46\" (UID: \"99cadc0f-af5f-455a-8abe-07a207262b46\") " Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.313828 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/99cadc0f-af5f-455a-8abe-07a207262b46-catalog-content\") pod \"99cadc0f-af5f-455a-8abe-07a207262b46\" (UID: \"99cadc0f-af5f-455a-8abe-07a207262b46\") " Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.313998 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hnmvl\" (UniqueName: \"kubernetes.io/projected/99cadc0f-af5f-455a-8abe-07a207262b46-kube-api-access-hnmvl\") pod \"99cadc0f-af5f-455a-8abe-07a207262b46\" (UID: \"99cadc0f-af5f-455a-8abe-07a207262b46\") " Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.315373 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/99cadc0f-af5f-455a-8abe-07a207262b46-utilities" (OuterVolumeSpecName: "utilities") pod "99cadc0f-af5f-455a-8abe-07a207262b46" (UID: "99cadc0f-af5f-455a-8abe-07a207262b46"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.321365 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/99cadc0f-af5f-455a-8abe-07a207262b46-kube-api-access-hnmvl" (OuterVolumeSpecName: "kube-api-access-hnmvl") pod "99cadc0f-af5f-455a-8abe-07a207262b46" (UID: "99cadc0f-af5f-455a-8abe-07a207262b46"). InnerVolumeSpecName "kube-api-access-hnmvl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.416416 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/99cadc0f-af5f-455a-8abe-07a207262b46-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.416450 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hnmvl\" (UniqueName: \"kubernetes.io/projected/99cadc0f-af5f-455a-8abe-07a207262b46-kube-api-access-hnmvl\") on node \"crc\" DevicePath \"\"" Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.465668 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/99cadc0f-af5f-455a-8abe-07a207262b46-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "99cadc0f-af5f-455a-8abe-07a207262b46" (UID: "99cadc0f-af5f-455a-8abe-07a207262b46"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.517419 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/99cadc0f-af5f-455a-8abe-07a207262b46-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.555550 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-t9847" Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.719503 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63835767-bf4f-467e-858c-6c6709407ac6-catalog-content\") pod \"63835767-bf4f-467e-858c-6c6709407ac6\" (UID: \"63835767-bf4f-467e-858c-6c6709407ac6\") " Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.719603 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9dfxc\" (UniqueName: \"kubernetes.io/projected/63835767-bf4f-467e-858c-6c6709407ac6-kube-api-access-9dfxc\") pod \"63835767-bf4f-467e-858c-6c6709407ac6\" (UID: \"63835767-bf4f-467e-858c-6c6709407ac6\") " Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.719641 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63835767-bf4f-467e-858c-6c6709407ac6-utilities\") pod \"63835767-bf4f-467e-858c-6c6709407ac6\" (UID: \"63835767-bf4f-467e-858c-6c6709407ac6\") " Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.721234 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/63835767-bf4f-467e-858c-6c6709407ac6-utilities" (OuterVolumeSpecName: "utilities") pod "63835767-bf4f-467e-858c-6c6709407ac6" (UID: "63835767-bf4f-467e-858c-6c6709407ac6"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.723759 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/63835767-bf4f-467e-858c-6c6709407ac6-kube-api-access-9dfxc" (OuterVolumeSpecName: "kube-api-access-9dfxc") pod "63835767-bf4f-467e-858c-6c6709407ac6" (UID: "63835767-bf4f-467e-858c-6c6709407ac6"). InnerVolumeSpecName "kube-api-access-9dfxc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.803478 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/63835767-bf4f-467e-858c-6c6709407ac6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "63835767-bf4f-467e-858c-6c6709407ac6" (UID: "63835767-bf4f-467e-858c-6c6709407ac6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.822006 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63835767-bf4f-467e-858c-6c6709407ac6-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.822043 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9dfxc\" (UniqueName: \"kubernetes.io/projected/63835767-bf4f-467e-858c-6c6709407ac6-kube-api-access-9dfxc\") on node \"crc\" DevicePath \"\"" Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.822103 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63835767-bf4f-467e-858c-6c6709407ac6-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.832574 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-69x8c" event={"ID":"99cadc0f-af5f-455a-8abe-07a207262b46","Type":"ContainerDied","Data":"e569302fc53609d610fb881c24a03b32a7c369b49b9778160bdaa16209ce8366"} Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.832656 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-69x8c" Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.832828 4933 scope.go:117] "RemoveContainer" containerID="0f2b11656b9f0056db42ec621c3a6d2674db1d085cc160a8985909845855d3a3" Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.837042 4933 generic.go:334] "Generic (PLEG): container finished" podID="63835767-bf4f-467e-858c-6c6709407ac6" containerID="74e7d894f4d50b76e058dc85285c389af75f1ede3c4fe4c05f76f5f79c8e8739" exitCode=0 Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.837112 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t9847" event={"ID":"63835767-bf4f-467e-858c-6c6709407ac6","Type":"ContainerDied","Data":"74e7d894f4d50b76e058dc85285c389af75f1ede3c4fe4c05f76f5f79c8e8739"} Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.837153 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t9847" event={"ID":"63835767-bf4f-467e-858c-6c6709407ac6","Type":"ContainerDied","Data":"dad1cfc08201b8b42a55dfd8ce8dd66e71a91b055510ebebb397ccc9a6d3db28"} Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.837152 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-t9847" Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.870476 4933 scope.go:117] "RemoveContainer" containerID="7bebf2e131ab955f66bbefb05b478d091c2300137f728a0f905630e8f6210260" Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.870617 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-69x8c"] Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.886436 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-69x8c"] Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.896135 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-t9847"] Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.904687 4933 scope.go:117] "RemoveContainer" containerID="fc41e095d04014944397f4e2e48c3c816fe2ee770bc4da4cb690ad2e541b1fe6" Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.908663 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-t9847"] Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.926627 4933 scope.go:117] "RemoveContainer" containerID="74e7d894f4d50b76e058dc85285c389af75f1ede3c4fe4c05f76f5f79c8e8739" Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.947447 4933 scope.go:117] "RemoveContainer" containerID="ab98eba2cb9192a580a39042e1c7ad09e005ecaab2f74d692a27de8885a92a5d" Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.968805 4933 scope.go:117] "RemoveContainer" containerID="519e2601237770354a92ddd33e9eeb8e592ed81c3c8a45fd6640e296a38514cd" Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.988278 4933 scope.go:117] "RemoveContainer" containerID="74e7d894f4d50b76e058dc85285c389af75f1ede3c4fe4c05f76f5f79c8e8739" Jan 22 07:09:42 crc kubenswrapper[4933]: E0122 07:09:42.988956 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"74e7d894f4d50b76e058dc85285c389af75f1ede3c4fe4c05f76f5f79c8e8739\": container with ID starting with 74e7d894f4d50b76e058dc85285c389af75f1ede3c4fe4c05f76f5f79c8e8739 not found: ID does not exist" containerID="74e7d894f4d50b76e058dc85285c389af75f1ede3c4fe4c05f76f5f79c8e8739" Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.989017 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74e7d894f4d50b76e058dc85285c389af75f1ede3c4fe4c05f76f5f79c8e8739"} err="failed to get container status \"74e7d894f4d50b76e058dc85285c389af75f1ede3c4fe4c05f76f5f79c8e8739\": rpc error: code = NotFound desc = could not find container \"74e7d894f4d50b76e058dc85285c389af75f1ede3c4fe4c05f76f5f79c8e8739\": container with ID starting with 74e7d894f4d50b76e058dc85285c389af75f1ede3c4fe4c05f76f5f79c8e8739 not found: ID does not exist" Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.989061 4933 scope.go:117] "RemoveContainer" containerID="ab98eba2cb9192a580a39042e1c7ad09e005ecaab2f74d692a27de8885a92a5d" Jan 22 07:09:42 crc kubenswrapper[4933]: E0122 07:09:42.989779 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ab98eba2cb9192a580a39042e1c7ad09e005ecaab2f74d692a27de8885a92a5d\": container with ID starting with ab98eba2cb9192a580a39042e1c7ad09e005ecaab2f74d692a27de8885a92a5d not found: ID does not exist" containerID="ab98eba2cb9192a580a39042e1c7ad09e005ecaab2f74d692a27de8885a92a5d" Jan 22 
07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.989856 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ab98eba2cb9192a580a39042e1c7ad09e005ecaab2f74d692a27de8885a92a5d"} err="failed to get container status \"ab98eba2cb9192a580a39042e1c7ad09e005ecaab2f74d692a27de8885a92a5d\": rpc error: code = NotFound desc = could not find container \"ab98eba2cb9192a580a39042e1c7ad09e005ecaab2f74d692a27de8885a92a5d\": container with ID starting with ab98eba2cb9192a580a39042e1c7ad09e005ecaab2f74d692a27de8885a92a5d not found: ID does not exist" Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.989916 4933 scope.go:117] "RemoveContainer" containerID="519e2601237770354a92ddd33e9eeb8e592ed81c3c8a45fd6640e296a38514cd" Jan 22 07:09:42 crc kubenswrapper[4933]: E0122 07:09:42.990392 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"519e2601237770354a92ddd33e9eeb8e592ed81c3c8a45fd6640e296a38514cd\": container with ID starting with 519e2601237770354a92ddd33e9eeb8e592ed81c3c8a45fd6640e296a38514cd not found: ID does not exist" containerID="519e2601237770354a92ddd33e9eeb8e592ed81c3c8a45fd6640e296a38514cd" Jan 22 07:09:42 crc kubenswrapper[4933]: I0122 07:09:42.990425 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"519e2601237770354a92ddd33e9eeb8e592ed81c3c8a45fd6640e296a38514cd"} err="failed to get container status \"519e2601237770354a92ddd33e9eeb8e592ed81c3c8a45fd6640e296a38514cd\": rpc error: code = NotFound desc = could not find container \"519e2601237770354a92ddd33e9eeb8e592ed81c3c8a45fd6640e296a38514cd\": container with ID starting with 519e2601237770354a92ddd33e9eeb8e592ed81c3c8a45fd6640e296a38514cd not found: ID does not exist" Jan 22 07:09:44 crc kubenswrapper[4933]: I0122 07:09:44.508302 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="63835767-bf4f-467e-858c-6c6709407ac6" path="/var/lib/kubelet/pods/63835767-bf4f-467e-858c-6c6709407ac6/volumes" Jan 22 07:09:44 crc kubenswrapper[4933]: I0122 07:09:44.510198 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="99cadc0f-af5f-455a-8abe-07a207262b46" path="/var/lib/kubelet/pods/99cadc0f-af5f-455a-8abe-07a207262b46/volumes" Jan 22 07:10:10 crc kubenswrapper[4933]: I0122 07:10:10.942927 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 07:10:10 crc kubenswrapper[4933]: I0122 07:10:10.943501 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 07:10:40 crc kubenswrapper[4933]: I0122 07:10:40.943407 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 07:10:40 crc kubenswrapper[4933]: I0122 07:10:40.943987 4933 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 07:10:40 crc kubenswrapper[4933]: I0122 07:10:40.944048 4933 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" Jan 22 07:10:40 crc kubenswrapper[4933]: I0122 07:10:40.944783 4933 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"95f0ca789178e77053405c316da5ad73a9ba931191b3cf740ca8ae2078616f25"} pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 07:10:40 crc kubenswrapper[4933]: I0122 07:10:40.944852 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" containerID="cri-o://95f0ca789178e77053405c316da5ad73a9ba931191b3cf740ca8ae2078616f25" gracePeriod=600 Jan 22 07:10:41 crc kubenswrapper[4933]: I0122 07:10:41.344537 4933 generic.go:334] "Generic (PLEG): container finished" podID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerID="95f0ca789178e77053405c316da5ad73a9ba931191b3cf740ca8ae2078616f25" exitCode=0 Jan 22 07:10:41 crc kubenswrapper[4933]: I0122 07:10:41.344605 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerDied","Data":"95f0ca789178e77053405c316da5ad73a9ba931191b3cf740ca8ae2078616f25"} Jan 22 07:10:41 crc kubenswrapper[4933]: I0122 07:10:41.344913 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerStarted","Data":"8d0ff8f158acf8d88510b1fddaf0b35b7a3fb6e84561545d6c60a78c2e36fc92"} Jan 22 07:10:41 crc kubenswrapper[4933]: I0122 07:10:41.344941 4933 scope.go:117] "RemoveContainer" containerID="d50102539d4e07b60779f4528e926809a62f0f1c3d74b4eadbe38466d0573cb2" Jan 22 07:11:03 crc kubenswrapper[4933]: I0122 07:11:03.980060 4933 scope.go:117] "RemoveContainer" containerID="0972bb802bd4712043d9bca6bc6af6f2bfc714aed6fedc420d92641019e3dc0d" Jan 22 07:12:45 crc kubenswrapper[4933]: I0122 07:12:45.260106 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-copy-data"] Jan 22 07:12:45 crc kubenswrapper[4933]: E0122 07:12:45.260942 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99cadc0f-af5f-455a-8abe-07a207262b46" containerName="extract-utilities" Jan 22 07:12:45 crc kubenswrapper[4933]: I0122 07:12:45.260957 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="99cadc0f-af5f-455a-8abe-07a207262b46" containerName="extract-utilities" Jan 22 07:12:45 crc kubenswrapper[4933]: E0122 07:12:45.260973 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63835767-bf4f-467e-858c-6c6709407ac6" containerName="registry-server" Jan 22 07:12:45 crc kubenswrapper[4933]: I0122 07:12:45.260982 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="63835767-bf4f-467e-858c-6c6709407ac6" containerName="registry-server" Jan 22 
07:12:45 crc kubenswrapper[4933]: E0122 07:12:45.260992 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d06718f-3acc-4a7c-b279-9813d3997d8b" containerName="extract-content" Jan 22 07:12:45 crc kubenswrapper[4933]: I0122 07:12:45.261000 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d06718f-3acc-4a7c-b279-9813d3997d8b" containerName="extract-content" Jan 22 07:12:45 crc kubenswrapper[4933]: E0122 07:12:45.261018 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99cadc0f-af5f-455a-8abe-07a207262b46" containerName="extract-content" Jan 22 07:12:45 crc kubenswrapper[4933]: I0122 07:12:45.261024 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="99cadc0f-af5f-455a-8abe-07a207262b46" containerName="extract-content" Jan 22 07:12:45 crc kubenswrapper[4933]: E0122 07:12:45.261033 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d06718f-3acc-4a7c-b279-9813d3997d8b" containerName="registry-server" Jan 22 07:12:45 crc kubenswrapper[4933]: I0122 07:12:45.261039 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d06718f-3acc-4a7c-b279-9813d3997d8b" containerName="registry-server" Jan 22 07:12:45 crc kubenswrapper[4933]: E0122 07:12:45.261057 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63835767-bf4f-467e-858c-6c6709407ac6" containerName="extract-utilities" Jan 22 07:12:45 crc kubenswrapper[4933]: I0122 07:12:45.261063 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="63835767-bf4f-467e-858c-6c6709407ac6" containerName="extract-utilities" Jan 22 07:12:45 crc kubenswrapper[4933]: E0122 07:12:45.261090 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d06718f-3acc-4a7c-b279-9813d3997d8b" containerName="extract-utilities" Jan 22 07:12:45 crc kubenswrapper[4933]: I0122 07:12:45.261098 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d06718f-3acc-4a7c-b279-9813d3997d8b" containerName="extract-utilities" Jan 22 07:12:45 crc kubenswrapper[4933]: E0122 07:12:45.261113 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63835767-bf4f-467e-858c-6c6709407ac6" containerName="extract-content" Jan 22 07:12:45 crc kubenswrapper[4933]: I0122 07:12:45.261121 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="63835767-bf4f-467e-858c-6c6709407ac6" containerName="extract-content" Jan 22 07:12:45 crc kubenswrapper[4933]: E0122 07:12:45.261135 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99cadc0f-af5f-455a-8abe-07a207262b46" containerName="registry-server" Jan 22 07:12:45 crc kubenswrapper[4933]: I0122 07:12:45.261141 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="99cadc0f-af5f-455a-8abe-07a207262b46" containerName="registry-server" Jan 22 07:12:45 crc kubenswrapper[4933]: I0122 07:12:45.261278 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d06718f-3acc-4a7c-b279-9813d3997d8b" containerName="registry-server" Jan 22 07:12:45 crc kubenswrapper[4933]: I0122 07:12:45.261289 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="63835767-bf4f-467e-858c-6c6709407ac6" containerName="registry-server" Jan 22 07:12:45 crc kubenswrapper[4933]: I0122 07:12:45.261298 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="99cadc0f-af5f-455a-8abe-07a207262b46" containerName="registry-server" Jan 22 07:12:45 crc kubenswrapper[4933]: I0122 07:12:45.261768 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-copy-data" Jan 22 07:12:45 crc kubenswrapper[4933]: I0122 07:12:45.264183 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-v4vkg" Jan 22 07:12:45 crc kubenswrapper[4933]: I0122 07:12:45.279783 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-copy-data"] Jan 22 07:12:45 crc kubenswrapper[4933]: I0122 07:12:45.371446 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-e1980e76-c55b-4538-9de0-2dc1b8f8081b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e1980e76-c55b-4538-9de0-2dc1b8f8081b\") pod \"mariadb-copy-data\" (UID: \"17c00104-0b53-4111-a850-25ee465eb8ad\") " pod="openstack/mariadb-copy-data" Jan 22 07:12:45 crc kubenswrapper[4933]: I0122 07:12:45.371591 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lsr4d\" (UniqueName: \"kubernetes.io/projected/17c00104-0b53-4111-a850-25ee465eb8ad-kube-api-access-lsr4d\") pod \"mariadb-copy-data\" (UID: \"17c00104-0b53-4111-a850-25ee465eb8ad\") " pod="openstack/mariadb-copy-data" Jan 22 07:12:45 crc kubenswrapper[4933]: I0122 07:12:45.472528 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-e1980e76-c55b-4538-9de0-2dc1b8f8081b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e1980e76-c55b-4538-9de0-2dc1b8f8081b\") pod \"mariadb-copy-data\" (UID: \"17c00104-0b53-4111-a850-25ee465eb8ad\") " pod="openstack/mariadb-copy-data" Jan 22 07:12:45 crc kubenswrapper[4933]: I0122 07:12:45.472642 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lsr4d\" (UniqueName: \"kubernetes.io/projected/17c00104-0b53-4111-a850-25ee465eb8ad-kube-api-access-lsr4d\") pod \"mariadb-copy-data\" (UID: \"17c00104-0b53-4111-a850-25ee465eb8ad\") " pod="openstack/mariadb-copy-data" Jan 22 07:12:45 crc kubenswrapper[4933]: I0122 07:12:45.475197 4933 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 22 07:12:45 crc kubenswrapper[4933]: I0122 07:12:45.475234 4933 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-e1980e76-c55b-4538-9de0-2dc1b8f8081b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e1980e76-c55b-4538-9de0-2dc1b8f8081b\") pod \"mariadb-copy-data\" (UID: \"17c00104-0b53-4111-a850-25ee465eb8ad\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/83959b2129d95824b1a43aabc7c6292656bb2d0dc4dd047c30f8716621fe32dc/globalmount\"" pod="openstack/mariadb-copy-data" Jan 22 07:12:45 crc kubenswrapper[4933]: I0122 07:12:45.499518 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lsr4d\" (UniqueName: \"kubernetes.io/projected/17c00104-0b53-4111-a850-25ee465eb8ad-kube-api-access-lsr4d\") pod \"mariadb-copy-data\" (UID: \"17c00104-0b53-4111-a850-25ee465eb8ad\") " pod="openstack/mariadb-copy-data" Jan 22 07:12:45 crc kubenswrapper[4933]: I0122 07:12:45.525574 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-e1980e76-c55b-4538-9de0-2dc1b8f8081b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e1980e76-c55b-4538-9de0-2dc1b8f8081b\") pod \"mariadb-copy-data\" (UID: \"17c00104-0b53-4111-a850-25ee465eb8ad\") " pod="openstack/mariadb-copy-data" Jan 22 07:12:45 crc kubenswrapper[4933]: I0122 07:12:45.587532 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-copy-data" Jan 22 07:12:46 crc kubenswrapper[4933]: I0122 07:12:46.071331 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-copy-data"] Jan 22 07:12:46 crc kubenswrapper[4933]: I0122 07:12:46.317613 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-copy-data" event={"ID":"17c00104-0b53-4111-a850-25ee465eb8ad","Type":"ContainerStarted","Data":"70a46a534f0aa1f4752023a3906c243939bcc05194d3249eb3b0912a75390f15"} Jan 22 07:12:46 crc kubenswrapper[4933]: I0122 07:12:46.317932 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-copy-data" event={"ID":"17c00104-0b53-4111-a850-25ee465eb8ad","Type":"ContainerStarted","Data":"5c3f5972c4666b76d3019c31d1144efefcdee614982c03eeb5cc9bbd6754b4b6"} Jan 22 07:12:49 crc kubenswrapper[4933]: I0122 07:12:49.287706 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mariadb-copy-data" podStartSLOduration=5.287678212 podStartE2EDuration="5.287678212s" podCreationTimestamp="2026-01-22 07:12:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:12:46.350324032 +0000 UTC m=+5214.187449385" watchObservedRunningTime="2026-01-22 07:12:49.287678212 +0000 UTC m=+5217.124803595" Jan 22 07:12:49 crc kubenswrapper[4933]: I0122 07:12:49.291134 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client"] Jan 22 07:12:49 crc kubenswrapper[4933]: I0122 07:12:49.292189 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client" Jan 22 07:12:49 crc kubenswrapper[4933]: I0122 07:12:49.302667 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Jan 22 07:12:49 crc kubenswrapper[4933]: I0122 07:12:49.439504 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2b6jn\" (UniqueName: \"kubernetes.io/projected/e4ab7d44-1f71-4b36-9212-bf830c609ef3-kube-api-access-2b6jn\") pod \"mariadb-client\" (UID: \"e4ab7d44-1f71-4b36-9212-bf830c609ef3\") " pod="openstack/mariadb-client" Jan 22 07:12:49 crc kubenswrapper[4933]: I0122 07:12:49.540760 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2b6jn\" (UniqueName: \"kubernetes.io/projected/e4ab7d44-1f71-4b36-9212-bf830c609ef3-kube-api-access-2b6jn\") pod \"mariadb-client\" (UID: \"e4ab7d44-1f71-4b36-9212-bf830c609ef3\") " pod="openstack/mariadb-client" Jan 22 07:12:49 crc kubenswrapper[4933]: I0122 07:12:49.566477 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2b6jn\" (UniqueName: \"kubernetes.io/projected/e4ab7d44-1f71-4b36-9212-bf830c609ef3-kube-api-access-2b6jn\") pod \"mariadb-client\" (UID: \"e4ab7d44-1f71-4b36-9212-bf830c609ef3\") " pod="openstack/mariadb-client" Jan 22 07:12:49 crc kubenswrapper[4933]: I0122 07:12:49.620742 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Jan 22 07:12:50 crc kubenswrapper[4933]: I0122 07:12:50.083649 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Jan 22 07:12:50 crc kubenswrapper[4933]: I0122 07:12:50.353117 4933 generic.go:334] "Generic (PLEG): container finished" podID="e4ab7d44-1f71-4b36-9212-bf830c609ef3" containerID="ec113587b5bfed50751f8fa7295d74522bfa44dcde3e2c5a134daf030b7d26d3" exitCode=0 Jan 22 07:12:50 crc kubenswrapper[4933]: I0122 07:12:50.353165 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"e4ab7d44-1f71-4b36-9212-bf830c609ef3","Type":"ContainerDied","Data":"ec113587b5bfed50751f8fa7295d74522bfa44dcde3e2c5a134daf030b7d26d3"} Jan 22 07:12:50 crc kubenswrapper[4933]: I0122 07:12:50.353193 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"e4ab7d44-1f71-4b36-9212-bf830c609ef3","Type":"ContainerStarted","Data":"d4dff6b527d4ff897270df889db95dd0fc076122e4d8162bf3e5b0d47d0539a4"} Jan 22 07:12:51 crc kubenswrapper[4933]: I0122 07:12:51.643809 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client" Jan 22 07:12:51 crc kubenswrapper[4933]: I0122 07:12:51.668863 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client_e4ab7d44-1f71-4b36-9212-bf830c609ef3/mariadb-client/0.log" Jan 22 07:12:51 crc kubenswrapper[4933]: I0122 07:12:51.700331 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client"] Jan 22 07:12:51 crc kubenswrapper[4933]: I0122 07:12:51.705499 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client"] Jan 22 07:12:51 crc kubenswrapper[4933]: I0122 07:12:51.776692 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2b6jn\" (UniqueName: \"kubernetes.io/projected/e4ab7d44-1f71-4b36-9212-bf830c609ef3-kube-api-access-2b6jn\") pod \"e4ab7d44-1f71-4b36-9212-bf830c609ef3\" (UID: \"e4ab7d44-1f71-4b36-9212-bf830c609ef3\") " Jan 22 07:12:51 crc kubenswrapper[4933]: I0122 07:12:51.782227 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e4ab7d44-1f71-4b36-9212-bf830c609ef3-kube-api-access-2b6jn" (OuterVolumeSpecName: "kube-api-access-2b6jn") pod "e4ab7d44-1f71-4b36-9212-bf830c609ef3" (UID: "e4ab7d44-1f71-4b36-9212-bf830c609ef3"). InnerVolumeSpecName "kube-api-access-2b6jn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:12:51 crc kubenswrapper[4933]: I0122 07:12:51.850757 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client"] Jan 22 07:12:51 crc kubenswrapper[4933]: E0122 07:12:51.851055 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4ab7d44-1f71-4b36-9212-bf830c609ef3" containerName="mariadb-client" Jan 22 07:12:51 crc kubenswrapper[4933]: I0122 07:12:51.851088 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4ab7d44-1f71-4b36-9212-bf830c609ef3" containerName="mariadb-client" Jan 22 07:12:51 crc kubenswrapper[4933]: I0122 07:12:51.851260 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4ab7d44-1f71-4b36-9212-bf830c609ef3" containerName="mariadb-client" Jan 22 07:12:51 crc kubenswrapper[4933]: I0122 07:12:51.851825 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client" Jan 22 07:12:51 crc kubenswrapper[4933]: I0122 07:12:51.858444 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Jan 22 07:12:51 crc kubenswrapper[4933]: I0122 07:12:51.878934 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2b6jn\" (UniqueName: \"kubernetes.io/projected/e4ab7d44-1f71-4b36-9212-bf830c609ef3-kube-api-access-2b6jn\") on node \"crc\" DevicePath \"\"" Jan 22 07:12:51 crc kubenswrapper[4933]: I0122 07:12:51.980921 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6vrjd\" (UniqueName: \"kubernetes.io/projected/80821c23-e8a0-4d09-9476-c35a5d767705-kube-api-access-6vrjd\") pod \"mariadb-client\" (UID: \"80821c23-e8a0-4d09-9476-c35a5d767705\") " pod="openstack/mariadb-client" Jan 22 07:12:52 crc kubenswrapper[4933]: I0122 07:12:52.082783 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6vrjd\" (UniqueName: \"kubernetes.io/projected/80821c23-e8a0-4d09-9476-c35a5d767705-kube-api-access-6vrjd\") pod \"mariadb-client\" (UID: \"80821c23-e8a0-4d09-9476-c35a5d767705\") " pod="openstack/mariadb-client" Jan 22 07:12:52 crc kubenswrapper[4933]: I0122 07:12:52.102918 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6vrjd\" (UniqueName: \"kubernetes.io/projected/80821c23-e8a0-4d09-9476-c35a5d767705-kube-api-access-6vrjd\") pod \"mariadb-client\" (UID: \"80821c23-e8a0-4d09-9476-c35a5d767705\") " pod="openstack/mariadb-client" Jan 22 07:12:52 crc kubenswrapper[4933]: I0122 07:12:52.173692 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Jan 22 07:12:52 crc kubenswrapper[4933]: I0122 07:12:52.385368 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d4dff6b527d4ff897270df889db95dd0fc076122e4d8162bf3e5b0d47d0539a4" Jan 22 07:12:52 crc kubenswrapper[4933]: I0122 07:12:52.385455 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client" Jan 22 07:12:52 crc kubenswrapper[4933]: I0122 07:12:52.402488 4933 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/mariadb-client" oldPodUID="e4ab7d44-1f71-4b36-9212-bf830c609ef3" podUID="80821c23-e8a0-4d09-9476-c35a5d767705" Jan 22 07:12:52 crc kubenswrapper[4933]: I0122 07:12:52.413586 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Jan 22 07:12:52 crc kubenswrapper[4933]: I0122 07:12:52.512677 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e4ab7d44-1f71-4b36-9212-bf830c609ef3" path="/var/lib/kubelet/pods/e4ab7d44-1f71-4b36-9212-bf830c609ef3/volumes" Jan 22 07:12:53 crc kubenswrapper[4933]: I0122 07:12:53.393295 4933 generic.go:334] "Generic (PLEG): container finished" podID="80821c23-e8a0-4d09-9476-c35a5d767705" containerID="45f34f6b3c271c0eb074fa62ad30f465b0f43458294c117d97e75e350c4dbdb3" exitCode=0 Jan 22 07:12:53 crc kubenswrapper[4933]: I0122 07:12:53.393395 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"80821c23-e8a0-4d09-9476-c35a5d767705","Type":"ContainerDied","Data":"45f34f6b3c271c0eb074fa62ad30f465b0f43458294c117d97e75e350c4dbdb3"} Jan 22 07:12:53 crc kubenswrapper[4933]: I0122 07:12:53.393515 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"80821c23-e8a0-4d09-9476-c35a5d767705","Type":"ContainerStarted","Data":"6e6d75671e4ce30df8d0138a98fb01fd6a0c3d760024dab58324965bba2a2188"} Jan 22 07:12:54 crc kubenswrapper[4933]: I0122 07:12:54.772693 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Jan 22 07:12:54 crc kubenswrapper[4933]: I0122 07:12:54.801524 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client_80821c23-e8a0-4d09-9476-c35a5d767705/mariadb-client/0.log" Jan 22 07:12:54 crc kubenswrapper[4933]: I0122 07:12:54.822545 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client"] Jan 22 07:12:54 crc kubenswrapper[4933]: I0122 07:12:54.825623 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6vrjd\" (UniqueName: \"kubernetes.io/projected/80821c23-e8a0-4d09-9476-c35a5d767705-kube-api-access-6vrjd\") pod \"80821c23-e8a0-4d09-9476-c35a5d767705\" (UID: \"80821c23-e8a0-4d09-9476-c35a5d767705\") " Jan 22 07:12:54 crc kubenswrapper[4933]: I0122 07:12:54.828003 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client"] Jan 22 07:12:54 crc kubenswrapper[4933]: I0122 07:12:54.847283 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/80821c23-e8a0-4d09-9476-c35a5d767705-kube-api-access-6vrjd" (OuterVolumeSpecName: "kube-api-access-6vrjd") pod "80821c23-e8a0-4d09-9476-c35a5d767705" (UID: "80821c23-e8a0-4d09-9476-c35a5d767705"). InnerVolumeSpecName "kube-api-access-6vrjd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:12:54 crc kubenswrapper[4933]: I0122 07:12:54.927247 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6vrjd\" (UniqueName: \"kubernetes.io/projected/80821c23-e8a0-4d09-9476-c35a5d767705-kube-api-access-6vrjd\") on node \"crc\" DevicePath \"\"" Jan 22 07:12:55 crc kubenswrapper[4933]: I0122 07:12:55.415809 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6e6d75671e4ce30df8d0138a98fb01fd6a0c3d760024dab58324965bba2a2188" Jan 22 07:12:55 crc kubenswrapper[4933]: I0122 07:12:55.415874 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Jan 22 07:12:56 crc kubenswrapper[4933]: I0122 07:12:56.503331 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="80821c23-e8a0-4d09-9476-c35a5d767705" path="/var/lib/kubelet/pods/80821c23-e8a0-4d09-9476-c35a5d767705/volumes" Jan 22 07:13:04 crc kubenswrapper[4933]: I0122 07:13:04.069844 4933 scope.go:117] "RemoveContainer" containerID="fe82a5213f67e01e95def3bb6d552cc7cd565448c238f9cdacc3041d48404c95" Jan 22 07:13:04 crc kubenswrapper[4933]: I0122 07:13:04.103006 4933 scope.go:117] "RemoveContainer" containerID="a58a7452f96be96db72f32cfed0cfe8a581186cc350e4b0fe45ff219ff3442a1" Jan 22 07:13:10 crc kubenswrapper[4933]: I0122 07:13:10.943797 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 07:13:10 crc kubenswrapper[4933]: I0122 07:13:10.944388 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.390140 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 22 07:13:32 crc kubenswrapper[4933]: E0122 07:13:32.390807 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80821c23-e8a0-4d09-9476-c35a5d767705" containerName="mariadb-client" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.390881 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="80821c23-e8a0-4d09-9476-c35a5d767705" containerName="mariadb-client" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.391014 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="80821c23-e8a0-4d09-9476-c35a5d767705" containerName="mariadb-client" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.391774 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.394346 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.394519 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-xkhqr" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.394866 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.398725 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.398731 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.408197 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-1"] Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.409696 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-1" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.417166 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.423083 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-2"] Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.424709 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-2" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.430469 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-1"] Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.441191 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-2"] Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.445791 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1333917e-870b-4157-8a9b-8e799f5ce1e9-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"1333917e-870b-4157-8a9b-8e799f5ce1e9\") " pod="openstack/ovsdbserver-nb-0" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.445871 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1333917e-870b-4157-8a9b-8e799f5ce1e9-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"1333917e-870b-4157-8a9b-8e799f5ce1e9\") " pod="openstack/ovsdbserver-nb-0" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.445925 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h4shj\" (UniqueName: \"kubernetes.io/projected/1333917e-870b-4157-8a9b-8e799f5ce1e9-kube-api-access-h4shj\") pod \"ovsdbserver-nb-0\" (UID: \"1333917e-870b-4157-8a9b-8e799f5ce1e9\") " pod="openstack/ovsdbserver-nb-0" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.445974 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1333917e-870b-4157-8a9b-8e799f5ce1e9-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"1333917e-870b-4157-8a9b-8e799f5ce1e9\") " 
pod="openstack/ovsdbserver-nb-0" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.446012 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-7d4c7565-ca99-4604-94a3-2fc293258fe7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7d4c7565-ca99-4604-94a3-2fc293258fe7\") pod \"ovsdbserver-nb-0\" (UID: \"1333917e-870b-4157-8a9b-8e799f5ce1e9\") " pod="openstack/ovsdbserver-nb-0" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.446048 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1333917e-870b-4157-8a9b-8e799f5ce1e9-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"1333917e-870b-4157-8a9b-8e799f5ce1e9\") " pod="openstack/ovsdbserver-nb-0" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.446114 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1333917e-870b-4157-8a9b-8e799f5ce1e9-config\") pod \"ovsdbserver-nb-0\" (UID: \"1333917e-870b-4157-8a9b-8e799f5ce1e9\") " pod="openstack/ovsdbserver-nb-0" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.446153 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1333917e-870b-4157-8a9b-8e799f5ce1e9-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"1333917e-870b-4157-8a9b-8e799f5ce1e9\") " pod="openstack/ovsdbserver-nb-0" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.548488 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7d8d8036-af27-4990-972d-42a2137818fe-scripts\") pod \"ovsdbserver-nb-1\" (UID: \"7d8d8036-af27-4990-972d-42a2137818fe\") " pod="openstack/ovsdbserver-nb-1" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.548541 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-7d4c7565-ca99-4604-94a3-2fc293258fe7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7d4c7565-ca99-4604-94a3-2fc293258fe7\") pod \"ovsdbserver-nb-0\" (UID: \"1333917e-870b-4157-8a9b-8e799f5ce1e9\") " pod="openstack/ovsdbserver-nb-0" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.548785 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/66c1626a-9ba7-4e87-ba85-60e7b300579c-scripts\") pod \"ovsdbserver-nb-2\" (UID: \"66c1626a-9ba7-4e87-ba85-60e7b300579c\") " pod="openstack/ovsdbserver-nb-2" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.548811 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1333917e-870b-4157-8a9b-8e799f5ce1e9-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"1333917e-870b-4157-8a9b-8e799f5ce1e9\") " pod="openstack/ovsdbserver-nb-0" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.548842 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/66c1626a-9ba7-4e87-ba85-60e7b300579c-ovsdb-rundir\") pod \"ovsdbserver-nb-2\" (UID: \"66c1626a-9ba7-4e87-ba85-60e7b300579c\") " pod="openstack/ovsdbserver-nb-2" Jan 22 07:13:32 crc kubenswrapper[4933]: 
I0122 07:13:32.548899 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-6f8b349e-2d57-49ed-80a8-ac6b7412a866\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f8b349e-2d57-49ed-80a8-ac6b7412a866\") pod \"ovsdbserver-nb-2\" (UID: \"66c1626a-9ba7-4e87-ba85-60e7b300579c\") " pod="openstack/ovsdbserver-nb-2" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.548929 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1333917e-870b-4157-8a9b-8e799f5ce1e9-config\") pod \"ovsdbserver-nb-0\" (UID: \"1333917e-870b-4157-8a9b-8e799f5ce1e9\") " pod="openstack/ovsdbserver-nb-0" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.548963 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1333917e-870b-4157-8a9b-8e799f5ce1e9-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"1333917e-870b-4157-8a9b-8e799f5ce1e9\") " pod="openstack/ovsdbserver-nb-0" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.548996 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-58659c86-361b-4d0a-bf24-7c13a7ef97d4\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-58659c86-361b-4d0a-bf24-7c13a7ef97d4\") pod \"ovsdbserver-nb-1\" (UID: \"7d8d8036-af27-4990-972d-42a2137818fe\") " pod="openstack/ovsdbserver-nb-1" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.549017 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/7d8d8036-af27-4990-972d-42a2137818fe-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-1\" (UID: \"7d8d8036-af27-4990-972d-42a2137818fe\") " pod="openstack/ovsdbserver-nb-1" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.549038 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7d8d8036-af27-4990-972d-42a2137818fe-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-1\" (UID: \"7d8d8036-af27-4990-972d-42a2137818fe\") " pod="openstack/ovsdbserver-nb-1" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.549089 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/7d8d8036-af27-4990-972d-42a2137818fe-ovsdb-rundir\") pod \"ovsdbserver-nb-1\" (UID: \"7d8d8036-af27-4990-972d-42a2137818fe\") " pod="openstack/ovsdbserver-nb-1" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.549114 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1333917e-870b-4157-8a9b-8e799f5ce1e9-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"1333917e-870b-4157-8a9b-8e799f5ce1e9\") " pod="openstack/ovsdbserver-nb-0" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.549143 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d8d8036-af27-4990-972d-42a2137818fe-config\") pod \"ovsdbserver-nb-1\" (UID: \"7d8d8036-af27-4990-972d-42a2137818fe\") " pod="openstack/ovsdbserver-nb-1" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.549166 4933 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/66c1626a-9ba7-4e87-ba85-60e7b300579c-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-2\" (UID: \"66c1626a-9ba7-4e87-ba85-60e7b300579c\") " pod="openstack/ovsdbserver-nb-2" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.549195 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1333917e-870b-4157-8a9b-8e799f5ce1e9-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"1333917e-870b-4157-8a9b-8e799f5ce1e9\") " pod="openstack/ovsdbserver-nb-0" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.549254 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/66c1626a-9ba7-4e87-ba85-60e7b300579c-config\") pod \"ovsdbserver-nb-2\" (UID: \"66c1626a-9ba7-4e87-ba85-60e7b300579c\") " pod="openstack/ovsdbserver-nb-2" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.549275 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/66c1626a-9ba7-4e87-ba85-60e7b300579c-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-2\" (UID: \"66c1626a-9ba7-4e87-ba85-60e7b300579c\") " pod="openstack/ovsdbserver-nb-2" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.549314 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d8d8036-af27-4990-972d-42a2137818fe-combined-ca-bundle\") pod \"ovsdbserver-nb-1\" (UID: \"7d8d8036-af27-4990-972d-42a2137818fe\") " pod="openstack/ovsdbserver-nb-1" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.549353 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h4shj\" (UniqueName: \"kubernetes.io/projected/1333917e-870b-4157-8a9b-8e799f5ce1e9-kube-api-access-h4shj\") pod \"ovsdbserver-nb-0\" (UID: \"1333917e-870b-4157-8a9b-8e799f5ce1e9\") " pod="openstack/ovsdbserver-nb-0" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.549385 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66c1626a-9ba7-4e87-ba85-60e7b300579c-combined-ca-bundle\") pod \"ovsdbserver-nb-2\" (UID: \"66c1626a-9ba7-4e87-ba85-60e7b300579c\") " pod="openstack/ovsdbserver-nb-2" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.549429 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zvq87\" (UniqueName: \"kubernetes.io/projected/7d8d8036-af27-4990-972d-42a2137818fe-kube-api-access-zvq87\") pod \"ovsdbserver-nb-1\" (UID: \"7d8d8036-af27-4990-972d-42a2137818fe\") " pod="openstack/ovsdbserver-nb-1" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.549454 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1333917e-870b-4157-8a9b-8e799f5ce1e9-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"1333917e-870b-4157-8a9b-8e799f5ce1e9\") " pod="openstack/ovsdbserver-nb-0" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.549479 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cs6tw\" 
(UniqueName: \"kubernetes.io/projected/66c1626a-9ba7-4e87-ba85-60e7b300579c-kube-api-access-cs6tw\") pod \"ovsdbserver-nb-2\" (UID: \"66c1626a-9ba7-4e87-ba85-60e7b300579c\") " pod="openstack/ovsdbserver-nb-2" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.551163 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1333917e-870b-4157-8a9b-8e799f5ce1e9-config\") pod \"ovsdbserver-nb-0\" (UID: \"1333917e-870b-4157-8a9b-8e799f5ce1e9\") " pod="openstack/ovsdbserver-nb-0" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.551781 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1333917e-870b-4157-8a9b-8e799f5ce1e9-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"1333917e-870b-4157-8a9b-8e799f5ce1e9\") " pod="openstack/ovsdbserver-nb-0" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.552211 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1333917e-870b-4157-8a9b-8e799f5ce1e9-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"1333917e-870b-4157-8a9b-8e799f5ce1e9\") " pod="openstack/ovsdbserver-nb-0" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.554843 4933 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.554892 4933 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-7d4c7565-ca99-4604-94a3-2fc293258fe7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7d4c7565-ca99-4604-94a3-2fc293258fe7\") pod \"ovsdbserver-nb-0\" (UID: \"1333917e-870b-4157-8a9b-8e799f5ce1e9\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/7209c6d76ae313e1fbe91531e3e3e499892a37f3b546b345470ab9cb2c3df852/globalmount\"" pod="openstack/ovsdbserver-nb-0" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.557381 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1333917e-870b-4157-8a9b-8e799f5ce1e9-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"1333917e-870b-4157-8a9b-8e799f5ce1e9\") " pod="openstack/ovsdbserver-nb-0" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.557530 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1333917e-870b-4157-8a9b-8e799f5ce1e9-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"1333917e-870b-4157-8a9b-8e799f5ce1e9\") " pod="openstack/ovsdbserver-nb-0" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.563989 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1333917e-870b-4157-8a9b-8e799f5ce1e9-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"1333917e-870b-4157-8a9b-8e799f5ce1e9\") " pod="openstack/ovsdbserver-nb-0" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.571194 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h4shj\" (UniqueName: \"kubernetes.io/projected/1333917e-870b-4157-8a9b-8e799f5ce1e9-kube-api-access-h4shj\") pod \"ovsdbserver-nb-0\" (UID: \"1333917e-870b-4157-8a9b-8e799f5ce1e9\") " pod="openstack/ovsdbserver-nb-0" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 
07:13:32.588011 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-7d4c7565-ca99-4604-94a3-2fc293258fe7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7d4c7565-ca99-4604-94a3-2fc293258fe7\") pod \"ovsdbserver-nb-0\" (UID: \"1333917e-870b-4157-8a9b-8e799f5ce1e9\") " pod="openstack/ovsdbserver-nb-0" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.651150 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zvq87\" (UniqueName: \"kubernetes.io/projected/7d8d8036-af27-4990-972d-42a2137818fe-kube-api-access-zvq87\") pod \"ovsdbserver-nb-1\" (UID: \"7d8d8036-af27-4990-972d-42a2137818fe\") " pod="openstack/ovsdbserver-nb-1" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.651209 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cs6tw\" (UniqueName: \"kubernetes.io/projected/66c1626a-9ba7-4e87-ba85-60e7b300579c-kube-api-access-cs6tw\") pod \"ovsdbserver-nb-2\" (UID: \"66c1626a-9ba7-4e87-ba85-60e7b300579c\") " pod="openstack/ovsdbserver-nb-2" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.651247 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7d8d8036-af27-4990-972d-42a2137818fe-scripts\") pod \"ovsdbserver-nb-1\" (UID: \"7d8d8036-af27-4990-972d-42a2137818fe\") " pod="openstack/ovsdbserver-nb-1" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.651287 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/66c1626a-9ba7-4e87-ba85-60e7b300579c-scripts\") pod \"ovsdbserver-nb-2\" (UID: \"66c1626a-9ba7-4e87-ba85-60e7b300579c\") " pod="openstack/ovsdbserver-nb-2" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.651318 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/66c1626a-9ba7-4e87-ba85-60e7b300579c-ovsdb-rundir\") pod \"ovsdbserver-nb-2\" (UID: \"66c1626a-9ba7-4e87-ba85-60e7b300579c\") " pod="openstack/ovsdbserver-nb-2" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.651357 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-6f8b349e-2d57-49ed-80a8-ac6b7412a866\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f8b349e-2d57-49ed-80a8-ac6b7412a866\") pod \"ovsdbserver-nb-2\" (UID: \"66c1626a-9ba7-4e87-ba85-60e7b300579c\") " pod="openstack/ovsdbserver-nb-2" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.651407 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-58659c86-361b-4d0a-bf24-7c13a7ef97d4\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-58659c86-361b-4d0a-bf24-7c13a7ef97d4\") pod \"ovsdbserver-nb-1\" (UID: \"7d8d8036-af27-4990-972d-42a2137818fe\") " pod="openstack/ovsdbserver-nb-1" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.651434 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/7d8d8036-af27-4990-972d-42a2137818fe-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-1\" (UID: \"7d8d8036-af27-4990-972d-42a2137818fe\") " pod="openstack/ovsdbserver-nb-1" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.651458 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/7d8d8036-af27-4990-972d-42a2137818fe-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-1\" (UID: \"7d8d8036-af27-4990-972d-42a2137818fe\") " pod="openstack/ovsdbserver-nb-1" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.651490 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/7d8d8036-af27-4990-972d-42a2137818fe-ovsdb-rundir\") pod \"ovsdbserver-nb-1\" (UID: \"7d8d8036-af27-4990-972d-42a2137818fe\") " pod="openstack/ovsdbserver-nb-1" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.651517 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d8d8036-af27-4990-972d-42a2137818fe-config\") pod \"ovsdbserver-nb-1\" (UID: \"7d8d8036-af27-4990-972d-42a2137818fe\") " pod="openstack/ovsdbserver-nb-1" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.651539 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/66c1626a-9ba7-4e87-ba85-60e7b300579c-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-2\" (UID: \"66c1626a-9ba7-4e87-ba85-60e7b300579c\") " pod="openstack/ovsdbserver-nb-2" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.651572 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/66c1626a-9ba7-4e87-ba85-60e7b300579c-config\") pod \"ovsdbserver-nb-2\" (UID: \"66c1626a-9ba7-4e87-ba85-60e7b300579c\") " pod="openstack/ovsdbserver-nb-2" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.651599 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/66c1626a-9ba7-4e87-ba85-60e7b300579c-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-2\" (UID: \"66c1626a-9ba7-4e87-ba85-60e7b300579c\") " pod="openstack/ovsdbserver-nb-2" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.651622 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d8d8036-af27-4990-972d-42a2137818fe-combined-ca-bundle\") pod \"ovsdbserver-nb-1\" (UID: \"7d8d8036-af27-4990-972d-42a2137818fe\") " pod="openstack/ovsdbserver-nb-1" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.651670 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66c1626a-9ba7-4e87-ba85-60e7b300579c-combined-ca-bundle\") pod \"ovsdbserver-nb-2\" (UID: \"66c1626a-9ba7-4e87-ba85-60e7b300579c\") " pod="openstack/ovsdbserver-nb-2" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.652126 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/66c1626a-9ba7-4e87-ba85-60e7b300579c-ovsdb-rundir\") pod \"ovsdbserver-nb-2\" (UID: \"66c1626a-9ba7-4e87-ba85-60e7b300579c\") " pod="openstack/ovsdbserver-nb-2" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.652478 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/7d8d8036-af27-4990-972d-42a2137818fe-ovsdb-rundir\") pod \"ovsdbserver-nb-1\" (UID: \"7d8d8036-af27-4990-972d-42a2137818fe\") " pod="openstack/ovsdbserver-nb-1" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.652501 4933 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/66c1626a-9ba7-4e87-ba85-60e7b300579c-scripts\") pod \"ovsdbserver-nb-2\" (UID: \"66c1626a-9ba7-4e87-ba85-60e7b300579c\") " pod="openstack/ovsdbserver-nb-2" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.652522 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7d8d8036-af27-4990-972d-42a2137818fe-scripts\") pod \"ovsdbserver-nb-1\" (UID: \"7d8d8036-af27-4990-972d-42a2137818fe\") " pod="openstack/ovsdbserver-nb-1" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.652933 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d8d8036-af27-4990-972d-42a2137818fe-config\") pod \"ovsdbserver-nb-1\" (UID: \"7d8d8036-af27-4990-972d-42a2137818fe\") " pod="openstack/ovsdbserver-nb-1" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.653198 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/66c1626a-9ba7-4e87-ba85-60e7b300579c-config\") pod \"ovsdbserver-nb-2\" (UID: \"66c1626a-9ba7-4e87-ba85-60e7b300579c\") " pod="openstack/ovsdbserver-nb-2" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.655359 4933 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.655467 4933 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-58659c86-361b-4d0a-bf24-7c13a7ef97d4\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-58659c86-361b-4d0a-bf24-7c13a7ef97d4\") pod \"ovsdbserver-nb-1\" (UID: \"7d8d8036-af27-4990-972d-42a2137818fe\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/e541369ee8ef4e87b55f5754148e727851ccba382f24a2d8c94b8a19090db8f2/globalmount\"" pod="openstack/ovsdbserver-nb-1" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.655548 4933 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
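
[Annotation] The MountVolume.MountDevice entries above record device mount paths of the form /var/lib/kubelet/plugins/kubernetes.io/csi/<driver>/<64-hex-char-id>/globalmount. A minimal sketch reconstructing that path follows, assuming the kubelet's hashed CSI staging layout in which the per-volume directory is the SHA-256 of the CSI volume handle; whether the handle equals the pvc-… name shown in these entries is driver-specific, so the printed hash is illustrative rather than guaranteed to match the log:

    package main

    import (
    	"crypto/sha256"
    	"fmt"
    	"path/filepath"
    )

    // globalMountPath mirrors the layout seen in the log:
    //   /var/lib/kubelet/plugins/kubernetes.io/csi/<driver>/<sha256(handle)>/globalmount
    func globalMountPath(driver, volumeHandle string) string {
    	sum := sha256.Sum256([]byte(volumeHandle))
    	return filepath.Join(
    		"/var/lib/kubelet/plugins/kubernetes.io/csi",
    		driver,
    		fmt.Sprintf("%x", sum),
    		"globalmount",
    	)
    }

    func main() {
    	// Volume name taken from the ovsdbserver-nb-1 entries above; the
    	// provisioner's actual CSI volume handle may differ from the PV name.
    	fmt.Println(globalMountPath(
    		"kubevirt.io.hostpath-provisioner",
    		"pvc-58659c86-361b-4d0a-bf24-7c13a7ef97d4",
    	))
    }
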
Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.655591 4933 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-6f8b349e-2d57-49ed-80a8-ac6b7412a866\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f8b349e-2d57-49ed-80a8-ac6b7412a866\") pod \"ovsdbserver-nb-2\" (UID: \"66c1626a-9ba7-4e87-ba85-60e7b300579c\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/9fde5230f916ab564adef71b22142fad51b1ac96efa5630e040071fca8311a15/globalmount\"" pod="openstack/ovsdbserver-nb-2" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.655801 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/66c1626a-9ba7-4e87-ba85-60e7b300579c-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-2\" (UID: \"66c1626a-9ba7-4e87-ba85-60e7b300579c\") " pod="openstack/ovsdbserver-nb-2" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.655816 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/7d8d8036-af27-4990-972d-42a2137818fe-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-1\" (UID: \"7d8d8036-af27-4990-972d-42a2137818fe\") " pod="openstack/ovsdbserver-nb-1" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.659708 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d8d8036-af27-4990-972d-42a2137818fe-combined-ca-bundle\") pod \"ovsdbserver-nb-1\" (UID: \"7d8d8036-af27-4990-972d-42a2137818fe\") " pod="openstack/ovsdbserver-nb-1" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.661680 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7d8d8036-af27-4990-972d-42a2137818fe-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-1\" (UID: \"7d8d8036-af27-4990-972d-42a2137818fe\") " pod="openstack/ovsdbserver-nb-1" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.663614 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66c1626a-9ba7-4e87-ba85-60e7b300579c-combined-ca-bundle\") pod \"ovsdbserver-nb-2\" (UID: \"66c1626a-9ba7-4e87-ba85-60e7b300579c\") " pod="openstack/ovsdbserver-nb-2" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.664394 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/66c1626a-9ba7-4e87-ba85-60e7b300579c-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-2\" (UID: \"66c1626a-9ba7-4e87-ba85-60e7b300579c\") " pod="openstack/ovsdbserver-nb-2" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.667956 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cs6tw\" (UniqueName: \"kubernetes.io/projected/66c1626a-9ba7-4e87-ba85-60e7b300579c-kube-api-access-cs6tw\") pod \"ovsdbserver-nb-2\" (UID: \"66c1626a-9ba7-4e87-ba85-60e7b300579c\") " pod="openstack/ovsdbserver-nb-2" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.669453 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zvq87\" (UniqueName: \"kubernetes.io/projected/7d8d8036-af27-4990-972d-42a2137818fe-kube-api-access-zvq87\") pod \"ovsdbserver-nb-1\" (UID: \"7d8d8036-af27-4990-972d-42a2137818fe\") " pod="openstack/ovsdbserver-nb-1" Jan 22 07:13:32 
crc kubenswrapper[4933]: I0122 07:13:32.680523 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-6f8b349e-2d57-49ed-80a8-ac6b7412a866\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f8b349e-2d57-49ed-80a8-ac6b7412a866\") pod \"ovsdbserver-nb-2\" (UID: \"66c1626a-9ba7-4e87-ba85-60e7b300579c\") " pod="openstack/ovsdbserver-nb-2" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.685007 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-58659c86-361b-4d0a-bf24-7c13a7ef97d4\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-58659c86-361b-4d0a-bf24-7c13a7ef97d4\") pod \"ovsdbserver-nb-1\" (UID: \"7d8d8036-af27-4990-972d-42a2137818fe\") " pod="openstack/ovsdbserver-nb-1" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.708357 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.727086 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-1" Jan 22 07:13:32 crc kubenswrapper[4933]: I0122 07:13:32.746898 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-2" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.232356 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.326981 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-2"] Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.504696 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.506946 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.511438 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.511627 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.511661 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.511807 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-bkwqt" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.516540 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-2"] Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.532701 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-2" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.554639 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.561633 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-1"] Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.562965 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-1" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.565835 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e17d563-a1a2-4165-b91e-af8182d086bf-config\") pod \"ovsdbserver-sb-2\" (UID: \"1e17d563-a1a2-4165-b91e-af8182d086bf\") " pod="openstack/ovsdbserver-sb-2" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.565876 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/28e80b7a-c02f-42ef-ac07-ceaa85aa37ed-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"28e80b7a-c02f-42ef-ac07-ceaa85aa37ed\") " pod="openstack/ovsdbserver-sb-0" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.565895 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/28e80b7a-c02f-42ef-ac07-ceaa85aa37ed-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"28e80b7a-c02f-42ef-ac07-ceaa85aa37ed\") " pod="openstack/ovsdbserver-sb-0" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.565912 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e17d563-a1a2-4165-b91e-af8182d086bf-combined-ca-bundle\") pod \"ovsdbserver-sb-2\" (UID: \"1e17d563-a1a2-4165-b91e-af8182d086bf\") " pod="openstack/ovsdbserver-sb-2" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.565936 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1e17d563-a1a2-4165-b91e-af8182d086bf-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-2\" (UID: \"1e17d563-a1a2-4165-b91e-af8182d086bf\") " pod="openstack/ovsdbserver-sb-2" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.565963 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/28e80b7a-c02f-42ef-ac07-ceaa85aa37ed-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"28e80b7a-c02f-42ef-ac07-ceaa85aa37ed\") " pod="openstack/ovsdbserver-sb-0" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.565982 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-40c7d719-276d-4cc5-a2cf-76fc1fd65fb8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-40c7d719-276d-4cc5-a2cf-76fc1fd65fb8\") pod \"ovsdbserver-sb-2\" (UID: \"1e17d563-a1a2-4165-b91e-af8182d086bf\") " pod="openstack/ovsdbserver-sb-2" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.566001 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jk7kg\" (UniqueName: \"kubernetes.io/projected/28e80b7a-c02f-42ef-ac07-ceaa85aa37ed-kube-api-access-jk7kg\") pod \"ovsdbserver-sb-0\" (UID: \"28e80b7a-c02f-42ef-ac07-ceaa85aa37ed\") " pod="openstack/ovsdbserver-sb-0" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.566021 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5vlj9\" (UniqueName: \"kubernetes.io/projected/1e17d563-a1a2-4165-b91e-af8182d086bf-kube-api-access-5vlj9\") pod \"ovsdbserver-sb-2\" (UID: 
\"1e17d563-a1a2-4165-b91e-af8182d086bf\") " pod="openstack/ovsdbserver-sb-2" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.566035 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1e17d563-a1a2-4165-b91e-af8182d086bf-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-2\" (UID: \"1e17d563-a1a2-4165-b91e-af8182d086bf\") " pod="openstack/ovsdbserver-sb-2" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.566052 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1e17d563-a1a2-4165-b91e-af8182d086bf-ovsdb-rundir\") pod \"ovsdbserver-sb-2\" (UID: \"1e17d563-a1a2-4165-b91e-af8182d086bf\") " pod="openstack/ovsdbserver-sb-2" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.566068 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/28e80b7a-c02f-42ef-ac07-ceaa85aa37ed-config\") pod \"ovsdbserver-sb-0\" (UID: \"28e80b7a-c02f-42ef-ac07-ceaa85aa37ed\") " pod="openstack/ovsdbserver-sb-0" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.566111 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-f61fae67-e944-4cb4-ba60-3cbc8d332143\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f61fae67-e944-4cb4-ba60-3cbc8d332143\") pod \"ovsdbserver-sb-0\" (UID: \"28e80b7a-c02f-42ef-ac07-ceaa85aa37ed\") " pod="openstack/ovsdbserver-sb-0" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.566299 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28e80b7a-c02f-42ef-ac07-ceaa85aa37ed-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"28e80b7a-c02f-42ef-ac07-ceaa85aa37ed\") " pod="openstack/ovsdbserver-sb-0" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.566400 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1e17d563-a1a2-4165-b91e-af8182d086bf-scripts\") pod \"ovsdbserver-sb-2\" (UID: \"1e17d563-a1a2-4165-b91e-af8182d086bf\") " pod="openstack/ovsdbserver-sb-2" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.566418 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/28e80b7a-c02f-42ef-ac07-ceaa85aa37ed-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"28e80b7a-c02f-42ef-ac07-ceaa85aa37ed\") " pod="openstack/ovsdbserver-sb-0" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.589100 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-2"] Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.594321 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-1"] Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.667366 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4edbbe04-3891-42df-b567-72040af96322-combined-ca-bundle\") pod \"ovsdbserver-sb-1\" (UID: \"4edbbe04-3891-42df-b567-72040af96322\") " pod="openstack/ovsdbserver-sb-1" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.667418 4933 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-d820ff88-d275-4584-b8b6-54f2cd48eb21\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d820ff88-d275-4584-b8b6-54f2cd48eb21\") pod \"ovsdbserver-sb-1\" (UID: \"4edbbe04-3891-42df-b567-72040af96322\") " pod="openstack/ovsdbserver-sb-1" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.667447 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4edbbe04-3891-42df-b567-72040af96322-scripts\") pod \"ovsdbserver-sb-1\" (UID: \"4edbbe04-3891-42df-b567-72040af96322\") " pod="openstack/ovsdbserver-sb-1" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.667470 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1e17d563-a1a2-4165-b91e-af8182d086bf-scripts\") pod \"ovsdbserver-sb-2\" (UID: \"1e17d563-a1a2-4165-b91e-af8182d086bf\") " pod="openstack/ovsdbserver-sb-2" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.667489 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/28e80b7a-c02f-42ef-ac07-ceaa85aa37ed-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"28e80b7a-c02f-42ef-ac07-ceaa85aa37ed\") " pod="openstack/ovsdbserver-sb-0" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.667516 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e17d563-a1a2-4165-b91e-af8182d086bf-config\") pod \"ovsdbserver-sb-2\" (UID: \"1e17d563-a1a2-4165-b91e-af8182d086bf\") " pod="openstack/ovsdbserver-sb-2" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.667534 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4edbbe04-3891-42df-b567-72040af96322-ovsdb-rundir\") pod \"ovsdbserver-sb-1\" (UID: \"4edbbe04-3891-42df-b567-72040af96322\") " pod="openstack/ovsdbserver-sb-1" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.667556 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/28e80b7a-c02f-42ef-ac07-ceaa85aa37ed-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"28e80b7a-c02f-42ef-ac07-ceaa85aa37ed\") " pod="openstack/ovsdbserver-sb-0" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.667573 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/28e80b7a-c02f-42ef-ac07-ceaa85aa37ed-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"28e80b7a-c02f-42ef-ac07-ceaa85aa37ed\") " pod="openstack/ovsdbserver-sb-0" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.667589 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e17d563-a1a2-4165-b91e-af8182d086bf-combined-ca-bundle\") pod \"ovsdbserver-sb-2\" (UID: \"1e17d563-a1a2-4165-b91e-af8182d086bf\") " pod="openstack/ovsdbserver-sb-2" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.667607 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fkw4c\" (UniqueName: 
\"kubernetes.io/projected/4edbbe04-3891-42df-b567-72040af96322-kube-api-access-fkw4c\") pod \"ovsdbserver-sb-1\" (UID: \"4edbbe04-3891-42df-b567-72040af96322\") " pod="openstack/ovsdbserver-sb-1" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.667633 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1e17d563-a1a2-4165-b91e-af8182d086bf-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-2\" (UID: \"1e17d563-a1a2-4165-b91e-af8182d086bf\") " pod="openstack/ovsdbserver-sb-2" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.667661 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4edbbe04-3891-42df-b567-72040af96322-config\") pod \"ovsdbserver-sb-1\" (UID: \"4edbbe04-3891-42df-b567-72040af96322\") " pod="openstack/ovsdbserver-sb-1" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.667687 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4edbbe04-3891-42df-b567-72040af96322-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-1\" (UID: \"4edbbe04-3891-42df-b567-72040af96322\") " pod="openstack/ovsdbserver-sb-1" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.667707 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/28e80b7a-c02f-42ef-ac07-ceaa85aa37ed-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"28e80b7a-c02f-42ef-ac07-ceaa85aa37ed\") " pod="openstack/ovsdbserver-sb-0" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.667727 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-40c7d719-276d-4cc5-a2cf-76fc1fd65fb8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-40c7d719-276d-4cc5-a2cf-76fc1fd65fb8\") pod \"ovsdbserver-sb-2\" (UID: \"1e17d563-a1a2-4165-b91e-af8182d086bf\") " pod="openstack/ovsdbserver-sb-2" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.667757 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jk7kg\" (UniqueName: \"kubernetes.io/projected/28e80b7a-c02f-42ef-ac07-ceaa85aa37ed-kube-api-access-jk7kg\") pod \"ovsdbserver-sb-0\" (UID: \"28e80b7a-c02f-42ef-ac07-ceaa85aa37ed\") " pod="openstack/ovsdbserver-sb-0" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.667782 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5vlj9\" (UniqueName: \"kubernetes.io/projected/1e17d563-a1a2-4165-b91e-af8182d086bf-kube-api-access-5vlj9\") pod \"ovsdbserver-sb-2\" (UID: \"1e17d563-a1a2-4165-b91e-af8182d086bf\") " pod="openstack/ovsdbserver-sb-2" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.667801 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1e17d563-a1a2-4165-b91e-af8182d086bf-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-2\" (UID: \"1e17d563-a1a2-4165-b91e-af8182d086bf\") " pod="openstack/ovsdbserver-sb-2" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.667823 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1e17d563-a1a2-4165-b91e-af8182d086bf-ovsdb-rundir\") pod \"ovsdbserver-sb-2\" 
(UID: \"1e17d563-a1a2-4165-b91e-af8182d086bf\") " pod="openstack/ovsdbserver-sb-2" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.667845 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/28e80b7a-c02f-42ef-ac07-ceaa85aa37ed-config\") pod \"ovsdbserver-sb-0\" (UID: \"28e80b7a-c02f-42ef-ac07-ceaa85aa37ed\") " pod="openstack/ovsdbserver-sb-0" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.667871 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-f61fae67-e944-4cb4-ba60-3cbc8d332143\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f61fae67-e944-4cb4-ba60-3cbc8d332143\") pod \"ovsdbserver-sb-0\" (UID: \"28e80b7a-c02f-42ef-ac07-ceaa85aa37ed\") " pod="openstack/ovsdbserver-sb-0" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.667898 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28e80b7a-c02f-42ef-ac07-ceaa85aa37ed-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"28e80b7a-c02f-42ef-ac07-ceaa85aa37ed\") " pod="openstack/ovsdbserver-sb-0" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.667915 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4edbbe04-3891-42df-b567-72040af96322-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-1\" (UID: \"4edbbe04-3891-42df-b567-72040af96322\") " pod="openstack/ovsdbserver-sb-1" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.668519 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1e17d563-a1a2-4165-b91e-af8182d086bf-ovsdb-rundir\") pod \"ovsdbserver-sb-2\" (UID: \"1e17d563-a1a2-4165-b91e-af8182d086bf\") " pod="openstack/ovsdbserver-sb-2" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.668737 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1e17d563-a1a2-4165-b91e-af8182d086bf-scripts\") pod \"ovsdbserver-sb-2\" (UID: \"1e17d563-a1a2-4165-b91e-af8182d086bf\") " pod="openstack/ovsdbserver-sb-2" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.668704 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/28e80b7a-c02f-42ef-ac07-ceaa85aa37ed-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"28e80b7a-c02f-42ef-ac07-ceaa85aa37ed\") " pod="openstack/ovsdbserver-sb-0" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.671509 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/28e80b7a-c02f-42ef-ac07-ceaa85aa37ed-config\") pod \"ovsdbserver-sb-0\" (UID: \"28e80b7a-c02f-42ef-ac07-ceaa85aa37ed\") " pod="openstack/ovsdbserver-sb-0" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.673559 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e17d563-a1a2-4165-b91e-af8182d086bf-combined-ca-bundle\") pod \"ovsdbserver-sb-2\" (UID: \"1e17d563-a1a2-4165-b91e-af8182d086bf\") " pod="openstack/ovsdbserver-sb-2" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.674430 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/1e17d563-a1a2-4165-b91e-af8182d086bf-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-2\" (UID: \"1e17d563-a1a2-4165-b91e-af8182d086bf\") " pod="openstack/ovsdbserver-sb-2" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.675063 4933 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.675119 4933 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-f61fae67-e944-4cb4-ba60-3cbc8d332143\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f61fae67-e944-4cb4-ba60-3cbc8d332143\") pod \"ovsdbserver-sb-0\" (UID: \"28e80b7a-c02f-42ef-ac07-ceaa85aa37ed\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/69b5098fb153119695bdfc85ed9a093a1453d952540b2df5c5507b7bff54ea9b/globalmount\"" pod="openstack/ovsdbserver-sb-0" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.675759 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/28e80b7a-c02f-42ef-ac07-ceaa85aa37ed-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"28e80b7a-c02f-42ef-ac07-ceaa85aa37ed\") " pod="openstack/ovsdbserver-sb-0" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.675961 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/28e80b7a-c02f-42ef-ac07-ceaa85aa37ed-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"28e80b7a-c02f-42ef-ac07-ceaa85aa37ed\") " pod="openstack/ovsdbserver-sb-0" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.677398 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28e80b7a-c02f-42ef-ac07-ceaa85aa37ed-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"28e80b7a-c02f-42ef-ac07-ceaa85aa37ed\") " pod="openstack/ovsdbserver-sb-0" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.677770 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1e17d563-a1a2-4165-b91e-af8182d086bf-config\") pod \"ovsdbserver-sb-2\" (UID: \"1e17d563-a1a2-4165-b91e-af8182d086bf\") " pod="openstack/ovsdbserver-sb-2" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.679091 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1e17d563-a1a2-4165-b91e-af8182d086bf-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-2\" (UID: \"1e17d563-a1a2-4165-b91e-af8182d086bf\") " pod="openstack/ovsdbserver-sb-2" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.684910 4933 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.684966 4933 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-40c7d719-276d-4cc5-a2cf-76fc1fd65fb8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-40c7d719-276d-4cc5-a2cf-76fc1fd65fb8\") pod \"ovsdbserver-sb-2\" (UID: \"1e17d563-a1a2-4165-b91e-af8182d086bf\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/959990b2fb4a8dc651c6d0edd6c0a9322d7cac8ac0773fd13dfa470a6afd3803/globalmount\"" pod="openstack/ovsdbserver-sb-2" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.687183 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/28e80b7a-c02f-42ef-ac07-ceaa85aa37ed-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"28e80b7a-c02f-42ef-ac07-ceaa85aa37ed\") " pod="openstack/ovsdbserver-sb-0" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.687203 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5vlj9\" (UniqueName: \"kubernetes.io/projected/1e17d563-a1a2-4165-b91e-af8182d086bf-kube-api-access-5vlj9\") pod \"ovsdbserver-sb-2\" (UID: \"1e17d563-a1a2-4165-b91e-af8182d086bf\") " pod="openstack/ovsdbserver-sb-2" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.700017 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jk7kg\" (UniqueName: \"kubernetes.io/projected/28e80b7a-c02f-42ef-ac07-ceaa85aa37ed-kube-api-access-jk7kg\") pod \"ovsdbserver-sb-0\" (UID: \"28e80b7a-c02f-42ef-ac07-ceaa85aa37ed\") " pod="openstack/ovsdbserver-sb-0" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.710158 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-2" event={"ID":"66c1626a-9ba7-4e87-ba85-60e7b300579c","Type":"ContainerStarted","Data":"18c0b4af1f700dcb6291b996321fab42969145177b088b296e85a8792cd9d15d"} Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.710198 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-2" event={"ID":"66c1626a-9ba7-4e87-ba85-60e7b300579c","Type":"ContainerStarted","Data":"af083064d6a813184944b5b6dbcbf4f3e4d13d4de13b5462b119bb56993de310"} Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.712165 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"1333917e-870b-4157-8a9b-8e799f5ce1e9","Type":"ContainerStarted","Data":"ecf15625a0b98f96cbd288a86a253307b61da1103b8967469bbc8fca4a01335b"} Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.712209 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"1333917e-870b-4157-8a9b-8e799f5ce1e9","Type":"ContainerStarted","Data":"61487b304c0f24b0b3b6f238d7327239b232b15d0bcf617ab5293e2de04af134"} Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.728657 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-40c7d719-276d-4cc5-a2cf-76fc1fd65fb8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-40c7d719-276d-4cc5-a2cf-76fc1fd65fb8\") pod \"ovsdbserver-sb-2\" (UID: \"1e17d563-a1a2-4165-b91e-af8182d086bf\") " pod="openstack/ovsdbserver-sb-2" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.729125 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-f61fae67-e944-4cb4-ba60-3cbc8d332143\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f61fae67-e944-4cb4-ba60-3cbc8d332143\") pod \"ovsdbserver-sb-0\" (UID: \"28e80b7a-c02f-42ef-ac07-ceaa85aa37ed\") " pod="openstack/ovsdbserver-sb-0" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.769436 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fkw4c\" (UniqueName: \"kubernetes.io/projected/4edbbe04-3891-42df-b567-72040af96322-kube-api-access-fkw4c\") pod \"ovsdbserver-sb-1\" (UID: \"4edbbe04-3891-42df-b567-72040af96322\") " pod="openstack/ovsdbserver-sb-1" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.769488 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4edbbe04-3891-42df-b567-72040af96322-config\") pod \"ovsdbserver-sb-1\" (UID: \"4edbbe04-3891-42df-b567-72040af96322\") " pod="openstack/ovsdbserver-sb-1" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.769576 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4edbbe04-3891-42df-b567-72040af96322-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-1\" (UID: \"4edbbe04-3891-42df-b567-72040af96322\") " pod="openstack/ovsdbserver-sb-1" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.769634 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4edbbe04-3891-42df-b567-72040af96322-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-1\" (UID: \"4edbbe04-3891-42df-b567-72040af96322\") " pod="openstack/ovsdbserver-sb-1" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.769657 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4edbbe04-3891-42df-b567-72040af96322-combined-ca-bundle\") pod \"ovsdbserver-sb-1\" (UID: \"4edbbe04-3891-42df-b567-72040af96322\") " pod="openstack/ovsdbserver-sb-1" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.769686 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-d820ff88-d275-4584-b8b6-54f2cd48eb21\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d820ff88-d275-4584-b8b6-54f2cd48eb21\") pod \"ovsdbserver-sb-1\" (UID: \"4edbbe04-3891-42df-b567-72040af96322\") " pod="openstack/ovsdbserver-sb-1" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.769708 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4edbbe04-3891-42df-b567-72040af96322-scripts\") pod \"ovsdbserver-sb-1\" (UID: \"4edbbe04-3891-42df-b567-72040af96322\") " pod="openstack/ovsdbserver-sb-1" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.769739 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4edbbe04-3891-42df-b567-72040af96322-ovsdb-rundir\") pod \"ovsdbserver-sb-1\" (UID: \"4edbbe04-3891-42df-b567-72040af96322\") " pod="openstack/ovsdbserver-sb-1" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.770236 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4edbbe04-3891-42df-b567-72040af96322-ovsdb-rundir\") pod \"ovsdbserver-sb-1\" (UID: \"4edbbe04-3891-42df-b567-72040af96322\") " pod="openstack/ovsdbserver-sb-1" Jan 22 07:13:33 
crc kubenswrapper[4933]: I0122 07:13:33.771550 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4edbbe04-3891-42df-b567-72040af96322-scripts\") pod \"ovsdbserver-sb-1\" (UID: \"4edbbe04-3891-42df-b567-72040af96322\") " pod="openstack/ovsdbserver-sb-1" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.772920 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4edbbe04-3891-42df-b567-72040af96322-config\") pod \"ovsdbserver-sb-1\" (UID: \"4edbbe04-3891-42df-b567-72040af96322\") " pod="openstack/ovsdbserver-sb-1" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.773923 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4edbbe04-3891-42df-b567-72040af96322-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-1\" (UID: \"4edbbe04-3891-42df-b567-72040af96322\") " pod="openstack/ovsdbserver-sb-1" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.774621 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4edbbe04-3891-42df-b567-72040af96322-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-1\" (UID: \"4edbbe04-3891-42df-b567-72040af96322\") " pod="openstack/ovsdbserver-sb-1" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.775552 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4edbbe04-3891-42df-b567-72040af96322-combined-ca-bundle\") pod \"ovsdbserver-sb-1\" (UID: \"4edbbe04-3891-42df-b567-72040af96322\") " pod="openstack/ovsdbserver-sb-1" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.775608 4933 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.775631 4933 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-d820ff88-d275-4584-b8b6-54f2cd48eb21\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d820ff88-d275-4584-b8b6-54f2cd48eb21\") pod \"ovsdbserver-sb-1\" (UID: \"4edbbe04-3891-42df-b567-72040af96322\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/f6711284174acdbdd5824a1f32f6c04f08d557179e7158197b0b9f92f0c50074/globalmount\"" pod="openstack/ovsdbserver-sb-1" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.788770 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fkw4c\" (UniqueName: \"kubernetes.io/projected/4edbbe04-3891-42df-b567-72040af96322-kube-api-access-fkw4c\") pod \"ovsdbserver-sb-1\" (UID: \"4edbbe04-3891-42df-b567-72040af96322\") " pod="openstack/ovsdbserver-sb-1" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.836190 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-d820ff88-d275-4584-b8b6-54f2cd48eb21\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d820ff88-d275-4584-b8b6-54f2cd48eb21\") pod \"ovsdbserver-sb-1\" (UID: \"4edbbe04-3891-42df-b567-72040af96322\") " pod="openstack/ovsdbserver-sb-1" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.837244 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 22 07:13:33 crc kubenswrapper[4933]: I0122 07:13:33.865130 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-2" Jan 22 07:13:34 crc kubenswrapper[4933]: I0122 07:13:34.116212 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-1" Jan 22 07:13:34 crc kubenswrapper[4933]: I0122 07:13:34.198645 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-1"] Jan 22 07:13:34 crc kubenswrapper[4933]: I0122 07:13:34.360835 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 22 07:13:34 crc kubenswrapper[4933]: W0122 07:13:34.368367 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod28e80b7a_c02f_42ef_ac07_ceaa85aa37ed.slice/crio-7c23470cce584d8c054ae2c301760179d55b8695127c02c2519e446fcb05f87d WatchSource:0}: Error finding container 7c23470cce584d8c054ae2c301760179d55b8695127c02c2519e446fcb05f87d: Status 404 returned error can't find the container with id 7c23470cce584d8c054ae2c301760179d55b8695127c02c2519e446fcb05f87d Jan 22 07:13:34 crc kubenswrapper[4933]: I0122 07:13:34.451139 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-2"] Jan 22 07:13:34 crc kubenswrapper[4933]: I0122 07:13:34.689992 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-1"] Jan 22 07:13:34 crc kubenswrapper[4933]: W0122 07:13:34.701046 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4edbbe04_3891_42df_b567_72040af96322.slice/crio-26b354ca7fc8b988f37426b90812426c40c90a634070a79b14e8f4659e234660 WatchSource:0}: Error finding container 26b354ca7fc8b988f37426b90812426c40c90a634070a79b14e8f4659e234660: Status 404 returned error can't find the container with id 26b354ca7fc8b988f37426b90812426c40c90a634070a79b14e8f4659e234660 Jan 22 07:13:34 crc kubenswrapper[4933]: I0122 07:13:34.722667 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"1333917e-870b-4157-8a9b-8e799f5ce1e9","Type":"ContainerStarted","Data":"b43d0ef2db362d9cd0e3a2ad79e5041c17ec3cec4efb45aa90dd0bb96c4f7ba9"} Jan 22 07:13:34 crc kubenswrapper[4933]: I0122 07:13:34.726206 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"28e80b7a-c02f-42ef-ac07-ceaa85aa37ed","Type":"ContainerStarted","Data":"5555e0a680e7be4670e0e9b559d7e73b3e6320a9d32f4ecef2f6b86954c3c16e"} Jan 22 07:13:34 crc kubenswrapper[4933]: I0122 07:13:34.726241 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"28e80b7a-c02f-42ef-ac07-ceaa85aa37ed","Type":"ContainerStarted","Data":"7c23470cce584d8c054ae2c301760179d55b8695127c02c2519e446fcb05f87d"} Jan 22 07:13:34 crc kubenswrapper[4933]: I0122 07:13:34.728124 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-1" event={"ID":"7d8d8036-af27-4990-972d-42a2137818fe","Type":"ContainerStarted","Data":"9bed20687c33e726660bd4c6bc46e8d381913966723c734b7bc0b56494df1533"} Jan 22 07:13:34 crc kubenswrapper[4933]: I0122 07:13:34.728147 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-1" 
event={"ID":"7d8d8036-af27-4990-972d-42a2137818fe","Type":"ContainerStarted","Data":"4f283e0f89619403849f216ea3e8d7e1912c3d548d9e7cd1e0c83e09cfbaf4d6"} Jan 22 07:13:34 crc kubenswrapper[4933]: I0122 07:13:34.728157 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-1" event={"ID":"7d8d8036-af27-4990-972d-42a2137818fe","Type":"ContainerStarted","Data":"7d3e4117f6b1dd06ddfbc3f47eaada37ea5ceb1d85d1e5d2327764dea5d86a16"} Jan 22 07:13:34 crc kubenswrapper[4933]: I0122 07:13:34.731434 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-2" event={"ID":"1e17d563-a1a2-4165-b91e-af8182d086bf","Type":"ContainerStarted","Data":"d32844162ad89709355de15951f6ac947b538b3545526d1d2c156153686306a0"} Jan 22 07:13:34 crc kubenswrapper[4933]: I0122 07:13:34.732185 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-1" event={"ID":"4edbbe04-3891-42df-b567-72040af96322","Type":"ContainerStarted","Data":"26b354ca7fc8b988f37426b90812426c40c90a634070a79b14e8f4659e234660"} Jan 22 07:13:34 crc kubenswrapper[4933]: I0122 07:13:34.749599 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-2" event={"ID":"66c1626a-9ba7-4e87-ba85-60e7b300579c","Type":"ContainerStarted","Data":"5d11bbaa158cad32fe7d88109e519615bd6754fb385edbd17b1d4437c5c5d2da"} Jan 22 07:13:34 crc kubenswrapper[4933]: I0122 07:13:34.764134 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=3.764109513 podStartE2EDuration="3.764109513s" podCreationTimestamp="2026-01-22 07:13:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:13:34.743200495 +0000 UTC m=+5262.580325858" watchObservedRunningTime="2026-01-22 07:13:34.764109513 +0000 UTC m=+5262.601234866" Jan 22 07:13:34 crc kubenswrapper[4933]: I0122 07:13:34.766105 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-1" podStartSLOduration=3.766099521 podStartE2EDuration="3.766099521s" podCreationTimestamp="2026-01-22 07:13:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:13:34.761767436 +0000 UTC m=+5262.598892809" watchObservedRunningTime="2026-01-22 07:13:34.766099521 +0000 UTC m=+5262.603224874" Jan 22 07:13:34 crc kubenswrapper[4933]: I0122 07:13:34.805422 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-2" podStartSLOduration=3.805342825 podStartE2EDuration="3.805342825s" podCreationTimestamp="2026-01-22 07:13:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:13:34.792383291 +0000 UTC m=+5262.629508654" watchObservedRunningTime="2026-01-22 07:13:34.805342825 +0000 UTC m=+5262.642468198" Jan 22 07:13:35 crc kubenswrapper[4933]: I0122 07:13:35.709062 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Jan 22 07:13:35 crc kubenswrapper[4933]: I0122 07:13:35.727171 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-1" Jan 22 07:13:35 crc kubenswrapper[4933]: I0122 07:13:35.747481 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-2" 
Jan 22 07:13:35 crc kubenswrapper[4933]: I0122 07:13:35.759644 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"28e80b7a-c02f-42ef-ac07-ceaa85aa37ed","Type":"ContainerStarted","Data":"03d0bd9c50361d9046d31a71391cac6140def92f20d1a804951479fac1e7f1df"} Jan 22 07:13:35 crc kubenswrapper[4933]: I0122 07:13:35.762386 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-2" event={"ID":"1e17d563-a1a2-4165-b91e-af8182d086bf","Type":"ContainerStarted","Data":"3f79cf85486889c87f96c5f021bc4170fc82f58f2547982503adc81700a7dea5"} Jan 22 07:13:35 crc kubenswrapper[4933]: I0122 07:13:35.762425 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-2" event={"ID":"1e17d563-a1a2-4165-b91e-af8182d086bf","Type":"ContainerStarted","Data":"de50148512d37bd7325c56e7df4d01aa0b77fabad9d7374b931b0c232aad76fd"} Jan 22 07:13:35 crc kubenswrapper[4933]: I0122 07:13:35.764635 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-1" event={"ID":"4edbbe04-3891-42df-b567-72040af96322","Type":"ContainerStarted","Data":"d9948dae80c3e540b5089d263b19e2c07e2fd26a1029c4201166c681d952f504"} Jan 22 07:13:35 crc kubenswrapper[4933]: I0122 07:13:35.764667 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-1" event={"ID":"4edbbe04-3891-42df-b567-72040af96322","Type":"ContainerStarted","Data":"4428b38622b57afb5382bb57bf5b64f33b6703bd029f512877bf48176e004d0e"} Jan 22 07:13:35 crc kubenswrapper[4933]: I0122 07:13:35.794372 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=3.794344707 podStartE2EDuration="3.794344707s" podCreationTimestamp="2026-01-22 07:13:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:13:35.791686384 +0000 UTC m=+5263.628811757" watchObservedRunningTime="2026-01-22 07:13:35.794344707 +0000 UTC m=+5263.631470070" Jan 22 07:13:35 crc kubenswrapper[4933]: I0122 07:13:35.822677 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-2" podStartSLOduration=3.822653386 podStartE2EDuration="3.822653386s" podCreationTimestamp="2026-01-22 07:13:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:13:35.819816177 +0000 UTC m=+5263.656941610" watchObservedRunningTime="2026-01-22 07:13:35.822653386 +0000 UTC m=+5263.659778749" Jan 22 07:13:35 crc kubenswrapper[4933]: I0122 07:13:35.843706 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-1" podStartSLOduration=3.843687428 podStartE2EDuration="3.843687428s" podCreationTimestamp="2026-01-22 07:13:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:13:35.839446805 +0000 UTC m=+5263.676572178" watchObservedRunningTime="2026-01-22 07:13:35.843687428 +0000 UTC m=+5263.680812791" Jan 22 07:13:36 crc kubenswrapper[4933]: I0122 07:13:36.838224 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Jan 22 07:13:36 crc kubenswrapper[4933]: I0122 07:13:36.866017 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-2" Jan 22 07:13:37 crc 
kubenswrapper[4933]: I0122 07:13:37.119533 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-1" Jan 22 07:13:37 crc kubenswrapper[4933]: I0122 07:13:37.179876 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-1" Jan 22 07:13:37 crc kubenswrapper[4933]: I0122 07:13:37.709415 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Jan 22 07:13:37 crc kubenswrapper[4933]: I0122 07:13:37.728085 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-1" Jan 22 07:13:37 crc kubenswrapper[4933]: I0122 07:13:37.747747 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-2" Jan 22 07:13:37 crc kubenswrapper[4933]: I0122 07:13:37.780654 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-1" Jan 22 07:13:38 crc kubenswrapper[4933]: I0122 07:13:38.765825 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-1" Jan 22 07:13:38 crc kubenswrapper[4933]: I0122 07:13:38.773039 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Jan 22 07:13:38 crc kubenswrapper[4933]: I0122 07:13:38.794934 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-2" Jan 22 07:13:38 crc kubenswrapper[4933]: I0122 07:13:38.842521 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Jan 22 07:13:38 crc kubenswrapper[4933]: I0122 07:13:38.846624 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-2" Jan 22 07:13:38 crc kubenswrapper[4933]: I0122 07:13:38.846697 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Jan 22 07:13:38 crc kubenswrapper[4933]: I0122 07:13:38.868149 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-2" Jan 22 07:13:39 crc kubenswrapper[4933]: I0122 07:13:39.039527 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6dc945dddc-hxxqm"] Jan 22 07:13:39 crc kubenswrapper[4933]: I0122 07:13:39.041265 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6dc945dddc-hxxqm" Jan 22 07:13:39 crc kubenswrapper[4933]: I0122 07:13:39.045713 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Jan 22 07:13:39 crc kubenswrapper[4933]: I0122 07:13:39.054371 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6dc945dddc-hxxqm"] Jan 22 07:13:39 crc kubenswrapper[4933]: I0122 07:13:39.153673 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-1" Jan 22 07:13:39 crc kubenswrapper[4933]: I0122 07:13:39.174155 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7b781883-00cc-4c64-b7b5-0038c7b32e44-dns-svc\") pod \"dnsmasq-dns-6dc945dddc-hxxqm\" (UID: \"7b781883-00cc-4c64-b7b5-0038c7b32e44\") " pod="openstack/dnsmasq-dns-6dc945dddc-hxxqm" Jan 22 07:13:39 crc kubenswrapper[4933]: I0122 07:13:39.174407 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b781883-00cc-4c64-b7b5-0038c7b32e44-config\") pod \"dnsmasq-dns-6dc945dddc-hxxqm\" (UID: \"7b781883-00cc-4c64-b7b5-0038c7b32e44\") " pod="openstack/dnsmasq-dns-6dc945dddc-hxxqm" Jan 22 07:13:39 crc kubenswrapper[4933]: I0122 07:13:39.174444 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7b781883-00cc-4c64-b7b5-0038c7b32e44-ovsdbserver-nb\") pod \"dnsmasq-dns-6dc945dddc-hxxqm\" (UID: \"7b781883-00cc-4c64-b7b5-0038c7b32e44\") " pod="openstack/dnsmasq-dns-6dc945dddc-hxxqm" Jan 22 07:13:39 crc kubenswrapper[4933]: I0122 07:13:39.174518 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-66qsx\" (UniqueName: \"kubernetes.io/projected/7b781883-00cc-4c64-b7b5-0038c7b32e44-kube-api-access-66qsx\") pod \"dnsmasq-dns-6dc945dddc-hxxqm\" (UID: \"7b781883-00cc-4c64-b7b5-0038c7b32e44\") " pod="openstack/dnsmasq-dns-6dc945dddc-hxxqm" Jan 22 07:13:39 crc kubenswrapper[4933]: I0122 07:13:39.276007 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7b781883-00cc-4c64-b7b5-0038c7b32e44-dns-svc\") pod \"dnsmasq-dns-6dc945dddc-hxxqm\" (UID: \"7b781883-00cc-4c64-b7b5-0038c7b32e44\") " pod="openstack/dnsmasq-dns-6dc945dddc-hxxqm" Jan 22 07:13:39 crc kubenswrapper[4933]: I0122 07:13:39.276194 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b781883-00cc-4c64-b7b5-0038c7b32e44-config\") pod \"dnsmasq-dns-6dc945dddc-hxxqm\" (UID: \"7b781883-00cc-4c64-b7b5-0038c7b32e44\") " pod="openstack/dnsmasq-dns-6dc945dddc-hxxqm" Jan 22 07:13:39 crc kubenswrapper[4933]: I0122 07:13:39.276226 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7b781883-00cc-4c64-b7b5-0038c7b32e44-ovsdbserver-nb\") pod \"dnsmasq-dns-6dc945dddc-hxxqm\" (UID: \"7b781883-00cc-4c64-b7b5-0038c7b32e44\") " pod="openstack/dnsmasq-dns-6dc945dddc-hxxqm" Jan 22 07:13:39 crc kubenswrapper[4933]: I0122 07:13:39.276273 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-66qsx\" (UniqueName: 
\"kubernetes.io/projected/7b781883-00cc-4c64-b7b5-0038c7b32e44-kube-api-access-66qsx\") pod \"dnsmasq-dns-6dc945dddc-hxxqm\" (UID: \"7b781883-00cc-4c64-b7b5-0038c7b32e44\") " pod="openstack/dnsmasq-dns-6dc945dddc-hxxqm" Jan 22 07:13:39 crc kubenswrapper[4933]: I0122 07:13:39.277150 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7b781883-00cc-4c64-b7b5-0038c7b32e44-ovsdbserver-nb\") pod \"dnsmasq-dns-6dc945dddc-hxxqm\" (UID: \"7b781883-00cc-4c64-b7b5-0038c7b32e44\") " pod="openstack/dnsmasq-dns-6dc945dddc-hxxqm" Jan 22 07:13:39 crc kubenswrapper[4933]: I0122 07:13:39.277155 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b781883-00cc-4c64-b7b5-0038c7b32e44-config\") pod \"dnsmasq-dns-6dc945dddc-hxxqm\" (UID: \"7b781883-00cc-4c64-b7b5-0038c7b32e44\") " pod="openstack/dnsmasq-dns-6dc945dddc-hxxqm" Jan 22 07:13:39 crc kubenswrapper[4933]: I0122 07:13:39.277248 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7b781883-00cc-4c64-b7b5-0038c7b32e44-dns-svc\") pod \"dnsmasq-dns-6dc945dddc-hxxqm\" (UID: \"7b781883-00cc-4c64-b7b5-0038c7b32e44\") " pod="openstack/dnsmasq-dns-6dc945dddc-hxxqm" Jan 22 07:13:39 crc kubenswrapper[4933]: I0122 07:13:39.295342 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-66qsx\" (UniqueName: \"kubernetes.io/projected/7b781883-00cc-4c64-b7b5-0038c7b32e44-kube-api-access-66qsx\") pod \"dnsmasq-dns-6dc945dddc-hxxqm\" (UID: \"7b781883-00cc-4c64-b7b5-0038c7b32e44\") " pod="openstack/dnsmasq-dns-6dc945dddc-hxxqm" Jan 22 07:13:39 crc kubenswrapper[4933]: I0122 07:13:39.363931 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6dc945dddc-hxxqm" Jan 22 07:13:39 crc kubenswrapper[4933]: I0122 07:13:39.434671 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6dc945dddc-hxxqm"] Jan 22 07:13:39 crc kubenswrapper[4933]: I0122 07:13:39.464311 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-659c7d5767-x2dcs"] Jan 22 07:13:39 crc kubenswrapper[4933]: I0122 07:13:39.465900 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-659c7d5767-x2dcs" Jan 22 07:13:39 crc kubenswrapper[4933]: I0122 07:13:39.468291 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Jan 22 07:13:39 crc kubenswrapper[4933]: I0122 07:13:39.471589 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-659c7d5767-x2dcs"] Jan 22 07:13:39 crc kubenswrapper[4933]: I0122 07:13:39.582852 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3702545a-242f-4b60-85b4-3eb809888177-ovsdbserver-nb\") pod \"dnsmasq-dns-659c7d5767-x2dcs\" (UID: \"3702545a-242f-4b60-85b4-3eb809888177\") " pod="openstack/dnsmasq-dns-659c7d5767-x2dcs" Jan 22 07:13:39 crc kubenswrapper[4933]: I0122 07:13:39.583281 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3702545a-242f-4b60-85b4-3eb809888177-dns-svc\") pod \"dnsmasq-dns-659c7d5767-x2dcs\" (UID: \"3702545a-242f-4b60-85b4-3eb809888177\") " pod="openstack/dnsmasq-dns-659c7d5767-x2dcs" Jan 22 07:13:39 crc kubenswrapper[4933]: I0122 07:13:39.583307 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sp4nb\" (UniqueName: \"kubernetes.io/projected/3702545a-242f-4b60-85b4-3eb809888177-kube-api-access-sp4nb\") pod \"dnsmasq-dns-659c7d5767-x2dcs\" (UID: \"3702545a-242f-4b60-85b4-3eb809888177\") " pod="openstack/dnsmasq-dns-659c7d5767-x2dcs" Jan 22 07:13:39 crc kubenswrapper[4933]: I0122 07:13:39.583373 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3702545a-242f-4b60-85b4-3eb809888177-ovsdbserver-sb\") pod \"dnsmasq-dns-659c7d5767-x2dcs\" (UID: \"3702545a-242f-4b60-85b4-3eb809888177\") " pod="openstack/dnsmasq-dns-659c7d5767-x2dcs" Jan 22 07:13:39 crc kubenswrapper[4933]: I0122 07:13:39.583434 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3702545a-242f-4b60-85b4-3eb809888177-config\") pod \"dnsmasq-dns-659c7d5767-x2dcs\" (UID: \"3702545a-242f-4b60-85b4-3eb809888177\") " pod="openstack/dnsmasq-dns-659c7d5767-x2dcs" Jan 22 07:13:39 crc kubenswrapper[4933]: I0122 07:13:39.684949 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3702545a-242f-4b60-85b4-3eb809888177-ovsdbserver-nb\") pod \"dnsmasq-dns-659c7d5767-x2dcs\" (UID: \"3702545a-242f-4b60-85b4-3eb809888177\") " pod="openstack/dnsmasq-dns-659c7d5767-x2dcs" Jan 22 07:13:39 crc kubenswrapper[4933]: I0122 07:13:39.684996 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3702545a-242f-4b60-85b4-3eb809888177-dns-svc\") pod \"dnsmasq-dns-659c7d5767-x2dcs\" (UID: \"3702545a-242f-4b60-85b4-3eb809888177\") " pod="openstack/dnsmasq-dns-659c7d5767-x2dcs" Jan 22 07:13:39 crc kubenswrapper[4933]: I0122 07:13:39.685016 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sp4nb\" (UniqueName: \"kubernetes.io/projected/3702545a-242f-4b60-85b4-3eb809888177-kube-api-access-sp4nb\") pod \"dnsmasq-dns-659c7d5767-x2dcs\" (UID: \"3702545a-242f-4b60-85b4-3eb809888177\") " 
pod="openstack/dnsmasq-dns-659c7d5767-x2dcs" Jan 22 07:13:39 crc kubenswrapper[4933]: I0122 07:13:39.685042 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3702545a-242f-4b60-85b4-3eb809888177-ovsdbserver-sb\") pod \"dnsmasq-dns-659c7d5767-x2dcs\" (UID: \"3702545a-242f-4b60-85b4-3eb809888177\") " pod="openstack/dnsmasq-dns-659c7d5767-x2dcs" Jan 22 07:13:39 crc kubenswrapper[4933]: I0122 07:13:39.685070 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3702545a-242f-4b60-85b4-3eb809888177-config\") pod \"dnsmasq-dns-659c7d5767-x2dcs\" (UID: \"3702545a-242f-4b60-85b4-3eb809888177\") " pod="openstack/dnsmasq-dns-659c7d5767-x2dcs" Jan 22 07:13:39 crc kubenswrapper[4933]: I0122 07:13:39.685994 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3702545a-242f-4b60-85b4-3eb809888177-config\") pod \"dnsmasq-dns-659c7d5767-x2dcs\" (UID: \"3702545a-242f-4b60-85b4-3eb809888177\") " pod="openstack/dnsmasq-dns-659c7d5767-x2dcs" Jan 22 07:13:39 crc kubenswrapper[4933]: I0122 07:13:39.686121 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3702545a-242f-4b60-85b4-3eb809888177-ovsdbserver-nb\") pod \"dnsmasq-dns-659c7d5767-x2dcs\" (UID: \"3702545a-242f-4b60-85b4-3eb809888177\") " pod="openstack/dnsmasq-dns-659c7d5767-x2dcs" Jan 22 07:13:39 crc kubenswrapper[4933]: I0122 07:13:39.687038 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3702545a-242f-4b60-85b4-3eb809888177-ovsdbserver-sb\") pod \"dnsmasq-dns-659c7d5767-x2dcs\" (UID: \"3702545a-242f-4b60-85b4-3eb809888177\") " pod="openstack/dnsmasq-dns-659c7d5767-x2dcs" Jan 22 07:13:39 crc kubenswrapper[4933]: I0122 07:13:39.688012 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3702545a-242f-4b60-85b4-3eb809888177-dns-svc\") pod \"dnsmasq-dns-659c7d5767-x2dcs\" (UID: \"3702545a-242f-4b60-85b4-3eb809888177\") " pod="openstack/dnsmasq-dns-659c7d5767-x2dcs" Jan 22 07:13:39 crc kubenswrapper[4933]: I0122 07:13:39.704184 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sp4nb\" (UniqueName: \"kubernetes.io/projected/3702545a-242f-4b60-85b4-3eb809888177-kube-api-access-sp4nb\") pod \"dnsmasq-dns-659c7d5767-x2dcs\" (UID: \"3702545a-242f-4b60-85b4-3eb809888177\") " pod="openstack/dnsmasq-dns-659c7d5767-x2dcs" Jan 22 07:13:39 crc kubenswrapper[4933]: I0122 07:13:39.802728 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-659c7d5767-x2dcs" Jan 22 07:13:39 crc kubenswrapper[4933]: I0122 07:13:39.890660 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6dc945dddc-hxxqm"] Jan 22 07:13:39 crc kubenswrapper[4933]: I0122 07:13:39.933011 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Jan 22 07:13:40 crc kubenswrapper[4933]: I0122 07:13:40.105779 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Jan 22 07:13:40 crc kubenswrapper[4933]: I0122 07:13:40.105893 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-2" Jan 22 07:13:40 crc kubenswrapper[4933]: I0122 07:13:40.155608 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-2" Jan 22 07:13:40 crc kubenswrapper[4933]: I0122 07:13:40.205046 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-659c7d5767-x2dcs"] Jan 22 07:13:40 crc kubenswrapper[4933]: I0122 07:13:40.812317 4933 generic.go:334] "Generic (PLEG): container finished" podID="7b781883-00cc-4c64-b7b5-0038c7b32e44" containerID="7ac7e699b121f6453d35710efca311d9ea086a157642bb35c81b875bfa92c982" exitCode=0 Jan 22 07:13:40 crc kubenswrapper[4933]: I0122 07:13:40.812428 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6dc945dddc-hxxqm" event={"ID":"7b781883-00cc-4c64-b7b5-0038c7b32e44","Type":"ContainerDied","Data":"7ac7e699b121f6453d35710efca311d9ea086a157642bb35c81b875bfa92c982"} Jan 22 07:13:40 crc kubenswrapper[4933]: I0122 07:13:40.812656 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6dc945dddc-hxxqm" event={"ID":"7b781883-00cc-4c64-b7b5-0038c7b32e44","Type":"ContainerStarted","Data":"62b6bcbd6c927dff703b0ac6794e059c7dfdb9074a65909b9ee84ef5415e339d"} Jan 22 07:13:40 crc kubenswrapper[4933]: I0122 07:13:40.816024 4933 generic.go:334] "Generic (PLEG): container finished" podID="3702545a-242f-4b60-85b4-3eb809888177" containerID="b94be76d8de0b18c19ef9fa338e14cf38d83dffc65bb6688c37d94ea72e6aee7" exitCode=0 Jan 22 07:13:40 crc kubenswrapper[4933]: I0122 07:13:40.817220 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-659c7d5767-x2dcs" event={"ID":"3702545a-242f-4b60-85b4-3eb809888177","Type":"ContainerDied","Data":"b94be76d8de0b18c19ef9fa338e14cf38d83dffc65bb6688c37d94ea72e6aee7"} Jan 22 07:13:40 crc kubenswrapper[4933]: I0122 07:13:40.817271 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-659c7d5767-x2dcs" event={"ID":"3702545a-242f-4b60-85b4-3eb809888177","Type":"ContainerStarted","Data":"36ab808b3311d65e26e8356b5d31758ec509aec909d2dcb19575759535136cc9"} Jan 22 07:13:40 crc kubenswrapper[4933]: I0122 07:13:40.943166 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 07:13:40 crc kubenswrapper[4933]: I0122 07:13:40.943450 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" Jan 22 07:13:41 crc kubenswrapper[4933]: I0122 07:13:41.052387 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6dc945dddc-hxxqm" Jan 22 07:13:41 crc kubenswrapper[4933]: I0122 07:13:41.117071 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7b781883-00cc-4c64-b7b5-0038c7b32e44-dns-svc\") pod \"7b781883-00cc-4c64-b7b5-0038c7b32e44\" (UID: \"7b781883-00cc-4c64-b7b5-0038c7b32e44\") " Jan 22 07:13:41 crc kubenswrapper[4933]: I0122 07:13:41.117271 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7b781883-00cc-4c64-b7b5-0038c7b32e44-ovsdbserver-nb\") pod \"7b781883-00cc-4c64-b7b5-0038c7b32e44\" (UID: \"7b781883-00cc-4c64-b7b5-0038c7b32e44\") " Jan 22 07:13:41 crc kubenswrapper[4933]: I0122 07:13:41.117369 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-66qsx\" (UniqueName: \"kubernetes.io/projected/7b781883-00cc-4c64-b7b5-0038c7b32e44-kube-api-access-66qsx\") pod \"7b781883-00cc-4c64-b7b5-0038c7b32e44\" (UID: \"7b781883-00cc-4c64-b7b5-0038c7b32e44\") " Jan 22 07:13:41 crc kubenswrapper[4933]: I0122 07:13:41.117478 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b781883-00cc-4c64-b7b5-0038c7b32e44-config\") pod \"7b781883-00cc-4c64-b7b5-0038c7b32e44\" (UID: \"7b781883-00cc-4c64-b7b5-0038c7b32e44\") " Jan 22 07:13:41 crc kubenswrapper[4933]: I0122 07:13:41.126851 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b781883-00cc-4c64-b7b5-0038c7b32e44-kube-api-access-66qsx" (OuterVolumeSpecName: "kube-api-access-66qsx") pod "7b781883-00cc-4c64-b7b5-0038c7b32e44" (UID: "7b781883-00cc-4c64-b7b5-0038c7b32e44"). InnerVolumeSpecName "kube-api-access-66qsx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:13:41 crc kubenswrapper[4933]: I0122 07:13:41.135610 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b781883-00cc-4c64-b7b5-0038c7b32e44-config" (OuterVolumeSpecName: "config") pod "7b781883-00cc-4c64-b7b5-0038c7b32e44" (UID: "7b781883-00cc-4c64-b7b5-0038c7b32e44"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:13:41 crc kubenswrapper[4933]: I0122 07:13:41.136627 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b781883-00cc-4c64-b7b5-0038c7b32e44-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "7b781883-00cc-4c64-b7b5-0038c7b32e44" (UID: "7b781883-00cc-4c64-b7b5-0038c7b32e44"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:13:41 crc kubenswrapper[4933]: I0122 07:13:41.139862 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b781883-00cc-4c64-b7b5-0038c7b32e44-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7b781883-00cc-4c64-b7b5-0038c7b32e44" (UID: "7b781883-00cc-4c64-b7b5-0038c7b32e44"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:13:41 crc kubenswrapper[4933]: I0122 07:13:41.219471 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7b781883-00cc-4c64-b7b5-0038c7b32e44-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 07:13:41 crc kubenswrapper[4933]: I0122 07:13:41.219521 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-66qsx\" (UniqueName: \"kubernetes.io/projected/7b781883-00cc-4c64-b7b5-0038c7b32e44-kube-api-access-66qsx\") on node \"crc\" DevicePath \"\"" Jan 22 07:13:41 crc kubenswrapper[4933]: I0122 07:13:41.219534 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b781883-00cc-4c64-b7b5-0038c7b32e44-config\") on node \"crc\" DevicePath \"\"" Jan 22 07:13:41 crc kubenswrapper[4933]: I0122 07:13:41.219547 4933 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7b781883-00cc-4c64-b7b5-0038c7b32e44-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 07:13:41 crc kubenswrapper[4933]: I0122 07:13:41.825793 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6dc945dddc-hxxqm" event={"ID":"7b781883-00cc-4c64-b7b5-0038c7b32e44","Type":"ContainerDied","Data":"62b6bcbd6c927dff703b0ac6794e059c7dfdb9074a65909b9ee84ef5415e339d"} Jan 22 07:13:41 crc kubenswrapper[4933]: I0122 07:13:41.826139 4933 scope.go:117] "RemoveContainer" containerID="7ac7e699b121f6453d35710efca311d9ea086a157642bb35c81b875bfa92c982" Jan 22 07:13:41 crc kubenswrapper[4933]: I0122 07:13:41.825877 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6dc945dddc-hxxqm" Jan 22 07:13:41 crc kubenswrapper[4933]: I0122 07:13:41.848980 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-659c7d5767-x2dcs" event={"ID":"3702545a-242f-4b60-85b4-3eb809888177","Type":"ContainerStarted","Data":"6fa062a7da32beb1763d9846036314f83c2d235a2ddca68123708f974b92891b"} Jan 22 07:13:41 crc kubenswrapper[4933]: I0122 07:13:41.850144 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-659c7d5767-x2dcs" Jan 22 07:13:41 crc kubenswrapper[4933]: I0122 07:13:41.870873 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-659c7d5767-x2dcs" podStartSLOduration=2.870852406 podStartE2EDuration="2.870852406s" podCreationTimestamp="2026-01-22 07:13:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:13:41.87017163 +0000 UTC m=+5269.707296983" watchObservedRunningTime="2026-01-22 07:13:41.870852406 +0000 UTC m=+5269.707977759" Jan 22 07:13:41 crc kubenswrapper[4933]: I0122 07:13:41.914156 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6dc945dddc-hxxqm"] Jan 22 07:13:41 crc kubenswrapper[4933]: I0122 07:13:41.924750 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6dc945dddc-hxxqm"] Jan 22 07:13:42 crc kubenswrapper[4933]: I0122 07:13:42.501326 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7b781883-00cc-4c64-b7b5-0038c7b32e44" path="/var/lib/kubelet/pods/7b781883-00cc-4c64-b7b5-0038c7b32e44/volumes" Jan 22 07:13:42 crc kubenswrapper[4933]: I0122 07:13:42.778597 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openstack/ovsdbserver-nb-1" Jan 22 07:13:45 crc kubenswrapper[4933]: I0122 07:13:45.749403 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-copy-data"] Jan 22 07:13:45 crc kubenswrapper[4933]: E0122 07:13:45.750122 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b781883-00cc-4c64-b7b5-0038c7b32e44" containerName="init" Jan 22 07:13:45 crc kubenswrapper[4933]: I0122 07:13:45.750139 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b781883-00cc-4c64-b7b5-0038c7b32e44" containerName="init" Jan 22 07:13:45 crc kubenswrapper[4933]: I0122 07:13:45.750366 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b781883-00cc-4c64-b7b5-0038c7b32e44" containerName="init" Jan 22 07:13:45 crc kubenswrapper[4933]: I0122 07:13:45.752816 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-copy-data" Jan 22 07:13:45 crc kubenswrapper[4933]: I0122 07:13:45.756937 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovn-data-cert" Jan 22 07:13:45 crc kubenswrapper[4933]: I0122 07:13:45.757646 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-copy-data"] Jan 22 07:13:45 crc kubenswrapper[4933]: I0122 07:13:45.910695 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-a71a8ad7-512c-40c2-be34-86107845b772\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a71a8ad7-512c-40c2-be34-86107845b772\") pod \"ovn-copy-data\" (UID: \"6fabaea7-ea14-4e59-ba7e-c60429929e8c\") " pod="openstack/ovn-copy-data" Jan 22 07:13:45 crc kubenswrapper[4933]: I0122 07:13:45.910883 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/6fabaea7-ea14-4e59-ba7e-c60429929e8c-ovn-data-cert\") pod \"ovn-copy-data\" (UID: \"6fabaea7-ea14-4e59-ba7e-c60429929e8c\") " pod="openstack/ovn-copy-data" Jan 22 07:13:45 crc kubenswrapper[4933]: I0122 07:13:45.910998 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tcrsv\" (UniqueName: \"kubernetes.io/projected/6fabaea7-ea14-4e59-ba7e-c60429929e8c-kube-api-access-tcrsv\") pod \"ovn-copy-data\" (UID: \"6fabaea7-ea14-4e59-ba7e-c60429929e8c\") " pod="openstack/ovn-copy-data" Jan 22 07:13:46 crc kubenswrapper[4933]: I0122 07:13:46.012655 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/6fabaea7-ea14-4e59-ba7e-c60429929e8c-ovn-data-cert\") pod \"ovn-copy-data\" (UID: \"6fabaea7-ea14-4e59-ba7e-c60429929e8c\") " pod="openstack/ovn-copy-data" Jan 22 07:13:46 crc kubenswrapper[4933]: I0122 07:13:46.012748 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tcrsv\" (UniqueName: \"kubernetes.io/projected/6fabaea7-ea14-4e59-ba7e-c60429929e8c-kube-api-access-tcrsv\") pod \"ovn-copy-data\" (UID: \"6fabaea7-ea14-4e59-ba7e-c60429929e8c\") " pod="openstack/ovn-copy-data" Jan 22 07:13:46 crc kubenswrapper[4933]: I0122 07:13:46.012771 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-a71a8ad7-512c-40c2-be34-86107845b772\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a71a8ad7-512c-40c2-be34-86107845b772\") pod \"ovn-copy-data\" (UID: \"6fabaea7-ea14-4e59-ba7e-c60429929e8c\") " pod="openstack/ovn-copy-data" Jan 
22 07:13:46 crc kubenswrapper[4933]: I0122 07:13:46.015289 4933 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 22 07:13:46 crc kubenswrapper[4933]: I0122 07:13:46.015320 4933 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-a71a8ad7-512c-40c2-be34-86107845b772\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a71a8ad7-512c-40c2-be34-86107845b772\") pod \"ovn-copy-data\" (UID: \"6fabaea7-ea14-4e59-ba7e-c60429929e8c\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/192f26556661c8fa528067c39b11d3e12fefb603f6baa96ecda5670100a1839e/globalmount\"" pod="openstack/ovn-copy-data" Jan 22 07:13:46 crc kubenswrapper[4933]: I0122 07:13:46.020804 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/6fabaea7-ea14-4e59-ba7e-c60429929e8c-ovn-data-cert\") pod \"ovn-copy-data\" (UID: \"6fabaea7-ea14-4e59-ba7e-c60429929e8c\") " pod="openstack/ovn-copy-data" Jan 22 07:13:46 crc kubenswrapper[4933]: I0122 07:13:46.036740 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tcrsv\" (UniqueName: \"kubernetes.io/projected/6fabaea7-ea14-4e59-ba7e-c60429929e8c-kube-api-access-tcrsv\") pod \"ovn-copy-data\" (UID: \"6fabaea7-ea14-4e59-ba7e-c60429929e8c\") " pod="openstack/ovn-copy-data" Jan 22 07:13:46 crc kubenswrapper[4933]: I0122 07:13:46.049401 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-a71a8ad7-512c-40c2-be34-86107845b772\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a71a8ad7-512c-40c2-be34-86107845b772\") pod \"ovn-copy-data\" (UID: \"6fabaea7-ea14-4e59-ba7e-c60429929e8c\") " pod="openstack/ovn-copy-data"
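
The csi_attacher.go line above records why MountDevice is a no-op here: the kubevirt.io.hostpath-provisioner node plugin does not advertise the CSI STAGE_UNSTAGE_VOLUME capability, so kubelet skips the device-staging step and reports MountDevice as trivially succeeded before moving on to the per-pod SetUp. A schematic sketch of that decision; the types and names here are illustrative stand-ins, not the CSI spec's generated Go bindings.

package main

import "fmt"

// nodeCapability is an illustrative stand-in for the CSI
// NodeServiceCapability RPC-type enum.
type nodeCapability string

const capStageUnstage nodeCapability = "STAGE_UNSTAGE_VOLUME"

// deviceMountRequired mirrors the decision behind the log line: only
// plugins that advertise STAGE_UNSTAGE_VOLUME get a device-staging
// (MountDevice) call before the per-pod publish (SetUp).
func deviceMountRequired(caps []nodeCapability) bool {
	for _, c := range caps {
		if c == capStageUnstage {
			return true
		}
	}
	return false
}

func main() {
	// Per the log, kubevirt.io.hostpath-provisioner does not set it.
	caps := []nodeCapability{}
	if !deviceMountRequired(caps) {
		fmt.Println("STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...")
	}
}
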
Need to start a new one" pod="openstack/ovn-copy-data" Jan 22 07:13:46 crc kubenswrapper[4933]: I0122 07:13:46.604169 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-copy-data"] Jan 22 07:13:46 crc kubenswrapper[4933]: I0122 07:13:46.609137 4933 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 07:13:46 crc kubenswrapper[4933]: I0122 07:13:46.910068 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-copy-data" event={"ID":"6fabaea7-ea14-4e59-ba7e-c60429929e8c","Type":"ContainerStarted","Data":"b8076e9dbaeed2a0237ec4bf10060636ed2697fa140d774b2c7105d663135424"} Jan 22 07:13:47 crc kubenswrapper[4933]: I0122 07:13:47.919907 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-copy-data" event={"ID":"6fabaea7-ea14-4e59-ba7e-c60429929e8c","Type":"ContainerStarted","Data":"e1d85e5ae3ba0ccbbcbb7351b37617fff9f4a1ac70b6595b934f4608fc36682e"} Jan 22 07:13:47 crc kubenswrapper[4933]: I0122 07:13:47.938895 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-copy-data" podStartSLOduration=3.079807065 podStartE2EDuration="3.938876079s" podCreationTimestamp="2026-01-22 07:13:44 +0000 UTC" firstStartedPulling="2026-01-22 07:13:46.608853986 +0000 UTC m=+5274.445979339" lastFinishedPulling="2026-01-22 07:13:47.467923 +0000 UTC m=+5275.305048353" observedRunningTime="2026-01-22 07:13:47.93230292 +0000 UTC m=+5275.769428273" watchObservedRunningTime="2026-01-22 07:13:47.938876079 +0000 UTC m=+5275.776001432" Jan 22 07:13:49 crc kubenswrapper[4933]: I0122 07:13:49.805294 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-659c7d5767-x2dcs" Jan 22 07:13:49 crc kubenswrapper[4933]: I0122 07:13:49.863251 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-699964fbc-sf4ct"] Jan 22 07:13:49 crc kubenswrapper[4933]: I0122 07:13:49.863964 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-699964fbc-sf4ct" podUID="31f1a4e0-2ca2-41ee-8c42-48a7144e1e58" containerName="dnsmasq-dns" containerID="cri-o://1c718a4131b53ab8a1b84949a51ecaa237e7f4c4fba0d82248229b7b4a686274" gracePeriod=10 Jan 22 07:13:50 crc kubenswrapper[4933]: I0122 07:13:50.367636 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-699964fbc-sf4ct" Jan 22 07:13:50 crc kubenswrapper[4933]: I0122 07:13:50.502433 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/31f1a4e0-2ca2-41ee-8c42-48a7144e1e58-dns-svc\") pod \"31f1a4e0-2ca2-41ee-8c42-48a7144e1e58\" (UID: \"31f1a4e0-2ca2-41ee-8c42-48a7144e1e58\") " Jan 22 07:13:50 crc kubenswrapper[4933]: I0122 07:13:50.502783 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n2kfs\" (UniqueName: \"kubernetes.io/projected/31f1a4e0-2ca2-41ee-8c42-48a7144e1e58-kube-api-access-n2kfs\") pod \"31f1a4e0-2ca2-41ee-8c42-48a7144e1e58\" (UID: \"31f1a4e0-2ca2-41ee-8c42-48a7144e1e58\") " Jan 22 07:13:50 crc kubenswrapper[4933]: I0122 07:13:50.502859 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/31f1a4e0-2ca2-41ee-8c42-48a7144e1e58-config\") pod \"31f1a4e0-2ca2-41ee-8c42-48a7144e1e58\" (UID: \"31f1a4e0-2ca2-41ee-8c42-48a7144e1e58\") " Jan 22 07:13:50 crc kubenswrapper[4933]: I0122 07:13:50.509036 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31f1a4e0-2ca2-41ee-8c42-48a7144e1e58-kube-api-access-n2kfs" (OuterVolumeSpecName: "kube-api-access-n2kfs") pod "31f1a4e0-2ca2-41ee-8c42-48a7144e1e58" (UID: "31f1a4e0-2ca2-41ee-8c42-48a7144e1e58"). InnerVolumeSpecName "kube-api-access-n2kfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:13:50 crc kubenswrapper[4933]: I0122 07:13:50.550398 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31f1a4e0-2ca2-41ee-8c42-48a7144e1e58-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "31f1a4e0-2ca2-41ee-8c42-48a7144e1e58" (UID: "31f1a4e0-2ca2-41ee-8c42-48a7144e1e58"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:13:50 crc kubenswrapper[4933]: I0122 07:13:50.578295 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31f1a4e0-2ca2-41ee-8c42-48a7144e1e58-config" (OuterVolumeSpecName: "config") pod "31f1a4e0-2ca2-41ee-8c42-48a7144e1e58" (UID: "31f1a4e0-2ca2-41ee-8c42-48a7144e1e58"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:13:50 crc kubenswrapper[4933]: I0122 07:13:50.604480 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/31f1a4e0-2ca2-41ee-8c42-48a7144e1e58-config\") on node \"crc\" DevicePath \"\"" Jan 22 07:13:50 crc kubenswrapper[4933]: I0122 07:13:50.604684 4933 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/31f1a4e0-2ca2-41ee-8c42-48a7144e1e58-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 07:13:50 crc kubenswrapper[4933]: I0122 07:13:50.604742 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n2kfs\" (UniqueName: \"kubernetes.io/projected/31f1a4e0-2ca2-41ee-8c42-48a7144e1e58-kube-api-access-n2kfs\") on node \"crc\" DevicePath \"\"" Jan 22 07:13:50 crc kubenswrapper[4933]: I0122 07:13:50.941277 4933 generic.go:334] "Generic (PLEG): container finished" podID="31f1a4e0-2ca2-41ee-8c42-48a7144e1e58" containerID="1c718a4131b53ab8a1b84949a51ecaa237e7f4c4fba0d82248229b7b4a686274" exitCode=0 Jan 22 07:13:50 crc kubenswrapper[4933]: I0122 07:13:50.941326 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-699964fbc-sf4ct" event={"ID":"31f1a4e0-2ca2-41ee-8c42-48a7144e1e58","Type":"ContainerDied","Data":"1c718a4131b53ab8a1b84949a51ecaa237e7f4c4fba0d82248229b7b4a686274"} Jan 22 07:13:50 crc kubenswrapper[4933]: I0122 07:13:50.941346 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-699964fbc-sf4ct" Jan 22 07:13:50 crc kubenswrapper[4933]: I0122 07:13:50.941358 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-699964fbc-sf4ct" event={"ID":"31f1a4e0-2ca2-41ee-8c42-48a7144e1e58","Type":"ContainerDied","Data":"0c028689b1970118dc2cd2530070d8f8d79d501216231c20d5f1d8424f7fbbda"} Jan 22 07:13:50 crc kubenswrapper[4933]: I0122 07:13:50.941384 4933 scope.go:117] "RemoveContainer" containerID="1c718a4131b53ab8a1b84949a51ecaa237e7f4c4fba0d82248229b7b4a686274" Jan 22 07:13:50 crc kubenswrapper[4933]: I0122 07:13:50.980628 4933 scope.go:117] "RemoveContainer" containerID="6b8a00340b9a4cd0e1a3542db801f746b4ae971d4e66511d861615eaff3b4ea5" Jan 22 07:13:50 crc kubenswrapper[4933]: I0122 07:13:50.984682 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-699964fbc-sf4ct"] Jan 22 07:13:50 crc kubenswrapper[4933]: I0122 07:13:50.991526 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-699964fbc-sf4ct"] Jan 22 07:13:51 crc kubenswrapper[4933]: I0122 07:13:51.027540 4933 scope.go:117] "RemoveContainer" containerID="1c718a4131b53ab8a1b84949a51ecaa237e7f4c4fba0d82248229b7b4a686274" Jan 22 07:13:51 crc kubenswrapper[4933]: E0122 07:13:51.027894 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1c718a4131b53ab8a1b84949a51ecaa237e7f4c4fba0d82248229b7b4a686274\": container with ID starting with 1c718a4131b53ab8a1b84949a51ecaa237e7f4c4fba0d82248229b7b4a686274 not found: ID does not exist" containerID="1c718a4131b53ab8a1b84949a51ecaa237e7f4c4fba0d82248229b7b4a686274" Jan 22 07:13:51 crc kubenswrapper[4933]: I0122 07:13:51.027931 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c718a4131b53ab8a1b84949a51ecaa237e7f4c4fba0d82248229b7b4a686274"} err="failed to get container status 
\"1c718a4131b53ab8a1b84949a51ecaa237e7f4c4fba0d82248229b7b4a686274\": rpc error: code = NotFound desc = could not find container \"1c718a4131b53ab8a1b84949a51ecaa237e7f4c4fba0d82248229b7b4a686274\": container with ID starting with 1c718a4131b53ab8a1b84949a51ecaa237e7f4c4fba0d82248229b7b4a686274 not found: ID does not exist" Jan 22 07:13:51 crc kubenswrapper[4933]: I0122 07:13:51.027953 4933 scope.go:117] "RemoveContainer" containerID="6b8a00340b9a4cd0e1a3542db801f746b4ae971d4e66511d861615eaff3b4ea5" Jan 22 07:13:51 crc kubenswrapper[4933]: E0122 07:13:51.028554 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6b8a00340b9a4cd0e1a3542db801f746b4ae971d4e66511d861615eaff3b4ea5\": container with ID starting with 6b8a00340b9a4cd0e1a3542db801f746b4ae971d4e66511d861615eaff3b4ea5 not found: ID does not exist" containerID="6b8a00340b9a4cd0e1a3542db801f746b4ae971d4e66511d861615eaff3b4ea5" Jan 22 07:13:51 crc kubenswrapper[4933]: I0122 07:13:51.028599 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6b8a00340b9a4cd0e1a3542db801f746b4ae971d4e66511d861615eaff3b4ea5"} err="failed to get container status \"6b8a00340b9a4cd0e1a3542db801f746b4ae971d4e66511d861615eaff3b4ea5\": rpc error: code = NotFound desc = could not find container \"6b8a00340b9a4cd0e1a3542db801f746b4ae971d4e66511d861615eaff3b4ea5\": container with ID starting with 6b8a00340b9a4cd0e1a3542db801f746b4ae971d4e66511d861615eaff3b4ea5 not found: ID does not exist" Jan 22 07:13:52 crc kubenswrapper[4933]: I0122 07:13:52.503566 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31f1a4e0-2ca2-41ee-8c42-48a7144e1e58" path="/var/lib/kubelet/pods/31f1a4e0-2ca2-41ee-8c42-48a7144e1e58/volumes" Jan 22 07:13:52 crc kubenswrapper[4933]: I0122 07:13:52.918976 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Jan 22 07:13:52 crc kubenswrapper[4933]: E0122 07:13:52.919324 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31f1a4e0-2ca2-41ee-8c42-48a7144e1e58" containerName="dnsmasq-dns" Jan 22 07:13:52 crc kubenswrapper[4933]: I0122 07:13:52.919345 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="31f1a4e0-2ca2-41ee-8c42-48a7144e1e58" containerName="dnsmasq-dns" Jan 22 07:13:52 crc kubenswrapper[4933]: E0122 07:13:52.919369 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31f1a4e0-2ca2-41ee-8c42-48a7144e1e58" containerName="init" Jan 22 07:13:52 crc kubenswrapper[4933]: I0122 07:13:52.919375 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="31f1a4e0-2ca2-41ee-8c42-48a7144e1e58" containerName="init" Jan 22 07:13:52 crc kubenswrapper[4933]: I0122 07:13:52.919514 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="31f1a4e0-2ca2-41ee-8c42-48a7144e1e58" containerName="dnsmasq-dns" Jan 22 07:13:52 crc kubenswrapper[4933]: I0122 07:13:52.920495 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Jan 22 07:13:52 crc kubenswrapper[4933]: I0122 07:13:52.927565 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Jan 22 07:13:52 crc kubenswrapper[4933]: I0122 07:13:52.927769 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Jan 22 07:13:52 crc kubenswrapper[4933]: I0122 07:13:52.927922 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-v4nml" Jan 22 07:13:52 crc kubenswrapper[4933]: I0122 07:13:52.927580 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Jan 22 07:13:52 crc kubenswrapper[4933]: I0122 07:13:52.953543 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Jan 22 07:13:53 crc kubenswrapper[4933]: I0122 07:13:53.049128 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/989ef748-4e1c-41fe-b299-90edf5c5b618-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"989ef748-4e1c-41fe-b299-90edf5c5b618\") " pod="openstack/ovn-northd-0" Jan 22 07:13:53 crc kubenswrapper[4933]: I0122 07:13:53.049175 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/989ef748-4e1c-41fe-b299-90edf5c5b618-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"989ef748-4e1c-41fe-b299-90edf5c5b618\") " pod="openstack/ovn-northd-0" Jan 22 07:13:53 crc kubenswrapper[4933]: I0122 07:13:53.049241 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/989ef748-4e1c-41fe-b299-90edf5c5b618-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"989ef748-4e1c-41fe-b299-90edf5c5b618\") " pod="openstack/ovn-northd-0" Jan 22 07:13:53 crc kubenswrapper[4933]: I0122 07:13:53.049271 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/989ef748-4e1c-41fe-b299-90edf5c5b618-config\") pod \"ovn-northd-0\" (UID: \"989ef748-4e1c-41fe-b299-90edf5c5b618\") " pod="openstack/ovn-northd-0" Jan 22 07:13:53 crc kubenswrapper[4933]: I0122 07:13:53.049483 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/989ef748-4e1c-41fe-b299-90edf5c5b618-scripts\") pod \"ovn-northd-0\" (UID: \"989ef748-4e1c-41fe-b299-90edf5c5b618\") " pod="openstack/ovn-northd-0" Jan 22 07:13:53 crc kubenswrapper[4933]: I0122 07:13:53.049534 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pkdxh\" (UniqueName: \"kubernetes.io/projected/989ef748-4e1c-41fe-b299-90edf5c5b618-kube-api-access-pkdxh\") pod \"ovn-northd-0\" (UID: \"989ef748-4e1c-41fe-b299-90edf5c5b618\") " pod="openstack/ovn-northd-0" Jan 22 07:13:53 crc kubenswrapper[4933]: I0122 07:13:53.049638 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/989ef748-4e1c-41fe-b299-90edf5c5b618-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"989ef748-4e1c-41fe-b299-90edf5c5b618\") " pod="openstack/ovn-northd-0" Jan 22 07:13:53 crc kubenswrapper[4933]: 
Jan 22 07:13:53 crc kubenswrapper[4933]: I0122 07:13:53.151390 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/989ef748-4e1c-41fe-b299-90edf5c5b618-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"989ef748-4e1c-41fe-b299-90edf5c5b618\") " pod="openstack/ovn-northd-0" Jan 22 07:13:53 crc kubenswrapper[4933]: I0122 07:13:53.151832 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/989ef748-4e1c-41fe-b299-90edf5c5b618-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"989ef748-4e1c-41fe-b299-90edf5c5b618\") " pod="openstack/ovn-northd-0" Jan 22 07:13:53 crc kubenswrapper[4933]: I0122 07:13:53.151866 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/989ef748-4e1c-41fe-b299-90edf5c5b618-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"989ef748-4e1c-41fe-b299-90edf5c5b618\") " pod="openstack/ovn-northd-0" Jan 22 07:13:53 crc kubenswrapper[4933]: I0122 07:13:53.151931 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/989ef748-4e1c-41fe-b299-90edf5c5b618-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"989ef748-4e1c-41fe-b299-90edf5c5b618\") " pod="openstack/ovn-northd-0" Jan 22 07:13:53 crc kubenswrapper[4933]: I0122 07:13:53.151973 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/989ef748-4e1c-41fe-b299-90edf5c5b618-config\") pod \"ovn-northd-0\" (UID: \"989ef748-4e1c-41fe-b299-90edf5c5b618\") " pod="openstack/ovn-northd-0" Jan 22 07:13:53 crc kubenswrapper[4933]: I0122 07:13:53.152050 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/989ef748-4e1c-41fe-b299-90edf5c5b618-scripts\") pod \"ovn-northd-0\" (UID: \"989ef748-4e1c-41fe-b299-90edf5c5b618\") " pod="openstack/ovn-northd-0" Jan 22 07:13:53 crc kubenswrapper[4933]: I0122 07:13:53.152090 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pkdxh\" (UniqueName: \"kubernetes.io/projected/989ef748-4e1c-41fe-b299-90edf5c5b618-kube-api-access-pkdxh\") pod \"ovn-northd-0\" (UID: \"989ef748-4e1c-41fe-b299-90edf5c5b618\") " pod="openstack/ovn-northd-0" Jan 22 07:13:53 crc kubenswrapper[4933]: I0122 07:13:53.152249 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/989ef748-4e1c-41fe-b299-90edf5c5b618-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"989ef748-4e1c-41fe-b299-90edf5c5b618\") " pod="openstack/ovn-northd-0" Jan 22 07:13:53 crc kubenswrapper[4933]: I0122 07:13:53.152968 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/989ef748-4e1c-41fe-b299-90edf5c5b618-config\") pod \"ovn-northd-0\" (UID: \"989ef748-4e1c-41fe-b299-90edf5c5b618\") " pod="openstack/ovn-northd-0" Jan 22 07:13:53 crc kubenswrapper[4933]: I0122 07:13:53.152998 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/989ef748-4e1c-41fe-b299-90edf5c5b618-scripts\") pod \"ovn-northd-0\" (UID: \"989ef748-4e1c-41fe-b299-90edf5c5b618\") " pod="openstack/ovn-northd-0" Jan 22 07:13:53 crc kubenswrapper[4933]: I0122 07:13:53.160140 4933
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/989ef748-4e1c-41fe-b299-90edf5c5b618-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"989ef748-4e1c-41fe-b299-90edf5c5b618\") " pod="openstack/ovn-northd-0" Jan 22 07:13:53 crc kubenswrapper[4933]: I0122 07:13:53.165237 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/989ef748-4e1c-41fe-b299-90edf5c5b618-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"989ef748-4e1c-41fe-b299-90edf5c5b618\") " pod="openstack/ovn-northd-0" Jan 22 07:13:53 crc kubenswrapper[4933]: I0122 07:13:53.173846 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/989ef748-4e1c-41fe-b299-90edf5c5b618-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"989ef748-4e1c-41fe-b299-90edf5c5b618\") " pod="openstack/ovn-northd-0" Jan 22 07:13:53 crc kubenswrapper[4933]: I0122 07:13:53.186828 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pkdxh\" (UniqueName: \"kubernetes.io/projected/989ef748-4e1c-41fe-b299-90edf5c5b618-kube-api-access-pkdxh\") pod \"ovn-northd-0\" (UID: \"989ef748-4e1c-41fe-b299-90edf5c5b618\") " pod="openstack/ovn-northd-0" Jan 22 07:13:53 crc kubenswrapper[4933]: I0122 07:13:53.249944 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Jan 22 07:13:53 crc kubenswrapper[4933]: I0122 07:13:53.699376 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Jan 22 07:13:53 crc kubenswrapper[4933]: W0122 07:13:53.701224 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod989ef748_4e1c_41fe_b299_90edf5c5b618.slice/crio-7525e6ca4e36dfc2234b7ba160d53dec2ff4c105d2d54ccd9624a20f1e05e1cf WatchSource:0}: Error finding container 7525e6ca4e36dfc2234b7ba160d53dec2ff4c105d2d54ccd9624a20f1e05e1cf: Status 404 returned error can't find the container with id 7525e6ca4e36dfc2234b7ba160d53dec2ff4c105d2d54ccd9624a20f1e05e1cf Jan 22 07:13:53 crc kubenswrapper[4933]: I0122 07:13:53.966156 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"989ef748-4e1c-41fe-b299-90edf5c5b618","Type":"ContainerStarted","Data":"62244bdb9c1de86caa66600b3b6db95ab8e879d08a179c1ff2fb0225ca0d8351"} Jan 22 07:13:53 crc kubenswrapper[4933]: I0122 07:13:53.966430 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"989ef748-4e1c-41fe-b299-90edf5c5b618","Type":"ContainerStarted","Data":"7525e6ca4e36dfc2234b7ba160d53dec2ff4c105d2d54ccd9624a20f1e05e1cf"} Jan 22 07:13:54 crc kubenswrapper[4933]: I0122 07:13:54.974646 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"989ef748-4e1c-41fe-b299-90edf5c5b618","Type":"ContainerStarted","Data":"48e54c33f17921c2377e4c4ba682711b2bbe7a27ece4248ee501a6f45d8c95f3"} Jan 22 07:13:54 crc kubenswrapper[4933]: I0122 07:13:54.975201 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Jan 22 07:13:54 crc kubenswrapper[4933]: I0122 07:13:54.996403 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.996352634 podStartE2EDuration="2.996352634s" podCreationTimestamp="2026-01-22 
07:13:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:13:54.989298453 +0000 UTC m=+5282.826423806" watchObservedRunningTime="2026-01-22 07:13:54.996352634 +0000 UTC m=+5282.833477987" Jan 22 07:13:58 crc kubenswrapper[4933]: I0122 07:13:58.196994 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-q5kxm"] Jan 22 07:13:58 crc kubenswrapper[4933]: I0122 07:13:58.197967 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-q5kxm" Jan 22 07:13:58 crc kubenswrapper[4933]: I0122 07:13:58.211109 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-q5kxm"] Jan 22 07:13:58 crc kubenswrapper[4933]: I0122 07:13:58.299386 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-f535-account-create-update-j5wk2"] Jan 22 07:13:58 crc kubenswrapper[4933]: I0122 07:13:58.300600 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-f535-account-create-update-j5wk2" Jan 22 07:13:58 crc kubenswrapper[4933]: I0122 07:13:58.306293 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Jan 22 07:13:58 crc kubenswrapper[4933]: I0122 07:13:58.308912 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-f535-account-create-update-j5wk2"] Jan 22 07:13:58 crc kubenswrapper[4933]: I0122 07:13:58.347199 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wgxq8\" (UniqueName: \"kubernetes.io/projected/c0a694f6-8abf-4b41-84e9-73d4d8a061ff-kube-api-access-wgxq8\") pod \"keystone-db-create-q5kxm\" (UID: \"c0a694f6-8abf-4b41-84e9-73d4d8a061ff\") " pod="openstack/keystone-db-create-q5kxm" Jan 22 07:13:58 crc kubenswrapper[4933]: I0122 07:13:58.347369 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c0a694f6-8abf-4b41-84e9-73d4d8a061ff-operator-scripts\") pod \"keystone-db-create-q5kxm\" (UID: \"c0a694f6-8abf-4b41-84e9-73d4d8a061ff\") " pod="openstack/keystone-db-create-q5kxm" Jan 22 07:13:58 crc kubenswrapper[4933]: I0122 07:13:58.449013 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cb028b66-afaa-4d7f-a2c4-d72b6a5afe43-operator-scripts\") pod \"keystone-f535-account-create-update-j5wk2\" (UID: \"cb028b66-afaa-4d7f-a2c4-d72b6a5afe43\") " pod="openstack/keystone-f535-account-create-update-j5wk2" Jan 22 07:13:58 crc kubenswrapper[4933]: I0122 07:13:58.449107 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c0a694f6-8abf-4b41-84e9-73d4d8a061ff-operator-scripts\") pod \"keystone-db-create-q5kxm\" (UID: \"c0a694f6-8abf-4b41-84e9-73d4d8a061ff\") " pod="openstack/keystone-db-create-q5kxm" Jan 22 07:13:58 crc kubenswrapper[4933]: I0122 07:13:58.449166 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ww7w8\" (UniqueName: \"kubernetes.io/projected/cb028b66-afaa-4d7f-a2c4-d72b6a5afe43-kube-api-access-ww7w8\") pod \"keystone-f535-account-create-update-j5wk2\" (UID: \"cb028b66-afaa-4d7f-a2c4-d72b6a5afe43\") " 
pod="openstack/keystone-f535-account-create-update-j5wk2" Jan 22 07:13:58 crc kubenswrapper[4933]: I0122 07:13:58.449373 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wgxq8\" (UniqueName: \"kubernetes.io/projected/c0a694f6-8abf-4b41-84e9-73d4d8a061ff-kube-api-access-wgxq8\") pod \"keystone-db-create-q5kxm\" (UID: \"c0a694f6-8abf-4b41-84e9-73d4d8a061ff\") " pod="openstack/keystone-db-create-q5kxm" Jan 22 07:13:58 crc kubenswrapper[4933]: I0122 07:13:58.450056 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c0a694f6-8abf-4b41-84e9-73d4d8a061ff-operator-scripts\") pod \"keystone-db-create-q5kxm\" (UID: \"c0a694f6-8abf-4b41-84e9-73d4d8a061ff\") " pod="openstack/keystone-db-create-q5kxm" Jan 22 07:13:58 crc kubenswrapper[4933]: I0122 07:13:58.473018 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wgxq8\" (UniqueName: \"kubernetes.io/projected/c0a694f6-8abf-4b41-84e9-73d4d8a061ff-kube-api-access-wgxq8\") pod \"keystone-db-create-q5kxm\" (UID: \"c0a694f6-8abf-4b41-84e9-73d4d8a061ff\") " pod="openstack/keystone-db-create-q5kxm" Jan 22 07:13:58 crc kubenswrapper[4933]: I0122 07:13:58.517633 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-q5kxm" Jan 22 07:13:58 crc kubenswrapper[4933]: I0122 07:13:58.551284 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ww7w8\" (UniqueName: \"kubernetes.io/projected/cb028b66-afaa-4d7f-a2c4-d72b6a5afe43-kube-api-access-ww7w8\") pod \"keystone-f535-account-create-update-j5wk2\" (UID: \"cb028b66-afaa-4d7f-a2c4-d72b6a5afe43\") " pod="openstack/keystone-f535-account-create-update-j5wk2" Jan 22 07:13:58 crc kubenswrapper[4933]: I0122 07:13:58.551808 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cb028b66-afaa-4d7f-a2c4-d72b6a5afe43-operator-scripts\") pod \"keystone-f535-account-create-update-j5wk2\" (UID: \"cb028b66-afaa-4d7f-a2c4-d72b6a5afe43\") " pod="openstack/keystone-f535-account-create-update-j5wk2" Jan 22 07:13:58 crc kubenswrapper[4933]: I0122 07:13:58.552683 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cb028b66-afaa-4d7f-a2c4-d72b6a5afe43-operator-scripts\") pod \"keystone-f535-account-create-update-j5wk2\" (UID: \"cb028b66-afaa-4d7f-a2c4-d72b6a5afe43\") " pod="openstack/keystone-f535-account-create-update-j5wk2" Jan 22 07:13:58 crc kubenswrapper[4933]: I0122 07:13:58.571703 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ww7w8\" (UniqueName: \"kubernetes.io/projected/cb028b66-afaa-4d7f-a2c4-d72b6a5afe43-kube-api-access-ww7w8\") pod \"keystone-f535-account-create-update-j5wk2\" (UID: \"cb028b66-afaa-4d7f-a2c4-d72b6a5afe43\") " pod="openstack/keystone-f535-account-create-update-j5wk2" Jan 22 07:13:58 crc kubenswrapper[4933]: I0122 07:13:58.616570 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-f535-account-create-update-j5wk2" Jan 22 07:13:59 crc kubenswrapper[4933]: I0122 07:13:59.040014 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-q5kxm"] Jan 22 07:13:59 crc kubenswrapper[4933]: W0122 07:13:59.042996 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc0a694f6_8abf_4b41_84e9_73d4d8a061ff.slice/crio-b1efc73f302d594eb841237b657fda5b22af5fd5f36004d98114feecf454d7e1 WatchSource:0}: Error finding container b1efc73f302d594eb841237b657fda5b22af5fd5f36004d98114feecf454d7e1: Status 404 returned error can't find the container with id b1efc73f302d594eb841237b657fda5b22af5fd5f36004d98114feecf454d7e1 Jan 22 07:13:59 crc kubenswrapper[4933]: I0122 07:13:59.207285 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-f535-account-create-update-j5wk2"] Jan 22 07:13:59 crc kubenswrapper[4933]: W0122 07:13:59.214199 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcb028b66_afaa_4d7f_a2c4_d72b6a5afe43.slice/crio-44c8f59e50c28cd321cd82f336e090a7368aa8aafee772cdc3e70257eb256aa1 WatchSource:0}: Error finding container 44c8f59e50c28cd321cd82f336e090a7368aa8aafee772cdc3e70257eb256aa1: Status 404 returned error can't find the container with id 44c8f59e50c28cd321cd82f336e090a7368aa8aafee772cdc3e70257eb256aa1 Jan 22 07:14:00 crc kubenswrapper[4933]: I0122 07:14:00.019260 4933 generic.go:334] "Generic (PLEG): container finished" podID="c0a694f6-8abf-4b41-84e9-73d4d8a061ff" containerID="3a208f7491446a44a0e279507c230499e6148f372264d1f816ea078bc13f68b5" exitCode=0 Jan 22 07:14:00 crc kubenswrapper[4933]: I0122 07:14:00.019390 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-q5kxm" event={"ID":"c0a694f6-8abf-4b41-84e9-73d4d8a061ff","Type":"ContainerDied","Data":"3a208f7491446a44a0e279507c230499e6148f372264d1f816ea078bc13f68b5"} Jan 22 07:14:00 crc kubenswrapper[4933]: I0122 07:14:00.019691 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-q5kxm" event={"ID":"c0a694f6-8abf-4b41-84e9-73d4d8a061ff","Type":"ContainerStarted","Data":"b1efc73f302d594eb841237b657fda5b22af5fd5f36004d98114feecf454d7e1"} Jan 22 07:14:00 crc kubenswrapper[4933]: I0122 07:14:00.020877 4933 generic.go:334] "Generic (PLEG): container finished" podID="cb028b66-afaa-4d7f-a2c4-d72b6a5afe43" containerID="b737bba133f9eb65d5ad3f2072b14b93d2bac0b82b38c9b6e536f78e4c11d5a1" exitCode=0 Jan 22 07:14:00 crc kubenswrapper[4933]: I0122 07:14:00.020945 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-f535-account-create-update-j5wk2" event={"ID":"cb028b66-afaa-4d7f-a2c4-d72b6a5afe43","Type":"ContainerDied","Data":"b737bba133f9eb65d5ad3f2072b14b93d2bac0b82b38c9b6e536f78e4c11d5a1"} Jan 22 07:14:00 crc kubenswrapper[4933]: I0122 07:14:00.020974 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-f535-account-create-update-j5wk2" event={"ID":"cb028b66-afaa-4d7f-a2c4-d72b6a5afe43","Type":"ContainerStarted","Data":"44c8f59e50c28cd321cd82f336e090a7368aa8aafee772cdc3e70257eb256aa1"} Jan 22 07:14:01 crc kubenswrapper[4933]: I0122 07:14:01.555160 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-f535-account-create-update-j5wk2" Jan 22 07:14:01 crc kubenswrapper[4933]: I0122 07:14:01.560956 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-q5kxm" Jan 22 07:14:01 crc kubenswrapper[4933]: I0122 07:14:01.704660 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c0a694f6-8abf-4b41-84e9-73d4d8a061ff-operator-scripts\") pod \"c0a694f6-8abf-4b41-84e9-73d4d8a061ff\" (UID: \"c0a694f6-8abf-4b41-84e9-73d4d8a061ff\") " Jan 22 07:14:01 crc kubenswrapper[4933]: I0122 07:14:01.704711 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wgxq8\" (UniqueName: \"kubernetes.io/projected/c0a694f6-8abf-4b41-84e9-73d4d8a061ff-kube-api-access-wgxq8\") pod \"c0a694f6-8abf-4b41-84e9-73d4d8a061ff\" (UID: \"c0a694f6-8abf-4b41-84e9-73d4d8a061ff\") " Jan 22 07:14:01 crc kubenswrapper[4933]: I0122 07:14:01.704774 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cb028b66-afaa-4d7f-a2c4-d72b6a5afe43-operator-scripts\") pod \"cb028b66-afaa-4d7f-a2c4-d72b6a5afe43\" (UID: \"cb028b66-afaa-4d7f-a2c4-d72b6a5afe43\") " Jan 22 07:14:01 crc kubenswrapper[4933]: I0122 07:14:01.704826 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ww7w8\" (UniqueName: \"kubernetes.io/projected/cb028b66-afaa-4d7f-a2c4-d72b6a5afe43-kube-api-access-ww7w8\") pod \"cb028b66-afaa-4d7f-a2c4-d72b6a5afe43\" (UID: \"cb028b66-afaa-4d7f-a2c4-d72b6a5afe43\") " Jan 22 07:14:01 crc kubenswrapper[4933]: I0122 07:14:01.705847 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb028b66-afaa-4d7f-a2c4-d72b6a5afe43-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "cb028b66-afaa-4d7f-a2c4-d72b6a5afe43" (UID: "cb028b66-afaa-4d7f-a2c4-d72b6a5afe43"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:14:01 crc kubenswrapper[4933]: I0122 07:14:01.705855 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c0a694f6-8abf-4b41-84e9-73d4d8a061ff-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c0a694f6-8abf-4b41-84e9-73d4d8a061ff" (UID: "c0a694f6-8abf-4b41-84e9-73d4d8a061ff"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:14:01 crc kubenswrapper[4933]: I0122 07:14:01.716346 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb028b66-afaa-4d7f-a2c4-d72b6a5afe43-kube-api-access-ww7w8" (OuterVolumeSpecName: "kube-api-access-ww7w8") pod "cb028b66-afaa-4d7f-a2c4-d72b6a5afe43" (UID: "cb028b66-afaa-4d7f-a2c4-d72b6a5afe43"). InnerVolumeSpecName "kube-api-access-ww7w8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:14:01 crc kubenswrapper[4933]: I0122 07:14:01.716467 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c0a694f6-8abf-4b41-84e9-73d4d8a061ff-kube-api-access-wgxq8" (OuterVolumeSpecName: "kube-api-access-wgxq8") pod "c0a694f6-8abf-4b41-84e9-73d4d8a061ff" (UID: "c0a694f6-8abf-4b41-84e9-73d4d8a061ff"). InnerVolumeSpecName "kube-api-access-wgxq8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:14:01 crc kubenswrapper[4933]: I0122 07:14:01.806527 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c0a694f6-8abf-4b41-84e9-73d4d8a061ff-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:14:01 crc kubenswrapper[4933]: I0122 07:14:01.806720 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wgxq8\" (UniqueName: \"kubernetes.io/projected/c0a694f6-8abf-4b41-84e9-73d4d8a061ff-kube-api-access-wgxq8\") on node \"crc\" DevicePath \"\"" Jan 22 07:14:01 crc kubenswrapper[4933]: I0122 07:14:01.806778 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cb028b66-afaa-4d7f-a2c4-d72b6a5afe43-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:14:01 crc kubenswrapper[4933]: I0122 07:14:01.806861 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ww7w8\" (UniqueName: \"kubernetes.io/projected/cb028b66-afaa-4d7f-a2c4-d72b6a5afe43-kube-api-access-ww7w8\") on node \"crc\" DevicePath \"\"" Jan 22 07:14:02 crc kubenswrapper[4933]: I0122 07:14:02.037443 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-q5kxm" Jan 22 07:14:02 crc kubenswrapper[4933]: I0122 07:14:02.037468 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-q5kxm" event={"ID":"c0a694f6-8abf-4b41-84e9-73d4d8a061ff","Type":"ContainerDied","Data":"b1efc73f302d594eb841237b657fda5b22af5fd5f36004d98114feecf454d7e1"} Jan 22 07:14:02 crc kubenswrapper[4933]: I0122 07:14:02.037973 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b1efc73f302d594eb841237b657fda5b22af5fd5f36004d98114feecf454d7e1" Jan 22 07:14:02 crc kubenswrapper[4933]: I0122 07:14:02.039276 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-f535-account-create-update-j5wk2" event={"ID":"cb028b66-afaa-4d7f-a2c4-d72b6a5afe43","Type":"ContainerDied","Data":"44c8f59e50c28cd321cd82f336e090a7368aa8aafee772cdc3e70257eb256aa1"} Jan 22 07:14:02 crc kubenswrapper[4933]: I0122 07:14:02.039317 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="44c8f59e50c28cd321cd82f336e090a7368aa8aafee772cdc3e70257eb256aa1" Jan 22 07:14:02 crc kubenswrapper[4933]: I0122 07:14:02.039352 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-f535-account-create-update-j5wk2" Jan 22 07:14:03 crc kubenswrapper[4933]: I0122 07:14:03.327589 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Jan 22 07:14:03 crc kubenswrapper[4933]: I0122 07:14:03.729218 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-tct52"] Jan 22 07:14:03 crc kubenswrapper[4933]: E0122 07:14:03.729686 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb028b66-afaa-4d7f-a2c4-d72b6a5afe43" containerName="mariadb-account-create-update" Jan 22 07:14:03 crc kubenswrapper[4933]: I0122 07:14:03.729712 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb028b66-afaa-4d7f-a2c4-d72b6a5afe43" containerName="mariadb-account-create-update" Jan 22 07:14:03 crc kubenswrapper[4933]: E0122 07:14:03.729743 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0a694f6-8abf-4b41-84e9-73d4d8a061ff" containerName="mariadb-database-create" Jan 22 07:14:03 crc kubenswrapper[4933]: I0122 07:14:03.729756 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0a694f6-8abf-4b41-84e9-73d4d8a061ff" containerName="mariadb-database-create" Jan 22 07:14:03 crc kubenswrapper[4933]: I0122 07:14:03.729998 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb028b66-afaa-4d7f-a2c4-d72b6a5afe43" containerName="mariadb-account-create-update" Jan 22 07:14:03 crc kubenswrapper[4933]: I0122 07:14:03.730027 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="c0a694f6-8abf-4b41-84e9-73d4d8a061ff" containerName="mariadb-database-create" Jan 22 07:14:03 crc kubenswrapper[4933]: I0122 07:14:03.730826 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-tct52"
Jan 22 07:14:03 crc kubenswrapper[4933]: I0122 07:14:03.732860 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-5n4dk"
Jan 22 07:14:03 crc kubenswrapper[4933]: I0122 07:14:03.733221 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Jan 22 07:14:03 crc kubenswrapper[4933]: I0122 07:14:03.733486 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Jan 22 07:14:03 crc kubenswrapper[4933]: I0122 07:14:03.733735 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Jan 22 07:14:03 crc kubenswrapper[4933]: I0122 07:14:03.737842 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-tct52"]
Jan 22 07:14:03 crc kubenswrapper[4933]: I0122 07:14:03.835023 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bce8ad4c-791a-4e23-9e52-c361e03f8674-combined-ca-bundle\") pod \"keystone-db-sync-tct52\" (UID: \"bce8ad4c-791a-4e23-9e52-c361e03f8674\") " pod="openstack/keystone-db-sync-tct52"
Jan 22 07:14:03 crc kubenswrapper[4933]: I0122 07:14:03.835091 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bce8ad4c-791a-4e23-9e52-c361e03f8674-config-data\") pod \"keystone-db-sync-tct52\" (UID: \"bce8ad4c-791a-4e23-9e52-c361e03f8674\") " pod="openstack/keystone-db-sync-tct52"
Jan 22 07:14:03 crc kubenswrapper[4933]: I0122 07:14:03.835133 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bd6sd\" (UniqueName: \"kubernetes.io/projected/bce8ad4c-791a-4e23-9e52-c361e03f8674-kube-api-access-bd6sd\") pod \"keystone-db-sync-tct52\" (UID: \"bce8ad4c-791a-4e23-9e52-c361e03f8674\") " pod="openstack/keystone-db-sync-tct52"
Jan 22 07:14:03 crc kubenswrapper[4933]: I0122 07:14:03.936902 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bce8ad4c-791a-4e23-9e52-c361e03f8674-combined-ca-bundle\") pod \"keystone-db-sync-tct52\" (UID: \"bce8ad4c-791a-4e23-9e52-c361e03f8674\") " pod="openstack/keystone-db-sync-tct52"
Jan 22 07:14:03 crc kubenswrapper[4933]: I0122 07:14:03.936966 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bce8ad4c-791a-4e23-9e52-c361e03f8674-config-data\") pod \"keystone-db-sync-tct52\" (UID: \"bce8ad4c-791a-4e23-9e52-c361e03f8674\") " pod="openstack/keystone-db-sync-tct52"
Jan 22 07:14:03 crc kubenswrapper[4933]: I0122 07:14:03.937001 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bd6sd\" (UniqueName: \"kubernetes.io/projected/bce8ad4c-791a-4e23-9e52-c361e03f8674-kube-api-access-bd6sd\") pod \"keystone-db-sync-tct52\" (UID: \"bce8ad4c-791a-4e23-9e52-c361e03f8674\") " pod="openstack/keystone-db-sync-tct52"
Jan 22 07:14:03 crc kubenswrapper[4933]: I0122 07:14:03.943628 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bce8ad4c-791a-4e23-9e52-c361e03f8674-config-data\") pod \"keystone-db-sync-tct52\" (UID: \"bce8ad4c-791a-4e23-9e52-c361e03f8674\") " pod="openstack/keystone-db-sync-tct52"
Jan 22 07:14:03 crc kubenswrapper[4933]: I0122 07:14:03.943643 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bce8ad4c-791a-4e23-9e52-c361e03f8674-combined-ca-bundle\") pod \"keystone-db-sync-tct52\" (UID: \"bce8ad4c-791a-4e23-9e52-c361e03f8674\") " pod="openstack/keystone-db-sync-tct52"
Jan 22 07:14:03 crc kubenswrapper[4933]: I0122 07:14:03.954574 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bd6sd\" (UniqueName: \"kubernetes.io/projected/bce8ad4c-791a-4e23-9e52-c361e03f8674-kube-api-access-bd6sd\") pod \"keystone-db-sync-tct52\" (UID: \"bce8ad4c-791a-4e23-9e52-c361e03f8674\") " pod="openstack/keystone-db-sync-tct52"
Jan 22 07:14:04 crc kubenswrapper[4933]: I0122 07:14:04.051099 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-tct52"
Jan 22 07:14:04 crc kubenswrapper[4933]: I0122 07:14:04.164509 4933 scope.go:117] "RemoveContainer" containerID="5381aaf25fa5616528f037f82beab498d79d2050b61e79dc53b5e8b38779d6b0"
Jan 22 07:14:04 crc kubenswrapper[4933]: I0122 07:14:04.485993 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-tct52"]
Jan 22 07:14:04 crc kubenswrapper[4933]: W0122 07:14:04.490578 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbce8ad4c_791a_4e23_9e52_c361e03f8674.slice/crio-5e9763c5685a1dec416142da19b6ee9efdd9a5196a4e14b90facfd0fb5b675d6 WatchSource:0}: Error finding container 5e9763c5685a1dec416142da19b6ee9efdd9a5196a4e14b90facfd0fb5b675d6: Status 404 returned error can't find the container with id 5e9763c5685a1dec416142da19b6ee9efdd9a5196a4e14b90facfd0fb5b675d6
Jan 22 07:14:05 crc kubenswrapper[4933]: I0122 07:14:05.059797 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-tct52" event={"ID":"bce8ad4c-791a-4e23-9e52-c361e03f8674","Type":"ContainerStarted","Data":"791ee139cdd01ee7264d040651dc10539788592ddc696d4bee66c94a8c05158c"}
Jan 22 07:14:05 crc kubenswrapper[4933]: I0122 07:14:05.060095 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-tct52" event={"ID":"bce8ad4c-791a-4e23-9e52-c361e03f8674","Type":"ContainerStarted","Data":"5e9763c5685a1dec416142da19b6ee9efdd9a5196a4e14b90facfd0fb5b675d6"}
Jan 22 07:14:05 crc kubenswrapper[4933]: I0122 07:14:05.075658 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-tct52" podStartSLOduration=2.07563527 podStartE2EDuration="2.07563527s" podCreationTimestamp="2026-01-22 07:14:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:14:05.071775956 +0000 UTC m=+5292.908901309" watchObservedRunningTime="2026-01-22 07:14:05.07563527 +0000 UTC m=+5292.912760643"
Jan 22 07:14:07 crc kubenswrapper[4933]: I0122 07:14:07.073984 4933 generic.go:334] "Generic (PLEG): container finished" podID="bce8ad4c-791a-4e23-9e52-c361e03f8674" containerID="791ee139cdd01ee7264d040651dc10539788592ddc696d4bee66c94a8c05158c" exitCode=0
Jan 22 07:14:07 crc kubenswrapper[4933]: I0122 07:14:07.074043 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-tct52" event={"ID":"bce8ad4c-791a-4e23-9e52-c361e03f8674","Type":"ContainerDied","Data":"791ee139cdd01ee7264d040651dc10539788592ddc696d4bee66c94a8c05158c"}
Jan 22 07:14:08 crc kubenswrapper[4933]: I0122 07:14:08.477136 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-tct52"
Jan 22 07:14:08 crc kubenswrapper[4933]: I0122 07:14:08.621012 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bd6sd\" (UniqueName: \"kubernetes.io/projected/bce8ad4c-791a-4e23-9e52-c361e03f8674-kube-api-access-bd6sd\") pod \"bce8ad4c-791a-4e23-9e52-c361e03f8674\" (UID: \"bce8ad4c-791a-4e23-9e52-c361e03f8674\") "
Jan 22 07:14:08 crc kubenswrapper[4933]: I0122 07:14:08.621152 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bce8ad4c-791a-4e23-9e52-c361e03f8674-combined-ca-bundle\") pod \"bce8ad4c-791a-4e23-9e52-c361e03f8674\" (UID: \"bce8ad4c-791a-4e23-9e52-c361e03f8674\") "
Jan 22 07:14:08 crc kubenswrapper[4933]: I0122 07:14:08.621285 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bce8ad4c-791a-4e23-9e52-c361e03f8674-config-data\") pod \"bce8ad4c-791a-4e23-9e52-c361e03f8674\" (UID: \"bce8ad4c-791a-4e23-9e52-c361e03f8674\") "
Jan 22 07:14:08 crc kubenswrapper[4933]: I0122 07:14:08.639838 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bce8ad4c-791a-4e23-9e52-c361e03f8674-kube-api-access-bd6sd" (OuterVolumeSpecName: "kube-api-access-bd6sd") pod "bce8ad4c-791a-4e23-9e52-c361e03f8674" (UID: "bce8ad4c-791a-4e23-9e52-c361e03f8674"). InnerVolumeSpecName "kube-api-access-bd6sd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 07:14:08 crc kubenswrapper[4933]: I0122 07:14:08.649159 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bce8ad4c-791a-4e23-9e52-c361e03f8674-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bce8ad4c-791a-4e23-9e52-c361e03f8674" (UID: "bce8ad4c-791a-4e23-9e52-c361e03f8674"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 07:14:08 crc kubenswrapper[4933]: I0122 07:14:08.668498 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bce8ad4c-791a-4e23-9e52-c361e03f8674-config-data" (OuterVolumeSpecName: "config-data") pod "bce8ad4c-791a-4e23-9e52-c361e03f8674" (UID: "bce8ad4c-791a-4e23-9e52-c361e03f8674"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 07:14:08 crc kubenswrapper[4933]: I0122 07:14:08.723725 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bce8ad4c-791a-4e23-9e52-c361e03f8674-config-data\") on node \"crc\" DevicePath \"\""
Jan 22 07:14:08 crc kubenswrapper[4933]: I0122 07:14:08.723758 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bd6sd\" (UniqueName: \"kubernetes.io/projected/bce8ad4c-791a-4e23-9e52-c361e03f8674-kube-api-access-bd6sd\") on node \"crc\" DevicePath \"\""
Jan 22 07:14:08 crc kubenswrapper[4933]: I0122 07:14:08.723770 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bce8ad4c-791a-4e23-9e52-c361e03f8674-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.096455 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-tct52" event={"ID":"bce8ad4c-791a-4e23-9e52-c361e03f8674","Type":"ContainerDied","Data":"5e9763c5685a1dec416142da19b6ee9efdd9a5196a4e14b90facfd0fb5b675d6"}
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.096503 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5e9763c5685a1dec416142da19b6ee9efdd9a5196a4e14b90facfd0fb5b675d6"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.096563 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-tct52"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.248831 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6d997bd4b5-wcm95"]
Jan 22 07:14:09 crc kubenswrapper[4933]: E0122 07:14:09.249294 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bce8ad4c-791a-4e23-9e52-c361e03f8674" containerName="keystone-db-sync"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.249317 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="bce8ad4c-791a-4e23-9e52-c361e03f8674" containerName="keystone-db-sync"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.249555 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="bce8ad4c-791a-4e23-9e52-c361e03f8674" containerName="keystone-db-sync"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.250613 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d997bd4b5-wcm95"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.262456 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d997bd4b5-wcm95"]
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.295531 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-qn82l"]
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.298136 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-qn82l"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.303762 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.304321 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.304499 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-5n4dk"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.304551 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.304722 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.307912 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-qn82l"]
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.338016 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/43bccb46-764e-45cb-9261-816ee71c8b0b-dns-svc\") pod \"dnsmasq-dns-6d997bd4b5-wcm95\" (UID: \"43bccb46-764e-45cb-9261-816ee71c8b0b\") " pod="openstack/dnsmasq-dns-6d997bd4b5-wcm95"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.338172 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/43bccb46-764e-45cb-9261-816ee71c8b0b-ovsdbserver-sb\") pod \"dnsmasq-dns-6d997bd4b5-wcm95\" (UID: \"43bccb46-764e-45cb-9261-816ee71c8b0b\") " pod="openstack/dnsmasq-dns-6d997bd4b5-wcm95"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.338230 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wb5ss\" (UniqueName: \"kubernetes.io/projected/43bccb46-764e-45cb-9261-816ee71c8b0b-kube-api-access-wb5ss\") pod \"dnsmasq-dns-6d997bd4b5-wcm95\" (UID: \"43bccb46-764e-45cb-9261-816ee71c8b0b\") " pod="openstack/dnsmasq-dns-6d997bd4b5-wcm95"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.338249 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43bccb46-764e-45cb-9261-816ee71c8b0b-config\") pod \"dnsmasq-dns-6d997bd4b5-wcm95\" (UID: \"43bccb46-764e-45cb-9261-816ee71c8b0b\") " pod="openstack/dnsmasq-dns-6d997bd4b5-wcm95"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.338284 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/43bccb46-764e-45cb-9261-816ee71c8b0b-ovsdbserver-nb\") pod \"dnsmasq-dns-6d997bd4b5-wcm95\" (UID: \"43bccb46-764e-45cb-9261-816ee71c8b0b\") " pod="openstack/dnsmasq-dns-6d997bd4b5-wcm95"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.439845 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/72dcc735-9602-4fc1-a644-cf735051a3b5-scripts\") pod \"keystone-bootstrap-qn82l\" (UID: \"72dcc735-9602-4fc1-a644-cf735051a3b5\") " pod="openstack/keystone-bootstrap-qn82l"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.440159 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t56n8\" (UniqueName: \"kubernetes.io/projected/72dcc735-9602-4fc1-a644-cf735051a3b5-kube-api-access-t56n8\") pod \"keystone-bootstrap-qn82l\" (UID: \"72dcc735-9602-4fc1-a644-cf735051a3b5\") " pod="openstack/keystone-bootstrap-qn82l"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.440315 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/43bccb46-764e-45cb-9261-816ee71c8b0b-ovsdbserver-sb\") pod \"dnsmasq-dns-6d997bd4b5-wcm95\" (UID: \"43bccb46-764e-45cb-9261-816ee71c8b0b\") " pod="openstack/dnsmasq-dns-6d997bd4b5-wcm95"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.440437 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wb5ss\" (UniqueName: \"kubernetes.io/projected/43bccb46-764e-45cb-9261-816ee71c8b0b-kube-api-access-wb5ss\") pod \"dnsmasq-dns-6d997bd4b5-wcm95\" (UID: \"43bccb46-764e-45cb-9261-816ee71c8b0b\") " pod="openstack/dnsmasq-dns-6d997bd4b5-wcm95"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.440522 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72dcc735-9602-4fc1-a644-cf735051a3b5-combined-ca-bundle\") pod \"keystone-bootstrap-qn82l\" (UID: \"72dcc735-9602-4fc1-a644-cf735051a3b5\") " pod="openstack/keystone-bootstrap-qn82l"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.440651 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43bccb46-764e-45cb-9261-816ee71c8b0b-config\") pod \"dnsmasq-dns-6d997bd4b5-wcm95\" (UID: \"43bccb46-764e-45cb-9261-816ee71c8b0b\") " pod="openstack/dnsmasq-dns-6d997bd4b5-wcm95"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.441622 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/72dcc735-9602-4fc1-a644-cf735051a3b5-credential-keys\") pod \"keystone-bootstrap-qn82l\" (UID: \"72dcc735-9602-4fc1-a644-cf735051a3b5\") " pod="openstack/keystone-bootstrap-qn82l"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.441567 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43bccb46-764e-45cb-9261-816ee71c8b0b-config\") pod \"dnsmasq-dns-6d997bd4b5-wcm95\" (UID: \"43bccb46-764e-45cb-9261-816ee71c8b0b\") " pod="openstack/dnsmasq-dns-6d997bd4b5-wcm95"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.441248 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/43bccb46-764e-45cb-9261-816ee71c8b0b-ovsdbserver-sb\") pod \"dnsmasq-dns-6d997bd4b5-wcm95\" (UID: \"43bccb46-764e-45cb-9261-816ee71c8b0b\") " pod="openstack/dnsmasq-dns-6d997bd4b5-wcm95"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.442056 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/43bccb46-764e-45cb-9261-816ee71c8b0b-ovsdbserver-nb\") pod \"dnsmasq-dns-6d997bd4b5-wcm95\" (UID: \"43bccb46-764e-45cb-9261-816ee71c8b0b\") " pod="openstack/dnsmasq-dns-6d997bd4b5-wcm95"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.442858 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/72dcc735-9602-4fc1-a644-cf735051a3b5-config-data\") pod \"keystone-bootstrap-qn82l\" (UID: \"72dcc735-9602-4fc1-a644-cf735051a3b5\") " pod="openstack/keystone-bootstrap-qn82l"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.442799 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/43bccb46-764e-45cb-9261-816ee71c8b0b-ovsdbserver-nb\") pod \"dnsmasq-dns-6d997bd4b5-wcm95\" (UID: \"43bccb46-764e-45cb-9261-816ee71c8b0b\") " pod="openstack/dnsmasq-dns-6d997bd4b5-wcm95"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.443186 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/72dcc735-9602-4fc1-a644-cf735051a3b5-fernet-keys\") pod \"keystone-bootstrap-qn82l\" (UID: \"72dcc735-9602-4fc1-a644-cf735051a3b5\") " pod="openstack/keystone-bootstrap-qn82l"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.443324 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/43bccb46-764e-45cb-9261-816ee71c8b0b-dns-svc\") pod \"dnsmasq-dns-6d997bd4b5-wcm95\" (UID: \"43bccb46-764e-45cb-9261-816ee71c8b0b\") " pod="openstack/dnsmasq-dns-6d997bd4b5-wcm95"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.443991 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/43bccb46-764e-45cb-9261-816ee71c8b0b-dns-svc\") pod \"dnsmasq-dns-6d997bd4b5-wcm95\" (UID: \"43bccb46-764e-45cb-9261-816ee71c8b0b\") " pod="openstack/dnsmasq-dns-6d997bd4b5-wcm95"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.458789 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wb5ss\" (UniqueName: \"kubernetes.io/projected/43bccb46-764e-45cb-9261-816ee71c8b0b-kube-api-access-wb5ss\") pod \"dnsmasq-dns-6d997bd4b5-wcm95\" (UID: \"43bccb46-764e-45cb-9261-816ee71c8b0b\") " pod="openstack/dnsmasq-dns-6d997bd4b5-wcm95"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.545285 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/72dcc735-9602-4fc1-a644-cf735051a3b5-config-data\") pod \"keystone-bootstrap-qn82l\" (UID: \"72dcc735-9602-4fc1-a644-cf735051a3b5\") " pod="openstack/keystone-bootstrap-qn82l"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.546245 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/72dcc735-9602-4fc1-a644-cf735051a3b5-fernet-keys\") pod \"keystone-bootstrap-qn82l\" (UID: \"72dcc735-9602-4fc1-a644-cf735051a3b5\") " pod="openstack/keystone-bootstrap-qn82l"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.546373 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/72dcc735-9602-4fc1-a644-cf735051a3b5-scripts\") pod \"keystone-bootstrap-qn82l\" (UID: \"72dcc735-9602-4fc1-a644-cf735051a3b5\") " pod="openstack/keystone-bootstrap-qn82l"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.546487 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t56n8\" (UniqueName: \"kubernetes.io/projected/72dcc735-9602-4fc1-a644-cf735051a3b5-kube-api-access-t56n8\") pod \"keystone-bootstrap-qn82l\" (UID: \"72dcc735-9602-4fc1-a644-cf735051a3b5\") " pod="openstack/keystone-bootstrap-qn82l"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.546582 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72dcc735-9602-4fc1-a644-cf735051a3b5-combined-ca-bundle\") pod \"keystone-bootstrap-qn82l\" (UID: \"72dcc735-9602-4fc1-a644-cf735051a3b5\") " pod="openstack/keystone-bootstrap-qn82l"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.546660 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/72dcc735-9602-4fc1-a644-cf735051a3b5-credential-keys\") pod \"keystone-bootstrap-qn82l\" (UID: \"72dcc735-9602-4fc1-a644-cf735051a3b5\") " pod="openstack/keystone-bootstrap-qn82l"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.550407 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72dcc735-9602-4fc1-a644-cf735051a3b5-combined-ca-bundle\") pod \"keystone-bootstrap-qn82l\" (UID: \"72dcc735-9602-4fc1-a644-cf735051a3b5\") " pod="openstack/keystone-bootstrap-qn82l"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.550487 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/72dcc735-9602-4fc1-a644-cf735051a3b5-credential-keys\") pod \"keystone-bootstrap-qn82l\" (UID: \"72dcc735-9602-4fc1-a644-cf735051a3b5\") " pod="openstack/keystone-bootstrap-qn82l"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.550675 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/72dcc735-9602-4fc1-a644-cf735051a3b5-fernet-keys\") pod \"keystone-bootstrap-qn82l\" (UID: \"72dcc735-9602-4fc1-a644-cf735051a3b5\") " pod="openstack/keystone-bootstrap-qn82l"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.554233 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/72dcc735-9602-4fc1-a644-cf735051a3b5-scripts\") pod \"keystone-bootstrap-qn82l\" (UID: \"72dcc735-9602-4fc1-a644-cf735051a3b5\") " pod="openstack/keystone-bootstrap-qn82l"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.558722 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/72dcc735-9602-4fc1-a644-cf735051a3b5-config-data\") pod \"keystone-bootstrap-qn82l\" (UID: \"72dcc735-9602-4fc1-a644-cf735051a3b5\") " pod="openstack/keystone-bootstrap-qn82l"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.563802 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t56n8\" (UniqueName: \"kubernetes.io/projected/72dcc735-9602-4fc1-a644-cf735051a3b5-kube-api-access-t56n8\") pod \"keystone-bootstrap-qn82l\" (UID: \"72dcc735-9602-4fc1-a644-cf735051a3b5\") " pod="openstack/keystone-bootstrap-qn82l"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.572057 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d997bd4b5-wcm95"
Jan 22 07:14:09 crc kubenswrapper[4933]: I0122 07:14:09.617525 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-qn82l"
Jan 22 07:14:10 crc kubenswrapper[4933]: I0122 07:14:10.049830 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d997bd4b5-wcm95"]
Jan 22 07:14:10 crc kubenswrapper[4933]: W0122 07:14:10.055941 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod43bccb46_764e_45cb_9261_816ee71c8b0b.slice/crio-5ebc6ac5d4410cba7ebbb941837e9ee5381db71d4a0ce3fce612ab04d38e4a09 WatchSource:0}: Error finding container 5ebc6ac5d4410cba7ebbb941837e9ee5381db71d4a0ce3fce612ab04d38e4a09: Status 404 returned error can't find the container with id 5ebc6ac5d4410cba7ebbb941837e9ee5381db71d4a0ce3fce612ab04d38e4a09
Jan 22 07:14:10 crc kubenswrapper[4933]: I0122 07:14:10.115376 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d997bd4b5-wcm95" event={"ID":"43bccb46-764e-45cb-9261-816ee71c8b0b","Type":"ContainerStarted","Data":"5ebc6ac5d4410cba7ebbb941837e9ee5381db71d4a0ce3fce612ab04d38e4a09"}
Jan 22 07:14:10 crc kubenswrapper[4933]: I0122 07:14:10.125086 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-qn82l"]
Jan 22 07:14:10 crc kubenswrapper[4933]: I0122 07:14:10.942628 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 07:14:10 crc kubenswrapper[4933]: I0122 07:14:10.942953 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 07:14:10 crc kubenswrapper[4933]: I0122 07:14:10.943002 4933 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx"
Jan 22 07:14:10 crc kubenswrapper[4933]: I0122 07:14:10.943650 4933 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8d0ff8f158acf8d88510b1fddaf0b35b7a3fb6e84561545d6c60a78c2e36fc92"} pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 22 07:14:10 crc kubenswrapper[4933]: I0122 07:14:10.943701 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" containerID="cri-o://8d0ff8f158acf8d88510b1fddaf0b35b7a3fb6e84561545d6c60a78c2e36fc92" gracePeriod=600
Jan 22 07:14:11 crc kubenswrapper[4933]: E0122 07:14:11.087340 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 07:14:11 crc kubenswrapper[4933]: I0122 07:14:11.128419 4933 generic.go:334] "Generic (PLEG): container finished" podID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerID="8d0ff8f158acf8d88510b1fddaf0b35b7a3fb6e84561545d6c60a78c2e36fc92" exitCode=0
Jan 22 07:14:11 crc kubenswrapper[4933]: I0122 07:14:11.128440 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerDied","Data":"8d0ff8f158acf8d88510b1fddaf0b35b7a3fb6e84561545d6c60a78c2e36fc92"}
Jan 22 07:14:11 crc kubenswrapper[4933]: I0122 07:14:11.128527 4933 scope.go:117] "RemoveContainer" containerID="95f0ca789178e77053405c316da5ad73a9ba931191b3cf740ca8ae2078616f25"
Jan 22 07:14:11 crc kubenswrapper[4933]: I0122 07:14:11.129308 4933 scope.go:117] "RemoveContainer" containerID="8d0ff8f158acf8d88510b1fddaf0b35b7a3fb6e84561545d6c60a78c2e36fc92"
Jan 22 07:14:11 crc kubenswrapper[4933]: E0122 07:14:11.129715 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 07:14:11 crc kubenswrapper[4933]: I0122 07:14:11.134013 4933 generic.go:334] "Generic (PLEG): container finished" podID="43bccb46-764e-45cb-9261-816ee71c8b0b" containerID="a74cd3b6abe8c7f622e07ff297a1d8c60e7477d0d400546b03797dbf0f8de518" exitCode=0
Jan 22 07:14:11 crc kubenswrapper[4933]: I0122 07:14:11.134104 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d997bd4b5-wcm95" event={"ID":"43bccb46-764e-45cb-9261-816ee71c8b0b","Type":"ContainerDied","Data":"a74cd3b6abe8c7f622e07ff297a1d8c60e7477d0d400546b03797dbf0f8de518"}
Jan 22 07:14:11 crc kubenswrapper[4933]: I0122 07:14:11.136820 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-qn82l" event={"ID":"72dcc735-9602-4fc1-a644-cf735051a3b5","Type":"ContainerStarted","Data":"e7ac1bc107ca82175d72b2c56b74ce0436bd44f94fbc67fab854adf4c664fc0c"}
Jan 22 07:14:11 crc kubenswrapper[4933]: I0122 07:14:11.136865 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-qn82l" event={"ID":"72dcc735-9602-4fc1-a644-cf735051a3b5","Type":"ContainerStarted","Data":"c7734f20c6ab1805b8a2821db07eac1f8f04137160bbf9208ecd5e3040dc88d3"}
Jan 22 07:14:11 crc kubenswrapper[4933]: I0122 07:14:11.185586 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-qn82l" podStartSLOduration=2.18556543 podStartE2EDuration="2.18556543s" podCreationTimestamp="2026-01-22 07:14:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:14:11.176566222 +0000 UTC m=+5299.013691595" watchObservedRunningTime="2026-01-22 07:14:11.18556543 +0000 UTC m=+5299.022690773"
Jan 22 07:14:12 crc kubenswrapper[4933]: I0122 07:14:12.147345 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d997bd4b5-wcm95" event={"ID":"43bccb46-764e-45cb-9261-816ee71c8b0b","Type":"ContainerStarted","Data":"327e5671019c61edc8b869d669626cc3907475d04f7eb392aa10a6f212dde0f5"}
Jan 22 07:14:12 crc kubenswrapper[4933]: I0122 07:14:12.147698 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6d997bd4b5-wcm95"
Jan 22 07:14:12 crc kubenswrapper[4933]: I0122 07:14:12.188052 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6d997bd4b5-wcm95" podStartSLOduration=3.18803198 podStartE2EDuration="3.18803198s" podCreationTimestamp="2026-01-22 07:14:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:14:12.162216943 +0000 UTC m=+5299.999342316" watchObservedRunningTime="2026-01-22 07:14:12.18803198 +0000 UTC m=+5300.025157323"
Jan 22 07:14:14 crc kubenswrapper[4933]: I0122 07:14:14.161637 4933 generic.go:334] "Generic (PLEG): container finished" podID="72dcc735-9602-4fc1-a644-cf735051a3b5" containerID="e7ac1bc107ca82175d72b2c56b74ce0436bd44f94fbc67fab854adf4c664fc0c" exitCode=0
Jan 22 07:14:14 crc kubenswrapper[4933]: I0122 07:14:14.161750 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-qn82l" event={"ID":"72dcc735-9602-4fc1-a644-cf735051a3b5","Type":"ContainerDied","Data":"e7ac1bc107ca82175d72b2c56b74ce0436bd44f94fbc67fab854adf4c664fc0c"}
Jan 22 07:14:15 crc kubenswrapper[4933]: I0122 07:14:15.572610 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-qn82l"
Jan 22 07:14:15 crc kubenswrapper[4933]: I0122 07:14:15.645729 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72dcc735-9602-4fc1-a644-cf735051a3b5-combined-ca-bundle\") pod \"72dcc735-9602-4fc1-a644-cf735051a3b5\" (UID: \"72dcc735-9602-4fc1-a644-cf735051a3b5\") "
Jan 22 07:14:15 crc kubenswrapper[4933]: I0122 07:14:15.645862 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/72dcc735-9602-4fc1-a644-cf735051a3b5-credential-keys\") pod \"72dcc735-9602-4fc1-a644-cf735051a3b5\" (UID: \"72dcc735-9602-4fc1-a644-cf735051a3b5\") "
Jan 22 07:14:15 crc kubenswrapper[4933]: I0122 07:14:15.645900 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/72dcc735-9602-4fc1-a644-cf735051a3b5-fernet-keys\") pod \"72dcc735-9602-4fc1-a644-cf735051a3b5\" (UID: \"72dcc735-9602-4fc1-a644-cf735051a3b5\") "
Jan 22 07:14:15 crc kubenswrapper[4933]: I0122 07:14:15.645917 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/72dcc735-9602-4fc1-a644-cf735051a3b5-scripts\") pod \"72dcc735-9602-4fc1-a644-cf735051a3b5\" (UID: \"72dcc735-9602-4fc1-a644-cf735051a3b5\") "
Jan 22 07:14:15 crc kubenswrapper[4933]: I0122 07:14:15.645966 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/72dcc735-9602-4fc1-a644-cf735051a3b5-config-data\") pod \"72dcc735-9602-4fc1-a644-cf735051a3b5\" (UID: \"72dcc735-9602-4fc1-a644-cf735051a3b5\") "
Jan 22 07:14:15 crc kubenswrapper[4933]: I0122 07:14:15.645983 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t56n8\" (UniqueName: \"kubernetes.io/projected/72dcc735-9602-4fc1-a644-cf735051a3b5-kube-api-access-t56n8\") pod \"72dcc735-9602-4fc1-a644-cf735051a3b5\" (UID: \"72dcc735-9602-4fc1-a644-cf735051a3b5\") "
Jan 22 07:14:15 crc kubenswrapper[4933]: I0122 07:14:15.652216 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/72dcc735-9602-4fc1-a644-cf735051a3b5-kube-api-access-t56n8" (OuterVolumeSpecName: "kube-api-access-t56n8") pod "72dcc735-9602-4fc1-a644-cf735051a3b5" (UID: "72dcc735-9602-4fc1-a644-cf735051a3b5"). InnerVolumeSpecName "kube-api-access-t56n8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 07:14:15 crc kubenswrapper[4933]: I0122 07:14:15.652689 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/72dcc735-9602-4fc1-a644-cf735051a3b5-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "72dcc735-9602-4fc1-a644-cf735051a3b5" (UID: "72dcc735-9602-4fc1-a644-cf735051a3b5"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 07:14:15 crc kubenswrapper[4933]: I0122 07:14:15.654199 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/72dcc735-9602-4fc1-a644-cf735051a3b5-scripts" (OuterVolumeSpecName: "scripts") pod "72dcc735-9602-4fc1-a644-cf735051a3b5" (UID: "72dcc735-9602-4fc1-a644-cf735051a3b5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 07:14:15 crc kubenswrapper[4933]: I0122 07:14:15.655251 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/72dcc735-9602-4fc1-a644-cf735051a3b5-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "72dcc735-9602-4fc1-a644-cf735051a3b5" (UID: "72dcc735-9602-4fc1-a644-cf735051a3b5"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 07:14:15 crc kubenswrapper[4933]: I0122 07:14:15.671954 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/72dcc735-9602-4fc1-a644-cf735051a3b5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "72dcc735-9602-4fc1-a644-cf735051a3b5" (UID: "72dcc735-9602-4fc1-a644-cf735051a3b5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 07:14:15 crc kubenswrapper[4933]: I0122 07:14:15.672843 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/72dcc735-9602-4fc1-a644-cf735051a3b5-config-data" (OuterVolumeSpecName: "config-data") pod "72dcc735-9602-4fc1-a644-cf735051a3b5" (UID: "72dcc735-9602-4fc1-a644-cf735051a3b5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 07:14:15 crc kubenswrapper[4933]: I0122 07:14:15.748430 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72dcc735-9602-4fc1-a644-cf735051a3b5-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 22 07:14:15 crc kubenswrapper[4933]: I0122 07:14:15.748473 4933 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/72dcc735-9602-4fc1-a644-cf735051a3b5-credential-keys\") on node \"crc\" DevicePath \"\""
Jan 22 07:14:15 crc kubenswrapper[4933]: I0122 07:14:15.748487 4933 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/72dcc735-9602-4fc1-a644-cf735051a3b5-fernet-keys\") on node \"crc\" DevicePath \"\""
Jan 22 07:14:15 crc kubenswrapper[4933]: I0122 07:14:15.748499 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/72dcc735-9602-4fc1-a644-cf735051a3b5-scripts\") on node \"crc\" DevicePath \"\""
Jan 22 07:14:15 crc kubenswrapper[4933]: I0122 07:14:15.748515 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t56n8\" (UniqueName: \"kubernetes.io/projected/72dcc735-9602-4fc1-a644-cf735051a3b5-kube-api-access-t56n8\") on node \"crc\" DevicePath \"\""
Jan 22 07:14:15 crc kubenswrapper[4933]: I0122 07:14:15.748527 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/72dcc735-9602-4fc1-a644-cf735051a3b5-config-data\") on node \"crc\" DevicePath \"\""
Jan 22 07:14:16 crc kubenswrapper[4933]: I0122 07:14:16.209130 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-qn82l" event={"ID":"72dcc735-9602-4fc1-a644-cf735051a3b5","Type":"ContainerDied","Data":"c7734f20c6ab1805b8a2821db07eac1f8f04137160bbf9208ecd5e3040dc88d3"}
Jan 22 07:14:16 crc kubenswrapper[4933]: I0122 07:14:16.209383 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c7734f20c6ab1805b8a2821db07eac1f8f04137160bbf9208ecd5e3040dc88d3"
Jan 22 07:14:16 crc kubenswrapper[4933]: I0122 07:14:16.209284 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-qn82l"
Jan 22 07:14:16 crc kubenswrapper[4933]: I0122 07:14:16.275735 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-qn82l"]
Jan 22 07:14:16 crc kubenswrapper[4933]: I0122 07:14:16.283423 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-qn82l"]
Jan 22 07:14:16 crc kubenswrapper[4933]: I0122 07:14:16.360372 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-8x2zf"]
Jan 22 07:14:16 crc kubenswrapper[4933]: E0122 07:14:16.361557 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72dcc735-9602-4fc1-a644-cf735051a3b5" containerName="keystone-bootstrap"
Jan 22 07:14:16 crc kubenswrapper[4933]: I0122 07:14:16.361626 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="72dcc735-9602-4fc1-a644-cf735051a3b5" containerName="keystone-bootstrap"
Jan 22 07:14:16 crc kubenswrapper[4933]: I0122 07:14:16.362221 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="72dcc735-9602-4fc1-a644-cf735051a3b5" containerName="keystone-bootstrap"
Jan 22 07:14:16 crc kubenswrapper[4933]: I0122 07:14:16.363311 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-8x2zf"
Jan 22 07:14:16 crc kubenswrapper[4933]: I0122 07:14:16.366422 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Jan 22 07:14:16 crc kubenswrapper[4933]: I0122 07:14:16.366304 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Jan 22 07:14:16 crc kubenswrapper[4933]: I0122 07:14:16.366719 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret"
Jan 22 07:14:16 crc kubenswrapper[4933]: I0122 07:14:16.370100 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-8x2zf"]
Jan 22 07:14:16 crc kubenswrapper[4933]: I0122 07:14:16.371604 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Jan 22 07:14:16 crc kubenswrapper[4933]: I0122 07:14:16.371767 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-5n4dk"
Jan 22 07:14:16 crc kubenswrapper[4933]: I0122 07:14:16.459974 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/95b8a659-2af3-4f21-b848-d395f968a0fe-credential-keys\") pod \"keystone-bootstrap-8x2zf\" (UID: \"95b8a659-2af3-4f21-b848-d395f968a0fe\") " pod="openstack/keystone-bootstrap-8x2zf"
Jan 22 07:14:16 crc kubenswrapper[4933]: I0122 07:14:16.460193 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/95b8a659-2af3-4f21-b848-d395f968a0fe-config-data\") pod \"keystone-bootstrap-8x2zf\" (UID: \"95b8a659-2af3-4f21-b848-d395f968a0fe\") " pod="openstack/keystone-bootstrap-8x2zf"
Jan 22 07:14:16 crc kubenswrapper[4933]: I0122 07:14:16.460457 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-llldn\" (UniqueName: \"kubernetes.io/projected/95b8a659-2af3-4f21-b848-d395f968a0fe-kube-api-access-llldn\") pod \"keystone-bootstrap-8x2zf\" (UID: \"95b8a659-2af3-4f21-b848-d395f968a0fe\") " pod="openstack/keystone-bootstrap-8x2zf"
Jan 22 07:14:16 crc kubenswrapper[4933]: I0122 07:14:16.460670 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/95b8a659-2af3-4f21-b848-d395f968a0fe-combined-ca-bundle\") pod \"keystone-bootstrap-8x2zf\" (UID: \"95b8a659-2af3-4f21-b848-d395f968a0fe\") " pod="openstack/keystone-bootstrap-8x2zf"
Jan 22 07:14:16 crc kubenswrapper[4933]: I0122 07:14:16.460725 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/95b8a659-2af3-4f21-b848-d395f968a0fe-fernet-keys\") pod \"keystone-bootstrap-8x2zf\" (UID: \"95b8a659-2af3-4f21-b848-d395f968a0fe\") " pod="openstack/keystone-bootstrap-8x2zf"
Jan 22 07:14:16 crc kubenswrapper[4933]: I0122 07:14:16.460776 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/95b8a659-2af3-4f21-b848-d395f968a0fe-scripts\") pod \"keystone-bootstrap-8x2zf\" (UID: \"95b8a659-2af3-4f21-b848-d395f968a0fe\") " pod="openstack/keystone-bootstrap-8x2zf"
Jan 22 07:14:16 crc kubenswrapper[4933]: I0122 07:14:16.500539 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="72dcc735-9602-4fc1-a644-cf735051a3b5" path="/var/lib/kubelet/pods/72dcc735-9602-4fc1-a644-cf735051a3b5/volumes"
Jan 22 07:14:16 crc kubenswrapper[4933]: I0122 07:14:16.562464 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/95b8a659-2af3-4f21-b848-d395f968a0fe-fernet-keys\") pod \"keystone-bootstrap-8x2zf\" (UID: \"95b8a659-2af3-4f21-b848-d395f968a0fe\") " pod="openstack/keystone-bootstrap-8x2zf"
Jan 22 07:14:16 crc kubenswrapper[4933]: I0122 07:14:16.562534 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/95b8a659-2af3-4f21-b848-d395f968a0fe-scripts\") pod \"keystone-bootstrap-8x2zf\" (UID: \"95b8a659-2af3-4f21-b848-d395f968a0fe\") " pod="openstack/keystone-bootstrap-8x2zf"
Jan 22 07:14:16 crc kubenswrapper[4933]: I0122 07:14:16.562596 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/95b8a659-2af3-4f21-b848-d395f968a0fe-credential-keys\") pod \"keystone-bootstrap-8x2zf\" (UID: \"95b8a659-2af3-4f21-b848-d395f968a0fe\") " pod="openstack/keystone-bootstrap-8x2zf"
Jan 22 07:14:16 crc kubenswrapper[4933]: I0122 07:14:16.562620 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/95b8a659-2af3-4f21-b848-d395f968a0fe-config-data\") pod \"keystone-bootstrap-8x2zf\" (UID: \"95b8a659-2af3-4f21-b848-d395f968a0fe\") " pod="openstack/keystone-bootstrap-8x2zf"
Jan 22 07:14:16 crc kubenswrapper[4933]: I0122 07:14:16.562655 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-llldn\" (UniqueName: \"kubernetes.io/projected/95b8a659-2af3-4f21-b848-d395f968a0fe-kube-api-access-llldn\") pod \"keystone-bootstrap-8x2zf\" (UID: \"95b8a659-2af3-4f21-b848-d395f968a0fe\") " pod="openstack/keystone-bootstrap-8x2zf"
Jan 22 07:14:16 crc kubenswrapper[4933]: I0122 07:14:16.562713 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/95b8a659-2af3-4f21-b848-d395f968a0fe-combined-ca-bundle\") pod \"keystone-bootstrap-8x2zf\" (UID: \"95b8a659-2af3-4f21-b848-d395f968a0fe\") " pod="openstack/keystone-bootstrap-8x2zf"
Jan 22 07:14:16 crc kubenswrapper[4933]: I0122 07:14:16.568513 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/95b8a659-2af3-4f21-b848-d395f968a0fe-scripts\") pod \"keystone-bootstrap-8x2zf\" (UID: \"95b8a659-2af3-4f21-b848-d395f968a0fe\") " pod="openstack/keystone-bootstrap-8x2zf"
Jan 22 07:14:16 crc kubenswrapper[4933]: I0122 07:14:16.569047 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/95b8a659-2af3-4f21-b848-d395f968a0fe-combined-ca-bundle\") pod \"keystone-bootstrap-8x2zf\" (UID: \"95b8a659-2af3-4f21-b848-d395f968a0fe\") " pod="openstack/keystone-bootstrap-8x2zf"
Jan 22 07:14:16 crc kubenswrapper[4933]: I0122 07:14:16.569238 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/95b8a659-2af3-4f21-b848-d395f968a0fe-credential-keys\") pod \"keystone-bootstrap-8x2zf\" (UID: \"95b8a659-2af3-4f21-b848-d395f968a0fe\") " pod="openstack/keystone-bootstrap-8x2zf"
Jan 22 07:14:16 crc kubenswrapper[4933]: I0122 07:14:16.570108 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/95b8a659-2af3-4f21-b848-d395f968a0fe-fernet-keys\") pod \"keystone-bootstrap-8x2zf\" (UID: \"95b8a659-2af3-4f21-b848-d395f968a0fe\") " pod="openstack/keystone-bootstrap-8x2zf"
Jan 22 07:14:16 crc kubenswrapper[4933]: I0122 07:14:16.570850 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/95b8a659-2af3-4f21-b848-d395f968a0fe-config-data\") pod \"keystone-bootstrap-8x2zf\" (UID: \"95b8a659-2af3-4f21-b848-d395f968a0fe\") " pod="openstack/keystone-bootstrap-8x2zf"
Jan 22 07:14:16 crc kubenswrapper[4933]: I0122 07:14:16.581369 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-llldn\" (UniqueName: \"kubernetes.io/projected/95b8a659-2af3-4f21-b848-d395f968a0fe-kube-api-access-llldn\") pod \"keystone-bootstrap-8x2zf\" (UID: \"95b8a659-2af3-4f21-b848-d395f968a0fe\") " pod="openstack/keystone-bootstrap-8x2zf"
Jan 22 07:14:16 crc kubenswrapper[4933]: I0122 07:14:16.687856 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-8x2zf"
Jan 22 07:14:17 crc kubenswrapper[4933]: I0122 07:14:17.178339 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-8x2zf"]
Jan 22 07:14:17 crc kubenswrapper[4933]: W0122 07:14:17.180875 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95b8a659_2af3_4f21_b848_d395f968a0fe.slice/crio-8bc61796188100c26219891d7cf91b2d49b52699fcde109b341bc0ec685c4a5a WatchSource:0}: Error finding container 8bc61796188100c26219891d7cf91b2d49b52699fcde109b341bc0ec685c4a5a: Status 404 returned error can't find the container with id 8bc61796188100c26219891d7cf91b2d49b52699fcde109b341bc0ec685c4a5a
Jan 22 07:14:17 crc kubenswrapper[4933]: I0122 07:14:17.220618 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-8x2zf" event={"ID":"95b8a659-2af3-4f21-b848-d395f968a0fe","Type":"ContainerStarted","Data":"8bc61796188100c26219891d7cf91b2d49b52699fcde109b341bc0ec685c4a5a"}
Jan 22 07:14:18 crc kubenswrapper[4933]: I0122 07:14:18.228890 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-8x2zf" event={"ID":"95b8a659-2af3-4f21-b848-d395f968a0fe","Type":"ContainerStarted","Data":"e06943f28023d4bf16b8f3df6fd47a3c5569c55b4239064adbd7f0fb4beb0ab2"}
Jan 22 07:14:18 crc kubenswrapper[4933]: I0122 07:14:18.252720 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-8x2zf" podStartSLOduration=2.252706971 podStartE2EDuration="2.252706971s" podCreationTimestamp="2026-01-22 07:14:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:14:18.251458611 +0000 UTC m=+5306.088583994" watchObservedRunningTime="2026-01-22 07:14:18.252706971 +0000 UTC m=+5306.089832324"
Jan 22 07:14:19 crc kubenswrapper[4933]: I0122 07:14:19.575593 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6d997bd4b5-wcm95"
Jan 22 07:14:19 crc kubenswrapper[4933]: I0122 07:14:19.633063 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-659c7d5767-x2dcs"]
Jan 22 07:14:19 crc kubenswrapper[4933]: I0122 07:14:19.633371 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-659c7d5767-x2dcs" podUID="3702545a-242f-4b60-85b4-3eb809888177" containerName="dnsmasq-dns" containerID="cri-o://6fa062a7da32beb1763d9846036314f83c2d235a2ddca68123708f974b92891b" gracePeriod=10
Jan 22 07:14:19 crc kubenswrapper[4933]: I0122 07:14:19.805241 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-659c7d5767-x2dcs" podUID="3702545a-242f-4b60-85b4-3eb809888177" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.1.15:5353: connect: connection refused"
Jan 22 07:14:20 crc kubenswrapper[4933]: I0122 07:14:20.120581 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-659c7d5767-x2dcs"
Jan 22 07:14:20 crc kubenswrapper[4933]: I0122 07:14:20.253391 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3702545a-242f-4b60-85b4-3eb809888177-ovsdbserver-nb\") pod \"3702545a-242f-4b60-85b4-3eb809888177\" (UID: \"3702545a-242f-4b60-85b4-3eb809888177\") "
Jan 22 07:14:20 crc kubenswrapper[4933]: I0122 07:14:20.253472 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3702545a-242f-4b60-85b4-3eb809888177-config\") pod \"3702545a-242f-4b60-85b4-3eb809888177\" (UID: \"3702545a-242f-4b60-85b4-3eb809888177\") "
Jan 22 07:14:20 crc kubenswrapper[4933]: I0122 07:14:20.253521 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3702545a-242f-4b60-85b4-3eb809888177-ovsdbserver-sb\") pod \"3702545a-242f-4b60-85b4-3eb809888177\" (UID: \"3702545a-242f-4b60-85b4-3eb809888177\") "
Jan 22 07:14:20 crc kubenswrapper[4933]: I0122 07:14:20.253545 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sp4nb\" (UniqueName: \"kubernetes.io/projected/3702545a-242f-4b60-85b4-3eb809888177-kube-api-access-sp4nb\") pod \"3702545a-242f-4b60-85b4-3eb809888177\" (UID: \"3702545a-242f-4b60-85b4-3eb809888177\") "
Jan 22 07:14:20 crc kubenswrapper[4933]: I0122 07:14:20.253640 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3702545a-242f-4b60-85b4-3eb809888177-dns-svc\") pod \"3702545a-242f-4b60-85b4-3eb809888177\" (UID: \"3702545a-242f-4b60-85b4-3eb809888177\") "
Jan 22 07:14:20 crc kubenswrapper[4933]: I0122 07:14:20.254676 4933 generic.go:334] "Generic (PLEG): container finished" podID="3702545a-242f-4b60-85b4-3eb809888177" containerID="6fa062a7da32beb1763d9846036314f83c2d235a2ddca68123708f974b92891b" exitCode=0
Jan 22 07:14:20 crc kubenswrapper[4933]: I0122 07:14:20.254730 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-659c7d5767-x2dcs" event={"ID":"3702545a-242f-4b60-85b4-3eb809888177","Type":"ContainerDied","Data":"6fa062a7da32beb1763d9846036314f83c2d235a2ddca68123708f974b92891b"}
Jan 22 07:14:20 crc kubenswrapper[4933]: I0122 07:14:20.254762 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-659c7d5767-x2dcs" event={"ID":"3702545a-242f-4b60-85b4-3eb809888177","Type":"ContainerDied","Data":"36ab808b3311d65e26e8356b5d31758ec509aec909d2dcb19575759535136cc9"}
Jan 22 07:14:20 crc kubenswrapper[4933]: I0122 07:14:20.254781 4933 scope.go:117] "RemoveContainer" containerID="6fa062a7da32beb1763d9846036314f83c2d235a2ddca68123708f974b92891b"
Jan 22 07:14:20 crc kubenswrapper[4933]: I0122 07:14:20.254885 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-659c7d5767-x2dcs"
Jan 22 07:14:20 crc kubenswrapper[4933]: I0122 07:14:20.259462 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3702545a-242f-4b60-85b4-3eb809888177-kube-api-access-sp4nb" (OuterVolumeSpecName: "kube-api-access-sp4nb") pod "3702545a-242f-4b60-85b4-3eb809888177" (UID: "3702545a-242f-4b60-85b4-3eb809888177"). InnerVolumeSpecName "kube-api-access-sp4nb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 07:14:20 crc kubenswrapper[4933]: I0122 07:14:20.291089 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3702545a-242f-4b60-85b4-3eb809888177-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "3702545a-242f-4b60-85b4-3eb809888177" (UID: "3702545a-242f-4b60-85b4-3eb809888177"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 07:14:20 crc kubenswrapper[4933]: I0122 07:14:20.292921 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3702545a-242f-4b60-85b4-3eb809888177-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3702545a-242f-4b60-85b4-3eb809888177" (UID: "3702545a-242f-4b60-85b4-3eb809888177"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 07:14:20 crc kubenswrapper[4933]: I0122 07:14:20.298557 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3702545a-242f-4b60-85b4-3eb809888177-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "3702545a-242f-4b60-85b4-3eb809888177" (UID: "3702545a-242f-4b60-85b4-3eb809888177"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 07:14:20 crc kubenswrapper[4933]: I0122 07:14:20.304544 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3702545a-242f-4b60-85b4-3eb809888177-config" (OuterVolumeSpecName: "config") pod "3702545a-242f-4b60-85b4-3eb809888177" (UID: "3702545a-242f-4b60-85b4-3eb809888177"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 07:14:20 crc kubenswrapper[4933]: I0122 07:14:20.356480 4933 scope.go:117] "RemoveContainer" containerID="b94be76d8de0b18c19ef9fa338e14cf38d83dffc65bb6688c37d94ea72e6aee7"
Jan 22 07:14:20 crc kubenswrapper[4933]: I0122 07:14:20.376161 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3702545a-242f-4b60-85b4-3eb809888177-config\") on node \"crc\" DevicePath \"\""
Jan 22 07:14:20 crc kubenswrapper[4933]: I0122 07:14:20.376199 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3702545a-242f-4b60-85b4-3eb809888177-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Jan 22 07:14:20 crc kubenswrapper[4933]: I0122 07:14:20.376222 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sp4nb\" (UniqueName: \"kubernetes.io/projected/3702545a-242f-4b60-85b4-3eb809888177-kube-api-access-sp4nb\") on node \"crc\" DevicePath \"\""
Jan 22 07:14:20 crc kubenswrapper[4933]: I0122 07:14:20.376240 4933 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3702545a-242f-4b60-85b4-3eb809888177-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 22 07:14:20 crc kubenswrapper[4933]: I0122 07:14:20.376256 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3702545a-242f-4b60-85b4-3eb809888177-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Jan 22 07:14:20 crc kubenswrapper[4933]: I0122 07:14:20.403356 4933 scope.go:117] "RemoveContainer" containerID="6fa062a7da32beb1763d9846036314f83c2d235a2ddca68123708f974b92891b"
Jan 22 07:14:20 crc kubenswrapper[4933]: E0122 07:14:20.404742 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6fa062a7da32beb1763d9846036314f83c2d235a2ddca68123708f974b92891b\": container with ID starting with 6fa062a7da32beb1763d9846036314f83c2d235a2ddca68123708f974b92891b not found: ID does not exist" containerID="6fa062a7da32beb1763d9846036314f83c2d235a2ddca68123708f974b92891b"
Jan 22 07:14:20 crc kubenswrapper[4933]: I0122 07:14:20.404777 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6fa062a7da32beb1763d9846036314f83c2d235a2ddca68123708f974b92891b"} err="failed to get container status \"6fa062a7da32beb1763d9846036314f83c2d235a2ddca68123708f974b92891b\": rpc error: code = NotFound desc = could not find container \"6fa062a7da32beb1763d9846036314f83c2d235a2ddca68123708f974b92891b\": container with ID starting with 6fa062a7da32beb1763d9846036314f83c2d235a2ddca68123708f974b92891b not found: ID does not exist"
Jan 22 07:14:20 crc kubenswrapper[4933]: I0122 07:14:20.404801 4933 scope.go:117] "RemoveContainer" containerID="b94be76d8de0b18c19ef9fa338e14cf38d83dffc65bb6688c37d94ea72e6aee7"
Jan 22 07:14:20 crc kubenswrapper[4933]: E0122 07:14:20.405614 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b94be76d8de0b18c19ef9fa338e14cf38d83dffc65bb6688c37d94ea72e6aee7\": container with ID starting with b94be76d8de0b18c19ef9fa338e14cf38d83dffc65bb6688c37d94ea72e6aee7 not found: ID does not exist" containerID="b94be76d8de0b18c19ef9fa338e14cf38d83dffc65bb6688c37d94ea72e6aee7"
Jan 22 07:14:20 crc kubenswrapper[4933]: I0122 07:14:20.405658 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b94be76d8de0b18c19ef9fa338e14cf38d83dffc65bb6688c37d94ea72e6aee7"} err="failed to get container status \"b94be76d8de0b18c19ef9fa338e14cf38d83dffc65bb6688c37d94ea72e6aee7\": rpc error: code = NotFound desc = could not find container \"b94be76d8de0b18c19ef9fa338e14cf38d83dffc65bb6688c37d94ea72e6aee7\": container with ID starting with b94be76d8de0b18c19ef9fa338e14cf38d83dffc65bb6688c37d94ea72e6aee7 not found: ID does not exist"
Jan 22 07:14:20 crc kubenswrapper[4933]: I0122 07:14:20.581368 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-659c7d5767-x2dcs"]
Jan 22 07:14:20 crc kubenswrapper[4933]: I0122 07:14:20.590131 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-659c7d5767-x2dcs"]
Jan 22 07:14:21 crc kubenswrapper[4933]: I0122 07:14:21.269070 4933 generic.go:334] "Generic (PLEG): container finished" podID="95b8a659-2af3-4f21-b848-d395f968a0fe" containerID="e06943f28023d4bf16b8f3df6fd47a3c5569c55b4239064adbd7f0fb4beb0ab2" exitCode=0
Jan 22 07:14:21 crc kubenswrapper[4933]: I0122 07:14:21.269133 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-8x2zf" event={"ID":"95b8a659-2af3-4f21-b848-d395f968a0fe","Type":"ContainerDied","Data":"e06943f28023d4bf16b8f3df6fd47a3c5569c55b4239064adbd7f0fb4beb0ab2"}
Jan 22 07:14:22 crc kubenswrapper[4933]: I0122 07:14:22.504606 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3702545a-242f-4b60-85b4-3eb809888177" path="/var/lib/kubelet/pods/3702545a-242f-4b60-85b4-3eb809888177/volumes"
Jan 22 07:14:22 crc kubenswrapper[4933]: I0122 07:14:22.641025 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-8x2zf"
Jan 22 07:14:22 crc kubenswrapper[4933]: I0122 07:14:22.716931 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/95b8a659-2af3-4f21-b848-d395f968a0fe-fernet-keys\") pod \"95b8a659-2af3-4f21-b848-d395f968a0fe\" (UID: \"95b8a659-2af3-4f21-b848-d395f968a0fe\") "
Jan 22 07:14:22 crc kubenswrapper[4933]: I0122 07:14:22.717210 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-llldn\" (UniqueName: \"kubernetes.io/projected/95b8a659-2af3-4f21-b848-d395f968a0fe-kube-api-access-llldn\") pod \"95b8a659-2af3-4f21-b848-d395f968a0fe\" (UID: \"95b8a659-2af3-4f21-b848-d395f968a0fe\") "
Jan 22 07:14:22 crc kubenswrapper[4933]: I0122 07:14:22.717263 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/95b8a659-2af3-4f21-b848-d395f968a0fe-scripts\") pod \"95b8a659-2af3-4f21-b848-d395f968a0fe\" (UID: \"95b8a659-2af3-4f21-b848-d395f968a0fe\") "
Jan 22 07:14:22 crc kubenswrapper[4933]: I0122 07:14:22.717322 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/95b8a659-2af3-4f21-b848-d395f968a0fe-combined-ca-bundle\") pod \"95b8a659-2af3-4f21-b848-d395f968a0fe\" (UID: \"95b8a659-2af3-4f21-b848-d395f968a0fe\") "
Jan 22 07:14:22 crc kubenswrapper[4933]: I0122 07:14:22.717371 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/95b8a659-2af3-4f21-b848-d395f968a0fe-credential-keys\") pod \"95b8a659-2af3-4f21-b848-d395f968a0fe\" (UID: \"95b8a659-2af3-4f21-b848-d395f968a0fe\") "
Jan 22 07:14:22 crc kubenswrapper[4933]: I0122 07:14:22.717678 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/95b8a659-2af3-4f21-b848-d395f968a0fe-config-data\") pod \"95b8a659-2af3-4f21-b848-d395f968a0fe\" (UID: \"95b8a659-2af3-4f21-b848-d395f968a0fe\") "
Jan 22 07:14:22 crc kubenswrapper[4933]: I0122 07:14:22.722057 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/95b8a659-2af3-4f21-b848-d395f968a0fe-kube-api-access-llldn" (OuterVolumeSpecName: "kube-api-access-llldn") pod "95b8a659-2af3-4f21-b848-d395f968a0fe" (UID: "95b8a659-2af3-4f21-b848-d395f968a0fe"). InnerVolumeSpecName "kube-api-access-llldn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 07:14:22 crc kubenswrapper[4933]: I0122 07:14:22.722414 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/95b8a659-2af3-4f21-b848-d395f968a0fe-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "95b8a659-2af3-4f21-b848-d395f968a0fe" (UID: "95b8a659-2af3-4f21-b848-d395f968a0fe"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 07:14:22 crc kubenswrapper[4933]: I0122 07:14:22.734608 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/95b8a659-2af3-4f21-b848-d395f968a0fe-scripts" (OuterVolumeSpecName: "scripts") pod "95b8a659-2af3-4f21-b848-d395f968a0fe" (UID: "95b8a659-2af3-4f21-b848-d395f968a0fe"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 07:14:22 crc kubenswrapper[4933]: I0122 07:14:22.734650 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/95b8a659-2af3-4f21-b848-d395f968a0fe-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "95b8a659-2af3-4f21-b848-d395f968a0fe" (UID: "95b8a659-2af3-4f21-b848-d395f968a0fe"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 07:14:22 crc kubenswrapper[4933]: I0122 07:14:22.741050 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/95b8a659-2af3-4f21-b848-d395f968a0fe-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "95b8a659-2af3-4f21-b848-d395f968a0fe" (UID: "95b8a659-2af3-4f21-b848-d395f968a0fe"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 07:14:22 crc kubenswrapper[4933]: I0122 07:14:22.744293 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/95b8a659-2af3-4f21-b848-d395f968a0fe-config-data" (OuterVolumeSpecName: "config-data") pod "95b8a659-2af3-4f21-b848-d395f968a0fe" (UID: "95b8a659-2af3-4f21-b848-d395f968a0fe"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 07:14:22 crc kubenswrapper[4933]: I0122 07:14:22.820435 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/95b8a659-2af3-4f21-b848-d395f968a0fe-scripts\") on node \"crc\" DevicePath \"\""
Jan 22 07:14:22 crc kubenswrapper[4933]: I0122 07:14:22.820465 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/95b8a659-2af3-4f21-b848-d395f968a0fe-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 22 07:14:22 crc kubenswrapper[4933]: I0122 07:14:22.820476 4933 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/95b8a659-2af3-4f21-b848-d395f968a0fe-credential-keys\") on node \"crc\" DevicePath \"\""
Jan 22 07:14:22 crc kubenswrapper[4933]: I0122 07:14:22.820486 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/95b8a659-2af3-4f21-b848-d395f968a0fe-config-data\") on node \"crc\" DevicePath \"\""
Jan 22 07:14:22 crc kubenswrapper[4933]: I0122 07:14:22.820495 4933 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/95b8a659-2af3-4f21-b848-d395f968a0fe-fernet-keys\") on node \"crc\" DevicePath \"\""
Jan 22 07:14:22 crc kubenswrapper[4933]: I0122 07:14:22.820503 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-llldn\" (UniqueName: \"kubernetes.io/projected/95b8a659-2af3-4f21-b848-d395f968a0fe-kube-api-access-llldn\") on node \"crc\" DevicePath \"\""
Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.288754 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-8x2zf" event={"ID":"95b8a659-2af3-4f21-b848-d395f968a0fe","Type":"ContainerDied","Data":"8bc61796188100c26219891d7cf91b2d49b52699fcde109b341bc0ec685c4a5a"}
Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.288792 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8bc61796188100c26219891d7cf91b2d49b52699fcde109b341bc0ec685c4a5a"
Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.288825 4933 util.go:48]
"No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-8x2zf" Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.382173 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-7fdd56f9f4-vndmm"] Jan 22 07:14:23 crc kubenswrapper[4933]: E0122 07:14:23.382758 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95b8a659-2af3-4f21-b848-d395f968a0fe" containerName="keystone-bootstrap" Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.382792 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="95b8a659-2af3-4f21-b848-d395f968a0fe" containerName="keystone-bootstrap" Jan 22 07:14:23 crc kubenswrapper[4933]: E0122 07:14:23.382826 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3702545a-242f-4b60-85b4-3eb809888177" containerName="init" Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.382838 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="3702545a-242f-4b60-85b4-3eb809888177" containerName="init" Jan 22 07:14:23 crc kubenswrapper[4933]: E0122 07:14:23.382858 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3702545a-242f-4b60-85b4-3eb809888177" containerName="dnsmasq-dns" Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.382870 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="3702545a-242f-4b60-85b4-3eb809888177" containerName="dnsmasq-dns" Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.383176 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="95b8a659-2af3-4f21-b848-d395f968a0fe" containerName="keystone-bootstrap" Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.383208 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="3702545a-242f-4b60-85b4-3eb809888177" containerName="dnsmasq-dns" Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.384063 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-7fdd56f9f4-vndmm" Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.390874 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-7fdd56f9f4-vndmm"] Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.392623 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-5n4dk" Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.392665 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.392815 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.392887 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.392982 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.393125 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.443487 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mvgkn\" (UniqueName: \"kubernetes.io/projected/935cd692-2979-4e50-81e2-47c2af0738d1-kube-api-access-mvgkn\") pod \"keystone-7fdd56f9f4-vndmm\" (UID: \"935cd692-2979-4e50-81e2-47c2af0738d1\") " pod="openstack/keystone-7fdd56f9f4-vndmm" Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.443548 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/935cd692-2979-4e50-81e2-47c2af0738d1-public-tls-certs\") pod \"keystone-7fdd56f9f4-vndmm\" (UID: \"935cd692-2979-4e50-81e2-47c2af0738d1\") " pod="openstack/keystone-7fdd56f9f4-vndmm" Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.443569 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/935cd692-2979-4e50-81e2-47c2af0738d1-internal-tls-certs\") pod \"keystone-7fdd56f9f4-vndmm\" (UID: \"935cd692-2979-4e50-81e2-47c2af0738d1\") " pod="openstack/keystone-7fdd56f9f4-vndmm" Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.443611 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/935cd692-2979-4e50-81e2-47c2af0738d1-config-data\") pod \"keystone-7fdd56f9f4-vndmm\" (UID: \"935cd692-2979-4e50-81e2-47c2af0738d1\") " pod="openstack/keystone-7fdd56f9f4-vndmm" Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.443798 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/935cd692-2979-4e50-81e2-47c2af0738d1-scripts\") pod \"keystone-7fdd56f9f4-vndmm\" (UID: \"935cd692-2979-4e50-81e2-47c2af0738d1\") " pod="openstack/keystone-7fdd56f9f4-vndmm" Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.443852 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/935cd692-2979-4e50-81e2-47c2af0738d1-credential-keys\") pod \"keystone-7fdd56f9f4-vndmm\" (UID: 
\"935cd692-2979-4e50-81e2-47c2af0738d1\") " pod="openstack/keystone-7fdd56f9f4-vndmm" Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.443909 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/935cd692-2979-4e50-81e2-47c2af0738d1-combined-ca-bundle\") pod \"keystone-7fdd56f9f4-vndmm\" (UID: \"935cd692-2979-4e50-81e2-47c2af0738d1\") " pod="openstack/keystone-7fdd56f9f4-vndmm" Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.444008 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/935cd692-2979-4e50-81e2-47c2af0738d1-fernet-keys\") pod \"keystone-7fdd56f9f4-vndmm\" (UID: \"935cd692-2979-4e50-81e2-47c2af0738d1\") " pod="openstack/keystone-7fdd56f9f4-vndmm" Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.491235 4933 scope.go:117] "RemoveContainer" containerID="8d0ff8f158acf8d88510b1fddaf0b35b7a3fb6e84561545d6c60a78c2e36fc92" Jan 22 07:14:23 crc kubenswrapper[4933]: E0122 07:14:23.491854 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.545984 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mvgkn\" (UniqueName: \"kubernetes.io/projected/935cd692-2979-4e50-81e2-47c2af0738d1-kube-api-access-mvgkn\") pod \"keystone-7fdd56f9f4-vndmm\" (UID: \"935cd692-2979-4e50-81e2-47c2af0738d1\") " pod="openstack/keystone-7fdd56f9f4-vndmm" Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.546049 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/935cd692-2979-4e50-81e2-47c2af0738d1-public-tls-certs\") pod \"keystone-7fdd56f9f4-vndmm\" (UID: \"935cd692-2979-4e50-81e2-47c2af0738d1\") " pod="openstack/keystone-7fdd56f9f4-vndmm" Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.546092 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/935cd692-2979-4e50-81e2-47c2af0738d1-internal-tls-certs\") pod \"keystone-7fdd56f9f4-vndmm\" (UID: \"935cd692-2979-4e50-81e2-47c2af0738d1\") " pod="openstack/keystone-7fdd56f9f4-vndmm" Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.546148 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/935cd692-2979-4e50-81e2-47c2af0738d1-config-data\") pod \"keystone-7fdd56f9f4-vndmm\" (UID: \"935cd692-2979-4e50-81e2-47c2af0738d1\") " pod="openstack/keystone-7fdd56f9f4-vndmm" Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.546243 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/935cd692-2979-4e50-81e2-47c2af0738d1-credential-keys\") pod \"keystone-7fdd56f9f4-vndmm\" (UID: \"935cd692-2979-4e50-81e2-47c2af0738d1\") " pod="openstack/keystone-7fdd56f9f4-vndmm" Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.546271 4933 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/935cd692-2979-4e50-81e2-47c2af0738d1-scripts\") pod \"keystone-7fdd56f9f4-vndmm\" (UID: \"935cd692-2979-4e50-81e2-47c2af0738d1\") " pod="openstack/keystone-7fdd56f9f4-vndmm" Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.546303 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/935cd692-2979-4e50-81e2-47c2af0738d1-combined-ca-bundle\") pod \"keystone-7fdd56f9f4-vndmm\" (UID: \"935cd692-2979-4e50-81e2-47c2af0738d1\") " pod="openstack/keystone-7fdd56f9f4-vndmm" Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.546389 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/935cd692-2979-4e50-81e2-47c2af0738d1-fernet-keys\") pod \"keystone-7fdd56f9f4-vndmm\" (UID: \"935cd692-2979-4e50-81e2-47c2af0738d1\") " pod="openstack/keystone-7fdd56f9f4-vndmm" Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.549920 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/935cd692-2979-4e50-81e2-47c2af0738d1-public-tls-certs\") pod \"keystone-7fdd56f9f4-vndmm\" (UID: \"935cd692-2979-4e50-81e2-47c2af0738d1\") " pod="openstack/keystone-7fdd56f9f4-vndmm" Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.550037 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/935cd692-2979-4e50-81e2-47c2af0738d1-scripts\") pod \"keystone-7fdd56f9f4-vndmm\" (UID: \"935cd692-2979-4e50-81e2-47c2af0738d1\") " pod="openstack/keystone-7fdd56f9f4-vndmm" Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.551127 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/935cd692-2979-4e50-81e2-47c2af0738d1-internal-tls-certs\") pod \"keystone-7fdd56f9f4-vndmm\" (UID: \"935cd692-2979-4e50-81e2-47c2af0738d1\") " pod="openstack/keystone-7fdd56f9f4-vndmm" Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.551342 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/935cd692-2979-4e50-81e2-47c2af0738d1-credential-keys\") pod \"keystone-7fdd56f9f4-vndmm\" (UID: \"935cd692-2979-4e50-81e2-47c2af0738d1\") " pod="openstack/keystone-7fdd56f9f4-vndmm" Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.551698 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/935cd692-2979-4e50-81e2-47c2af0738d1-fernet-keys\") pod \"keystone-7fdd56f9f4-vndmm\" (UID: \"935cd692-2979-4e50-81e2-47c2af0738d1\") " pod="openstack/keystone-7fdd56f9f4-vndmm" Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.553535 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/935cd692-2979-4e50-81e2-47c2af0738d1-config-data\") pod \"keystone-7fdd56f9f4-vndmm\" (UID: \"935cd692-2979-4e50-81e2-47c2af0738d1\") " pod="openstack/keystone-7fdd56f9f4-vndmm" Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.554009 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/935cd692-2979-4e50-81e2-47c2af0738d1-combined-ca-bundle\") pod 
\"keystone-7fdd56f9f4-vndmm\" (UID: \"935cd692-2979-4e50-81e2-47c2af0738d1\") " pod="openstack/keystone-7fdd56f9f4-vndmm" Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.563053 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mvgkn\" (UniqueName: \"kubernetes.io/projected/935cd692-2979-4e50-81e2-47c2af0738d1-kube-api-access-mvgkn\") pod \"keystone-7fdd56f9f4-vndmm\" (UID: \"935cd692-2979-4e50-81e2-47c2af0738d1\") " pod="openstack/keystone-7fdd56f9f4-vndmm" Jan 22 07:14:23 crc kubenswrapper[4933]: I0122 07:14:23.766031 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-7fdd56f9f4-vndmm" Jan 22 07:14:24 crc kubenswrapper[4933]: I0122 07:14:24.257893 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-7fdd56f9f4-vndmm"] Jan 22 07:14:24 crc kubenswrapper[4933]: W0122 07:14:24.264597 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod935cd692_2979_4e50_81e2_47c2af0738d1.slice/crio-bcde6d57bb79c234242264df33f3839c427376db50329f2438341cc0a4fe5b9a WatchSource:0}: Error finding container bcde6d57bb79c234242264df33f3839c427376db50329f2438341cc0a4fe5b9a: Status 404 returned error can't find the container with id bcde6d57bb79c234242264df33f3839c427376db50329f2438341cc0a4fe5b9a Jan 22 07:14:24 crc kubenswrapper[4933]: I0122 07:14:24.299355 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7fdd56f9f4-vndmm" event={"ID":"935cd692-2979-4e50-81e2-47c2af0738d1","Type":"ContainerStarted","Data":"bcde6d57bb79c234242264df33f3839c427376db50329f2438341cc0a4fe5b9a"} Jan 22 07:14:25 crc kubenswrapper[4933]: I0122 07:14:25.309985 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7fdd56f9f4-vndmm" event={"ID":"935cd692-2979-4e50-81e2-47c2af0738d1","Type":"ContainerStarted","Data":"59dc2b0be8763283f5c1c10d4810bf6a9eb04377191a4a3c42d003fc77c90a45"} Jan 22 07:14:25 crc kubenswrapper[4933]: I0122 07:14:25.310417 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-7fdd56f9f4-vndmm" Jan 22 07:14:25 crc kubenswrapper[4933]: I0122 07:14:25.346450 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-7fdd56f9f4-vndmm" podStartSLOduration=2.346429368 podStartE2EDuration="2.346429368s" podCreationTimestamp="2026-01-22 07:14:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:14:25.339404237 +0000 UTC m=+5313.176529630" watchObservedRunningTime="2026-01-22 07:14:25.346429368 +0000 UTC m=+5313.183554721" Jan 22 07:14:38 crc kubenswrapper[4933]: I0122 07:14:38.491537 4933 scope.go:117] "RemoveContainer" containerID="8d0ff8f158acf8d88510b1fddaf0b35b7a3fb6e84561545d6c60a78c2e36fc92" Jan 22 07:14:38 crc kubenswrapper[4933]: E0122 07:14:38.492692 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:14:51 crc kubenswrapper[4933]: I0122 07:14:51.490668 4933 scope.go:117] "RemoveContainer" 
containerID="8d0ff8f158acf8d88510b1fddaf0b35b7a3fb6e84561545d6c60a78c2e36fc92" Jan 22 07:14:51 crc kubenswrapper[4933]: E0122 07:14:51.491397 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:14:55 crc kubenswrapper[4933]: I0122 07:14:55.341836 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-7fdd56f9f4-vndmm" Jan 22 07:14:58 crc kubenswrapper[4933]: I0122 07:14:58.226286 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Jan 22 07:14:58 crc kubenswrapper[4933]: I0122 07:14:58.228960 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 22 07:14:58 crc kubenswrapper[4933]: I0122 07:14:58.233908 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-kbth4" Jan 22 07:14:58 crc kubenswrapper[4933]: I0122 07:14:58.234234 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Jan 22 07:14:58 crc kubenswrapper[4933]: I0122 07:14:58.234545 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Jan 22 07:14:58 crc kubenswrapper[4933]: I0122 07:14:58.246157 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 22 07:14:58 crc kubenswrapper[4933]: I0122 07:14:58.255041 4933 status_manager.go:875] "Failed to update status for pod" pod="openstack/openstackclient" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e186cbb9-e24d-433d-ad74-8d8ad485996c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T07:14:58Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T07:14:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T07:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: [openstackclient]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T07:14:58Z\\\",\\\"message\\\":\\\"containers with unready status: 
[openstackclient]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/podified-antelope-centos9/openstack-openstackclient@sha256:2b4f8494513a3af102066fec5868ab167ac8664aceb2f0c639d7a0b60260a944\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"openstackclient\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/home/cloud-admin/.config/openstack/clouds.yaml\\\",\\\"name\\\":\\\"openstack-config\\\"},{\\\"mountPath\\\":\\\"/home/cloud-admin/.config/openstack/secure.yaml\\\",\\\"name\\\":\\\"openstack-config-secret\\\"},{\\\"mountPath\\\":\\\"/home/cloud-admin/cloudrc\\\",\\\"name\\\":\\\"openstack-config-secret\\\"},{\\\"mountPath\\\":\\\"/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem\\\",\\\"name\\\":\\\"combined-ca-bundle\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fc24w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T07:14:58Z\\\"}}\" for pod \"openstack\"/\"openstackclient\": pods \"openstackclient\" not found" Jan 22 07:14:58 crc kubenswrapper[4933]: I0122 07:14:58.261206 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Jan 22 07:14:58 crc kubenswrapper[4933]: E0122 07:14:58.261857 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[combined-ca-bundle kube-api-access-fc24w openstack-config openstack-config-secret], unattached volumes=[], failed to process volumes=[combined-ca-bundle kube-api-access-fc24w openstack-config openstack-config-secret]: context canceled" pod="openstack/openstackclient" podUID="e186cbb9-e24d-433d-ad74-8d8ad485996c" Jan 22 07:14:58 crc kubenswrapper[4933]: I0122 07:14:58.267381 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Jan 22 07:14:58 crc kubenswrapper[4933]: I0122 07:14:58.285821 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Jan 22 07:14:58 crc kubenswrapper[4933]: I0122 07:14:58.286992 4933 util.go:30] "No sandbox for pod can be found. 
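The failed status patch above targeted UID e186cbb9 while the pod had just been deleted and recreated under a new UID, hence "pods \"openstackclient\" not found" followed by "Pod was deleted and then recreated, skipping status update". A simplified sketch of the UID guard that makes such a patch safe to drop (types here are stand-ins, not kubelet's):

    package main

    import "fmt"

    type pod struct{ Name, UID string }

    // patchStatus applies a status update only if the live object still
    // carries the UID the update was computed for.
    func patchStatus(live pod, forUID string) error {
        if live.UID != forUID {
            return fmt.Errorf("pods %q not found for uid %s (recreated as %s)",
                live.Name, forUID, live.UID)
        }
        return nil // safe to apply the patch
    }

    func main() {
        live := pod{Name: "openstackclient", UID: "35e35bac-ac97-4f82-8358-218c35ada9a5"}
        fmt.Println(patchStatus(live, "e186cbb9-e24d-433d-ad74-8d8ad485996c"))
    }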
Need to start a new one" pod="openstack/openstackclient" Jan 22 07:14:58 crc kubenswrapper[4933]: I0122 07:14:58.295460 4933 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="e186cbb9-e24d-433d-ad74-8d8ad485996c" podUID="35e35bac-ac97-4f82-8358-218c35ada9a5" Jan 22 07:14:58 crc kubenswrapper[4933]: I0122 07:14:58.303005 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 22 07:14:58 crc kubenswrapper[4933]: I0122 07:14:58.385055 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r7l7t\" (UniqueName: \"kubernetes.io/projected/35e35bac-ac97-4f82-8358-218c35ada9a5-kube-api-access-r7l7t\") pod \"openstackclient\" (UID: \"35e35bac-ac97-4f82-8358-218c35ada9a5\") " pod="openstack/openstackclient" Jan 22 07:14:58 crc kubenswrapper[4933]: I0122 07:14:58.385273 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/35e35bac-ac97-4f82-8358-218c35ada9a5-openstack-config-secret\") pod \"openstackclient\" (UID: \"35e35bac-ac97-4f82-8358-218c35ada9a5\") " pod="openstack/openstackclient" Jan 22 07:14:58 crc kubenswrapper[4933]: I0122 07:14:58.385331 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35e35bac-ac97-4f82-8358-218c35ada9a5-combined-ca-bundle\") pod \"openstackclient\" (UID: \"35e35bac-ac97-4f82-8358-218c35ada9a5\") " pod="openstack/openstackclient" Jan 22 07:14:58 crc kubenswrapper[4933]: I0122 07:14:58.385368 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/35e35bac-ac97-4f82-8358-218c35ada9a5-openstack-config\") pod \"openstackclient\" (UID: \"35e35bac-ac97-4f82-8358-218c35ada9a5\") " pod="openstack/openstackclient" Jan 22 07:14:58 crc kubenswrapper[4933]: I0122 07:14:58.487497 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/35e35bac-ac97-4f82-8358-218c35ada9a5-openstack-config-secret\") pod \"openstackclient\" (UID: \"35e35bac-ac97-4f82-8358-218c35ada9a5\") " pod="openstack/openstackclient" Jan 22 07:14:58 crc kubenswrapper[4933]: I0122 07:14:58.487955 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35e35bac-ac97-4f82-8358-218c35ada9a5-combined-ca-bundle\") pod \"openstackclient\" (UID: \"35e35bac-ac97-4f82-8358-218c35ada9a5\") " pod="openstack/openstackclient" Jan 22 07:14:58 crc kubenswrapper[4933]: I0122 07:14:58.487995 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/35e35bac-ac97-4f82-8358-218c35ada9a5-openstack-config\") pod \"openstackclient\" (UID: \"35e35bac-ac97-4f82-8358-218c35ada9a5\") " pod="openstack/openstackclient" Jan 22 07:14:58 crc kubenswrapper[4933]: I0122 07:14:58.488065 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r7l7t\" (UniqueName: \"kubernetes.io/projected/35e35bac-ac97-4f82-8358-218c35ada9a5-kube-api-access-r7l7t\") pod \"openstackclient\" (UID: \"35e35bac-ac97-4f82-8358-218c35ada9a5\") " pod="openstack/openstackclient" Jan 22 
07:14:58 crc kubenswrapper[4933]: I0122 07:14:58.489966 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/35e35bac-ac97-4f82-8358-218c35ada9a5-openstack-config\") pod \"openstackclient\" (UID: \"35e35bac-ac97-4f82-8358-218c35ada9a5\") " pod="openstack/openstackclient" Jan 22 07:14:58 crc kubenswrapper[4933]: I0122 07:14:58.494800 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/35e35bac-ac97-4f82-8358-218c35ada9a5-openstack-config-secret\") pod \"openstackclient\" (UID: \"35e35bac-ac97-4f82-8358-218c35ada9a5\") " pod="openstack/openstackclient" Jan 22 07:14:58 crc kubenswrapper[4933]: I0122 07:14:58.494846 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35e35bac-ac97-4f82-8358-218c35ada9a5-combined-ca-bundle\") pod \"openstackclient\" (UID: \"35e35bac-ac97-4f82-8358-218c35ada9a5\") " pod="openstack/openstackclient" Jan 22 07:14:58 crc kubenswrapper[4933]: I0122 07:14:58.523772 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r7l7t\" (UniqueName: \"kubernetes.io/projected/35e35bac-ac97-4f82-8358-218c35ada9a5-kube-api-access-r7l7t\") pod \"openstackclient\" (UID: \"35e35bac-ac97-4f82-8358-218c35ada9a5\") " pod="openstack/openstackclient" Jan 22 07:14:58 crc kubenswrapper[4933]: I0122 07:14:58.549924 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e186cbb9-e24d-433d-ad74-8d8ad485996c" path="/var/lib/kubelet/pods/e186cbb9-e24d-433d-ad74-8d8ad485996c/volumes" Jan 22 07:14:58 crc kubenswrapper[4933]: I0122 07:14:58.610038 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 22 07:14:58 crc kubenswrapper[4933]: I0122 07:14:58.621387 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 22 07:14:58 crc kubenswrapper[4933]: I0122 07:14:58.627119 4933 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="e186cbb9-e24d-433d-ad74-8d8ad485996c" podUID="35e35bac-ac97-4f82-8358-218c35ada9a5" Jan 22 07:14:58 crc kubenswrapper[4933]: I0122 07:14:58.631916 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 22 07:14:58 crc kubenswrapper[4933]: I0122 07:14:58.640447 4933 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="e186cbb9-e24d-433d-ad74-8d8ad485996c" podUID="35e35bac-ac97-4f82-8358-218c35ada9a5" Jan 22 07:14:59 crc kubenswrapper[4933]: I0122 07:14:59.084684 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 22 07:14:59 crc kubenswrapper[4933]: I0122 07:14:59.634962 4933 util.go:30] "No sandbox for pod can be found. 
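"Cleaned up orphaned pod volumes dir" above is the safety-net sweep for pod directories whose UID no longer belongs to any active pod. An illustrative sketch of that shape (the path, the activeUIDs set, and the empty-dir check are assumptions for the example, not kubelet's exact logic):

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    // cleanupOrphans removes the volumes dir of any pod UID that is no
    // longer active. os.Remove only succeeds on an empty directory,
    // which is the safety property wanted here: never delete data that
    // is still mounted or populated.
    func cleanupOrphans(root string, active map[string]bool) error {
        entries, err := os.ReadDir(root)
        if err != nil {
            return err
        }
        for _, e := range entries {
            if !e.IsDir() || active[e.Name()] {
                continue
            }
            volumes := filepath.Join(root, e.Name(), "volumes")
            if err := os.Remove(volumes); err == nil {
                fmt.Println("Cleaned up orphaned pod volumes dir", volumes)
            }
        }
        return nil
    }

    func main() {
        _ = cleanupOrphans("/var/lib/kubelet/pods", map[string]bool{
            "35e35bac-ac97-4f82-8358-218c35ada9a5": true, // the live openstackclient
        })
    }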
Jan 22 07:14:59 crc kubenswrapper[4933]: I0122 07:14:59.634966 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"35e35bac-ac97-4f82-8358-218c35ada9a5","Type":"ContainerStarted","Data":"52a30d5525917fa1ffcbe11dce16fb1f52d3e80dd2aacf9785848d1522c30246"}
Jan 22 07:14:59 crc kubenswrapper[4933]: I0122 07:14:59.635264 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"35e35bac-ac97-4f82-8358-218c35ada9a5","Type":"ContainerStarted","Data":"3d145d6343da9dedf276c08768728b990d43d688b0017ff3dde97e6e789b22e7"}
Jan 22 07:14:59 crc kubenswrapper[4933]: I0122 07:14:59.654678 4933 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="e186cbb9-e24d-433d-ad74-8d8ad485996c" podUID="35e35bac-ac97-4f82-8358-218c35ada9a5"
Jan 22 07:14:59 crc kubenswrapper[4933]: I0122 07:14:59.657159 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=1.657141041 podStartE2EDuration="1.657141041s" podCreationTimestamp="2026-01-22 07:14:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:14:59.650702825 +0000 UTC m=+5347.487828178" watchObservedRunningTime="2026-01-22 07:14:59.657141041 +0000 UTC m=+5347.494266404"
Jan 22 07:14:59 crc kubenswrapper[4933]: I0122 07:14:59.659286 4933 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="e186cbb9-e24d-433d-ad74-8d8ad485996c" podUID="35e35bac-ac97-4f82-8358-218c35ada9a5"
Jan 22 07:15:00 crc kubenswrapper[4933]: I0122 07:15:00.138663 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484435-45t74"]
Jan 22 07:15:00 crc kubenswrapper[4933]: I0122 07:15:00.139943 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484435-45t74"
Jan 22 07:15:00 crc kubenswrapper[4933]: I0122 07:15:00.142997 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Jan 22 07:15:00 crc kubenswrapper[4933]: I0122 07:15:00.144655 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Jan 22 07:15:00 crc kubenswrapper[4933]: I0122 07:15:00.152951 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484435-45t74"]
Jan 22 07:15:00 crc kubenswrapper[4933]: I0122 07:15:00.218011 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b83d43bd-6f8b-472a-b999-f7b90507e14b-secret-volume\") pod \"collect-profiles-29484435-45t74\" (UID: \"b83d43bd-6f8b-472a-b999-f7b90507e14b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484435-45t74"
Jan 22 07:15:00 crc kubenswrapper[4933]: I0122 07:15:00.218129 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b83d43bd-6f8b-472a-b999-f7b90507e14b-config-volume\") pod \"collect-profiles-29484435-45t74\" (UID: \"b83d43bd-6f8b-472a-b999-f7b90507e14b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484435-45t74"
Jan 22 07:15:00 crc kubenswrapper[4933]: I0122 07:15:00.218249 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6rzsv\" (UniqueName: \"kubernetes.io/projected/b83d43bd-6f8b-472a-b999-f7b90507e14b-kube-api-access-6rzsv\") pod \"collect-profiles-29484435-45t74\" (UID: \"b83d43bd-6f8b-472a-b999-f7b90507e14b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484435-45t74"
Jan 22 07:15:00 crc kubenswrapper[4933]: I0122 07:15:00.320146 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6rzsv\" (UniqueName: \"kubernetes.io/projected/b83d43bd-6f8b-472a-b999-f7b90507e14b-kube-api-access-6rzsv\") pod \"collect-profiles-29484435-45t74\" (UID: \"b83d43bd-6f8b-472a-b999-f7b90507e14b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484435-45t74"
Jan 22 07:15:00 crc kubenswrapper[4933]: I0122 07:15:00.320252 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b83d43bd-6f8b-472a-b999-f7b90507e14b-secret-volume\") pod \"collect-profiles-29484435-45t74\" (UID: \"b83d43bd-6f8b-472a-b999-f7b90507e14b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484435-45t74"
Jan 22 07:15:00 crc kubenswrapper[4933]: I0122 07:15:00.320315 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b83d43bd-6f8b-472a-b999-f7b90507e14b-config-volume\") pod \"collect-profiles-29484435-45t74\" (UID: \"b83d43bd-6f8b-472a-b999-f7b90507e14b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484435-45t74"
Jan 22 07:15:00 crc kubenswrapper[4933]: I0122 07:15:00.321282 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b83d43bd-6f8b-472a-b999-f7b90507e14b-config-volume\") pod \"collect-profiles-29484435-45t74\" (UID: \"b83d43bd-6f8b-472a-b999-f7b90507e14b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484435-45t74"
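The numeric suffix in collect-profiles-29484435 is the CronJob controller's naming scheme: the scheduled time of the run in minutes since the Unix epoch (the trailing -45t74 is the Job's random pod suffix). Decoding it confirms the schedule matches the log timestamps:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        const suffix = 29484435 // from collect-profiles-29484435-45t74
        t := time.Unix(suffix*60, 0).UTC()
        fmt.Println(t) // 2026-01-22 07:15:00 +0000 UTC, matching the SyncLoop ADD above
    }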
\"collect-profiles-29484435-45t74\" (UID: \"b83d43bd-6f8b-472a-b999-f7b90507e14b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484435-45t74" Jan 22 07:15:00 crc kubenswrapper[4933]: I0122 07:15:00.324857 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b83d43bd-6f8b-472a-b999-f7b90507e14b-secret-volume\") pod \"collect-profiles-29484435-45t74\" (UID: \"b83d43bd-6f8b-472a-b999-f7b90507e14b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484435-45t74" Jan 22 07:15:00 crc kubenswrapper[4933]: I0122 07:15:00.338828 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6rzsv\" (UniqueName: \"kubernetes.io/projected/b83d43bd-6f8b-472a-b999-f7b90507e14b-kube-api-access-6rzsv\") pod \"collect-profiles-29484435-45t74\" (UID: \"b83d43bd-6f8b-472a-b999-f7b90507e14b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484435-45t74" Jan 22 07:15:00 crc kubenswrapper[4933]: I0122 07:15:00.467870 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484435-45t74" Jan 22 07:15:00 crc kubenswrapper[4933]: I0122 07:15:00.900312 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484435-45t74"] Jan 22 07:15:00 crc kubenswrapper[4933]: W0122 07:15:00.910159 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb83d43bd_6f8b_472a_b999_f7b90507e14b.slice/crio-d438927fdb457b7df2c57dffcaeb7ef863286dc7d8bece94c1c2f0ce6cbeca03 WatchSource:0}: Error finding container d438927fdb457b7df2c57dffcaeb7ef863286dc7d8bece94c1c2f0ce6cbeca03: Status 404 returned error can't find the container with id d438927fdb457b7df2c57dffcaeb7ef863286dc7d8bece94c1c2f0ce6cbeca03 Jan 22 07:15:01 crc kubenswrapper[4933]: I0122 07:15:01.662630 4933 generic.go:334] "Generic (PLEG): container finished" podID="b83d43bd-6f8b-472a-b999-f7b90507e14b" containerID="b04e7526db1898c258136ecf504d626753553d30f6199ea9a6de4f149a84d89f" exitCode=0 Jan 22 07:15:01 crc kubenswrapper[4933]: I0122 07:15:01.662985 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484435-45t74" event={"ID":"b83d43bd-6f8b-472a-b999-f7b90507e14b","Type":"ContainerDied","Data":"b04e7526db1898c258136ecf504d626753553d30f6199ea9a6de4f149a84d89f"} Jan 22 07:15:01 crc kubenswrapper[4933]: I0122 07:15:01.663030 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484435-45t74" event={"ID":"b83d43bd-6f8b-472a-b999-f7b90507e14b","Type":"ContainerStarted","Data":"d438927fdb457b7df2c57dffcaeb7ef863286dc7d8bece94c1c2f0ce6cbeca03"} Jan 22 07:15:03 crc kubenswrapper[4933]: I0122 07:15:03.014596 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484435-45t74" Jan 22 07:15:03 crc kubenswrapper[4933]: I0122 07:15:03.065224 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6rzsv\" (UniqueName: \"kubernetes.io/projected/b83d43bd-6f8b-472a-b999-f7b90507e14b-kube-api-access-6rzsv\") pod \"b83d43bd-6f8b-472a-b999-f7b90507e14b\" (UID: \"b83d43bd-6f8b-472a-b999-f7b90507e14b\") " Jan 22 07:15:03 crc kubenswrapper[4933]: I0122 07:15:03.065275 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b83d43bd-6f8b-472a-b999-f7b90507e14b-secret-volume\") pod \"b83d43bd-6f8b-472a-b999-f7b90507e14b\" (UID: \"b83d43bd-6f8b-472a-b999-f7b90507e14b\") " Jan 22 07:15:03 crc kubenswrapper[4933]: I0122 07:15:03.065413 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b83d43bd-6f8b-472a-b999-f7b90507e14b-config-volume\") pod \"b83d43bd-6f8b-472a-b999-f7b90507e14b\" (UID: \"b83d43bd-6f8b-472a-b999-f7b90507e14b\") " Jan 22 07:15:03 crc kubenswrapper[4933]: I0122 07:15:03.066054 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b83d43bd-6f8b-472a-b999-f7b90507e14b-config-volume" (OuterVolumeSpecName: "config-volume") pod "b83d43bd-6f8b-472a-b999-f7b90507e14b" (UID: "b83d43bd-6f8b-472a-b999-f7b90507e14b"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:15:03 crc kubenswrapper[4933]: I0122 07:15:03.070935 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b83d43bd-6f8b-472a-b999-f7b90507e14b-kube-api-access-6rzsv" (OuterVolumeSpecName: "kube-api-access-6rzsv") pod "b83d43bd-6f8b-472a-b999-f7b90507e14b" (UID: "b83d43bd-6f8b-472a-b999-f7b90507e14b"). InnerVolumeSpecName "kube-api-access-6rzsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:15:03 crc kubenswrapper[4933]: I0122 07:15:03.072270 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b83d43bd-6f8b-472a-b999-f7b90507e14b-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "b83d43bd-6f8b-472a-b999-f7b90507e14b" (UID: "b83d43bd-6f8b-472a-b999-f7b90507e14b"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:15:03 crc kubenswrapper[4933]: I0122 07:15:03.166890 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6rzsv\" (UniqueName: \"kubernetes.io/projected/b83d43bd-6f8b-472a-b999-f7b90507e14b-kube-api-access-6rzsv\") on node \"crc\" DevicePath \"\"" Jan 22 07:15:03 crc kubenswrapper[4933]: I0122 07:15:03.166934 4933 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b83d43bd-6f8b-472a-b999-f7b90507e14b-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 22 07:15:03 crc kubenswrapper[4933]: I0122 07:15:03.166949 4933 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b83d43bd-6f8b-472a-b999-f7b90507e14b-config-volume\") on node \"crc\" DevicePath \"\"" Jan 22 07:15:03 crc kubenswrapper[4933]: I0122 07:15:03.682334 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484435-45t74" event={"ID":"b83d43bd-6f8b-472a-b999-f7b90507e14b","Type":"ContainerDied","Data":"d438927fdb457b7df2c57dffcaeb7ef863286dc7d8bece94c1c2f0ce6cbeca03"} Jan 22 07:15:03 crc kubenswrapper[4933]: I0122 07:15:03.682395 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484435-45t74" Jan 22 07:15:03 crc kubenswrapper[4933]: I0122 07:15:03.682403 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d438927fdb457b7df2c57dffcaeb7ef863286dc7d8bece94c1c2f0ce6cbeca03" Jan 22 07:15:04 crc kubenswrapper[4933]: I0122 07:15:04.096452 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484390-dhpzk"] Jan 22 07:15:04 crc kubenswrapper[4933]: I0122 07:15:04.104281 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484390-dhpzk"] Jan 22 07:15:04 crc kubenswrapper[4933]: I0122 07:15:04.501845 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed" path="/var/lib/kubelet/pods/ee57b5f6-0cd3-4922-bd54-8a8dfb7ab2ed/volumes" Jan 22 07:15:06 crc kubenswrapper[4933]: I0122 07:15:06.490752 4933 scope.go:117] "RemoveContainer" containerID="8d0ff8f158acf8d88510b1fddaf0b35b7a3fb6e84561545d6c60a78c2e36fc92" Jan 22 07:15:06 crc kubenswrapper[4933]: E0122 07:15:06.491424 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:15:19 crc kubenswrapper[4933]: I0122 07:15:19.491006 4933 scope.go:117] "RemoveContainer" containerID="8d0ff8f158acf8d88510b1fddaf0b35b7a3fb6e84561545d6c60a78c2e36fc92" Jan 22 07:15:19 crc kubenswrapper[4933]: E0122 07:15:19.492674 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:15:33 crc kubenswrapper[4933]: I0122 07:15:33.491063 4933 scope.go:117] "RemoveContainer" containerID="8d0ff8f158acf8d88510b1fddaf0b35b7a3fb6e84561545d6c60a78c2e36fc92" Jan 22 07:15:33 crc kubenswrapper[4933]: E0122 07:15:33.492146 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:15:36 crc kubenswrapper[4933]: E0122 07:15:36.292537 4933 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.163:49670->38.102.83.163:45627: write tcp 38.102.83.163:49670->38.102.83.163:45627: write: broken pipe Jan 22 07:15:48 crc kubenswrapper[4933]: I0122 07:15:48.491541 4933 scope.go:117] "RemoveContainer" containerID="8d0ff8f158acf8d88510b1fddaf0b35b7a3fb6e84561545d6c60a78c2e36fc92" Jan 22 07:15:48 crc kubenswrapper[4933]: E0122 07:15:48.494829 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:16:02 crc kubenswrapper[4933]: I0122 07:16:02.496777 4933 scope.go:117] "RemoveContainer" containerID="8d0ff8f158acf8d88510b1fddaf0b35b7a3fb6e84561545d6c60a78c2e36fc92" Jan 22 07:16:02 crc kubenswrapper[4933]: E0122 07:16:02.497679 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:16:04 crc kubenswrapper[4933]: I0122 07:16:04.298215 4933 scope.go:117] "RemoveContainer" containerID="d5f99abad65dd8d93d1e50f12735bdb64270cfdce58d1c0a32903403958dc0e0" Jan 22 07:16:14 crc kubenswrapper[4933]: I0122 07:16:14.491125 4933 scope.go:117] "RemoveContainer" containerID="8d0ff8f158acf8d88510b1fddaf0b35b7a3fb6e84561545d6c60a78c2e36fc92" Jan 22 07:16:14 crc kubenswrapper[4933]: E0122 07:16:14.491939 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:16:27 crc kubenswrapper[4933]: I0122 07:16:27.491055 4933 scope.go:117] "RemoveContainer" containerID="8d0ff8f158acf8d88510b1fddaf0b35b7a3fb6e84561545d6c60a78c2e36fc92" Jan 22 07:16:27 crc kubenswrapper[4933]: E0122 07:16:27.491808 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:16:34 crc kubenswrapper[4933]: I0122 07:16:34.371695 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-2vclg"] Jan 22 07:16:34 crc kubenswrapper[4933]: E0122 07:16:34.372717 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b83d43bd-6f8b-472a-b999-f7b90507e14b" containerName="collect-profiles" Jan 22 07:16:34 crc kubenswrapper[4933]: I0122 07:16:34.372735 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="b83d43bd-6f8b-472a-b999-f7b90507e14b" containerName="collect-profiles" Jan 22 07:16:34 crc kubenswrapper[4933]: I0122 07:16:34.372956 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="b83d43bd-6f8b-472a-b999-f7b90507e14b" containerName="collect-profiles" Jan 22 07:16:34 crc kubenswrapper[4933]: I0122 07:16:34.373641 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-2vclg" Jan 22 07:16:34 crc kubenswrapper[4933]: I0122 07:16:34.380410 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-2vclg"] Jan 22 07:16:34 crc kubenswrapper[4933]: I0122 07:16:34.475426 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-3160-account-create-update-4tvjz"] Jan 22 07:16:34 crc kubenswrapper[4933]: I0122 07:16:34.476502 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-3160-account-create-update-4tvjz" Jan 22 07:16:34 crc kubenswrapper[4933]: I0122 07:16:34.480446 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Jan 22 07:16:34 crc kubenswrapper[4933]: I0122 07:16:34.489496 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-3160-account-create-update-4tvjz"] Jan 22 07:16:34 crc kubenswrapper[4933]: I0122 07:16:34.537173 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d4ba1a83-41bb-4111-a346-25041d9476de-operator-scripts\") pod \"barbican-db-create-2vclg\" (UID: \"d4ba1a83-41bb-4111-a346-25041d9476de\") " pod="openstack/barbican-db-create-2vclg" Jan 22 07:16:34 crc kubenswrapper[4933]: I0122 07:16:34.537377 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v24mz\" (UniqueName: \"kubernetes.io/projected/d4ba1a83-41bb-4111-a346-25041d9476de-kube-api-access-v24mz\") pod \"barbican-db-create-2vclg\" (UID: \"d4ba1a83-41bb-4111-a346-25041d9476de\") " pod="openstack/barbican-db-create-2vclg" Jan 22 07:16:34 crc kubenswrapper[4933]: I0122 07:16:34.639155 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v24mz\" (UniqueName: \"kubernetes.io/projected/d4ba1a83-41bb-4111-a346-25041d9476de-kube-api-access-v24mz\") pod \"barbican-db-create-2vclg\" (UID: \"d4ba1a83-41bb-4111-a346-25041d9476de\") " pod="openstack/barbican-db-create-2vclg" Jan 22 07:16:34 crc kubenswrapper[4933]: I0122 07:16:34.639230 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4ab530e0-2186-46ec-a3b7-c89cc912357b-operator-scripts\") pod \"barbican-3160-account-create-update-4tvjz\" (UID: \"4ab530e0-2186-46ec-a3b7-c89cc912357b\") " pod="openstack/barbican-3160-account-create-update-4tvjz" Jan 22 07:16:34 crc kubenswrapper[4933]: I0122 07:16:34.639291 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d4ba1a83-41bb-4111-a346-25041d9476de-operator-scripts\") pod \"barbican-db-create-2vclg\" (UID: \"d4ba1a83-41bb-4111-a346-25041d9476de\") " pod="openstack/barbican-db-create-2vclg" Jan 22 07:16:34 crc kubenswrapper[4933]: I0122 07:16:34.639330 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wh4s4\" (UniqueName: \"kubernetes.io/projected/4ab530e0-2186-46ec-a3b7-c89cc912357b-kube-api-access-wh4s4\") pod \"barbican-3160-account-create-update-4tvjz\" (UID: \"4ab530e0-2186-46ec-a3b7-c89cc912357b\") " pod="openstack/barbican-3160-account-create-update-4tvjz" Jan 22 07:16:34 crc kubenswrapper[4933]: I0122 07:16:34.639954 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d4ba1a83-41bb-4111-a346-25041d9476de-operator-scripts\") pod \"barbican-db-create-2vclg\" (UID: \"d4ba1a83-41bb-4111-a346-25041d9476de\") " pod="openstack/barbican-db-create-2vclg" Jan 22 07:16:34 crc kubenswrapper[4933]: I0122 07:16:34.658879 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v24mz\" (UniqueName: \"kubernetes.io/projected/d4ba1a83-41bb-4111-a346-25041d9476de-kube-api-access-v24mz\") pod \"barbican-db-create-2vclg\" (UID: \"d4ba1a83-41bb-4111-a346-25041d9476de\") " pod="openstack/barbican-db-create-2vclg" Jan 22 07:16:34 crc kubenswrapper[4933]: I0122 07:16:34.698994 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-2vclg" Jan 22 07:16:34 crc kubenswrapper[4933]: I0122 07:16:34.740937 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4ab530e0-2186-46ec-a3b7-c89cc912357b-operator-scripts\") pod \"barbican-3160-account-create-update-4tvjz\" (UID: \"4ab530e0-2186-46ec-a3b7-c89cc912357b\") " pod="openstack/barbican-3160-account-create-update-4tvjz" Jan 22 07:16:34 crc kubenswrapper[4933]: I0122 07:16:34.741019 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wh4s4\" (UniqueName: \"kubernetes.io/projected/4ab530e0-2186-46ec-a3b7-c89cc912357b-kube-api-access-wh4s4\") pod \"barbican-3160-account-create-update-4tvjz\" (UID: \"4ab530e0-2186-46ec-a3b7-c89cc912357b\") " pod="openstack/barbican-3160-account-create-update-4tvjz" Jan 22 07:16:34 crc kubenswrapper[4933]: I0122 07:16:34.741681 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4ab530e0-2186-46ec-a3b7-c89cc912357b-operator-scripts\") pod \"barbican-3160-account-create-update-4tvjz\" (UID: \"4ab530e0-2186-46ec-a3b7-c89cc912357b\") " pod="openstack/barbican-3160-account-create-update-4tvjz" Jan 22 07:16:34 crc kubenswrapper[4933]: I0122 07:16:34.761115 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wh4s4\" (UniqueName: \"kubernetes.io/projected/4ab530e0-2186-46ec-a3b7-c89cc912357b-kube-api-access-wh4s4\") pod \"barbican-3160-account-create-update-4tvjz\" (UID: \"4ab530e0-2186-46ec-a3b7-c89cc912357b\") " pod="openstack/barbican-3160-account-create-update-4tvjz" Jan 22 07:16:34 crc kubenswrapper[4933]: I0122 07:16:34.854180 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-3160-account-create-update-4tvjz" Jan 22 07:16:35 crc kubenswrapper[4933]: I0122 07:16:35.138904 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-2vclg"] Jan 22 07:16:35 crc kubenswrapper[4933]: W0122 07:16:35.298024 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4ab530e0_2186_46ec_a3b7_c89cc912357b.slice/crio-507f33b5f5677f6352228f5553c4accb533742ddec4914812bee2fb3c26ddf97 WatchSource:0}: Error finding container 507f33b5f5677f6352228f5553c4accb533742ddec4914812bee2fb3c26ddf97: Status 404 returned error can't find the container with id 507f33b5f5677f6352228f5553c4accb533742ddec4914812bee2fb3c26ddf97 Jan 22 07:16:35 crc kubenswrapper[4933]: I0122 07:16:35.298185 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-3160-account-create-update-4tvjz"] Jan 22 07:16:35 crc kubenswrapper[4933]: I0122 07:16:35.445704 4933 generic.go:334] "Generic (PLEG): container finished" podID="d4ba1a83-41bb-4111-a346-25041d9476de" containerID="d89d6bd1e0d49f851a61145fc1ff7a9d635d3c6276a3e8cece2277d2ccd49749" exitCode=0 Jan 22 07:16:35 crc kubenswrapper[4933]: I0122 07:16:35.445816 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-2vclg" event={"ID":"d4ba1a83-41bb-4111-a346-25041d9476de","Type":"ContainerDied","Data":"d89d6bd1e0d49f851a61145fc1ff7a9d635d3c6276a3e8cece2277d2ccd49749"} Jan 22 07:16:35 crc kubenswrapper[4933]: I0122 07:16:35.445872 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-2vclg" event={"ID":"d4ba1a83-41bb-4111-a346-25041d9476de","Type":"ContainerStarted","Data":"324fd92ff9e19b2290ee5edd7bb2296dc2cf5cb30c27d5be6aa6b98a37226fef"} Jan 22 07:16:35 crc kubenswrapper[4933]: I0122 07:16:35.447157 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-3160-account-create-update-4tvjz" event={"ID":"4ab530e0-2186-46ec-a3b7-c89cc912357b","Type":"ContainerStarted","Data":"c9f75adfa66bac22b89b0e8c19109fb40c58e61fe0f1daf7dbb409c4486e321b"} Jan 22 07:16:35 crc kubenswrapper[4933]: I0122 07:16:35.447183 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-3160-account-create-update-4tvjz" event={"ID":"4ab530e0-2186-46ec-a3b7-c89cc912357b","Type":"ContainerStarted","Data":"507f33b5f5677f6352228f5553c4accb533742ddec4914812bee2fb3c26ddf97"} Jan 22 07:16:35 crc kubenswrapper[4933]: I0122 07:16:35.480211 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-3160-account-create-update-4tvjz" podStartSLOduration=1.480190857 podStartE2EDuration="1.480190857s" podCreationTimestamp="2026-01-22 07:16:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:16:35.472437928 +0000 UTC m=+5443.309563311" watchObservedRunningTime="2026-01-22 07:16:35.480190857 +0000 UTC m=+5443.317316210" Jan 22 07:16:36 crc kubenswrapper[4933]: I0122 07:16:36.462856 4933 generic.go:334] "Generic (PLEG): container finished" podID="4ab530e0-2186-46ec-a3b7-c89cc912357b" containerID="c9f75adfa66bac22b89b0e8c19109fb40c58e61fe0f1daf7dbb409c4486e321b" exitCode=0 Jan 22 07:16:36 crc kubenswrapper[4933]: I0122 07:16:36.463011 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-3160-account-create-update-4tvjz" 
event={"ID":"4ab530e0-2186-46ec-a3b7-c89cc912357b","Type":"ContainerDied","Data":"c9f75adfa66bac22b89b0e8c19109fb40c58e61fe0f1daf7dbb409c4486e321b"} Jan 22 07:16:36 crc kubenswrapper[4933]: I0122 07:16:36.796640 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-2vclg" Jan 22 07:16:36 crc kubenswrapper[4933]: I0122 07:16:36.880949 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d4ba1a83-41bb-4111-a346-25041d9476de-operator-scripts\") pod \"d4ba1a83-41bb-4111-a346-25041d9476de\" (UID: \"d4ba1a83-41bb-4111-a346-25041d9476de\") " Jan 22 07:16:36 crc kubenswrapper[4933]: I0122 07:16:36.881136 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v24mz\" (UniqueName: \"kubernetes.io/projected/d4ba1a83-41bb-4111-a346-25041d9476de-kube-api-access-v24mz\") pod \"d4ba1a83-41bb-4111-a346-25041d9476de\" (UID: \"d4ba1a83-41bb-4111-a346-25041d9476de\") " Jan 22 07:16:36 crc kubenswrapper[4933]: I0122 07:16:36.881837 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d4ba1a83-41bb-4111-a346-25041d9476de-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d4ba1a83-41bb-4111-a346-25041d9476de" (UID: "d4ba1a83-41bb-4111-a346-25041d9476de"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:16:36 crc kubenswrapper[4933]: I0122 07:16:36.887646 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d4ba1a83-41bb-4111-a346-25041d9476de-kube-api-access-v24mz" (OuterVolumeSpecName: "kube-api-access-v24mz") pod "d4ba1a83-41bb-4111-a346-25041d9476de" (UID: "d4ba1a83-41bb-4111-a346-25041d9476de"). InnerVolumeSpecName "kube-api-access-v24mz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:16:36 crc kubenswrapper[4933]: I0122 07:16:36.983316 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d4ba1a83-41bb-4111-a346-25041d9476de-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:16:36 crc kubenswrapper[4933]: I0122 07:16:36.983368 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v24mz\" (UniqueName: \"kubernetes.io/projected/d4ba1a83-41bb-4111-a346-25041d9476de-kube-api-access-v24mz\") on node \"crc\" DevicePath \"\"" Jan 22 07:16:37 crc kubenswrapper[4933]: I0122 07:16:37.475553 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-2vclg" Jan 22 07:16:37 crc kubenswrapper[4933]: I0122 07:16:37.475565 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-2vclg" event={"ID":"d4ba1a83-41bb-4111-a346-25041d9476de","Type":"ContainerDied","Data":"324fd92ff9e19b2290ee5edd7bb2296dc2cf5cb30c27d5be6aa6b98a37226fef"} Jan 22 07:16:37 crc kubenswrapper[4933]: I0122 07:16:37.476126 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="324fd92ff9e19b2290ee5edd7bb2296dc2cf5cb30c27d5be6aa6b98a37226fef" Jan 22 07:16:37 crc kubenswrapper[4933]: I0122 07:16:37.819261 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-3160-account-create-update-4tvjz" Jan 22 07:16:37 crc kubenswrapper[4933]: I0122 07:16:37.897215 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wh4s4\" (UniqueName: \"kubernetes.io/projected/4ab530e0-2186-46ec-a3b7-c89cc912357b-kube-api-access-wh4s4\") pod \"4ab530e0-2186-46ec-a3b7-c89cc912357b\" (UID: \"4ab530e0-2186-46ec-a3b7-c89cc912357b\") " Jan 22 07:16:37 crc kubenswrapper[4933]: I0122 07:16:37.897341 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4ab530e0-2186-46ec-a3b7-c89cc912357b-operator-scripts\") pod \"4ab530e0-2186-46ec-a3b7-c89cc912357b\" (UID: \"4ab530e0-2186-46ec-a3b7-c89cc912357b\") " Jan 22 07:16:37 crc kubenswrapper[4933]: I0122 07:16:37.898259 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4ab530e0-2186-46ec-a3b7-c89cc912357b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4ab530e0-2186-46ec-a3b7-c89cc912357b" (UID: "4ab530e0-2186-46ec-a3b7-c89cc912357b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:16:37 crc kubenswrapper[4933]: I0122 07:16:37.901242 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ab530e0-2186-46ec-a3b7-c89cc912357b-kube-api-access-wh4s4" (OuterVolumeSpecName: "kube-api-access-wh4s4") pod "4ab530e0-2186-46ec-a3b7-c89cc912357b" (UID: "4ab530e0-2186-46ec-a3b7-c89cc912357b"). InnerVolumeSpecName "kube-api-access-wh4s4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:16:37 crc kubenswrapper[4933]: I0122 07:16:37.999344 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wh4s4\" (UniqueName: \"kubernetes.io/projected/4ab530e0-2186-46ec-a3b7-c89cc912357b-kube-api-access-wh4s4\") on node \"crc\" DevicePath \"\"" Jan 22 07:16:37 crc kubenswrapper[4933]: I0122 07:16:37.999386 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4ab530e0-2186-46ec-a3b7-c89cc912357b-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:16:38 crc kubenswrapper[4933]: I0122 07:16:38.484027 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-3160-account-create-update-4tvjz" event={"ID":"4ab530e0-2186-46ec-a3b7-c89cc912357b","Type":"ContainerDied","Data":"507f33b5f5677f6352228f5553c4accb533742ddec4914812bee2fb3c26ddf97"} Jan 22 07:16:38 crc kubenswrapper[4933]: I0122 07:16:38.484100 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="507f33b5f5677f6352228f5553c4accb533742ddec4914812bee2fb3c26ddf97" Jan 22 07:16:38 crc kubenswrapper[4933]: I0122 07:16:38.484103 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-3160-account-create-update-4tvjz" Jan 22 07:16:39 crc kubenswrapper[4933]: I0122 07:16:39.490814 4933 scope.go:117] "RemoveContainer" containerID="8d0ff8f158acf8d88510b1fddaf0b35b7a3fb6e84561545d6c60a78c2e36fc92" Jan 22 07:16:39 crc kubenswrapper[4933]: E0122 07:16:39.491756 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:16:39 crc kubenswrapper[4933]: I0122 07:16:39.743378 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-fllwb"] Jan 22 07:16:39 crc kubenswrapper[4933]: E0122 07:16:39.744392 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4ba1a83-41bb-4111-a346-25041d9476de" containerName="mariadb-database-create" Jan 22 07:16:39 crc kubenswrapper[4933]: I0122 07:16:39.744415 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4ba1a83-41bb-4111-a346-25041d9476de" containerName="mariadb-database-create" Jan 22 07:16:39 crc kubenswrapper[4933]: E0122 07:16:39.744453 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ab530e0-2186-46ec-a3b7-c89cc912357b" containerName="mariadb-account-create-update" Jan 22 07:16:39 crc kubenswrapper[4933]: I0122 07:16:39.744470 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ab530e0-2186-46ec-a3b7-c89cc912357b" containerName="mariadb-account-create-update" Jan 22 07:16:39 crc kubenswrapper[4933]: I0122 07:16:39.747828 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ab530e0-2186-46ec-a3b7-c89cc912357b" containerName="mariadb-account-create-update" Jan 22 07:16:39 crc kubenswrapper[4933]: I0122 07:16:39.747866 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4ba1a83-41bb-4111-a346-25041d9476de" containerName="mariadb-database-create" Jan 22 07:16:39 crc kubenswrapper[4933]: I0122 07:16:39.749250 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-fllwb" Jan 22 07:16:39 crc kubenswrapper[4933]: I0122 07:16:39.751976 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-fllwb"] Jan 22 07:16:39 crc kubenswrapper[4933]: I0122 07:16:39.753017 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 22 07:16:39 crc kubenswrapper[4933]: I0122 07:16:39.754638 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-n5b2g" Jan 22 07:16:39 crc kubenswrapper[4933]: I0122 07:16:39.829519 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee-combined-ca-bundle\") pod \"barbican-db-sync-fllwb\" (UID: \"a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee\") " pod="openstack/barbican-db-sync-fllwb" Jan 22 07:16:39 crc kubenswrapper[4933]: I0122 07:16:39.829604 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee-db-sync-config-data\") pod \"barbican-db-sync-fllwb\" (UID: \"a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee\") " pod="openstack/barbican-db-sync-fllwb" Jan 22 07:16:39 crc kubenswrapper[4933]: I0122 07:16:39.829702 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lqc59\" (UniqueName: \"kubernetes.io/projected/a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee-kube-api-access-lqc59\") pod \"barbican-db-sync-fllwb\" (UID: \"a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee\") " pod="openstack/barbican-db-sync-fllwb" Jan 22 07:16:39 crc kubenswrapper[4933]: I0122 07:16:39.930967 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee-combined-ca-bundle\") pod \"barbican-db-sync-fllwb\" (UID: \"a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee\") " pod="openstack/barbican-db-sync-fllwb" Jan 22 07:16:39 crc kubenswrapper[4933]: I0122 07:16:39.931268 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee-db-sync-config-data\") pod \"barbican-db-sync-fllwb\" (UID: \"a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee\") " pod="openstack/barbican-db-sync-fllwb" Jan 22 07:16:39 crc kubenswrapper[4933]: I0122 07:16:39.931396 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lqc59\" (UniqueName: \"kubernetes.io/projected/a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee-kube-api-access-lqc59\") pod \"barbican-db-sync-fllwb\" (UID: \"a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee\") " pod="openstack/barbican-db-sync-fllwb" Jan 22 07:16:39 crc kubenswrapper[4933]: I0122 07:16:39.936270 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee-db-sync-config-data\") pod \"barbican-db-sync-fllwb\" (UID: \"a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee\") " pod="openstack/barbican-db-sync-fllwb" Jan 22 07:16:39 crc kubenswrapper[4933]: I0122 07:16:39.937565 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee-combined-ca-bundle\") pod \"barbican-db-sync-fllwb\" (UID: \"a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee\") " pod="openstack/barbican-db-sync-fllwb" Jan 22 07:16:39 crc kubenswrapper[4933]: I0122 07:16:39.949015 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lqc59\" (UniqueName: \"kubernetes.io/projected/a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee-kube-api-access-lqc59\") pod \"barbican-db-sync-fllwb\" (UID: \"a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee\") " pod="openstack/barbican-db-sync-fllwb" Jan 22 07:16:40 crc kubenswrapper[4933]: I0122 07:16:40.136136 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-fllwb" Jan 22 07:16:40 crc kubenswrapper[4933]: I0122 07:16:40.636884 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-fllwb"] Jan 22 07:16:41 crc kubenswrapper[4933]: I0122 07:16:41.509367 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-fllwb" event={"ID":"a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee","Type":"ContainerStarted","Data":"a618cb87ee04a6cba951ff64ff371edf55b1b26728fb3dda75de985ac3622e53"} Jan 22 07:16:41 crc kubenswrapper[4933]: I0122 07:16:41.509439 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-fllwb" event={"ID":"a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee","Type":"ContainerStarted","Data":"be9b5c24ebbde8c535ef7fed2132e37c70be8aae7ad9c7731ef570c865987dd1"} Jan 22 07:16:41 crc kubenswrapper[4933]: I0122 07:16:41.539412 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-fllwb" podStartSLOduration=2.539381443 podStartE2EDuration="2.539381443s" podCreationTimestamp="2026-01-22 07:16:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:16:41.528004757 +0000 UTC m=+5449.365130120" watchObservedRunningTime="2026-01-22 07:16:41.539381443 +0000 UTC m=+5449.376506826" Jan 22 07:16:43 crc kubenswrapper[4933]: I0122 07:16:43.527384 4933 generic.go:334] "Generic (PLEG): container finished" podID="a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee" containerID="a618cb87ee04a6cba951ff64ff371edf55b1b26728fb3dda75de985ac3622e53" exitCode=0 Jan 22 07:16:43 crc kubenswrapper[4933]: I0122 07:16:43.527479 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-fllwb" event={"ID":"a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee","Type":"ContainerDied","Data":"a618cb87ee04a6cba951ff64ff371edf55b1b26728fb3dda75de985ac3622e53"} Jan 22 07:16:44 crc kubenswrapper[4933]: I0122 07:16:44.855941 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-fllwb" Jan 22 07:16:44 crc kubenswrapper[4933]: I0122 07:16:44.918818 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee-db-sync-config-data\") pod \"a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee\" (UID: \"a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee\") " Jan 22 07:16:44 crc kubenswrapper[4933]: I0122 07:16:44.919150 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lqc59\" (UniqueName: \"kubernetes.io/projected/a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee-kube-api-access-lqc59\") pod \"a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee\" (UID: \"a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee\") " Jan 22 07:16:44 crc kubenswrapper[4933]: I0122 07:16:44.919266 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee-combined-ca-bundle\") pod \"a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee\" (UID: \"a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee\") " Jan 22 07:16:44 crc kubenswrapper[4933]: I0122 07:16:44.928316 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee" (UID: "a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:16:44 crc kubenswrapper[4933]: I0122 07:16:44.928441 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee-kube-api-access-lqc59" (OuterVolumeSpecName: "kube-api-access-lqc59") pod "a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee" (UID: "a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee"). InnerVolumeSpecName "kube-api-access-lqc59". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:16:44 crc kubenswrapper[4933]: I0122 07:16:44.944916 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee" (UID: "a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.021718 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.021753 4933 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.021764 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lqc59\" (UniqueName: \"kubernetes.io/projected/a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee-kube-api-access-lqc59\") on node \"crc\" DevicePath \"\"" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.546055 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-fllwb" event={"ID":"a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee","Type":"ContainerDied","Data":"be9b5c24ebbde8c535ef7fed2132e37c70be8aae7ad9c7731ef570c865987dd1"} Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.546140 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="be9b5c24ebbde8c535ef7fed2132e37c70be8aae7ad9c7731ef570c865987dd1" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.546103 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-fllwb" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.775033 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-69cb547445-wv7p7"] Jan 22 07:16:45 crc kubenswrapper[4933]: E0122 07:16:45.775535 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee" containerName="barbican-db-sync" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.775562 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee" containerName="barbican-db-sync" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.775762 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee" containerName="barbican-db-sync" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.777017 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-69cb547445-wv7p7" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.780451 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.780580 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.785508 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-n5b2g" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.787357 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-6486c94768-wxz8r"] Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.788592 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-6486c94768-wxz8r" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.802560 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.809623 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-69cb547445-wv7p7"] Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.832340 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0fd2303a-3ca1-4c57-bfc3-ea8b63a8c629-config-data\") pod \"barbican-keystone-listener-6486c94768-wxz8r\" (UID: \"0fd2303a-3ca1-4c57-bfc3-ea8b63a8c629\") " pod="openstack/barbican-keystone-listener-6486c94768-wxz8r" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.832432 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a650d0c9-5c44-41b3-b8f6-8b584d976e40-combined-ca-bundle\") pod \"barbican-worker-69cb547445-wv7p7\" (UID: \"a650d0c9-5c44-41b3-b8f6-8b584d976e40\") " pod="openstack/barbican-worker-69cb547445-wv7p7" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.832479 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a650d0c9-5c44-41b3-b8f6-8b584d976e40-logs\") pod \"barbican-worker-69cb547445-wv7p7\" (UID: \"a650d0c9-5c44-41b3-b8f6-8b584d976e40\") " pod="openstack/barbican-worker-69cb547445-wv7p7" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.832497 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a650d0c9-5c44-41b3-b8f6-8b584d976e40-config-data\") pod \"barbican-worker-69cb547445-wv7p7\" (UID: \"a650d0c9-5c44-41b3-b8f6-8b584d976e40\") " pod="openstack/barbican-worker-69cb547445-wv7p7" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.832515 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0fd2303a-3ca1-4c57-bfc3-ea8b63a8c629-logs\") pod \"barbican-keystone-listener-6486c94768-wxz8r\" (UID: \"0fd2303a-3ca1-4c57-bfc3-ea8b63a8c629\") " pod="openstack/barbican-keystone-listener-6486c94768-wxz8r" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.832535 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fd2303a-3ca1-4c57-bfc3-ea8b63a8c629-combined-ca-bundle\") pod \"barbican-keystone-listener-6486c94768-wxz8r\" (UID: \"0fd2303a-3ca1-4c57-bfc3-ea8b63a8c629\") " pod="openstack/barbican-keystone-listener-6486c94768-wxz8r" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.832558 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fpjkq\" (UniqueName: \"kubernetes.io/projected/a650d0c9-5c44-41b3-b8f6-8b584d976e40-kube-api-access-fpjkq\") pod \"barbican-worker-69cb547445-wv7p7\" (UID: \"a650d0c9-5c44-41b3-b8f6-8b584d976e40\") " pod="openstack/barbican-worker-69cb547445-wv7p7" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.832577 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a650d0c9-5c44-41b3-b8f6-8b584d976e40-config-data-custom\") pod \"barbican-worker-69cb547445-wv7p7\" (UID: \"a650d0c9-5c44-41b3-b8f6-8b584d976e40\") " pod="openstack/barbican-worker-69cb547445-wv7p7" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.832594 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fcq4l\" (UniqueName: \"kubernetes.io/projected/0fd2303a-3ca1-4c57-bfc3-ea8b63a8c629-kube-api-access-fcq4l\") pod \"barbican-keystone-listener-6486c94768-wxz8r\" (UID: \"0fd2303a-3ca1-4c57-bfc3-ea8b63a8c629\") " pod="openstack/barbican-keystone-listener-6486c94768-wxz8r" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.832589 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-6486c94768-wxz8r"] Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.832652 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0fd2303a-3ca1-4c57-bfc3-ea8b63a8c629-config-data-custom\") pod \"barbican-keystone-listener-6486c94768-wxz8r\" (UID: \"0fd2303a-3ca1-4c57-bfc3-ea8b63a8c629\") " pod="openstack/barbican-keystone-listener-6486c94768-wxz8r" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.893977 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-556d77dddf-j2dhs"] Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.898551 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-556d77dddf-j2dhs" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.924315 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-556d77dddf-j2dhs"] Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.939571 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d3635eb-5e80-4d1b-b01b-ec8de50d7357-config\") pod \"dnsmasq-dns-556d77dddf-j2dhs\" (UID: \"7d3635eb-5e80-4d1b-b01b-ec8de50d7357\") " pod="openstack/dnsmasq-dns-556d77dddf-j2dhs" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.939633 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0fd2303a-3ca1-4c57-bfc3-ea8b63a8c629-config-data-custom\") pod \"barbican-keystone-listener-6486c94768-wxz8r\" (UID: \"0fd2303a-3ca1-4c57-bfc3-ea8b63a8c629\") " pod="openstack/barbican-keystone-listener-6486c94768-wxz8r" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.939663 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0fd2303a-3ca1-4c57-bfc3-ea8b63a8c629-config-data\") pod \"barbican-keystone-listener-6486c94768-wxz8r\" (UID: \"0fd2303a-3ca1-4c57-bfc3-ea8b63a8c629\") " pod="openstack/barbican-keystone-listener-6486c94768-wxz8r" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.939685 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pwk29\" (UniqueName: \"kubernetes.io/projected/7d3635eb-5e80-4d1b-b01b-ec8de50d7357-kube-api-access-pwk29\") pod \"dnsmasq-dns-556d77dddf-j2dhs\" (UID: \"7d3635eb-5e80-4d1b-b01b-ec8de50d7357\") " pod="openstack/dnsmasq-dns-556d77dddf-j2dhs" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.939716 
4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a650d0c9-5c44-41b3-b8f6-8b584d976e40-combined-ca-bundle\") pod \"barbican-worker-69cb547445-wv7p7\" (UID: \"a650d0c9-5c44-41b3-b8f6-8b584d976e40\") " pod="openstack/barbican-worker-69cb547445-wv7p7" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.939738 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7d3635eb-5e80-4d1b-b01b-ec8de50d7357-dns-svc\") pod \"dnsmasq-dns-556d77dddf-j2dhs\" (UID: \"7d3635eb-5e80-4d1b-b01b-ec8de50d7357\") " pod="openstack/dnsmasq-dns-556d77dddf-j2dhs" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.939773 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a650d0c9-5c44-41b3-b8f6-8b584d976e40-logs\") pod \"barbican-worker-69cb547445-wv7p7\" (UID: \"a650d0c9-5c44-41b3-b8f6-8b584d976e40\") " pod="openstack/barbican-worker-69cb547445-wv7p7" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.939791 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a650d0c9-5c44-41b3-b8f6-8b584d976e40-config-data\") pod \"barbican-worker-69cb547445-wv7p7\" (UID: \"a650d0c9-5c44-41b3-b8f6-8b584d976e40\") " pod="openstack/barbican-worker-69cb547445-wv7p7" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.939810 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7d3635eb-5e80-4d1b-b01b-ec8de50d7357-ovsdbserver-sb\") pod \"dnsmasq-dns-556d77dddf-j2dhs\" (UID: \"7d3635eb-5e80-4d1b-b01b-ec8de50d7357\") " pod="openstack/dnsmasq-dns-556d77dddf-j2dhs" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.939826 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0fd2303a-3ca1-4c57-bfc3-ea8b63a8c629-logs\") pod \"barbican-keystone-listener-6486c94768-wxz8r\" (UID: \"0fd2303a-3ca1-4c57-bfc3-ea8b63a8c629\") " pod="openstack/barbican-keystone-listener-6486c94768-wxz8r" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.939846 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fd2303a-3ca1-4c57-bfc3-ea8b63a8c629-combined-ca-bundle\") pod \"barbican-keystone-listener-6486c94768-wxz8r\" (UID: \"0fd2303a-3ca1-4c57-bfc3-ea8b63a8c629\") " pod="openstack/barbican-keystone-listener-6486c94768-wxz8r" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.939872 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7d3635eb-5e80-4d1b-b01b-ec8de50d7357-ovsdbserver-nb\") pod \"dnsmasq-dns-556d77dddf-j2dhs\" (UID: \"7d3635eb-5e80-4d1b-b01b-ec8de50d7357\") " pod="openstack/dnsmasq-dns-556d77dddf-j2dhs" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.939897 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fpjkq\" (UniqueName: \"kubernetes.io/projected/a650d0c9-5c44-41b3-b8f6-8b584d976e40-kube-api-access-fpjkq\") pod \"barbican-worker-69cb547445-wv7p7\" (UID: \"a650d0c9-5c44-41b3-b8f6-8b584d976e40\") " pod="openstack/barbican-worker-69cb547445-wv7p7" Jan 22 
07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.939921 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a650d0c9-5c44-41b3-b8f6-8b584d976e40-config-data-custom\") pod \"barbican-worker-69cb547445-wv7p7\" (UID: \"a650d0c9-5c44-41b3-b8f6-8b584d976e40\") " pod="openstack/barbican-worker-69cb547445-wv7p7" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.939942 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fcq4l\" (UniqueName: \"kubernetes.io/projected/0fd2303a-3ca1-4c57-bfc3-ea8b63a8c629-kube-api-access-fcq4l\") pod \"barbican-keystone-listener-6486c94768-wxz8r\" (UID: \"0fd2303a-3ca1-4c57-bfc3-ea8b63a8c629\") " pod="openstack/barbican-keystone-listener-6486c94768-wxz8r" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.940571 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a650d0c9-5c44-41b3-b8f6-8b584d976e40-logs\") pod \"barbican-worker-69cb547445-wv7p7\" (UID: \"a650d0c9-5c44-41b3-b8f6-8b584d976e40\") " pod="openstack/barbican-worker-69cb547445-wv7p7" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.941069 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0fd2303a-3ca1-4c57-bfc3-ea8b63a8c629-logs\") pod \"barbican-keystone-listener-6486c94768-wxz8r\" (UID: \"0fd2303a-3ca1-4c57-bfc3-ea8b63a8c629\") " pod="openstack/barbican-keystone-listener-6486c94768-wxz8r" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.944485 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a650d0c9-5c44-41b3-b8f6-8b584d976e40-config-data-custom\") pod \"barbican-worker-69cb547445-wv7p7\" (UID: \"a650d0c9-5c44-41b3-b8f6-8b584d976e40\") " pod="openstack/barbican-worker-69cb547445-wv7p7" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.945331 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0fd2303a-3ca1-4c57-bfc3-ea8b63a8c629-config-data-custom\") pod \"barbican-keystone-listener-6486c94768-wxz8r\" (UID: \"0fd2303a-3ca1-4c57-bfc3-ea8b63a8c629\") " pod="openstack/barbican-keystone-listener-6486c94768-wxz8r" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.951525 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a650d0c9-5c44-41b3-b8f6-8b584d976e40-config-data\") pod \"barbican-worker-69cb547445-wv7p7\" (UID: \"a650d0c9-5c44-41b3-b8f6-8b584d976e40\") " pod="openstack/barbican-worker-69cb547445-wv7p7" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.956854 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a650d0c9-5c44-41b3-b8f6-8b584d976e40-combined-ca-bundle\") pod \"barbican-worker-69cb547445-wv7p7\" (UID: \"a650d0c9-5c44-41b3-b8f6-8b584d976e40\") " pod="openstack/barbican-worker-69cb547445-wv7p7" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.962804 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fcq4l\" (UniqueName: \"kubernetes.io/projected/0fd2303a-3ca1-4c57-bfc3-ea8b63a8c629-kube-api-access-fcq4l\") pod \"barbican-keystone-listener-6486c94768-wxz8r\" (UID: \"0fd2303a-3ca1-4c57-bfc3-ea8b63a8c629\") " 
pod="openstack/barbican-keystone-listener-6486c94768-wxz8r" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.963106 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fd2303a-3ca1-4c57-bfc3-ea8b63a8c629-combined-ca-bundle\") pod \"barbican-keystone-listener-6486c94768-wxz8r\" (UID: \"0fd2303a-3ca1-4c57-bfc3-ea8b63a8c629\") " pod="openstack/barbican-keystone-listener-6486c94768-wxz8r" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.963461 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0fd2303a-3ca1-4c57-bfc3-ea8b63a8c629-config-data\") pod \"barbican-keystone-listener-6486c94768-wxz8r\" (UID: \"0fd2303a-3ca1-4c57-bfc3-ea8b63a8c629\") " pod="openstack/barbican-keystone-listener-6486c94768-wxz8r" Jan 22 07:16:45 crc kubenswrapper[4933]: I0122 07:16:45.966354 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fpjkq\" (UniqueName: \"kubernetes.io/projected/a650d0c9-5c44-41b3-b8f6-8b584d976e40-kube-api-access-fpjkq\") pod \"barbican-worker-69cb547445-wv7p7\" (UID: \"a650d0c9-5c44-41b3-b8f6-8b584d976e40\") " pod="openstack/barbican-worker-69cb547445-wv7p7" Jan 22 07:16:46 crc kubenswrapper[4933]: I0122 07:16:46.043728 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7d3635eb-5e80-4d1b-b01b-ec8de50d7357-dns-svc\") pod \"dnsmasq-dns-556d77dddf-j2dhs\" (UID: \"7d3635eb-5e80-4d1b-b01b-ec8de50d7357\") " pod="openstack/dnsmasq-dns-556d77dddf-j2dhs" Jan 22 07:16:46 crc kubenswrapper[4933]: I0122 07:16:46.043801 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7d3635eb-5e80-4d1b-b01b-ec8de50d7357-ovsdbserver-sb\") pod \"dnsmasq-dns-556d77dddf-j2dhs\" (UID: \"7d3635eb-5e80-4d1b-b01b-ec8de50d7357\") " pod="openstack/dnsmasq-dns-556d77dddf-j2dhs" Jan 22 07:16:46 crc kubenswrapper[4933]: I0122 07:16:46.043835 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7d3635eb-5e80-4d1b-b01b-ec8de50d7357-ovsdbserver-nb\") pod \"dnsmasq-dns-556d77dddf-j2dhs\" (UID: \"7d3635eb-5e80-4d1b-b01b-ec8de50d7357\") " pod="openstack/dnsmasq-dns-556d77dddf-j2dhs" Jan 22 07:16:46 crc kubenswrapper[4933]: I0122 07:16:46.043890 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d3635eb-5e80-4d1b-b01b-ec8de50d7357-config\") pod \"dnsmasq-dns-556d77dddf-j2dhs\" (UID: \"7d3635eb-5e80-4d1b-b01b-ec8de50d7357\") " pod="openstack/dnsmasq-dns-556d77dddf-j2dhs" Jan 22 07:16:46 crc kubenswrapper[4933]: I0122 07:16:46.043934 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pwk29\" (UniqueName: \"kubernetes.io/projected/7d3635eb-5e80-4d1b-b01b-ec8de50d7357-kube-api-access-pwk29\") pod \"dnsmasq-dns-556d77dddf-j2dhs\" (UID: \"7d3635eb-5e80-4d1b-b01b-ec8de50d7357\") " pod="openstack/dnsmasq-dns-556d77dddf-j2dhs" Jan 22 07:16:46 crc kubenswrapper[4933]: I0122 07:16:46.044836 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7d3635eb-5e80-4d1b-b01b-ec8de50d7357-ovsdbserver-sb\") pod \"dnsmasq-dns-556d77dddf-j2dhs\" (UID: \"7d3635eb-5e80-4d1b-b01b-ec8de50d7357\") " 
pod="openstack/dnsmasq-dns-556d77dddf-j2dhs" Jan 22 07:16:46 crc kubenswrapper[4933]: I0122 07:16:46.044943 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7d3635eb-5e80-4d1b-b01b-ec8de50d7357-dns-svc\") pod \"dnsmasq-dns-556d77dddf-j2dhs\" (UID: \"7d3635eb-5e80-4d1b-b01b-ec8de50d7357\") " pod="openstack/dnsmasq-dns-556d77dddf-j2dhs" Jan 22 07:16:46 crc kubenswrapper[4933]: I0122 07:16:46.045033 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7d3635eb-5e80-4d1b-b01b-ec8de50d7357-ovsdbserver-nb\") pod \"dnsmasq-dns-556d77dddf-j2dhs\" (UID: \"7d3635eb-5e80-4d1b-b01b-ec8de50d7357\") " pod="openstack/dnsmasq-dns-556d77dddf-j2dhs" Jan 22 07:16:46 crc kubenswrapper[4933]: I0122 07:16:46.045423 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d3635eb-5e80-4d1b-b01b-ec8de50d7357-config\") pod \"dnsmasq-dns-556d77dddf-j2dhs\" (UID: \"7d3635eb-5e80-4d1b-b01b-ec8de50d7357\") " pod="openstack/dnsmasq-dns-556d77dddf-j2dhs" Jan 22 07:16:46 crc kubenswrapper[4933]: I0122 07:16:46.054928 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-5678b54fd8-wl98d"] Jan 22 07:16:46 crc kubenswrapper[4933]: I0122 07:16:46.063904 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5678b54fd8-wl98d" Jan 22 07:16:46 crc kubenswrapper[4933]: I0122 07:16:46.065432 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5678b54fd8-wl98d"] Jan 22 07:16:46 crc kubenswrapper[4933]: I0122 07:16:46.068550 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pwk29\" (UniqueName: \"kubernetes.io/projected/7d3635eb-5e80-4d1b-b01b-ec8de50d7357-kube-api-access-pwk29\") pod \"dnsmasq-dns-556d77dddf-j2dhs\" (UID: \"7d3635eb-5e80-4d1b-b01b-ec8de50d7357\") " pod="openstack/dnsmasq-dns-556d77dddf-j2dhs" Jan 22 07:16:46 crc kubenswrapper[4933]: I0122 07:16:46.072800 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Jan 22 07:16:46 crc kubenswrapper[4933]: I0122 07:16:46.108646 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-69cb547445-wv7p7" Jan 22 07:16:46 crc kubenswrapper[4933]: I0122 07:16:46.122130 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-6486c94768-wxz8r" Jan 22 07:16:46 crc kubenswrapper[4933]: I0122 07:16:46.151032 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qfwzv\" (UniqueName: \"kubernetes.io/projected/f0899af4-85e0-4803-9ee1-de8894e2f674-kube-api-access-qfwzv\") pod \"barbican-api-5678b54fd8-wl98d\" (UID: \"f0899af4-85e0-4803-9ee1-de8894e2f674\") " pod="openstack/barbican-api-5678b54fd8-wl98d" Jan 22 07:16:46 crc kubenswrapper[4933]: I0122 07:16:46.151097 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0899af4-85e0-4803-9ee1-de8894e2f674-combined-ca-bundle\") pod \"barbican-api-5678b54fd8-wl98d\" (UID: \"f0899af4-85e0-4803-9ee1-de8894e2f674\") " pod="openstack/barbican-api-5678b54fd8-wl98d" Jan 22 07:16:46 crc kubenswrapper[4933]: I0122 07:16:46.151151 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f0899af4-85e0-4803-9ee1-de8894e2f674-logs\") pod \"barbican-api-5678b54fd8-wl98d\" (UID: \"f0899af4-85e0-4803-9ee1-de8894e2f674\") " pod="openstack/barbican-api-5678b54fd8-wl98d" Jan 22 07:16:46 crc kubenswrapper[4933]: I0122 07:16:46.151175 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f0899af4-85e0-4803-9ee1-de8894e2f674-config-data\") pod \"barbican-api-5678b54fd8-wl98d\" (UID: \"f0899af4-85e0-4803-9ee1-de8894e2f674\") " pod="openstack/barbican-api-5678b54fd8-wl98d" Jan 22 07:16:46 crc kubenswrapper[4933]: I0122 07:16:46.151259 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f0899af4-85e0-4803-9ee1-de8894e2f674-config-data-custom\") pod \"barbican-api-5678b54fd8-wl98d\" (UID: \"f0899af4-85e0-4803-9ee1-de8894e2f674\") " pod="openstack/barbican-api-5678b54fd8-wl98d" Jan 22 07:16:46 crc kubenswrapper[4933]: I0122 07:16:46.216053 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-556d77dddf-j2dhs" Jan 22 07:16:46 crc kubenswrapper[4933]: I0122 07:16:46.253567 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f0899af4-85e0-4803-9ee1-de8894e2f674-config-data-custom\") pod \"barbican-api-5678b54fd8-wl98d\" (UID: \"f0899af4-85e0-4803-9ee1-de8894e2f674\") " pod="openstack/barbican-api-5678b54fd8-wl98d" Jan 22 07:16:46 crc kubenswrapper[4933]: I0122 07:16:46.253673 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qfwzv\" (UniqueName: \"kubernetes.io/projected/f0899af4-85e0-4803-9ee1-de8894e2f674-kube-api-access-qfwzv\") pod \"barbican-api-5678b54fd8-wl98d\" (UID: \"f0899af4-85e0-4803-9ee1-de8894e2f674\") " pod="openstack/barbican-api-5678b54fd8-wl98d" Jan 22 07:16:46 crc kubenswrapper[4933]: I0122 07:16:46.253715 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0899af4-85e0-4803-9ee1-de8894e2f674-combined-ca-bundle\") pod \"barbican-api-5678b54fd8-wl98d\" (UID: \"f0899af4-85e0-4803-9ee1-de8894e2f674\") " pod="openstack/barbican-api-5678b54fd8-wl98d" Jan 22 07:16:46 crc kubenswrapper[4933]: I0122 07:16:46.253739 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f0899af4-85e0-4803-9ee1-de8894e2f674-logs\") pod \"barbican-api-5678b54fd8-wl98d\" (UID: \"f0899af4-85e0-4803-9ee1-de8894e2f674\") " pod="openstack/barbican-api-5678b54fd8-wl98d" Jan 22 07:16:46 crc kubenswrapper[4933]: I0122 07:16:46.253776 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f0899af4-85e0-4803-9ee1-de8894e2f674-config-data\") pod \"barbican-api-5678b54fd8-wl98d\" (UID: \"f0899af4-85e0-4803-9ee1-de8894e2f674\") " pod="openstack/barbican-api-5678b54fd8-wl98d" Jan 22 07:16:46 crc kubenswrapper[4933]: I0122 07:16:46.258800 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f0899af4-85e0-4803-9ee1-de8894e2f674-logs\") pod \"barbican-api-5678b54fd8-wl98d\" (UID: \"f0899af4-85e0-4803-9ee1-de8894e2f674\") " pod="openstack/barbican-api-5678b54fd8-wl98d" Jan 22 07:16:46 crc kubenswrapper[4933]: I0122 07:16:46.259487 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f0899af4-85e0-4803-9ee1-de8894e2f674-config-data\") pod \"barbican-api-5678b54fd8-wl98d\" (UID: \"f0899af4-85e0-4803-9ee1-de8894e2f674\") " pod="openstack/barbican-api-5678b54fd8-wl98d" Jan 22 07:16:46 crc kubenswrapper[4933]: I0122 07:16:46.259516 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0899af4-85e0-4803-9ee1-de8894e2f674-combined-ca-bundle\") pod \"barbican-api-5678b54fd8-wl98d\" (UID: \"f0899af4-85e0-4803-9ee1-de8894e2f674\") " pod="openstack/barbican-api-5678b54fd8-wl98d" Jan 22 07:16:46 crc kubenswrapper[4933]: I0122 07:16:46.264771 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f0899af4-85e0-4803-9ee1-de8894e2f674-config-data-custom\") pod \"barbican-api-5678b54fd8-wl98d\" (UID: \"f0899af4-85e0-4803-9ee1-de8894e2f674\") " pod="openstack/barbican-api-5678b54fd8-wl98d" Jan 22 07:16:46 crc 
kubenswrapper[4933]: I0122 07:16:46.281206 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qfwzv\" (UniqueName: \"kubernetes.io/projected/f0899af4-85e0-4803-9ee1-de8894e2f674-kube-api-access-qfwzv\") pod \"barbican-api-5678b54fd8-wl98d\" (UID: \"f0899af4-85e0-4803-9ee1-de8894e2f674\") " pod="openstack/barbican-api-5678b54fd8-wl98d" Jan 22 07:16:46 crc kubenswrapper[4933]: I0122 07:16:46.422431 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5678b54fd8-wl98d" Jan 22 07:16:46 crc kubenswrapper[4933]: I0122 07:16:46.597769 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-69cb547445-wv7p7"] Jan 22 07:16:46 crc kubenswrapper[4933]: W0122 07:16:46.623017 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda650d0c9_5c44_41b3_b8f6_8b584d976e40.slice/crio-113be56d875c301a9da5233e136ee11d803a7e16d669db3a2b5586de1e3491af WatchSource:0}: Error finding container 113be56d875c301a9da5233e136ee11d803a7e16d669db3a2b5586de1e3491af: Status 404 returned error can't find the container with id 113be56d875c301a9da5233e136ee11d803a7e16d669db3a2b5586de1e3491af Jan 22 07:16:46 crc kubenswrapper[4933]: I0122 07:16:46.682355 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-6486c94768-wxz8r"] Jan 22 07:16:46 crc kubenswrapper[4933]: W0122 07:16:46.692963 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0fd2303a_3ca1_4c57_bfc3_ea8b63a8c629.slice/crio-1e0b5e7f1ad3217243faa34e9d11307a2568cc30c56d0b5cba2cf77ec854ce08 WatchSource:0}: Error finding container 1e0b5e7f1ad3217243faa34e9d11307a2568cc30c56d0b5cba2cf77ec854ce08: Status 404 returned error can't find the container with id 1e0b5e7f1ad3217243faa34e9d11307a2568cc30c56d0b5cba2cf77ec854ce08 Jan 22 07:16:46 crc kubenswrapper[4933]: I0122 07:16:46.779482 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-556d77dddf-j2dhs"] Jan 22 07:16:46 crc kubenswrapper[4933]: W0122 07:16:46.783745 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7d3635eb_5e80_4d1b_b01b_ec8de50d7357.slice/crio-377c9e1df9f56aad39e54305496366ff55f7e962253da1b540fcf96f2663eeeb WatchSource:0}: Error finding container 377c9e1df9f56aad39e54305496366ff55f7e962253da1b540fcf96f2663eeeb: Status 404 returned error can't find the container with id 377c9e1df9f56aad39e54305496366ff55f7e962253da1b540fcf96f2663eeeb Jan 22 07:16:46 crc kubenswrapper[4933]: I0122 07:16:46.897007 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5678b54fd8-wl98d"] Jan 22 07:16:47 crc kubenswrapper[4933]: I0122 07:16:47.565840 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6486c94768-wxz8r" event={"ID":"0fd2303a-3ca1-4c57-bfc3-ea8b63a8c629","Type":"ContainerStarted","Data":"4cd5d37829a30799acc3c310b037eab1e9a9e05263d82a8373cd3ce672e94ad5"} Jan 22 07:16:47 crc kubenswrapper[4933]: I0122 07:16:47.566357 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6486c94768-wxz8r" event={"ID":"0fd2303a-3ca1-4c57-bfc3-ea8b63a8c629","Type":"ContainerStarted","Data":"484e417c5e43904de4bdf8fe28ae4f5f5d9f1773ed22030d5c167cfd8967d70d"} Jan 22 07:16:47 crc 
kubenswrapper[4933]: I0122 07:16:47.566371 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6486c94768-wxz8r" event={"ID":"0fd2303a-3ca1-4c57-bfc3-ea8b63a8c629","Type":"ContainerStarted","Data":"1e0b5e7f1ad3217243faa34e9d11307a2568cc30c56d0b5cba2cf77ec854ce08"} Jan 22 07:16:47 crc kubenswrapper[4933]: I0122 07:16:47.567904 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-69cb547445-wv7p7" event={"ID":"a650d0c9-5c44-41b3-b8f6-8b584d976e40","Type":"ContainerStarted","Data":"4747a139376c8e1ae3faefd3c2cfee8d0cc1e2af7606b216f82578fb511e152b"} Jan 22 07:16:47 crc kubenswrapper[4933]: I0122 07:16:47.567938 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-69cb547445-wv7p7" event={"ID":"a650d0c9-5c44-41b3-b8f6-8b584d976e40","Type":"ContainerStarted","Data":"29942ceeab9328e17711e006b27cafa5bc6027fcb1b77ef341f9cc9a23a76ff6"} Jan 22 07:16:47 crc kubenswrapper[4933]: I0122 07:16:47.567951 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-69cb547445-wv7p7" event={"ID":"a650d0c9-5c44-41b3-b8f6-8b584d976e40","Type":"ContainerStarted","Data":"113be56d875c301a9da5233e136ee11d803a7e16d669db3a2b5586de1e3491af"} Jan 22 07:16:47 crc kubenswrapper[4933]: I0122 07:16:47.573235 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5678b54fd8-wl98d" event={"ID":"f0899af4-85e0-4803-9ee1-de8894e2f674","Type":"ContainerStarted","Data":"c052d1e4e59b6201f3685f885e1878013fbd4d3c4483a43be02d861567ba6523"} Jan 22 07:16:47 crc kubenswrapper[4933]: I0122 07:16:47.573319 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5678b54fd8-wl98d" event={"ID":"f0899af4-85e0-4803-9ee1-de8894e2f674","Type":"ContainerStarted","Data":"8ae27aad85561ed4352a9f367620378b6df429f1a1fb0246c209f26794e78c15"} Jan 22 07:16:47 crc kubenswrapper[4933]: I0122 07:16:47.573333 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5678b54fd8-wl98d" event={"ID":"f0899af4-85e0-4803-9ee1-de8894e2f674","Type":"ContainerStarted","Data":"5fcb6ca326efdd69eece6d570670443baedf14e2eeb49acaf3906bfce22b7cc5"} Jan 22 07:16:47 crc kubenswrapper[4933]: I0122 07:16:47.574262 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5678b54fd8-wl98d" Jan 22 07:16:47 crc kubenswrapper[4933]: I0122 07:16:47.574303 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5678b54fd8-wl98d" Jan 22 07:16:47 crc kubenswrapper[4933]: I0122 07:16:47.578320 4933 generic.go:334] "Generic (PLEG): container finished" podID="7d3635eb-5e80-4d1b-b01b-ec8de50d7357" containerID="5989abb962e233737d57849de36d6565e9813de433e42a5fe0adab89dd1ca89a" exitCode=0 Jan 22 07:16:47 crc kubenswrapper[4933]: I0122 07:16:47.578368 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-556d77dddf-j2dhs" event={"ID":"7d3635eb-5e80-4d1b-b01b-ec8de50d7357","Type":"ContainerDied","Data":"5989abb962e233737d57849de36d6565e9813de433e42a5fe0adab89dd1ca89a"} Jan 22 07:16:47 crc kubenswrapper[4933]: I0122 07:16:47.578397 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-556d77dddf-j2dhs" event={"ID":"7d3635eb-5e80-4d1b-b01b-ec8de50d7357","Type":"ContainerStarted","Data":"377c9e1df9f56aad39e54305496366ff55f7e962253da1b540fcf96f2663eeeb"} Jan 22 07:16:47 crc kubenswrapper[4933]: I0122 07:16:47.587623 4933 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-6486c94768-wxz8r" podStartSLOduration=2.587598704 podStartE2EDuration="2.587598704s" podCreationTimestamp="2026-01-22 07:16:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:16:47.585494602 +0000 UTC m=+5455.422619955" watchObservedRunningTime="2026-01-22 07:16:47.587598704 +0000 UTC m=+5455.424724077" Jan 22 07:16:47 crc kubenswrapper[4933]: I0122 07:16:47.632700 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-69cb547445-wv7p7" podStartSLOduration=2.63267912 podStartE2EDuration="2.63267912s" podCreationTimestamp="2026-01-22 07:16:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:16:47.627455462 +0000 UTC m=+5455.464580825" watchObservedRunningTime="2026-01-22 07:16:47.63267912 +0000 UTC m=+5455.469804473" Jan 22 07:16:47 crc kubenswrapper[4933]: I0122 07:16:47.655626 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-5678b54fd8-wl98d" podStartSLOduration=1.6556029570000002 podStartE2EDuration="1.655602957s" podCreationTimestamp="2026-01-22 07:16:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:16:47.648111245 +0000 UTC m=+5455.485236608" watchObservedRunningTime="2026-01-22 07:16:47.655602957 +0000 UTC m=+5455.492728310" Jan 22 07:16:48 crc kubenswrapper[4933]: I0122 07:16:48.300022 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-5cbf848d96-gz47x"] Jan 22 07:16:48 crc kubenswrapper[4933]: I0122 07:16:48.301411 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-5cbf848d96-gz47x" Jan 22 07:16:48 crc kubenswrapper[4933]: I0122 07:16:48.303742 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Jan 22 07:16:48 crc kubenswrapper[4933]: I0122 07:16:48.303775 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Jan 22 07:16:48 crc kubenswrapper[4933]: I0122 07:16:48.314239 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5cbf848d96-gz47x"] Jan 22 07:16:48 crc kubenswrapper[4933]: I0122 07:16:48.404197 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wljlv\" (UniqueName: \"kubernetes.io/projected/505d78b7-992d-4d4c-891f-86a61305f83d-kube-api-access-wljlv\") pod \"barbican-api-5cbf848d96-gz47x\" (UID: \"505d78b7-992d-4d4c-891f-86a61305f83d\") " pod="openstack/barbican-api-5cbf848d96-gz47x" Jan 22 07:16:48 crc kubenswrapper[4933]: I0122 07:16:48.404477 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/505d78b7-992d-4d4c-891f-86a61305f83d-combined-ca-bundle\") pod \"barbican-api-5cbf848d96-gz47x\" (UID: \"505d78b7-992d-4d4c-891f-86a61305f83d\") " pod="openstack/barbican-api-5cbf848d96-gz47x" Jan 22 07:16:48 crc kubenswrapper[4933]: I0122 07:16:48.404529 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/505d78b7-992d-4d4c-891f-86a61305f83d-config-data-custom\") pod \"barbican-api-5cbf848d96-gz47x\" (UID: \"505d78b7-992d-4d4c-891f-86a61305f83d\") " pod="openstack/barbican-api-5cbf848d96-gz47x" Jan 22 07:16:48 crc kubenswrapper[4933]: I0122 07:16:48.404554 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/505d78b7-992d-4d4c-891f-86a61305f83d-logs\") pod \"barbican-api-5cbf848d96-gz47x\" (UID: \"505d78b7-992d-4d4c-891f-86a61305f83d\") " pod="openstack/barbican-api-5cbf848d96-gz47x" Jan 22 07:16:48 crc kubenswrapper[4933]: I0122 07:16:48.404643 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/505d78b7-992d-4d4c-891f-86a61305f83d-public-tls-certs\") pod \"barbican-api-5cbf848d96-gz47x\" (UID: \"505d78b7-992d-4d4c-891f-86a61305f83d\") " pod="openstack/barbican-api-5cbf848d96-gz47x" Jan 22 07:16:48 crc kubenswrapper[4933]: I0122 07:16:48.404700 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/505d78b7-992d-4d4c-891f-86a61305f83d-config-data\") pod \"barbican-api-5cbf848d96-gz47x\" (UID: \"505d78b7-992d-4d4c-891f-86a61305f83d\") " pod="openstack/barbican-api-5cbf848d96-gz47x" Jan 22 07:16:48 crc kubenswrapper[4933]: I0122 07:16:48.404719 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/505d78b7-992d-4d4c-891f-86a61305f83d-internal-tls-certs\") pod \"barbican-api-5cbf848d96-gz47x\" (UID: \"505d78b7-992d-4d4c-891f-86a61305f83d\") " pod="openstack/barbican-api-5cbf848d96-gz47x" Jan 22 07:16:48 crc kubenswrapper[4933]: I0122 07:16:48.506508 4933 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/505d78b7-992d-4d4c-891f-86a61305f83d-config-data-custom\") pod \"barbican-api-5cbf848d96-gz47x\" (UID: \"505d78b7-992d-4d4c-891f-86a61305f83d\") " pod="openstack/barbican-api-5cbf848d96-gz47x" Jan 22 07:16:48 crc kubenswrapper[4933]: I0122 07:16:48.506570 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/505d78b7-992d-4d4c-891f-86a61305f83d-logs\") pod \"barbican-api-5cbf848d96-gz47x\" (UID: \"505d78b7-992d-4d4c-891f-86a61305f83d\") " pod="openstack/barbican-api-5cbf848d96-gz47x" Jan 22 07:16:48 crc kubenswrapper[4933]: I0122 07:16:48.506608 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/505d78b7-992d-4d4c-891f-86a61305f83d-public-tls-certs\") pod \"barbican-api-5cbf848d96-gz47x\" (UID: \"505d78b7-992d-4d4c-891f-86a61305f83d\") " pod="openstack/barbican-api-5cbf848d96-gz47x" Jan 22 07:16:48 crc kubenswrapper[4933]: I0122 07:16:48.506668 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/505d78b7-992d-4d4c-891f-86a61305f83d-config-data\") pod \"barbican-api-5cbf848d96-gz47x\" (UID: \"505d78b7-992d-4d4c-891f-86a61305f83d\") " pod="openstack/barbican-api-5cbf848d96-gz47x" Jan 22 07:16:48 crc kubenswrapper[4933]: I0122 07:16:48.506707 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/505d78b7-992d-4d4c-891f-86a61305f83d-internal-tls-certs\") pod \"barbican-api-5cbf848d96-gz47x\" (UID: \"505d78b7-992d-4d4c-891f-86a61305f83d\") " pod="openstack/barbican-api-5cbf848d96-gz47x" Jan 22 07:16:48 crc kubenswrapper[4933]: I0122 07:16:48.506740 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wljlv\" (UniqueName: \"kubernetes.io/projected/505d78b7-992d-4d4c-891f-86a61305f83d-kube-api-access-wljlv\") pod \"barbican-api-5cbf848d96-gz47x\" (UID: \"505d78b7-992d-4d4c-891f-86a61305f83d\") " pod="openstack/barbican-api-5cbf848d96-gz47x" Jan 22 07:16:48 crc kubenswrapper[4933]: I0122 07:16:48.506768 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/505d78b7-992d-4d4c-891f-86a61305f83d-combined-ca-bundle\") pod \"barbican-api-5cbf848d96-gz47x\" (UID: \"505d78b7-992d-4d4c-891f-86a61305f83d\") " pod="openstack/barbican-api-5cbf848d96-gz47x" Jan 22 07:16:48 crc kubenswrapper[4933]: I0122 07:16:48.507051 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/505d78b7-992d-4d4c-891f-86a61305f83d-logs\") pod \"barbican-api-5cbf848d96-gz47x\" (UID: \"505d78b7-992d-4d4c-891f-86a61305f83d\") " pod="openstack/barbican-api-5cbf848d96-gz47x" Jan 22 07:16:48 crc kubenswrapper[4933]: I0122 07:16:48.511751 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/505d78b7-992d-4d4c-891f-86a61305f83d-internal-tls-certs\") pod \"barbican-api-5cbf848d96-gz47x\" (UID: \"505d78b7-992d-4d4c-891f-86a61305f83d\") " pod="openstack/barbican-api-5cbf848d96-gz47x" Jan 22 07:16:48 crc kubenswrapper[4933]: I0122 07:16:48.512113 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" 
(UniqueName: \"kubernetes.io/secret/505d78b7-992d-4d4c-891f-86a61305f83d-config-data-custom\") pod \"barbican-api-5cbf848d96-gz47x\" (UID: \"505d78b7-992d-4d4c-891f-86a61305f83d\") " pod="openstack/barbican-api-5cbf848d96-gz47x" Jan 22 07:16:48 crc kubenswrapper[4933]: I0122 07:16:48.512231 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/505d78b7-992d-4d4c-891f-86a61305f83d-config-data\") pod \"barbican-api-5cbf848d96-gz47x\" (UID: \"505d78b7-992d-4d4c-891f-86a61305f83d\") " pod="openstack/barbican-api-5cbf848d96-gz47x" Jan 22 07:16:48 crc kubenswrapper[4933]: I0122 07:16:48.513318 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/505d78b7-992d-4d4c-891f-86a61305f83d-public-tls-certs\") pod \"barbican-api-5cbf848d96-gz47x\" (UID: \"505d78b7-992d-4d4c-891f-86a61305f83d\") " pod="openstack/barbican-api-5cbf848d96-gz47x" Jan 22 07:16:48 crc kubenswrapper[4933]: I0122 07:16:48.514040 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/505d78b7-992d-4d4c-891f-86a61305f83d-combined-ca-bundle\") pod \"barbican-api-5cbf848d96-gz47x\" (UID: \"505d78b7-992d-4d4c-891f-86a61305f83d\") " pod="openstack/barbican-api-5cbf848d96-gz47x" Jan 22 07:16:48 crc kubenswrapper[4933]: I0122 07:16:48.528244 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wljlv\" (UniqueName: \"kubernetes.io/projected/505d78b7-992d-4d4c-891f-86a61305f83d-kube-api-access-wljlv\") pod \"barbican-api-5cbf848d96-gz47x\" (UID: \"505d78b7-992d-4d4c-891f-86a61305f83d\") " pod="openstack/barbican-api-5cbf848d96-gz47x" Jan 22 07:16:48 crc kubenswrapper[4933]: I0122 07:16:48.587500 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-556d77dddf-j2dhs" event={"ID":"7d3635eb-5e80-4d1b-b01b-ec8de50d7357","Type":"ContainerStarted","Data":"32d75eb167ed591863169b95b4cebbb71b547d938f20085555d7a0d80d1d00b6"} Jan 22 07:16:48 crc kubenswrapper[4933]: I0122 07:16:48.610431 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-556d77dddf-j2dhs" podStartSLOduration=3.610416538 podStartE2EDuration="3.610416538s" podCreationTimestamp="2026-01-22 07:16:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:16:48.603541221 +0000 UTC m=+5456.440666574" watchObservedRunningTime="2026-01-22 07:16:48.610416538 +0000 UTC m=+5456.447541891" Jan 22 07:16:48 crc kubenswrapper[4933]: I0122 07:16:48.629646 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-5cbf848d96-gz47x" Jan 22 07:16:49 crc kubenswrapper[4933]: I0122 07:16:49.050955 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5cbf848d96-gz47x"] Jan 22 07:16:49 crc kubenswrapper[4933]: I0122 07:16:49.598564 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5cbf848d96-gz47x" event={"ID":"505d78b7-992d-4d4c-891f-86a61305f83d","Type":"ContainerStarted","Data":"031c5aaa1ee30afa96f9ecc69a486e6e47ea31b9802b379376c888821e0dc218"} Jan 22 07:16:49 crc kubenswrapper[4933]: I0122 07:16:49.600433 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5cbf848d96-gz47x" event={"ID":"505d78b7-992d-4d4c-891f-86a61305f83d","Type":"ContainerStarted","Data":"2d6b55217f8af79c5594c3d007df4a6ebb18ef27b81aaec3ca023bd97710785e"} Jan 22 07:16:49 crc kubenswrapper[4933]: I0122 07:16:49.600575 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5cbf848d96-gz47x" event={"ID":"505d78b7-992d-4d4c-891f-86a61305f83d","Type":"ContainerStarted","Data":"dbb02f1b2f9c61fad57b44db5cd5a22558b5f48ee7b3f89cb3215c30fe8ab63e"} Jan 22 07:16:49 crc kubenswrapper[4933]: I0122 07:16:49.600692 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-556d77dddf-j2dhs" Jan 22 07:16:49 crc kubenswrapper[4933]: I0122 07:16:49.620117 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-5cbf848d96-gz47x" podStartSLOduration=1.620097243 podStartE2EDuration="1.620097243s" podCreationTimestamp="2026-01-22 07:16:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:16:49.619899648 +0000 UTC m=+5457.457025011" watchObservedRunningTime="2026-01-22 07:16:49.620097243 +0000 UTC m=+5457.457222616" Jan 22 07:16:50 crc kubenswrapper[4933]: I0122 07:16:50.606974 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5cbf848d96-gz47x" Jan 22 07:16:50 crc kubenswrapper[4933]: I0122 07:16:50.607324 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5cbf848d96-gz47x" Jan 22 07:16:51 crc kubenswrapper[4933]: I0122 07:16:51.491123 4933 scope.go:117] "RemoveContainer" containerID="8d0ff8f158acf8d88510b1fddaf0b35b7a3fb6e84561545d6c60a78c2e36fc92" Jan 22 07:16:51 crc kubenswrapper[4933]: E0122 07:16:51.491443 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:16:56 crc kubenswrapper[4933]: I0122 07:16:56.218275 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-556d77dddf-j2dhs" Jan 22 07:16:56 crc kubenswrapper[4933]: I0122 07:16:56.293052 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d997bd4b5-wcm95"] Jan 22 07:16:56 crc kubenswrapper[4933]: I0122 07:16:56.293284 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6d997bd4b5-wcm95" podUID="43bccb46-764e-45cb-9261-816ee71c8b0b" 
containerName="dnsmasq-dns" containerID="cri-o://327e5671019c61edc8b869d669626cc3907475d04f7eb392aa10a6f212dde0f5" gracePeriod=10 Jan 22 07:16:56 crc kubenswrapper[4933]: E0122 07:16:56.495609 4933 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod43bccb46_764e_45cb_9261_816ee71c8b0b.slice/crio-conmon-327e5671019c61edc8b869d669626cc3907475d04f7eb392aa10a6f212dde0f5.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod43bccb46_764e_45cb_9261_816ee71c8b0b.slice/crio-327e5671019c61edc8b869d669626cc3907475d04f7eb392aa10a6f212dde0f5.scope\": RecentStats: unable to find data in memory cache]" Jan 22 07:16:56 crc kubenswrapper[4933]: I0122 07:16:56.649115 4933 generic.go:334] "Generic (PLEG): container finished" podID="43bccb46-764e-45cb-9261-816ee71c8b0b" containerID="327e5671019c61edc8b869d669626cc3907475d04f7eb392aa10a6f212dde0f5" exitCode=0 Jan 22 07:16:56 crc kubenswrapper[4933]: I0122 07:16:56.649153 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d997bd4b5-wcm95" event={"ID":"43bccb46-764e-45cb-9261-816ee71c8b0b","Type":"ContainerDied","Data":"327e5671019c61edc8b869d669626cc3907475d04f7eb392aa10a6f212dde0f5"} Jan 22 07:16:56 crc kubenswrapper[4933]: I0122 07:16:56.955618 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d997bd4b5-wcm95" Jan 22 07:16:57 crc kubenswrapper[4933]: I0122 07:16:57.063987 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wb5ss\" (UniqueName: \"kubernetes.io/projected/43bccb46-764e-45cb-9261-816ee71c8b0b-kube-api-access-wb5ss\") pod \"43bccb46-764e-45cb-9261-816ee71c8b0b\" (UID: \"43bccb46-764e-45cb-9261-816ee71c8b0b\") " Jan 22 07:16:57 crc kubenswrapper[4933]: I0122 07:16:57.064166 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/43bccb46-764e-45cb-9261-816ee71c8b0b-ovsdbserver-nb\") pod \"43bccb46-764e-45cb-9261-816ee71c8b0b\" (UID: \"43bccb46-764e-45cb-9261-816ee71c8b0b\") " Jan 22 07:16:57 crc kubenswrapper[4933]: I0122 07:16:57.064242 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/43bccb46-764e-45cb-9261-816ee71c8b0b-ovsdbserver-sb\") pod \"43bccb46-764e-45cb-9261-816ee71c8b0b\" (UID: \"43bccb46-764e-45cb-9261-816ee71c8b0b\") " Jan 22 07:16:57 crc kubenswrapper[4933]: I0122 07:16:57.064287 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43bccb46-764e-45cb-9261-816ee71c8b0b-config\") pod \"43bccb46-764e-45cb-9261-816ee71c8b0b\" (UID: \"43bccb46-764e-45cb-9261-816ee71c8b0b\") " Jan 22 07:16:57 crc kubenswrapper[4933]: I0122 07:16:57.064325 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/43bccb46-764e-45cb-9261-816ee71c8b0b-dns-svc\") pod \"43bccb46-764e-45cb-9261-816ee71c8b0b\" (UID: \"43bccb46-764e-45cb-9261-816ee71c8b0b\") " Jan 22 07:16:57 crc kubenswrapper[4933]: I0122 07:16:57.074747 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43bccb46-764e-45cb-9261-816ee71c8b0b-kube-api-access-wb5ss" (OuterVolumeSpecName: 
"kube-api-access-wb5ss") pod "43bccb46-764e-45cb-9261-816ee71c8b0b" (UID: "43bccb46-764e-45cb-9261-816ee71c8b0b"). InnerVolumeSpecName "kube-api-access-wb5ss". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:16:57 crc kubenswrapper[4933]: I0122 07:16:57.113067 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43bccb46-764e-45cb-9261-816ee71c8b0b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "43bccb46-764e-45cb-9261-816ee71c8b0b" (UID: "43bccb46-764e-45cb-9261-816ee71c8b0b"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:16:57 crc kubenswrapper[4933]: I0122 07:16:57.114931 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43bccb46-764e-45cb-9261-816ee71c8b0b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "43bccb46-764e-45cb-9261-816ee71c8b0b" (UID: "43bccb46-764e-45cb-9261-816ee71c8b0b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:16:57 crc kubenswrapper[4933]: I0122 07:16:57.120623 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43bccb46-764e-45cb-9261-816ee71c8b0b-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "43bccb46-764e-45cb-9261-816ee71c8b0b" (UID: "43bccb46-764e-45cb-9261-816ee71c8b0b"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:16:57 crc kubenswrapper[4933]: I0122 07:16:57.122001 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43bccb46-764e-45cb-9261-816ee71c8b0b-config" (OuterVolumeSpecName: "config") pod "43bccb46-764e-45cb-9261-816ee71c8b0b" (UID: "43bccb46-764e-45cb-9261-816ee71c8b0b"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:16:57 crc kubenswrapper[4933]: I0122 07:16:57.166465 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/43bccb46-764e-45cb-9261-816ee71c8b0b-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 07:16:57 crc kubenswrapper[4933]: I0122 07:16:57.166723 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43bccb46-764e-45cb-9261-816ee71c8b0b-config\") on node \"crc\" DevicePath \"\"" Jan 22 07:16:57 crc kubenswrapper[4933]: I0122 07:16:57.166805 4933 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/43bccb46-764e-45cb-9261-816ee71c8b0b-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 07:16:57 crc kubenswrapper[4933]: I0122 07:16:57.166884 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wb5ss\" (UniqueName: \"kubernetes.io/projected/43bccb46-764e-45cb-9261-816ee71c8b0b-kube-api-access-wb5ss\") on node \"crc\" DevicePath \"\"" Jan 22 07:16:57 crc kubenswrapper[4933]: I0122 07:16:57.166952 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/43bccb46-764e-45cb-9261-816ee71c8b0b-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 07:16:57 crc kubenswrapper[4933]: I0122 07:16:57.658205 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d997bd4b5-wcm95" event={"ID":"43bccb46-764e-45cb-9261-816ee71c8b0b","Type":"ContainerDied","Data":"5ebc6ac5d4410cba7ebbb941837e9ee5381db71d4a0ce3fce612ab04d38e4a09"} Jan 22 07:16:57 crc kubenswrapper[4933]: I0122 07:16:57.658263 4933 scope.go:117] "RemoveContainer" containerID="327e5671019c61edc8b869d669626cc3907475d04f7eb392aa10a6f212dde0f5" Jan 22 07:16:57 crc kubenswrapper[4933]: I0122 07:16:57.658396 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6d997bd4b5-wcm95" Jan 22 07:16:57 crc kubenswrapper[4933]: I0122 07:16:57.693601 4933 scope.go:117] "RemoveContainer" containerID="a74cd3b6abe8c7f622e07ff297a1d8c60e7477d0d400546b03797dbf0f8de518" Jan 22 07:16:57 crc kubenswrapper[4933]: I0122 07:16:57.724169 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d997bd4b5-wcm95"] Jan 22 07:16:57 crc kubenswrapper[4933]: I0122 07:16:57.733403 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6d997bd4b5-wcm95"] Jan 22 07:16:57 crc kubenswrapper[4933]: I0122 07:16:57.969304 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5678b54fd8-wl98d" Jan 22 07:16:58 crc kubenswrapper[4933]: I0122 07:16:58.053881 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5678b54fd8-wl98d" Jan 22 07:16:58 crc kubenswrapper[4933]: I0122 07:16:58.500911 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43bccb46-764e-45cb-9261-816ee71c8b0b" path="/var/lib/kubelet/pods/43bccb46-764e-45cb-9261-816ee71c8b0b/volumes" Jan 22 07:17:00 crc kubenswrapper[4933]: I0122 07:17:00.015220 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5cbf848d96-gz47x" Jan 22 07:17:00 crc kubenswrapper[4933]: I0122 07:17:00.133141 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5cbf848d96-gz47x" Jan 22 07:17:00 crc kubenswrapper[4933]: I0122 07:17:00.190890 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-5678b54fd8-wl98d"] Jan 22 07:17:00 crc kubenswrapper[4933]: I0122 07:17:00.191118 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-5678b54fd8-wl98d" podUID="f0899af4-85e0-4803-9ee1-de8894e2f674" containerName="barbican-api-log" containerID="cri-o://8ae27aad85561ed4352a9f367620378b6df429f1a1fb0246c209f26794e78c15" gracePeriod=30 Jan 22 07:17:00 crc kubenswrapper[4933]: I0122 07:17:00.191484 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-5678b54fd8-wl98d" podUID="f0899af4-85e0-4803-9ee1-de8894e2f674" containerName="barbican-api" containerID="cri-o://c052d1e4e59b6201f3685f885e1878013fbd4d3c4483a43be02d861567ba6523" gracePeriod=30 Jan 22 07:17:00 crc kubenswrapper[4933]: I0122 07:17:00.688184 4933 generic.go:334] "Generic (PLEG): container finished" podID="f0899af4-85e0-4803-9ee1-de8894e2f674" containerID="8ae27aad85561ed4352a9f367620378b6df429f1a1fb0246c209f26794e78c15" exitCode=143 Jan 22 07:17:00 crc kubenswrapper[4933]: I0122 07:17:00.688249 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5678b54fd8-wl98d" event={"ID":"f0899af4-85e0-4803-9ee1-de8894e2f674","Type":"ContainerDied","Data":"8ae27aad85561ed4352a9f367620378b6df429f1a1fb0246c209f26794e78c15"} Jan 22 07:17:03 crc kubenswrapper[4933]: I0122 07:17:03.353379 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5678b54fd8-wl98d" podUID="f0899af4-85e0-4803-9ee1-de8894e2f674" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.1.34:9311/healthcheck\": read tcp 10.217.0.2:49944->10.217.1.34:9311: read: connection reset by peer" Jan 22 07:17:03 crc kubenswrapper[4933]: I0122 07:17:03.353473 4933 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openstack/barbican-api-5678b54fd8-wl98d" podUID="f0899af4-85e0-4803-9ee1-de8894e2f674" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.1.34:9311/healthcheck\": read tcp 10.217.0.2:49928->10.217.1.34:9311: read: connection reset by peer" Jan 22 07:17:03 crc kubenswrapper[4933]: I0122 07:17:03.491041 4933 scope.go:117] "RemoveContainer" containerID="8d0ff8f158acf8d88510b1fddaf0b35b7a3fb6e84561545d6c60a78c2e36fc92" Jan 22 07:17:03 crc kubenswrapper[4933]: E0122 07:17:03.491346 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:17:03 crc kubenswrapper[4933]: I0122 07:17:03.726386 4933 generic.go:334] "Generic (PLEG): container finished" podID="f0899af4-85e0-4803-9ee1-de8894e2f674" containerID="c052d1e4e59b6201f3685f885e1878013fbd4d3c4483a43be02d861567ba6523" exitCode=0 Jan 22 07:17:03 crc kubenswrapper[4933]: I0122 07:17:03.726462 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5678b54fd8-wl98d" event={"ID":"f0899af4-85e0-4803-9ee1-de8894e2f674","Type":"ContainerDied","Data":"c052d1e4e59b6201f3685f885e1878013fbd4d3c4483a43be02d861567ba6523"} Jan 22 07:17:03 crc kubenswrapper[4933]: I0122 07:17:03.726801 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5678b54fd8-wl98d" event={"ID":"f0899af4-85e0-4803-9ee1-de8894e2f674","Type":"ContainerDied","Data":"5fcb6ca326efdd69eece6d570670443baedf14e2eeb49acaf3906bfce22b7cc5"} Jan 22 07:17:03 crc kubenswrapper[4933]: I0122 07:17:03.726820 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5fcb6ca326efdd69eece6d570670443baedf14e2eeb49acaf3906bfce22b7cc5" Jan 22 07:17:03 crc kubenswrapper[4933]: I0122 07:17:03.808239 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-5678b54fd8-wl98d" Jan 22 07:17:03 crc kubenswrapper[4933]: I0122 07:17:03.893495 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0899af4-85e0-4803-9ee1-de8894e2f674-combined-ca-bundle\") pod \"f0899af4-85e0-4803-9ee1-de8894e2f674\" (UID: \"f0899af4-85e0-4803-9ee1-de8894e2f674\") " Jan 22 07:17:03 crc kubenswrapper[4933]: I0122 07:17:03.894158 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qfwzv\" (UniqueName: \"kubernetes.io/projected/f0899af4-85e0-4803-9ee1-de8894e2f674-kube-api-access-qfwzv\") pod \"f0899af4-85e0-4803-9ee1-de8894e2f674\" (UID: \"f0899af4-85e0-4803-9ee1-de8894e2f674\") " Jan 22 07:17:03 crc kubenswrapper[4933]: I0122 07:17:03.894341 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f0899af4-85e0-4803-9ee1-de8894e2f674-config-data-custom\") pod \"f0899af4-85e0-4803-9ee1-de8894e2f674\" (UID: \"f0899af4-85e0-4803-9ee1-de8894e2f674\") " Jan 22 07:17:03 crc kubenswrapper[4933]: I0122 07:17:03.894465 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f0899af4-85e0-4803-9ee1-de8894e2f674-logs\") pod \"f0899af4-85e0-4803-9ee1-de8894e2f674\" (UID: \"f0899af4-85e0-4803-9ee1-de8894e2f674\") " Jan 22 07:17:03 crc kubenswrapper[4933]: I0122 07:17:03.894567 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f0899af4-85e0-4803-9ee1-de8894e2f674-config-data\") pod \"f0899af4-85e0-4803-9ee1-de8894e2f674\" (UID: \"f0899af4-85e0-4803-9ee1-de8894e2f674\") " Jan 22 07:17:03 crc kubenswrapper[4933]: I0122 07:17:03.900051 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f0899af4-85e0-4803-9ee1-de8894e2f674-logs" (OuterVolumeSpecName: "logs") pod "f0899af4-85e0-4803-9ee1-de8894e2f674" (UID: "f0899af4-85e0-4803-9ee1-de8894e2f674"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:17:03 crc kubenswrapper[4933]: I0122 07:17:03.904834 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f0899af4-85e0-4803-9ee1-de8894e2f674-kube-api-access-qfwzv" (OuterVolumeSpecName: "kube-api-access-qfwzv") pod "f0899af4-85e0-4803-9ee1-de8894e2f674" (UID: "f0899af4-85e0-4803-9ee1-de8894e2f674"). InnerVolumeSpecName "kube-api-access-qfwzv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:17:03 crc kubenswrapper[4933]: I0122 07:17:03.904874 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f0899af4-85e0-4803-9ee1-de8894e2f674-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "f0899af4-85e0-4803-9ee1-de8894e2f674" (UID: "f0899af4-85e0-4803-9ee1-de8894e2f674"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:17:03 crc kubenswrapper[4933]: I0122 07:17:03.924271 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f0899af4-85e0-4803-9ee1-de8894e2f674-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f0899af4-85e0-4803-9ee1-de8894e2f674" (UID: "f0899af4-85e0-4803-9ee1-de8894e2f674"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:17:03 crc kubenswrapper[4933]: I0122 07:17:03.953559 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f0899af4-85e0-4803-9ee1-de8894e2f674-config-data" (OuterVolumeSpecName: "config-data") pod "f0899af4-85e0-4803-9ee1-de8894e2f674" (UID: "f0899af4-85e0-4803-9ee1-de8894e2f674"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:17:03 crc kubenswrapper[4933]: I0122 07:17:03.996940 4933 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f0899af4-85e0-4803-9ee1-de8894e2f674-logs\") on node \"crc\" DevicePath \"\"" Jan 22 07:17:03 crc kubenswrapper[4933]: I0122 07:17:03.996970 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f0899af4-85e0-4803-9ee1-de8894e2f674-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 07:17:03 crc kubenswrapper[4933]: I0122 07:17:03.996980 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0899af4-85e0-4803-9ee1-de8894e2f674-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:17:03 crc kubenswrapper[4933]: I0122 07:17:03.996990 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qfwzv\" (UniqueName: \"kubernetes.io/projected/f0899af4-85e0-4803-9ee1-de8894e2f674-kube-api-access-qfwzv\") on node \"crc\" DevicePath \"\"" Jan 22 07:17:03 crc kubenswrapper[4933]: I0122 07:17:03.996999 4933 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f0899af4-85e0-4803-9ee1-de8894e2f674-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 22 07:17:04 crc kubenswrapper[4933]: I0122 07:17:04.733863 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-5678b54fd8-wl98d" Jan 22 07:17:04 crc kubenswrapper[4933]: I0122 07:17:04.755966 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-5678b54fd8-wl98d"] Jan 22 07:17:04 crc kubenswrapper[4933]: I0122 07:17:04.763534 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-5678b54fd8-wl98d"] Jan 22 07:17:06 crc kubenswrapper[4933]: I0122 07:17:06.501896 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f0899af4-85e0-4803-9ee1-de8894e2f674" path="/var/lib/kubelet/pods/f0899af4-85e0-4803-9ee1-de8894e2f674/volumes" Jan 22 07:17:06 crc kubenswrapper[4933]: I0122 07:17:06.811879 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-2dtp5"] Jan 22 07:17:06 crc kubenswrapper[4933]: E0122 07:17:06.814202 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0899af4-85e0-4803-9ee1-de8894e2f674" containerName="barbican-api" Jan 22 07:17:06 crc kubenswrapper[4933]: I0122 07:17:06.814221 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0899af4-85e0-4803-9ee1-de8894e2f674" containerName="barbican-api" Jan 22 07:17:06 crc kubenswrapper[4933]: E0122 07:17:06.814240 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43bccb46-764e-45cb-9261-816ee71c8b0b" containerName="init" Jan 22 07:17:06 crc kubenswrapper[4933]: I0122 07:17:06.814247 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="43bccb46-764e-45cb-9261-816ee71c8b0b" containerName="init" Jan 22 07:17:06 crc kubenswrapper[4933]: E0122 07:17:06.814269 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0899af4-85e0-4803-9ee1-de8894e2f674" containerName="barbican-api-log" Jan 22 07:17:06 crc kubenswrapper[4933]: I0122 07:17:06.814275 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0899af4-85e0-4803-9ee1-de8894e2f674" containerName="barbican-api-log" Jan 22 07:17:06 crc kubenswrapper[4933]: E0122 07:17:06.814283 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43bccb46-764e-45cb-9261-816ee71c8b0b" containerName="dnsmasq-dns" Jan 22 07:17:06 crc kubenswrapper[4933]: I0122 07:17:06.814289 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="43bccb46-764e-45cb-9261-816ee71c8b0b" containerName="dnsmasq-dns" Jan 22 07:17:06 crc kubenswrapper[4933]: I0122 07:17:06.814423 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="43bccb46-764e-45cb-9261-816ee71c8b0b" containerName="dnsmasq-dns" Jan 22 07:17:06 crc kubenswrapper[4933]: I0122 07:17:06.814438 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="f0899af4-85e0-4803-9ee1-de8894e2f674" containerName="barbican-api-log" Jan 22 07:17:06 crc kubenswrapper[4933]: I0122 07:17:06.814449 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="f0899af4-85e0-4803-9ee1-de8894e2f674" containerName="barbican-api" Jan 22 07:17:06 crc kubenswrapper[4933]: I0122 07:17:06.814984 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-2dtp5" Jan 22 07:17:06 crc kubenswrapper[4933]: I0122 07:17:06.824621 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-2dtp5"] Jan 22 07:17:06 crc kubenswrapper[4933]: I0122 07:17:06.917409 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-a91e-account-create-update-2nc7n"] Jan 22 07:17:06 crc kubenswrapper[4933]: I0122 07:17:06.918729 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-a91e-account-create-update-2nc7n" Jan 22 07:17:06 crc kubenswrapper[4933]: I0122 07:17:06.920482 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Jan 22 07:17:06 crc kubenswrapper[4933]: I0122 07:17:06.929733 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-a91e-account-create-update-2nc7n"] Jan 22 07:17:06 crc kubenswrapper[4933]: I0122 07:17:06.950612 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/55750da1-d536-4f73-8a46-b599bdf0298c-operator-scripts\") pod \"neutron-db-create-2dtp5\" (UID: \"55750da1-d536-4f73-8a46-b599bdf0298c\") " pod="openstack/neutron-db-create-2dtp5" Jan 22 07:17:06 crc kubenswrapper[4933]: I0122 07:17:06.950676 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dmjrd\" (UniqueName: \"kubernetes.io/projected/55750da1-d536-4f73-8a46-b599bdf0298c-kube-api-access-dmjrd\") pod \"neutron-db-create-2dtp5\" (UID: \"55750da1-d536-4f73-8a46-b599bdf0298c\") " pod="openstack/neutron-db-create-2dtp5" Jan 22 07:17:07 crc kubenswrapper[4933]: I0122 07:17:07.052383 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/55750da1-d536-4f73-8a46-b599bdf0298c-operator-scripts\") pod \"neutron-db-create-2dtp5\" (UID: \"55750da1-d536-4f73-8a46-b599bdf0298c\") " pod="openstack/neutron-db-create-2dtp5" Jan 22 07:17:07 crc kubenswrapper[4933]: I0122 07:17:07.052440 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dmjrd\" (UniqueName: \"kubernetes.io/projected/55750da1-d536-4f73-8a46-b599bdf0298c-kube-api-access-dmjrd\") pod \"neutron-db-create-2dtp5\" (UID: \"55750da1-d536-4f73-8a46-b599bdf0298c\") " pod="openstack/neutron-db-create-2dtp5" Jan 22 07:17:07 crc kubenswrapper[4933]: I0122 07:17:07.052487 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gl84z\" (UniqueName: \"kubernetes.io/projected/5419c0b4-6d57-47e9-a285-c7ddf188d895-kube-api-access-gl84z\") pod \"neutron-a91e-account-create-update-2nc7n\" (UID: \"5419c0b4-6d57-47e9-a285-c7ddf188d895\") " pod="openstack/neutron-a91e-account-create-update-2nc7n" Jan 22 07:17:07 crc kubenswrapper[4933]: I0122 07:17:07.052514 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5419c0b4-6d57-47e9-a285-c7ddf188d895-operator-scripts\") pod \"neutron-a91e-account-create-update-2nc7n\" (UID: \"5419c0b4-6d57-47e9-a285-c7ddf188d895\") " pod="openstack/neutron-a91e-account-create-update-2nc7n" Jan 22 07:17:07 crc kubenswrapper[4933]: I0122 07:17:07.053192 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/55750da1-d536-4f73-8a46-b599bdf0298c-operator-scripts\") pod \"neutron-db-create-2dtp5\" (UID: \"55750da1-d536-4f73-8a46-b599bdf0298c\") " pod="openstack/neutron-db-create-2dtp5" Jan 22 07:17:07 crc kubenswrapper[4933]: I0122 07:17:07.070719 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dmjrd\" (UniqueName: \"kubernetes.io/projected/55750da1-d536-4f73-8a46-b599bdf0298c-kube-api-access-dmjrd\") pod \"neutron-db-create-2dtp5\" (UID: \"55750da1-d536-4f73-8a46-b599bdf0298c\") " pod="openstack/neutron-db-create-2dtp5" Jan 22 07:17:07 crc kubenswrapper[4933]: I0122 07:17:07.135644 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-2dtp5" Jan 22 07:17:07 crc kubenswrapper[4933]: I0122 07:17:07.154523 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gl84z\" (UniqueName: \"kubernetes.io/projected/5419c0b4-6d57-47e9-a285-c7ddf188d895-kube-api-access-gl84z\") pod \"neutron-a91e-account-create-update-2nc7n\" (UID: \"5419c0b4-6d57-47e9-a285-c7ddf188d895\") " pod="openstack/neutron-a91e-account-create-update-2nc7n" Jan 22 07:17:07 crc kubenswrapper[4933]: I0122 07:17:07.154585 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5419c0b4-6d57-47e9-a285-c7ddf188d895-operator-scripts\") pod \"neutron-a91e-account-create-update-2nc7n\" (UID: \"5419c0b4-6d57-47e9-a285-c7ddf188d895\") " pod="openstack/neutron-a91e-account-create-update-2nc7n" Jan 22 07:17:07 crc kubenswrapper[4933]: I0122 07:17:07.155844 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5419c0b4-6d57-47e9-a285-c7ddf188d895-operator-scripts\") pod \"neutron-a91e-account-create-update-2nc7n\" (UID: \"5419c0b4-6d57-47e9-a285-c7ddf188d895\") " pod="openstack/neutron-a91e-account-create-update-2nc7n" Jan 22 07:17:07 crc kubenswrapper[4933]: I0122 07:17:07.174689 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gl84z\" (UniqueName: \"kubernetes.io/projected/5419c0b4-6d57-47e9-a285-c7ddf188d895-kube-api-access-gl84z\") pod \"neutron-a91e-account-create-update-2nc7n\" (UID: \"5419c0b4-6d57-47e9-a285-c7ddf188d895\") " pod="openstack/neutron-a91e-account-create-update-2nc7n" Jan 22 07:17:07 crc kubenswrapper[4933]: I0122 07:17:07.239670 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-a91e-account-create-update-2nc7n" Jan 22 07:17:07 crc kubenswrapper[4933]: I0122 07:17:07.575431 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-2dtp5"] Jan 22 07:17:07 crc kubenswrapper[4933]: W0122 07:17:07.580608 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod55750da1_d536_4f73_8a46_b599bdf0298c.slice/crio-ece9927a06d65057cda92aa53cf30b60fce17399cefd284f9fd4ae8ee0c9f5dc WatchSource:0}: Error finding container ece9927a06d65057cda92aa53cf30b60fce17399cefd284f9fd4ae8ee0c9f5dc: Status 404 returned error can't find the container with id ece9927a06d65057cda92aa53cf30b60fce17399cefd284f9fd4ae8ee0c9f5dc Jan 22 07:17:07 crc kubenswrapper[4933]: W0122 07:17:07.678381 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5419c0b4_6d57_47e9_a285_c7ddf188d895.slice/crio-fb944ac475b5a23c12a273b9c41a0c60f65a746ccaabd0edfbc8b4477428ce37 WatchSource:0}: Error finding container fb944ac475b5a23c12a273b9c41a0c60f65a746ccaabd0edfbc8b4477428ce37: Status 404 returned error can't find the container with id fb944ac475b5a23c12a273b9c41a0c60f65a746ccaabd0edfbc8b4477428ce37 Jan 22 07:17:07 crc kubenswrapper[4933]: I0122 07:17:07.678970 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-a91e-account-create-update-2nc7n"] Jan 22 07:17:07 crc kubenswrapper[4933]: I0122 07:17:07.776591 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-2dtp5" event={"ID":"55750da1-d536-4f73-8a46-b599bdf0298c","Type":"ContainerStarted","Data":"256cda9fa1b92127d3c79d4bda117f5969322f17bb444c18c594df78f6c31bad"} Jan 22 07:17:07 crc kubenswrapper[4933]: I0122 07:17:07.776677 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-2dtp5" event={"ID":"55750da1-d536-4f73-8a46-b599bdf0298c","Type":"ContainerStarted","Data":"ece9927a06d65057cda92aa53cf30b60fce17399cefd284f9fd4ae8ee0c9f5dc"} Jan 22 07:17:07 crc kubenswrapper[4933]: I0122 07:17:07.778860 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-a91e-account-create-update-2nc7n" event={"ID":"5419c0b4-6d57-47e9-a285-c7ddf188d895","Type":"ContainerStarted","Data":"fb944ac475b5a23c12a273b9c41a0c60f65a746ccaabd0edfbc8b4477428ce37"} Jan 22 07:17:07 crc kubenswrapper[4933]: I0122 07:17:07.796778 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-create-2dtp5" podStartSLOduration=1.796747684 podStartE2EDuration="1.796747684s" podCreationTimestamp="2026-01-22 07:17:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:17:07.791132978 +0000 UTC m=+5475.628258331" watchObservedRunningTime="2026-01-22 07:17:07.796747684 +0000 UTC m=+5475.633873037" Jan 22 07:17:08 crc kubenswrapper[4933]: I0122 07:17:08.789365 4933 generic.go:334] "Generic (PLEG): container finished" podID="5419c0b4-6d57-47e9-a285-c7ddf188d895" containerID="94aee6d72ff1dd5d8864290ad13f8f73fa04be99ddb8086279485349921dde11" exitCode=0 Jan 22 07:17:08 crc kubenswrapper[4933]: I0122 07:17:08.789456 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-a91e-account-create-update-2nc7n" 
event={"ID":"5419c0b4-6d57-47e9-a285-c7ddf188d895","Type":"ContainerDied","Data":"94aee6d72ff1dd5d8864290ad13f8f73fa04be99ddb8086279485349921dde11"} Jan 22 07:17:08 crc kubenswrapper[4933]: I0122 07:17:08.791526 4933 generic.go:334] "Generic (PLEG): container finished" podID="55750da1-d536-4f73-8a46-b599bdf0298c" containerID="256cda9fa1b92127d3c79d4bda117f5969322f17bb444c18c594df78f6c31bad" exitCode=0 Jan 22 07:17:08 crc kubenswrapper[4933]: I0122 07:17:08.791564 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-2dtp5" event={"ID":"55750da1-d536-4f73-8a46-b599bdf0298c","Type":"ContainerDied","Data":"256cda9fa1b92127d3c79d4bda117f5969322f17bb444c18c594df78f6c31bad"} Jan 22 07:17:10 crc kubenswrapper[4933]: I0122 07:17:10.229973 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-2dtp5" Jan 22 07:17:10 crc kubenswrapper[4933]: I0122 07:17:10.236799 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-a91e-account-create-update-2nc7n" Jan 22 07:17:10 crc kubenswrapper[4933]: I0122 07:17:10.311203 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/55750da1-d536-4f73-8a46-b599bdf0298c-operator-scripts\") pod \"55750da1-d536-4f73-8a46-b599bdf0298c\" (UID: \"55750da1-d536-4f73-8a46-b599bdf0298c\") " Jan 22 07:17:10 crc kubenswrapper[4933]: I0122 07:17:10.311278 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gl84z\" (UniqueName: \"kubernetes.io/projected/5419c0b4-6d57-47e9-a285-c7ddf188d895-kube-api-access-gl84z\") pod \"5419c0b4-6d57-47e9-a285-c7ddf188d895\" (UID: \"5419c0b4-6d57-47e9-a285-c7ddf188d895\") " Jan 22 07:17:10 crc kubenswrapper[4933]: I0122 07:17:10.311311 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5419c0b4-6d57-47e9-a285-c7ddf188d895-operator-scripts\") pod \"5419c0b4-6d57-47e9-a285-c7ddf188d895\" (UID: \"5419c0b4-6d57-47e9-a285-c7ddf188d895\") " Jan 22 07:17:10 crc kubenswrapper[4933]: I0122 07:17:10.311394 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dmjrd\" (UniqueName: \"kubernetes.io/projected/55750da1-d536-4f73-8a46-b599bdf0298c-kube-api-access-dmjrd\") pod \"55750da1-d536-4f73-8a46-b599bdf0298c\" (UID: \"55750da1-d536-4f73-8a46-b599bdf0298c\") " Jan 22 07:17:10 crc kubenswrapper[4933]: I0122 07:17:10.312350 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/55750da1-d536-4f73-8a46-b599bdf0298c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "55750da1-d536-4f73-8a46-b599bdf0298c" (UID: "55750da1-d536-4f73-8a46-b599bdf0298c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:17:10 crc kubenswrapper[4933]: I0122 07:17:10.312350 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5419c0b4-6d57-47e9-a285-c7ddf188d895-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5419c0b4-6d57-47e9-a285-c7ddf188d895" (UID: "5419c0b4-6d57-47e9-a285-c7ddf188d895"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:17:10 crc kubenswrapper[4933]: I0122 07:17:10.318750 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5419c0b4-6d57-47e9-a285-c7ddf188d895-kube-api-access-gl84z" (OuterVolumeSpecName: "kube-api-access-gl84z") pod "5419c0b4-6d57-47e9-a285-c7ddf188d895" (UID: "5419c0b4-6d57-47e9-a285-c7ddf188d895"). InnerVolumeSpecName "kube-api-access-gl84z". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:17:10 crc kubenswrapper[4933]: I0122 07:17:10.321327 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55750da1-d536-4f73-8a46-b599bdf0298c-kube-api-access-dmjrd" (OuterVolumeSpecName: "kube-api-access-dmjrd") pod "55750da1-d536-4f73-8a46-b599bdf0298c" (UID: "55750da1-d536-4f73-8a46-b599bdf0298c"). InnerVolumeSpecName "kube-api-access-dmjrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:17:10 crc kubenswrapper[4933]: I0122 07:17:10.413358 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/55750da1-d536-4f73-8a46-b599bdf0298c-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:17:10 crc kubenswrapper[4933]: I0122 07:17:10.413406 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gl84z\" (UniqueName: \"kubernetes.io/projected/5419c0b4-6d57-47e9-a285-c7ddf188d895-kube-api-access-gl84z\") on node \"crc\" DevicePath \"\"" Jan 22 07:17:10 crc kubenswrapper[4933]: I0122 07:17:10.413421 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5419c0b4-6d57-47e9-a285-c7ddf188d895-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:17:10 crc kubenswrapper[4933]: I0122 07:17:10.413433 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dmjrd\" (UniqueName: \"kubernetes.io/projected/55750da1-d536-4f73-8a46-b599bdf0298c-kube-api-access-dmjrd\") on node \"crc\" DevicePath \"\"" Jan 22 07:17:10 crc kubenswrapper[4933]: I0122 07:17:10.818469 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-2dtp5" Jan 22 07:17:10 crc kubenswrapper[4933]: I0122 07:17:10.818547 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-2dtp5" event={"ID":"55750da1-d536-4f73-8a46-b599bdf0298c","Type":"ContainerDied","Data":"ece9927a06d65057cda92aa53cf30b60fce17399cefd284f9fd4ae8ee0c9f5dc"} Jan 22 07:17:10 crc kubenswrapper[4933]: I0122 07:17:10.819109 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ece9927a06d65057cda92aa53cf30b60fce17399cefd284f9fd4ae8ee0c9f5dc" Jan 22 07:17:10 crc kubenswrapper[4933]: I0122 07:17:10.819869 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-a91e-account-create-update-2nc7n" event={"ID":"5419c0b4-6d57-47e9-a285-c7ddf188d895","Type":"ContainerDied","Data":"fb944ac475b5a23c12a273b9c41a0c60f65a746ccaabd0edfbc8b4477428ce37"} Jan 22 07:17:10 crc kubenswrapper[4933]: I0122 07:17:10.819924 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fb944ac475b5a23c12a273b9c41a0c60f65a746ccaabd0edfbc8b4477428ce37" Jan 22 07:17:10 crc kubenswrapper[4933]: I0122 07:17:10.819998 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-a91e-account-create-update-2nc7n" Jan 22 07:17:11 crc kubenswrapper[4933]: I0122 07:17:11.900941 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-sh4tn"] Jan 22 07:17:11 crc kubenswrapper[4933]: E0122 07:17:11.901415 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55750da1-d536-4f73-8a46-b599bdf0298c" containerName="mariadb-database-create" Jan 22 07:17:11 crc kubenswrapper[4933]: I0122 07:17:11.901432 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="55750da1-d536-4f73-8a46-b599bdf0298c" containerName="mariadb-database-create" Jan 22 07:17:11 crc kubenswrapper[4933]: E0122 07:17:11.901461 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5419c0b4-6d57-47e9-a285-c7ddf188d895" containerName="mariadb-account-create-update" Jan 22 07:17:11 crc kubenswrapper[4933]: I0122 07:17:11.901470 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="5419c0b4-6d57-47e9-a285-c7ddf188d895" containerName="mariadb-account-create-update" Jan 22 07:17:11 crc kubenswrapper[4933]: I0122 07:17:11.901663 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="55750da1-d536-4f73-8a46-b599bdf0298c" containerName="mariadb-database-create" Jan 22 07:17:11 crc kubenswrapper[4933]: I0122 07:17:11.901686 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="5419c0b4-6d57-47e9-a285-c7ddf188d895" containerName="mariadb-account-create-update" Jan 22 07:17:11 crc kubenswrapper[4933]: I0122 07:17:11.903392 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sh4tn" Jan 22 07:17:11 crc kubenswrapper[4933]: I0122 07:17:11.911329 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sh4tn"] Jan 22 07:17:12 crc kubenswrapper[4933]: I0122 07:17:12.042439 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ksf9m\" (UniqueName: \"kubernetes.io/projected/90bd2b0d-a117-4d38-999a-ac51046bd69c-kube-api-access-ksf9m\") pod \"certified-operators-sh4tn\" (UID: \"90bd2b0d-a117-4d38-999a-ac51046bd69c\") " pod="openshift-marketplace/certified-operators-sh4tn" Jan 22 07:17:12 crc kubenswrapper[4933]: I0122 07:17:12.042771 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90bd2b0d-a117-4d38-999a-ac51046bd69c-catalog-content\") pod \"certified-operators-sh4tn\" (UID: \"90bd2b0d-a117-4d38-999a-ac51046bd69c\") " pod="openshift-marketplace/certified-operators-sh4tn" Jan 22 07:17:12 crc kubenswrapper[4933]: I0122 07:17:12.042861 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90bd2b0d-a117-4d38-999a-ac51046bd69c-utilities\") pod \"certified-operators-sh4tn\" (UID: \"90bd2b0d-a117-4d38-999a-ac51046bd69c\") " pod="openshift-marketplace/certified-operators-sh4tn" Jan 22 07:17:12 crc kubenswrapper[4933]: I0122 07:17:12.144727 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ksf9m\" (UniqueName: \"kubernetes.io/projected/90bd2b0d-a117-4d38-999a-ac51046bd69c-kube-api-access-ksf9m\") pod \"certified-operators-sh4tn\" (UID: \"90bd2b0d-a117-4d38-999a-ac51046bd69c\") " pod="openshift-marketplace/certified-operators-sh4tn" Jan 22 
07:17:12 crc kubenswrapper[4933]: I0122 07:17:12.144828 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90bd2b0d-a117-4d38-999a-ac51046bd69c-catalog-content\") pod \"certified-operators-sh4tn\" (UID: \"90bd2b0d-a117-4d38-999a-ac51046bd69c\") " pod="openshift-marketplace/certified-operators-sh4tn" Jan 22 07:17:12 crc kubenswrapper[4933]: I0122 07:17:12.144857 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90bd2b0d-a117-4d38-999a-ac51046bd69c-utilities\") pod \"certified-operators-sh4tn\" (UID: \"90bd2b0d-a117-4d38-999a-ac51046bd69c\") " pod="openshift-marketplace/certified-operators-sh4tn" Jan 22 07:17:12 crc kubenswrapper[4933]: I0122 07:17:12.145290 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90bd2b0d-a117-4d38-999a-ac51046bd69c-catalog-content\") pod \"certified-operators-sh4tn\" (UID: \"90bd2b0d-a117-4d38-999a-ac51046bd69c\") " pod="openshift-marketplace/certified-operators-sh4tn" Jan 22 07:17:12 crc kubenswrapper[4933]: I0122 07:17:12.145359 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90bd2b0d-a117-4d38-999a-ac51046bd69c-utilities\") pod \"certified-operators-sh4tn\" (UID: \"90bd2b0d-a117-4d38-999a-ac51046bd69c\") " pod="openshift-marketplace/certified-operators-sh4tn" Jan 22 07:17:12 crc kubenswrapper[4933]: I0122 07:17:12.172705 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ksf9m\" (UniqueName: \"kubernetes.io/projected/90bd2b0d-a117-4d38-999a-ac51046bd69c-kube-api-access-ksf9m\") pod \"certified-operators-sh4tn\" (UID: \"90bd2b0d-a117-4d38-999a-ac51046bd69c\") " pod="openshift-marketplace/certified-operators-sh4tn" Jan 22 07:17:12 crc kubenswrapper[4933]: I0122 07:17:12.226143 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sh4tn" Jan 22 07:17:12 crc kubenswrapper[4933]: I0122 07:17:12.264096 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-bmwtw"] Jan 22 07:17:12 crc kubenswrapper[4933]: I0122 07:17:12.265699 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-bmwtw" Jan 22 07:17:12 crc kubenswrapper[4933]: I0122 07:17:12.268665 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-7rrwv" Jan 22 07:17:12 crc kubenswrapper[4933]: I0122 07:17:12.268729 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 22 07:17:12 crc kubenswrapper[4933]: I0122 07:17:12.269243 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 22 07:17:12 crc kubenswrapper[4933]: I0122 07:17:12.269270 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-bmwtw"] Jan 22 07:17:12 crc kubenswrapper[4933]: I0122 07:17:12.348688 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pp7wp\" (UniqueName: \"kubernetes.io/projected/c47ced8e-600a-41b0-a665-338664f8c335-kube-api-access-pp7wp\") pod \"neutron-db-sync-bmwtw\" (UID: \"c47ced8e-600a-41b0-a665-338664f8c335\") " pod="openstack/neutron-db-sync-bmwtw" Jan 22 07:17:12 crc kubenswrapper[4933]: I0122 07:17:12.348826 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/c47ced8e-600a-41b0-a665-338664f8c335-config\") pod \"neutron-db-sync-bmwtw\" (UID: \"c47ced8e-600a-41b0-a665-338664f8c335\") " pod="openstack/neutron-db-sync-bmwtw" Jan 22 07:17:12 crc kubenswrapper[4933]: I0122 07:17:12.348892 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c47ced8e-600a-41b0-a665-338664f8c335-combined-ca-bundle\") pod \"neutron-db-sync-bmwtw\" (UID: \"c47ced8e-600a-41b0-a665-338664f8c335\") " pod="openstack/neutron-db-sync-bmwtw" Jan 22 07:17:12 crc kubenswrapper[4933]: I0122 07:17:12.452623 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c47ced8e-600a-41b0-a665-338664f8c335-combined-ca-bundle\") pod \"neutron-db-sync-bmwtw\" (UID: \"c47ced8e-600a-41b0-a665-338664f8c335\") " pod="openstack/neutron-db-sync-bmwtw" Jan 22 07:17:12 crc kubenswrapper[4933]: I0122 07:17:12.452754 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pp7wp\" (UniqueName: \"kubernetes.io/projected/c47ced8e-600a-41b0-a665-338664f8c335-kube-api-access-pp7wp\") pod \"neutron-db-sync-bmwtw\" (UID: \"c47ced8e-600a-41b0-a665-338664f8c335\") " pod="openstack/neutron-db-sync-bmwtw" Jan 22 07:17:12 crc kubenswrapper[4933]: I0122 07:17:12.452840 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/c47ced8e-600a-41b0-a665-338664f8c335-config\") pod \"neutron-db-sync-bmwtw\" (UID: \"c47ced8e-600a-41b0-a665-338664f8c335\") " pod="openstack/neutron-db-sync-bmwtw" Jan 22 07:17:12 crc kubenswrapper[4933]: I0122 07:17:12.460258 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c47ced8e-600a-41b0-a665-338664f8c335-combined-ca-bundle\") pod \"neutron-db-sync-bmwtw\" (UID: \"c47ced8e-600a-41b0-a665-338664f8c335\") " pod="openstack/neutron-db-sync-bmwtw" Jan 22 07:17:12 crc kubenswrapper[4933]: I0122 07:17:12.472038 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-pp7wp\" (UniqueName: \"kubernetes.io/projected/c47ced8e-600a-41b0-a665-338664f8c335-kube-api-access-pp7wp\") pod \"neutron-db-sync-bmwtw\" (UID: \"c47ced8e-600a-41b0-a665-338664f8c335\") " pod="openstack/neutron-db-sync-bmwtw" Jan 22 07:17:12 crc kubenswrapper[4933]: I0122 07:17:12.475614 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/c47ced8e-600a-41b0-a665-338664f8c335-config\") pod \"neutron-db-sync-bmwtw\" (UID: \"c47ced8e-600a-41b0-a665-338664f8c335\") " pod="openstack/neutron-db-sync-bmwtw" Jan 22 07:17:12 crc kubenswrapper[4933]: I0122 07:17:12.640448 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-bmwtw" Jan 22 07:17:12 crc kubenswrapper[4933]: I0122 07:17:12.725744 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sh4tn"] Jan 22 07:17:12 crc kubenswrapper[4933]: I0122 07:17:12.838135 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sh4tn" event={"ID":"90bd2b0d-a117-4d38-999a-ac51046bd69c","Type":"ContainerStarted","Data":"33209d312cff85625b4cf7be405dd85b2580d07dc4cf1f160e367914cb6d383f"} Jan 22 07:17:13 crc kubenswrapper[4933]: W0122 07:17:13.079780 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc47ced8e_600a_41b0_a665_338664f8c335.slice/crio-60785874c0478c370f7acaf880b5a78c5c9351b13623874b42408fe332655372 WatchSource:0}: Error finding container 60785874c0478c370f7acaf880b5a78c5c9351b13623874b42408fe332655372: Status 404 returned error can't find the container with id 60785874c0478c370f7acaf880b5a78c5c9351b13623874b42408fe332655372 Jan 22 07:17:13 crc kubenswrapper[4933]: I0122 07:17:13.084968 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-bmwtw"] Jan 22 07:17:13 crc kubenswrapper[4933]: I0122 07:17:13.849289 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-bmwtw" event={"ID":"c47ced8e-600a-41b0-a665-338664f8c335","Type":"ContainerStarted","Data":"012f5b30b5dfe06df7cad6f76ec25bd9517de258cfbb5f39e51fa5b810d07926"} Jan 22 07:17:13 crc kubenswrapper[4933]: I0122 07:17:13.849641 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-bmwtw" event={"ID":"c47ced8e-600a-41b0-a665-338664f8c335","Type":"ContainerStarted","Data":"60785874c0478c370f7acaf880b5a78c5c9351b13623874b42408fe332655372"} Jan 22 07:17:13 crc kubenswrapper[4933]: I0122 07:17:13.850822 4933 generic.go:334] "Generic (PLEG): container finished" podID="90bd2b0d-a117-4d38-999a-ac51046bd69c" containerID="6528df23d291da2b146adc976714eb43abd84cb19bb3f00bdebb0b9c91a1e8de" exitCode=0 Jan 22 07:17:13 crc kubenswrapper[4933]: I0122 07:17:13.850937 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sh4tn" event={"ID":"90bd2b0d-a117-4d38-999a-ac51046bd69c","Type":"ContainerDied","Data":"6528df23d291da2b146adc976714eb43abd84cb19bb3f00bdebb0b9c91a1e8de"} Jan 22 07:17:13 crc kubenswrapper[4933]: I0122 07:17:13.873804 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-bmwtw" podStartSLOduration=1.8737783449999998 podStartE2EDuration="1.873778345s" podCreationTimestamp="2026-01-22 07:17:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 
+0000 UTC" observedRunningTime="2026-01-22 07:17:13.869600934 +0000 UTC m=+5481.706726367" watchObservedRunningTime="2026-01-22 07:17:13.873778345 +0000 UTC m=+5481.710903698" Jan 22 07:17:14 crc kubenswrapper[4933]: I0122 07:17:14.869067 4933 generic.go:334] "Generic (PLEG): container finished" podID="90bd2b0d-a117-4d38-999a-ac51046bd69c" containerID="a94ab46e662f2ccc5eb39b23b48955f59f19716b53c2bb4fbe06f834663a5364" exitCode=0 Jan 22 07:17:14 crc kubenswrapper[4933]: I0122 07:17:14.869174 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sh4tn" event={"ID":"90bd2b0d-a117-4d38-999a-ac51046bd69c","Type":"ContainerDied","Data":"a94ab46e662f2ccc5eb39b23b48955f59f19716b53c2bb4fbe06f834663a5364"} Jan 22 07:17:15 crc kubenswrapper[4933]: I0122 07:17:15.880567 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sh4tn" event={"ID":"90bd2b0d-a117-4d38-999a-ac51046bd69c","Type":"ContainerStarted","Data":"fd95795de5d0f259eb224f86587b88841707d3505d5df2e8b700fe5b2aecad74"} Jan 22 07:17:15 crc kubenswrapper[4933]: I0122 07:17:15.909349 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-sh4tn" podStartSLOduration=3.358652122 podStartE2EDuration="4.909328929s" podCreationTimestamp="2026-01-22 07:17:11 +0000 UTC" firstStartedPulling="2026-01-22 07:17:13.853649955 +0000 UTC m=+5481.690775338" lastFinishedPulling="2026-01-22 07:17:15.404326792 +0000 UTC m=+5483.241452145" observedRunningTime="2026-01-22 07:17:15.904654365 +0000 UTC m=+5483.741779738" watchObservedRunningTime="2026-01-22 07:17:15.909328929 +0000 UTC m=+5483.746454282" Jan 22 07:17:16 crc kubenswrapper[4933]: I0122 07:17:16.490594 4933 scope.go:117] "RemoveContainer" containerID="8d0ff8f158acf8d88510b1fddaf0b35b7a3fb6e84561545d6c60a78c2e36fc92" Jan 22 07:17:16 crc kubenswrapper[4933]: E0122 07:17:16.490816 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:17:17 crc kubenswrapper[4933]: I0122 07:17:17.898130 4933 generic.go:334] "Generic (PLEG): container finished" podID="c47ced8e-600a-41b0-a665-338664f8c335" containerID="012f5b30b5dfe06df7cad6f76ec25bd9517de258cfbb5f39e51fa5b810d07926" exitCode=0 Jan 22 07:17:17 crc kubenswrapper[4933]: I0122 07:17:17.898245 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-bmwtw" event={"ID":"c47ced8e-600a-41b0-a665-338664f8c335","Type":"ContainerDied","Data":"012f5b30b5dfe06df7cad6f76ec25bd9517de258cfbb5f39e51fa5b810d07926"} Jan 22 07:17:19 crc kubenswrapper[4933]: I0122 07:17:19.196847 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-bmwtw" Jan 22 07:17:19 crc kubenswrapper[4933]: I0122 07:17:19.283054 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/c47ced8e-600a-41b0-a665-338664f8c335-config\") pod \"c47ced8e-600a-41b0-a665-338664f8c335\" (UID: \"c47ced8e-600a-41b0-a665-338664f8c335\") " Jan 22 07:17:19 crc kubenswrapper[4933]: I0122 07:17:19.283208 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c47ced8e-600a-41b0-a665-338664f8c335-combined-ca-bundle\") pod \"c47ced8e-600a-41b0-a665-338664f8c335\" (UID: \"c47ced8e-600a-41b0-a665-338664f8c335\") " Jan 22 07:17:19 crc kubenswrapper[4933]: I0122 07:17:19.283259 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pp7wp\" (UniqueName: \"kubernetes.io/projected/c47ced8e-600a-41b0-a665-338664f8c335-kube-api-access-pp7wp\") pod \"c47ced8e-600a-41b0-a665-338664f8c335\" (UID: \"c47ced8e-600a-41b0-a665-338664f8c335\") " Jan 22 07:17:19 crc kubenswrapper[4933]: I0122 07:17:19.288506 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c47ced8e-600a-41b0-a665-338664f8c335-kube-api-access-pp7wp" (OuterVolumeSpecName: "kube-api-access-pp7wp") pod "c47ced8e-600a-41b0-a665-338664f8c335" (UID: "c47ced8e-600a-41b0-a665-338664f8c335"). InnerVolumeSpecName "kube-api-access-pp7wp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:17:19 crc kubenswrapper[4933]: I0122 07:17:19.305477 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c47ced8e-600a-41b0-a665-338664f8c335-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c47ced8e-600a-41b0-a665-338664f8c335" (UID: "c47ced8e-600a-41b0-a665-338664f8c335"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:17:19 crc kubenswrapper[4933]: I0122 07:17:19.311235 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c47ced8e-600a-41b0-a665-338664f8c335-config" (OuterVolumeSpecName: "config") pod "c47ced8e-600a-41b0-a665-338664f8c335" (UID: "c47ced8e-600a-41b0-a665-338664f8c335"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:17:19 crc kubenswrapper[4933]: I0122 07:17:19.384851 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/c47ced8e-600a-41b0-a665-338664f8c335-config\") on node \"crc\" DevicePath \"\"" Jan 22 07:17:19 crc kubenswrapper[4933]: I0122 07:17:19.384899 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c47ced8e-600a-41b0-a665-338664f8c335-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:17:19 crc kubenswrapper[4933]: I0122 07:17:19.384914 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pp7wp\" (UniqueName: \"kubernetes.io/projected/c47ced8e-600a-41b0-a665-338664f8c335-kube-api-access-pp7wp\") on node \"crc\" DevicePath \"\"" Jan 22 07:17:19 crc kubenswrapper[4933]: I0122 07:17:19.917717 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-bmwtw" event={"ID":"c47ced8e-600a-41b0-a665-338664f8c335","Type":"ContainerDied","Data":"60785874c0478c370f7acaf880b5a78c5c9351b13623874b42408fe332655372"} Jan 22 07:17:19 crc kubenswrapper[4933]: I0122 07:17:19.917758 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="60785874c0478c370f7acaf880b5a78c5c9351b13623874b42408fe332655372" Jan 22 07:17:19 crc kubenswrapper[4933]: I0122 07:17:19.917811 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-bmwtw" Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.155753 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6847fd6c7c-m4jnx"] Jan 22 07:17:20 crc kubenswrapper[4933]: E0122 07:17:20.156099 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c47ced8e-600a-41b0-a665-338664f8c335" containerName="neutron-db-sync" Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.156117 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="c47ced8e-600a-41b0-a665-338664f8c335" containerName="neutron-db-sync" Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.156272 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="c47ced8e-600a-41b0-a665-338664f8c335" containerName="neutron-db-sync" Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.157092 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6847fd6c7c-m4jnx" Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.183319 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6847fd6c7c-m4jnx"] Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.199538 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/34e8f6f8-ea53-4956-9820-41ffb518d03d-ovsdbserver-nb\") pod \"dnsmasq-dns-6847fd6c7c-m4jnx\" (UID: \"34e8f6f8-ea53-4956-9820-41ffb518d03d\") " pod="openstack/dnsmasq-dns-6847fd6c7c-m4jnx" Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.199583 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/34e8f6f8-ea53-4956-9820-41ffb518d03d-config\") pod \"dnsmasq-dns-6847fd6c7c-m4jnx\" (UID: \"34e8f6f8-ea53-4956-9820-41ffb518d03d\") " pod="openstack/dnsmasq-dns-6847fd6c7c-m4jnx" Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.199625 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rh2qt\" (UniqueName: \"kubernetes.io/projected/34e8f6f8-ea53-4956-9820-41ffb518d03d-kube-api-access-rh2qt\") pod \"dnsmasq-dns-6847fd6c7c-m4jnx\" (UID: \"34e8f6f8-ea53-4956-9820-41ffb518d03d\") " pod="openstack/dnsmasq-dns-6847fd6c7c-m4jnx" Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.199817 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/34e8f6f8-ea53-4956-9820-41ffb518d03d-ovsdbserver-sb\") pod \"dnsmasq-dns-6847fd6c7c-m4jnx\" (UID: \"34e8f6f8-ea53-4956-9820-41ffb518d03d\") " pod="openstack/dnsmasq-dns-6847fd6c7c-m4jnx" Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.199871 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/34e8f6f8-ea53-4956-9820-41ffb518d03d-dns-svc\") pod \"dnsmasq-dns-6847fd6c7c-m4jnx\" (UID: \"34e8f6f8-ea53-4956-9820-41ffb518d03d\") " pod="openstack/dnsmasq-dns-6847fd6c7c-m4jnx" Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.301146 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rh2qt\" (UniqueName: \"kubernetes.io/projected/34e8f6f8-ea53-4956-9820-41ffb518d03d-kube-api-access-rh2qt\") pod \"dnsmasq-dns-6847fd6c7c-m4jnx\" (UID: \"34e8f6f8-ea53-4956-9820-41ffb518d03d\") " pod="openstack/dnsmasq-dns-6847fd6c7c-m4jnx" Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.301226 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/34e8f6f8-ea53-4956-9820-41ffb518d03d-ovsdbserver-sb\") pod \"dnsmasq-dns-6847fd6c7c-m4jnx\" (UID: \"34e8f6f8-ea53-4956-9820-41ffb518d03d\") " pod="openstack/dnsmasq-dns-6847fd6c7c-m4jnx" Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.301247 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/34e8f6f8-ea53-4956-9820-41ffb518d03d-dns-svc\") pod \"dnsmasq-dns-6847fd6c7c-m4jnx\" (UID: \"34e8f6f8-ea53-4956-9820-41ffb518d03d\") " pod="openstack/dnsmasq-dns-6847fd6c7c-m4jnx" Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.301339 4933 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/34e8f6f8-ea53-4956-9820-41ffb518d03d-ovsdbserver-nb\") pod \"dnsmasq-dns-6847fd6c7c-m4jnx\" (UID: \"34e8f6f8-ea53-4956-9820-41ffb518d03d\") " pod="openstack/dnsmasq-dns-6847fd6c7c-m4jnx" Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.301367 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/34e8f6f8-ea53-4956-9820-41ffb518d03d-config\") pod \"dnsmasq-dns-6847fd6c7c-m4jnx\" (UID: \"34e8f6f8-ea53-4956-9820-41ffb518d03d\") " pod="openstack/dnsmasq-dns-6847fd6c7c-m4jnx" Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.302282 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/34e8f6f8-ea53-4956-9820-41ffb518d03d-ovsdbserver-sb\") pod \"dnsmasq-dns-6847fd6c7c-m4jnx\" (UID: \"34e8f6f8-ea53-4956-9820-41ffb518d03d\") " pod="openstack/dnsmasq-dns-6847fd6c7c-m4jnx" Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.302338 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/34e8f6f8-ea53-4956-9820-41ffb518d03d-config\") pod \"dnsmasq-dns-6847fd6c7c-m4jnx\" (UID: \"34e8f6f8-ea53-4956-9820-41ffb518d03d\") " pod="openstack/dnsmasq-dns-6847fd6c7c-m4jnx" Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.302443 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/34e8f6f8-ea53-4956-9820-41ffb518d03d-dns-svc\") pod \"dnsmasq-dns-6847fd6c7c-m4jnx\" (UID: \"34e8f6f8-ea53-4956-9820-41ffb518d03d\") " pod="openstack/dnsmasq-dns-6847fd6c7c-m4jnx" Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.302863 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/34e8f6f8-ea53-4956-9820-41ffb518d03d-ovsdbserver-nb\") pod \"dnsmasq-dns-6847fd6c7c-m4jnx\" (UID: \"34e8f6f8-ea53-4956-9820-41ffb518d03d\") " pod="openstack/dnsmasq-dns-6847fd6c7c-m4jnx" Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.308230 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-7f8795948d-x6pzc"] Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.312789 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7f8795948d-x6pzc" Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.314520 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.317378 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.317471 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.321765 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-7rrwv" Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.326453 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7f8795948d-x6pzc"] Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.337732 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rh2qt\" (UniqueName: \"kubernetes.io/projected/34e8f6f8-ea53-4956-9820-41ffb518d03d-kube-api-access-rh2qt\") pod \"dnsmasq-dns-6847fd6c7c-m4jnx\" (UID: \"34e8f6f8-ea53-4956-9820-41ffb518d03d\") " pod="openstack/dnsmasq-dns-6847fd6c7c-m4jnx" Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.403829 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cc6afe1-b20e-4a9c-be39-9b34fae99393-combined-ca-bundle\") pod \"neutron-7f8795948d-x6pzc\" (UID: \"4cc6afe1-b20e-4a9c-be39-9b34fae99393\") " pod="openstack/neutron-7f8795948d-x6pzc" Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.403885 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b894k\" (UniqueName: \"kubernetes.io/projected/4cc6afe1-b20e-4a9c-be39-9b34fae99393-kube-api-access-b894k\") pod \"neutron-7f8795948d-x6pzc\" (UID: \"4cc6afe1-b20e-4a9c-be39-9b34fae99393\") " pod="openstack/neutron-7f8795948d-x6pzc" Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.403931 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4cc6afe1-b20e-4a9c-be39-9b34fae99393-config\") pod \"neutron-7f8795948d-x6pzc\" (UID: \"4cc6afe1-b20e-4a9c-be39-9b34fae99393\") " pod="openstack/neutron-7f8795948d-x6pzc" Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.404031 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/4cc6afe1-b20e-4a9c-be39-9b34fae99393-httpd-config\") pod \"neutron-7f8795948d-x6pzc\" (UID: \"4cc6afe1-b20e-4a9c-be39-9b34fae99393\") " pod="openstack/neutron-7f8795948d-x6pzc" Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.404179 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4cc6afe1-b20e-4a9c-be39-9b34fae99393-ovndb-tls-certs\") pod \"neutron-7f8795948d-x6pzc\" (UID: \"4cc6afe1-b20e-4a9c-be39-9b34fae99393\") " pod="openstack/neutron-7f8795948d-x6pzc" Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.476961 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6847fd6c7c-m4jnx" Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.508879 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4cc6afe1-b20e-4a9c-be39-9b34fae99393-ovndb-tls-certs\") pod \"neutron-7f8795948d-x6pzc\" (UID: \"4cc6afe1-b20e-4a9c-be39-9b34fae99393\") " pod="openstack/neutron-7f8795948d-x6pzc" Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.509011 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cc6afe1-b20e-4a9c-be39-9b34fae99393-combined-ca-bundle\") pod \"neutron-7f8795948d-x6pzc\" (UID: \"4cc6afe1-b20e-4a9c-be39-9b34fae99393\") " pod="openstack/neutron-7f8795948d-x6pzc" Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.509041 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b894k\" (UniqueName: \"kubernetes.io/projected/4cc6afe1-b20e-4a9c-be39-9b34fae99393-kube-api-access-b894k\") pod \"neutron-7f8795948d-x6pzc\" (UID: \"4cc6afe1-b20e-4a9c-be39-9b34fae99393\") " pod="openstack/neutron-7f8795948d-x6pzc" Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.509092 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4cc6afe1-b20e-4a9c-be39-9b34fae99393-config\") pod \"neutron-7f8795948d-x6pzc\" (UID: \"4cc6afe1-b20e-4a9c-be39-9b34fae99393\") " pod="openstack/neutron-7f8795948d-x6pzc" Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.509125 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/4cc6afe1-b20e-4a9c-be39-9b34fae99393-httpd-config\") pod \"neutron-7f8795948d-x6pzc\" (UID: \"4cc6afe1-b20e-4a9c-be39-9b34fae99393\") " pod="openstack/neutron-7f8795948d-x6pzc" Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.515102 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4cc6afe1-b20e-4a9c-be39-9b34fae99393-ovndb-tls-certs\") pod \"neutron-7f8795948d-x6pzc\" (UID: \"4cc6afe1-b20e-4a9c-be39-9b34fae99393\") " pod="openstack/neutron-7f8795948d-x6pzc" Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.515545 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/4cc6afe1-b20e-4a9c-be39-9b34fae99393-config\") pod \"neutron-7f8795948d-x6pzc\" (UID: \"4cc6afe1-b20e-4a9c-be39-9b34fae99393\") " pod="openstack/neutron-7f8795948d-x6pzc" Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.520818 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/4cc6afe1-b20e-4a9c-be39-9b34fae99393-httpd-config\") pod \"neutron-7f8795948d-x6pzc\" (UID: \"4cc6afe1-b20e-4a9c-be39-9b34fae99393\") " pod="openstack/neutron-7f8795948d-x6pzc" Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.527830 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cc6afe1-b20e-4a9c-be39-9b34fae99393-combined-ca-bundle\") pod \"neutron-7f8795948d-x6pzc\" (UID: \"4cc6afe1-b20e-4a9c-be39-9b34fae99393\") " pod="openstack/neutron-7f8795948d-x6pzc" Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.537829 4933 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-b894k\" (UniqueName: \"kubernetes.io/projected/4cc6afe1-b20e-4a9c-be39-9b34fae99393-kube-api-access-b894k\") pod \"neutron-7f8795948d-x6pzc\" (UID: \"4cc6afe1-b20e-4a9c-be39-9b34fae99393\") " pod="openstack/neutron-7f8795948d-x6pzc" Jan 22 07:17:20 crc kubenswrapper[4933]: I0122 07:17:20.672776 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7f8795948d-x6pzc" Jan 22 07:17:21 crc kubenswrapper[4933]: I0122 07:17:21.095672 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6847fd6c7c-m4jnx"] Jan 22 07:17:21 crc kubenswrapper[4933]: W0122 07:17:21.288592 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4cc6afe1_b20e_4a9c_be39_9b34fae99393.slice/crio-ae894575878fcb26d47927eb13dd5e745d1f0c4722e4e87202c759f36c4ea9ef WatchSource:0}: Error finding container ae894575878fcb26d47927eb13dd5e745d1f0c4722e4e87202c759f36c4ea9ef: Status 404 returned error can't find the container with id ae894575878fcb26d47927eb13dd5e745d1f0c4722e4e87202c759f36c4ea9ef Jan 22 07:17:21 crc kubenswrapper[4933]: I0122 07:17:21.293243 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7f8795948d-x6pzc"] Jan 22 07:17:21 crc kubenswrapper[4933]: I0122 07:17:21.938191 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7f8795948d-x6pzc" event={"ID":"4cc6afe1-b20e-4a9c-be39-9b34fae99393","Type":"ContainerStarted","Data":"96fb9e115a3f44636c33d5b2bcf1a8f82bae9e160c5ee5a631403461b9a4002b"} Jan 22 07:17:21 crc kubenswrapper[4933]: I0122 07:17:21.938550 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7f8795948d-x6pzc" event={"ID":"4cc6afe1-b20e-4a9c-be39-9b34fae99393","Type":"ContainerStarted","Data":"ef991805163fcd84e7cd5c6692ac32ad31cbc609bf5328e87df993e12f4b8749"} Jan 22 07:17:21 crc kubenswrapper[4933]: I0122 07:17:21.938569 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7f8795948d-x6pzc" event={"ID":"4cc6afe1-b20e-4a9c-be39-9b34fae99393","Type":"ContainerStarted","Data":"ae894575878fcb26d47927eb13dd5e745d1f0c4722e4e87202c759f36c4ea9ef"} Jan 22 07:17:21 crc kubenswrapper[4933]: I0122 07:17:21.938590 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-7f8795948d-x6pzc" Jan 22 07:17:21 crc kubenswrapper[4933]: I0122 07:17:21.939828 4933 generic.go:334] "Generic (PLEG): container finished" podID="34e8f6f8-ea53-4956-9820-41ffb518d03d" containerID="8103d2603b2a8553d944224164a70fcc82b00b3c60a19a9cb1fdff48b0c5f86b" exitCode=0 Jan 22 07:17:21 crc kubenswrapper[4933]: I0122 07:17:21.939875 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6847fd6c7c-m4jnx" event={"ID":"34e8f6f8-ea53-4956-9820-41ffb518d03d","Type":"ContainerDied","Data":"8103d2603b2a8553d944224164a70fcc82b00b3c60a19a9cb1fdff48b0c5f86b"} Jan 22 07:17:21 crc kubenswrapper[4933]: I0122 07:17:21.939904 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6847fd6c7c-m4jnx" event={"ID":"34e8f6f8-ea53-4956-9820-41ffb518d03d","Type":"ContainerStarted","Data":"778e6403c225e84737746ca08f371bf229bbacc03792b23d53a708facb54f44b"} Jan 22 07:17:21 crc kubenswrapper[4933]: I0122 07:17:21.974309 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-7f8795948d-x6pzc" podStartSLOduration=1.974287906 podStartE2EDuration="1.974287906s" 
podCreationTimestamp="2026-01-22 07:17:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:17:21.959326883 +0000 UTC m=+5489.796452256" watchObservedRunningTime="2026-01-22 07:17:21.974287906 +0000 UTC m=+5489.811413269" Jan 22 07:17:22 crc kubenswrapper[4933]: I0122 07:17:22.226617 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-sh4tn" Jan 22 07:17:22 crc kubenswrapper[4933]: I0122 07:17:22.226969 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-sh4tn" Jan 22 07:17:22 crc kubenswrapper[4933]: I0122 07:17:22.290796 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-sh4tn" Jan 22 07:17:22 crc kubenswrapper[4933]: I0122 07:17:22.753310 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-5dd49c8657-ww7nj"] Jan 22 07:17:22 crc kubenswrapper[4933]: I0122 07:17:22.755168 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5dd49c8657-ww7nj" Jan 22 07:17:22 crc kubenswrapper[4933]: I0122 07:17:22.758933 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Jan 22 07:17:22 crc kubenswrapper[4933]: I0122 07:17:22.759130 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Jan 22 07:17:22 crc kubenswrapper[4933]: I0122 07:17:22.775784 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5dd49c8657-ww7nj"] Jan 22 07:17:22 crc kubenswrapper[4933]: I0122 07:17:22.858342 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5da1fca-9300-498a-834d-0a3eed388385-public-tls-certs\") pod \"neutron-5dd49c8657-ww7nj\" (UID: \"e5da1fca-9300-498a-834d-0a3eed388385\") " pod="openstack/neutron-5dd49c8657-ww7nj" Jan 22 07:17:22 crc kubenswrapper[4933]: I0122 07:17:22.858408 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5da1fca-9300-498a-834d-0a3eed388385-internal-tls-certs\") pod \"neutron-5dd49c8657-ww7nj\" (UID: \"e5da1fca-9300-498a-834d-0a3eed388385\") " pod="openstack/neutron-5dd49c8657-ww7nj" Jan 22 07:17:22 crc kubenswrapper[4933]: I0122 07:17:22.858445 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5da1fca-9300-498a-834d-0a3eed388385-combined-ca-bundle\") pod \"neutron-5dd49c8657-ww7nj\" (UID: \"e5da1fca-9300-498a-834d-0a3eed388385\") " pod="openstack/neutron-5dd49c8657-ww7nj" Jan 22 07:17:22 crc kubenswrapper[4933]: I0122 07:17:22.858468 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b6hjg\" (UniqueName: \"kubernetes.io/projected/e5da1fca-9300-498a-834d-0a3eed388385-kube-api-access-b6hjg\") pod \"neutron-5dd49c8657-ww7nj\" (UID: \"e5da1fca-9300-498a-834d-0a3eed388385\") " pod="openstack/neutron-5dd49c8657-ww7nj" Jan 22 07:17:22 crc kubenswrapper[4933]: I0122 07:17:22.858642 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: 
\"kubernetes.io/secret/e5da1fca-9300-498a-834d-0a3eed388385-httpd-config\") pod \"neutron-5dd49c8657-ww7nj\" (UID: \"e5da1fca-9300-498a-834d-0a3eed388385\") " pod="openstack/neutron-5dd49c8657-ww7nj" Jan 22 07:17:22 crc kubenswrapper[4933]: I0122 07:17:22.858773 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e5da1fca-9300-498a-834d-0a3eed388385-config\") pod \"neutron-5dd49c8657-ww7nj\" (UID: \"e5da1fca-9300-498a-834d-0a3eed388385\") " pod="openstack/neutron-5dd49c8657-ww7nj" Jan 22 07:17:22 crc kubenswrapper[4933]: I0122 07:17:22.858809 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5da1fca-9300-498a-834d-0a3eed388385-ovndb-tls-certs\") pod \"neutron-5dd49c8657-ww7nj\" (UID: \"e5da1fca-9300-498a-834d-0a3eed388385\") " pod="openstack/neutron-5dd49c8657-ww7nj" Jan 22 07:17:22 crc kubenswrapper[4933]: I0122 07:17:22.950595 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6847fd6c7c-m4jnx" event={"ID":"34e8f6f8-ea53-4956-9820-41ffb518d03d","Type":"ContainerStarted","Data":"4c377764b65fc991ea343e30b1110eb624a78bf9195fcddbc4472f3309c3edca"} Jan 22 07:17:22 crc kubenswrapper[4933]: I0122 07:17:22.960271 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b6hjg\" (UniqueName: \"kubernetes.io/projected/e5da1fca-9300-498a-834d-0a3eed388385-kube-api-access-b6hjg\") pod \"neutron-5dd49c8657-ww7nj\" (UID: \"e5da1fca-9300-498a-834d-0a3eed388385\") " pod="openstack/neutron-5dd49c8657-ww7nj" Jan 22 07:17:22 crc kubenswrapper[4933]: I0122 07:17:22.960342 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/e5da1fca-9300-498a-834d-0a3eed388385-httpd-config\") pod \"neutron-5dd49c8657-ww7nj\" (UID: \"e5da1fca-9300-498a-834d-0a3eed388385\") " pod="openstack/neutron-5dd49c8657-ww7nj" Jan 22 07:17:22 crc kubenswrapper[4933]: I0122 07:17:22.960393 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e5da1fca-9300-498a-834d-0a3eed388385-config\") pod \"neutron-5dd49c8657-ww7nj\" (UID: \"e5da1fca-9300-498a-834d-0a3eed388385\") " pod="openstack/neutron-5dd49c8657-ww7nj" Jan 22 07:17:22 crc kubenswrapper[4933]: I0122 07:17:22.960428 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5da1fca-9300-498a-834d-0a3eed388385-ovndb-tls-certs\") pod \"neutron-5dd49c8657-ww7nj\" (UID: \"e5da1fca-9300-498a-834d-0a3eed388385\") " pod="openstack/neutron-5dd49c8657-ww7nj" Jan 22 07:17:22 crc kubenswrapper[4933]: I0122 07:17:22.960515 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5da1fca-9300-498a-834d-0a3eed388385-public-tls-certs\") pod \"neutron-5dd49c8657-ww7nj\" (UID: \"e5da1fca-9300-498a-834d-0a3eed388385\") " pod="openstack/neutron-5dd49c8657-ww7nj" Jan 22 07:17:22 crc kubenswrapper[4933]: I0122 07:17:22.960553 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5da1fca-9300-498a-834d-0a3eed388385-internal-tls-certs\") pod \"neutron-5dd49c8657-ww7nj\" (UID: \"e5da1fca-9300-498a-834d-0a3eed388385\") " 
pod="openstack/neutron-5dd49c8657-ww7nj" Jan 22 07:17:22 crc kubenswrapper[4933]: I0122 07:17:22.960607 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5da1fca-9300-498a-834d-0a3eed388385-combined-ca-bundle\") pod \"neutron-5dd49c8657-ww7nj\" (UID: \"e5da1fca-9300-498a-834d-0a3eed388385\") " pod="openstack/neutron-5dd49c8657-ww7nj" Jan 22 07:17:22 crc kubenswrapper[4933]: I0122 07:17:22.968020 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5da1fca-9300-498a-834d-0a3eed388385-internal-tls-certs\") pod \"neutron-5dd49c8657-ww7nj\" (UID: \"e5da1fca-9300-498a-834d-0a3eed388385\") " pod="openstack/neutron-5dd49c8657-ww7nj" Jan 22 07:17:22 crc kubenswrapper[4933]: I0122 07:17:22.970680 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5da1fca-9300-498a-834d-0a3eed388385-ovndb-tls-certs\") pod \"neutron-5dd49c8657-ww7nj\" (UID: \"e5da1fca-9300-498a-834d-0a3eed388385\") " pod="openstack/neutron-5dd49c8657-ww7nj" Jan 22 07:17:22 crc kubenswrapper[4933]: I0122 07:17:22.973552 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/e5da1fca-9300-498a-834d-0a3eed388385-config\") pod \"neutron-5dd49c8657-ww7nj\" (UID: \"e5da1fca-9300-498a-834d-0a3eed388385\") " pod="openstack/neutron-5dd49c8657-ww7nj" Jan 22 07:17:22 crc kubenswrapper[4933]: I0122 07:17:22.973579 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5da1fca-9300-498a-834d-0a3eed388385-public-tls-certs\") pod \"neutron-5dd49c8657-ww7nj\" (UID: \"e5da1fca-9300-498a-834d-0a3eed388385\") " pod="openstack/neutron-5dd49c8657-ww7nj" Jan 22 07:17:22 crc kubenswrapper[4933]: I0122 07:17:22.974010 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/e5da1fca-9300-498a-834d-0a3eed388385-httpd-config\") pod \"neutron-5dd49c8657-ww7nj\" (UID: \"e5da1fca-9300-498a-834d-0a3eed388385\") " pod="openstack/neutron-5dd49c8657-ww7nj" Jan 22 07:17:22 crc kubenswrapper[4933]: I0122 07:17:22.975021 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5da1fca-9300-498a-834d-0a3eed388385-combined-ca-bundle\") pod \"neutron-5dd49c8657-ww7nj\" (UID: \"e5da1fca-9300-498a-834d-0a3eed388385\") " pod="openstack/neutron-5dd49c8657-ww7nj" Jan 22 07:17:22 crc kubenswrapper[4933]: I0122 07:17:22.976856 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6847fd6c7c-m4jnx" podStartSLOduration=2.9768218969999998 podStartE2EDuration="2.976821897s" podCreationTimestamp="2026-01-22 07:17:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:17:22.973963428 +0000 UTC m=+5490.811088791" watchObservedRunningTime="2026-01-22 07:17:22.976821897 +0000 UTC m=+5490.813947250" Jan 22 07:17:22 crc kubenswrapper[4933]: I0122 07:17:22.994681 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b6hjg\" (UniqueName: \"kubernetes.io/projected/e5da1fca-9300-498a-834d-0a3eed388385-kube-api-access-b6hjg\") pod \"neutron-5dd49c8657-ww7nj\" (UID: 
\"e5da1fca-9300-498a-834d-0a3eed388385\") " pod="openstack/neutron-5dd49c8657-ww7nj" Jan 22 07:17:23 crc kubenswrapper[4933]: I0122 07:17:23.003946 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-sh4tn" Jan 22 07:17:23 crc kubenswrapper[4933]: I0122 07:17:23.057770 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-sh4tn"] Jan 22 07:17:23 crc kubenswrapper[4933]: I0122 07:17:23.077695 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5dd49c8657-ww7nj" Jan 22 07:17:23 crc kubenswrapper[4933]: I0122 07:17:23.617443 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5dd49c8657-ww7nj"] Jan 22 07:17:23 crc kubenswrapper[4933]: W0122 07:17:23.631269 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode5da1fca_9300_498a_834d_0a3eed388385.slice/crio-655968831ef0d3d3ec379384c657f8b3e26b81194a238c7e9e643bc89a8da697 WatchSource:0}: Error finding container 655968831ef0d3d3ec379384c657f8b3e26b81194a238c7e9e643bc89a8da697: Status 404 returned error can't find the container with id 655968831ef0d3d3ec379384c657f8b3e26b81194a238c7e9e643bc89a8da697 Jan 22 07:17:23 crc kubenswrapper[4933]: I0122 07:17:23.968879 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5dd49c8657-ww7nj" event={"ID":"e5da1fca-9300-498a-834d-0a3eed388385","Type":"ContainerStarted","Data":"2128c937c11ab12bba5f9a192dc9aeccdf9352a8b101673f746c8e543f098df3"} Jan 22 07:17:23 crc kubenswrapper[4933]: I0122 07:17:23.969282 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5dd49c8657-ww7nj" event={"ID":"e5da1fca-9300-498a-834d-0a3eed388385","Type":"ContainerStarted","Data":"655968831ef0d3d3ec379384c657f8b3e26b81194a238c7e9e643bc89a8da697"} Jan 22 07:17:23 crc kubenswrapper[4933]: I0122 07:17:23.969299 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6847fd6c7c-m4jnx" Jan 22 07:17:24 crc kubenswrapper[4933]: I0122 07:17:24.051204 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-8wnkm"] Jan 22 07:17:24 crc kubenswrapper[4933]: I0122 07:17:24.058225 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-8wnkm"] Jan 22 07:17:24 crc kubenswrapper[4933]: I0122 07:17:24.503025 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6f5b934c-9efe-4baa-8eb9-993e1539d601" path="/var/lib/kubelet/pods/6f5b934c-9efe-4baa-8eb9-993e1539d601/volumes" Jan 22 07:17:24 crc kubenswrapper[4933]: I0122 07:17:24.980721 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5dd49c8657-ww7nj" event={"ID":"e5da1fca-9300-498a-834d-0a3eed388385","Type":"ContainerStarted","Data":"cc9bc954d5a7d37a694a64ce09dbb43e9117c8811f7b3c399fe19ba69a4ec859"} Jan 22 07:17:24 crc kubenswrapper[4933]: I0122 07:17:24.980942 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-sh4tn" podUID="90bd2b0d-a117-4d38-999a-ac51046bd69c" containerName="registry-server" containerID="cri-o://fd95795de5d0f259eb224f86587b88841707d3505d5df2e8b700fe5b2aecad74" gracePeriod=2 Jan 22 07:17:24 crc kubenswrapper[4933]: I0122 07:17:24.981237 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack/neutron-5dd49c8657-ww7nj" Jan 22 07:17:25 crc kubenswrapper[4933]: I0122 07:17:25.019765 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-5dd49c8657-ww7nj" podStartSLOduration=3.01974602 podStartE2EDuration="3.01974602s" podCreationTimestamp="2026-01-22 07:17:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:17:25.0118684 +0000 UTC m=+5492.848993763" watchObservedRunningTime="2026-01-22 07:17:25.01974602 +0000 UTC m=+5492.856871383" Jan 22 07:17:25 crc kubenswrapper[4933]: I0122 07:17:25.492616 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sh4tn" Jan 22 07:17:25 crc kubenswrapper[4933]: I0122 07:17:25.604896 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90bd2b0d-a117-4d38-999a-ac51046bd69c-catalog-content\") pod \"90bd2b0d-a117-4d38-999a-ac51046bd69c\" (UID: \"90bd2b0d-a117-4d38-999a-ac51046bd69c\") " Jan 22 07:17:25 crc kubenswrapper[4933]: I0122 07:17:25.605272 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90bd2b0d-a117-4d38-999a-ac51046bd69c-utilities\") pod \"90bd2b0d-a117-4d38-999a-ac51046bd69c\" (UID: \"90bd2b0d-a117-4d38-999a-ac51046bd69c\") " Jan 22 07:17:25 crc kubenswrapper[4933]: I0122 07:17:25.606050 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ksf9m\" (UniqueName: \"kubernetes.io/projected/90bd2b0d-a117-4d38-999a-ac51046bd69c-kube-api-access-ksf9m\") pod \"90bd2b0d-a117-4d38-999a-ac51046bd69c\" (UID: \"90bd2b0d-a117-4d38-999a-ac51046bd69c\") " Jan 22 07:17:25 crc kubenswrapper[4933]: I0122 07:17:25.605963 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/90bd2b0d-a117-4d38-999a-ac51046bd69c-utilities" (OuterVolumeSpecName: "utilities") pod "90bd2b0d-a117-4d38-999a-ac51046bd69c" (UID: "90bd2b0d-a117-4d38-999a-ac51046bd69c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:17:25 crc kubenswrapper[4933]: I0122 07:17:25.607425 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90bd2b0d-a117-4d38-999a-ac51046bd69c-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 07:17:25 crc kubenswrapper[4933]: I0122 07:17:25.611179 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/90bd2b0d-a117-4d38-999a-ac51046bd69c-kube-api-access-ksf9m" (OuterVolumeSpecName: "kube-api-access-ksf9m") pod "90bd2b0d-a117-4d38-999a-ac51046bd69c" (UID: "90bd2b0d-a117-4d38-999a-ac51046bd69c"). InnerVolumeSpecName "kube-api-access-ksf9m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:17:25 crc kubenswrapper[4933]: I0122 07:17:25.655214 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/90bd2b0d-a117-4d38-999a-ac51046bd69c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "90bd2b0d-a117-4d38-999a-ac51046bd69c" (UID: "90bd2b0d-a117-4d38-999a-ac51046bd69c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:17:25 crc kubenswrapper[4933]: I0122 07:17:25.708931 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ksf9m\" (UniqueName: \"kubernetes.io/projected/90bd2b0d-a117-4d38-999a-ac51046bd69c-kube-api-access-ksf9m\") on node \"crc\" DevicePath \"\"" Jan 22 07:17:25 crc kubenswrapper[4933]: I0122 07:17:25.708971 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90bd2b0d-a117-4d38-999a-ac51046bd69c-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 07:17:25 crc kubenswrapper[4933]: I0122 07:17:25.991809 4933 generic.go:334] "Generic (PLEG): container finished" podID="90bd2b0d-a117-4d38-999a-ac51046bd69c" containerID="fd95795de5d0f259eb224f86587b88841707d3505d5df2e8b700fe5b2aecad74" exitCode=0 Jan 22 07:17:25 crc kubenswrapper[4933]: I0122 07:17:25.991845 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sh4tn" event={"ID":"90bd2b0d-a117-4d38-999a-ac51046bd69c","Type":"ContainerDied","Data":"fd95795de5d0f259eb224f86587b88841707d3505d5df2e8b700fe5b2aecad74"} Jan 22 07:17:25 crc kubenswrapper[4933]: I0122 07:17:25.991988 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sh4tn" Jan 22 07:17:25 crc kubenswrapper[4933]: I0122 07:17:25.992836 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sh4tn" event={"ID":"90bd2b0d-a117-4d38-999a-ac51046bd69c","Type":"ContainerDied","Data":"33209d312cff85625b4cf7be405dd85b2580d07dc4cf1f160e367914cb6d383f"} Jan 22 07:17:25 crc kubenswrapper[4933]: I0122 07:17:25.993010 4933 scope.go:117] "RemoveContainer" containerID="fd95795de5d0f259eb224f86587b88841707d3505d5df2e8b700fe5b2aecad74" Jan 22 07:17:26 crc kubenswrapper[4933]: I0122 07:17:26.046177 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-sh4tn"] Jan 22 07:17:26 crc kubenswrapper[4933]: I0122 07:17:26.047559 4933 scope.go:117] "RemoveContainer" containerID="a94ab46e662f2ccc5eb39b23b48955f59f19716b53c2bb4fbe06f834663a5364" Jan 22 07:17:26 crc kubenswrapper[4933]: I0122 07:17:26.055862 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-sh4tn"] Jan 22 07:17:26 crc kubenswrapper[4933]: I0122 07:17:26.073774 4933 scope.go:117] "RemoveContainer" containerID="6528df23d291da2b146adc976714eb43abd84cb19bb3f00bdebb0b9c91a1e8de" Jan 22 07:17:26 crc kubenswrapper[4933]: I0122 07:17:26.138601 4933 scope.go:117] "RemoveContainer" containerID="fd95795de5d0f259eb224f86587b88841707d3505d5df2e8b700fe5b2aecad74" Jan 22 07:17:26 crc kubenswrapper[4933]: E0122 07:17:26.139110 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fd95795de5d0f259eb224f86587b88841707d3505d5df2e8b700fe5b2aecad74\": container with ID starting with fd95795de5d0f259eb224f86587b88841707d3505d5df2e8b700fe5b2aecad74 not found: ID does not exist" containerID="fd95795de5d0f259eb224f86587b88841707d3505d5df2e8b700fe5b2aecad74" Jan 22 07:17:26 crc kubenswrapper[4933]: I0122 07:17:26.139141 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd95795de5d0f259eb224f86587b88841707d3505d5df2e8b700fe5b2aecad74"} err="failed to get container status 
\"fd95795de5d0f259eb224f86587b88841707d3505d5df2e8b700fe5b2aecad74\": rpc error: code = NotFound desc = could not find container \"fd95795de5d0f259eb224f86587b88841707d3505d5df2e8b700fe5b2aecad74\": container with ID starting with fd95795de5d0f259eb224f86587b88841707d3505d5df2e8b700fe5b2aecad74 not found: ID does not exist" Jan 22 07:17:26 crc kubenswrapper[4933]: I0122 07:17:26.139165 4933 scope.go:117] "RemoveContainer" containerID="a94ab46e662f2ccc5eb39b23b48955f59f19716b53c2bb4fbe06f834663a5364" Jan 22 07:17:26 crc kubenswrapper[4933]: E0122 07:17:26.139501 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a94ab46e662f2ccc5eb39b23b48955f59f19716b53c2bb4fbe06f834663a5364\": container with ID starting with a94ab46e662f2ccc5eb39b23b48955f59f19716b53c2bb4fbe06f834663a5364 not found: ID does not exist" containerID="a94ab46e662f2ccc5eb39b23b48955f59f19716b53c2bb4fbe06f834663a5364" Jan 22 07:17:26 crc kubenswrapper[4933]: I0122 07:17:26.139520 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a94ab46e662f2ccc5eb39b23b48955f59f19716b53c2bb4fbe06f834663a5364"} err="failed to get container status \"a94ab46e662f2ccc5eb39b23b48955f59f19716b53c2bb4fbe06f834663a5364\": rpc error: code = NotFound desc = could not find container \"a94ab46e662f2ccc5eb39b23b48955f59f19716b53c2bb4fbe06f834663a5364\": container with ID starting with a94ab46e662f2ccc5eb39b23b48955f59f19716b53c2bb4fbe06f834663a5364 not found: ID does not exist" Jan 22 07:17:26 crc kubenswrapper[4933]: I0122 07:17:26.139536 4933 scope.go:117] "RemoveContainer" containerID="6528df23d291da2b146adc976714eb43abd84cb19bb3f00bdebb0b9c91a1e8de" Jan 22 07:17:26 crc kubenswrapper[4933]: E0122 07:17:26.139753 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6528df23d291da2b146adc976714eb43abd84cb19bb3f00bdebb0b9c91a1e8de\": container with ID starting with 6528df23d291da2b146adc976714eb43abd84cb19bb3f00bdebb0b9c91a1e8de not found: ID does not exist" containerID="6528df23d291da2b146adc976714eb43abd84cb19bb3f00bdebb0b9c91a1e8de" Jan 22 07:17:26 crc kubenswrapper[4933]: I0122 07:17:26.139771 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6528df23d291da2b146adc976714eb43abd84cb19bb3f00bdebb0b9c91a1e8de"} err="failed to get container status \"6528df23d291da2b146adc976714eb43abd84cb19bb3f00bdebb0b9c91a1e8de\": rpc error: code = NotFound desc = could not find container \"6528df23d291da2b146adc976714eb43abd84cb19bb3f00bdebb0b9c91a1e8de\": container with ID starting with 6528df23d291da2b146adc976714eb43abd84cb19bb3f00bdebb0b9c91a1e8de not found: ID does not exist" Jan 22 07:17:26 crc kubenswrapper[4933]: I0122 07:17:26.502696 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="90bd2b0d-a117-4d38-999a-ac51046bd69c" path="/var/lib/kubelet/pods/90bd2b0d-a117-4d38-999a-ac51046bd69c/volumes" Jan 22 07:17:28 crc kubenswrapper[4933]: I0122 07:17:28.491364 4933 scope.go:117] "RemoveContainer" containerID="8d0ff8f158acf8d88510b1fddaf0b35b7a3fb6e84561545d6c60a78c2e36fc92" Jan 22 07:17:28 crc kubenswrapper[4933]: E0122 07:17:28.492450 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:17:30 crc kubenswrapper[4933]: I0122 07:17:30.479112 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6847fd6c7c-m4jnx" Jan 22 07:17:30 crc kubenswrapper[4933]: I0122 07:17:30.533980 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-556d77dddf-j2dhs"] Jan 22 07:17:30 crc kubenswrapper[4933]: I0122 07:17:30.534254 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-556d77dddf-j2dhs" podUID="7d3635eb-5e80-4d1b-b01b-ec8de50d7357" containerName="dnsmasq-dns" containerID="cri-o://32d75eb167ed591863169b95b4cebbb71b547d938f20085555d7a0d80d1d00b6" gracePeriod=10 Jan 22 07:17:31 crc kubenswrapper[4933]: I0122 07:17:31.031982 4933 generic.go:334] "Generic (PLEG): container finished" podID="7d3635eb-5e80-4d1b-b01b-ec8de50d7357" containerID="32d75eb167ed591863169b95b4cebbb71b547d938f20085555d7a0d80d1d00b6" exitCode=0 Jan 22 07:17:31 crc kubenswrapper[4933]: I0122 07:17:31.032120 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-556d77dddf-j2dhs" event={"ID":"7d3635eb-5e80-4d1b-b01b-ec8de50d7357","Type":"ContainerDied","Data":"32d75eb167ed591863169b95b4cebbb71b547d938f20085555d7a0d80d1d00b6"} Jan 22 07:17:31 crc kubenswrapper[4933]: I0122 07:17:31.032382 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-556d77dddf-j2dhs" event={"ID":"7d3635eb-5e80-4d1b-b01b-ec8de50d7357","Type":"ContainerDied","Data":"377c9e1df9f56aad39e54305496366ff55f7e962253da1b540fcf96f2663eeeb"} Jan 22 07:17:31 crc kubenswrapper[4933]: I0122 07:17:31.032400 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="377c9e1df9f56aad39e54305496366ff55f7e962253da1b540fcf96f2663eeeb" Jan 22 07:17:31 crc kubenswrapper[4933]: I0122 07:17:31.043039 4933 util.go:48] "No ready sandbox for pod can be found. 
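
The "ContainerStatus from runtime service failed ... NotFound" errors above are a benign race: the kubelet removes a container, then a second cleanup path asks the runtime for that container's status, finds it already gone, and logs the NotFound before moving on. A minimal Go sketch of that tolerant-delete pattern, using the real google.golang.org/grpc status helpers but a hypothetical removeContainer stand-in rather than the kubelet's actual CRI client:

    package main

    import (
        "fmt"

        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    // removeContainer stands in for a CRI RemoveContainer call; here it always
    // reports the container as already gone, mimicking the race in the log.
    func removeContainer(id string) error {
        return status.Errorf(codes.NotFound, "could not find container %q", id)
    }

    // cleanupContainer treats NotFound as success: an already-removed container
    // needs no further work, so the sync is not failed for it.
    func cleanupContainer(id string) error {
        if err := removeContainer(id); err != nil && status.Code(err) != codes.NotFound {
            return err
        }
        return nil
    }

    func main() {
        fmt.Println(cleanupContainer("fd95795de5d0")) // <nil>: already-gone is fine
    }
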
Need to start a new one" pod="openstack/dnsmasq-dns-556d77dddf-j2dhs" Jan 22 07:17:31 crc kubenswrapper[4933]: I0122 07:17:31.109778 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7d3635eb-5e80-4d1b-b01b-ec8de50d7357-dns-svc\") pod \"7d3635eb-5e80-4d1b-b01b-ec8de50d7357\" (UID: \"7d3635eb-5e80-4d1b-b01b-ec8de50d7357\") " Jan 22 07:17:31 crc kubenswrapper[4933]: I0122 07:17:31.109872 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pwk29\" (UniqueName: \"kubernetes.io/projected/7d3635eb-5e80-4d1b-b01b-ec8de50d7357-kube-api-access-pwk29\") pod \"7d3635eb-5e80-4d1b-b01b-ec8de50d7357\" (UID: \"7d3635eb-5e80-4d1b-b01b-ec8de50d7357\") " Jan 22 07:17:31 crc kubenswrapper[4933]: I0122 07:17:31.109893 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7d3635eb-5e80-4d1b-b01b-ec8de50d7357-ovsdbserver-nb\") pod \"7d3635eb-5e80-4d1b-b01b-ec8de50d7357\" (UID: \"7d3635eb-5e80-4d1b-b01b-ec8de50d7357\") " Jan 22 07:17:31 crc kubenswrapper[4933]: I0122 07:17:31.109977 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7d3635eb-5e80-4d1b-b01b-ec8de50d7357-ovsdbserver-sb\") pod \"7d3635eb-5e80-4d1b-b01b-ec8de50d7357\" (UID: \"7d3635eb-5e80-4d1b-b01b-ec8de50d7357\") " Jan 22 07:17:31 crc kubenswrapper[4933]: I0122 07:17:31.109996 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d3635eb-5e80-4d1b-b01b-ec8de50d7357-config\") pod \"7d3635eb-5e80-4d1b-b01b-ec8de50d7357\" (UID: \"7d3635eb-5e80-4d1b-b01b-ec8de50d7357\") " Jan 22 07:17:31 crc kubenswrapper[4933]: I0122 07:17:31.114810 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7d3635eb-5e80-4d1b-b01b-ec8de50d7357-kube-api-access-pwk29" (OuterVolumeSpecName: "kube-api-access-pwk29") pod "7d3635eb-5e80-4d1b-b01b-ec8de50d7357" (UID: "7d3635eb-5e80-4d1b-b01b-ec8de50d7357"). InnerVolumeSpecName "kube-api-access-pwk29". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:17:31 crc kubenswrapper[4933]: I0122 07:17:31.149040 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7d3635eb-5e80-4d1b-b01b-ec8de50d7357-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "7d3635eb-5e80-4d1b-b01b-ec8de50d7357" (UID: "7d3635eb-5e80-4d1b-b01b-ec8de50d7357"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:17:31 crc kubenswrapper[4933]: I0122 07:17:31.151629 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7d3635eb-5e80-4d1b-b01b-ec8de50d7357-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "7d3635eb-5e80-4d1b-b01b-ec8de50d7357" (UID: "7d3635eb-5e80-4d1b-b01b-ec8de50d7357"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:17:31 crc kubenswrapper[4933]: I0122 07:17:31.152202 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7d3635eb-5e80-4d1b-b01b-ec8de50d7357-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7d3635eb-5e80-4d1b-b01b-ec8de50d7357" (UID: "7d3635eb-5e80-4d1b-b01b-ec8de50d7357"). InnerVolumeSpecName "dns-svc". 
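
The burst of "operationExecutor.UnmountVolume started" lines is the volume reconciler reacting to the pod's deletion: each volume still present in the actual state of the world but absent from the desired state gets an unmount operation. A minimal sketch of that diff-and-act loop, with made-up map-based caches standing in for the kubelet's desired/actual state types:

    package main

    import "fmt"

    type volumeKey struct{ podUID, volume string }

    // reconcile unmounts every volume that is mounted (actual) but no longer
    // wanted (desired), which is the shape of the UnmountVolume burst above.
    func reconcile(desired, actual map[volumeKey]bool, unmount func(volumeKey)) {
        for k := range actual {
            if !desired[k] {
                unmount(k)
            }
        }
    }

    func main() {
        actual := map[volumeKey]bool{
            {"7d3635eb", "dns-svc"}: true,
            {"7d3635eb", "config"}:  true,
        }
        desired := map[volumeKey]bool{} // pod deleted: nothing is desired anymore
        reconcile(desired, actual, func(k volumeKey) {
            fmt.Printf("UnmountVolume started for %q of pod %q\n", k.volume, k.podUID)
        })
    }
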
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:17:31 crc kubenswrapper[4933]: I0122 07:17:31.157558 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7d3635eb-5e80-4d1b-b01b-ec8de50d7357-config" (OuterVolumeSpecName: "config") pod "7d3635eb-5e80-4d1b-b01b-ec8de50d7357" (UID: "7d3635eb-5e80-4d1b-b01b-ec8de50d7357"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:17:31 crc kubenswrapper[4933]: I0122 07:17:31.212622 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7d3635eb-5e80-4d1b-b01b-ec8de50d7357-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 07:17:31 crc kubenswrapper[4933]: I0122 07:17:31.212656 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d3635eb-5e80-4d1b-b01b-ec8de50d7357-config\") on node \"crc\" DevicePath \"\"" Jan 22 07:17:31 crc kubenswrapper[4933]: I0122 07:17:31.212665 4933 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7d3635eb-5e80-4d1b-b01b-ec8de50d7357-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 07:17:31 crc kubenswrapper[4933]: I0122 07:17:31.212674 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pwk29\" (UniqueName: \"kubernetes.io/projected/7d3635eb-5e80-4d1b-b01b-ec8de50d7357-kube-api-access-pwk29\") on node \"crc\" DevicePath \"\"" Jan 22 07:17:31 crc kubenswrapper[4933]: I0122 07:17:31.212683 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7d3635eb-5e80-4d1b-b01b-ec8de50d7357-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 07:17:32 crc kubenswrapper[4933]: I0122 07:17:32.043986 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-556d77dddf-j2dhs" Jan 22 07:17:32 crc kubenswrapper[4933]: I0122 07:17:32.101909 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-556d77dddf-j2dhs"] Jan 22 07:17:32 crc kubenswrapper[4933]: I0122 07:17:32.108144 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-556d77dddf-j2dhs"] Jan 22 07:17:32 crc kubenswrapper[4933]: I0122 07:17:32.501852 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7d3635eb-5e80-4d1b-b01b-ec8de50d7357" path="/var/lib/kubelet/pods/7d3635eb-5e80-4d1b-b01b-ec8de50d7357/volumes" Jan 22 07:17:43 crc kubenswrapper[4933]: I0122 07:17:43.490673 4933 scope.go:117] "RemoveContainer" containerID="8d0ff8f158acf8d88510b1fddaf0b35b7a3fb6e84561545d6c60a78c2e36fc92" Jan 22 07:17:43 crc kubenswrapper[4933]: E0122 07:17:43.491378 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:17:50 crc kubenswrapper[4933]: I0122 07:17:50.682379 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-7f8795948d-x6pzc" Jan 22 07:17:53 crc kubenswrapper[4933]: I0122 07:17:53.095742 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-5dd49c8657-ww7nj" Jan 22 07:17:53 crc kubenswrapper[4933]: I0122 07:17:53.180915 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-7f8795948d-x6pzc"] Jan 22 07:17:53 crc kubenswrapper[4933]: I0122 07:17:53.181181 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-7f8795948d-x6pzc" podUID="4cc6afe1-b20e-4a9c-be39-9b34fae99393" containerName="neutron-api" containerID="cri-o://ef991805163fcd84e7cd5c6692ac32ad31cbc609bf5328e87df993e12f4b8749" gracePeriod=30 Jan 22 07:17:53 crc kubenswrapper[4933]: I0122 07:17:53.181575 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-7f8795948d-x6pzc" podUID="4cc6afe1-b20e-4a9c-be39-9b34fae99393" containerName="neutron-httpd" containerID="cri-o://96fb9e115a3f44636c33d5b2bcf1a8f82bae9e160c5ee5a631403461b9a4002b" gracePeriod=30 Jan 22 07:17:54 crc kubenswrapper[4933]: I0122 07:17:54.244978 4933 generic.go:334] "Generic (PLEG): container finished" podID="4cc6afe1-b20e-4a9c-be39-9b34fae99393" containerID="96fb9e115a3f44636c33d5b2bcf1a8f82bae9e160c5ee5a631403461b9a4002b" exitCode=0 Jan 22 07:17:54 crc kubenswrapper[4933]: I0122 07:17:54.245063 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7f8795948d-x6pzc" event={"ID":"4cc6afe1-b20e-4a9c-be39-9b34fae99393","Type":"ContainerDied","Data":"96fb9e115a3f44636c33d5b2bcf1a8f82bae9e160c5ee5a631403461b9a4002b"} Jan 22 07:17:55 crc kubenswrapper[4933]: I0122 07:17:55.490972 4933 scope.go:117] "RemoveContainer" containerID="8d0ff8f158acf8d88510b1fddaf0b35b7a3fb6e84561545d6c60a78c2e36fc92" Jan 22 07:17:55 crc kubenswrapper[4933]: E0122 07:17:55.491605 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:17:55 crc kubenswrapper[4933]: I0122 07:17:55.914361 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7f8795948d-x6pzc" Jan 22 07:17:56 crc kubenswrapper[4933]: I0122 07:17:56.060195 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4cc6afe1-b20e-4a9c-be39-9b34fae99393-config\") pod \"4cc6afe1-b20e-4a9c-be39-9b34fae99393\" (UID: \"4cc6afe1-b20e-4a9c-be39-9b34fae99393\") " Jan 22 07:17:56 crc kubenswrapper[4933]: I0122 07:17:56.060489 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4cc6afe1-b20e-4a9c-be39-9b34fae99393-ovndb-tls-certs\") pod \"4cc6afe1-b20e-4a9c-be39-9b34fae99393\" (UID: \"4cc6afe1-b20e-4a9c-be39-9b34fae99393\") " Jan 22 07:17:56 crc kubenswrapper[4933]: I0122 07:17:56.060685 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b894k\" (UniqueName: \"kubernetes.io/projected/4cc6afe1-b20e-4a9c-be39-9b34fae99393-kube-api-access-b894k\") pod \"4cc6afe1-b20e-4a9c-be39-9b34fae99393\" (UID: \"4cc6afe1-b20e-4a9c-be39-9b34fae99393\") " Jan 22 07:17:56 crc kubenswrapper[4933]: I0122 07:17:56.060768 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/4cc6afe1-b20e-4a9c-be39-9b34fae99393-httpd-config\") pod \"4cc6afe1-b20e-4a9c-be39-9b34fae99393\" (UID: \"4cc6afe1-b20e-4a9c-be39-9b34fae99393\") " Jan 22 07:17:56 crc kubenswrapper[4933]: I0122 07:17:56.060882 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cc6afe1-b20e-4a9c-be39-9b34fae99393-combined-ca-bundle\") pod \"4cc6afe1-b20e-4a9c-be39-9b34fae99393\" (UID: \"4cc6afe1-b20e-4a9c-be39-9b34fae99393\") " Jan 22 07:17:56 crc kubenswrapper[4933]: I0122 07:17:56.066062 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4cc6afe1-b20e-4a9c-be39-9b34fae99393-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "4cc6afe1-b20e-4a9c-be39-9b34fae99393" (UID: "4cc6afe1-b20e-4a9c-be39-9b34fae99393"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:17:56 crc kubenswrapper[4933]: I0122 07:17:56.067170 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4cc6afe1-b20e-4a9c-be39-9b34fae99393-kube-api-access-b894k" (OuterVolumeSpecName: "kube-api-access-b894k") pod "4cc6afe1-b20e-4a9c-be39-9b34fae99393" (UID: "4cc6afe1-b20e-4a9c-be39-9b34fae99393"). InnerVolumeSpecName "kube-api-access-b894k". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:17:56 crc kubenswrapper[4933]: I0122 07:17:56.100477 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4cc6afe1-b20e-4a9c-be39-9b34fae99393-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4cc6afe1-b20e-4a9c-be39-9b34fae99393" (UID: "4cc6afe1-b20e-4a9c-be39-9b34fae99393"). InnerVolumeSpecName "combined-ca-bundle". 
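
The recurring "back-off 5m0s restarting failed container=machine-config-daemon" errors show crash-loop backoff at its ceiling: the restart delay doubles after each failed run and is capped, so a container that keeps crashing is only retried every five minutes. A sketch of that doubling-with-cap schedule; the 5m cap is taken from the log, while the 10s initial delay is an assumed default:

    package main

    import (
        "fmt"
        "time"
    )

    // nextBackoff doubles the previous delay and clamps it at the ceiling,
    // producing 10s, 20s, 40s, ..., 5m0s, 5m0s, ... across repeated crashes.
    func nextBackoff(prev, ceiling time.Duration) time.Duration {
        if prev == 0 {
            return 10 * time.Second // assumed initial delay
        }
        if d := 2 * prev; d < ceiling {
            return d
        }
        return ceiling
    }

    func main() {
        var d time.Duration
        for i := 0; i < 7; i++ {
            d = nextBackoff(d, 5*time.Minute)
            fmt.Println(d) // the tail prints the capped 5m0s seen in the log
        }
    }
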
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:17:56 crc kubenswrapper[4933]: I0122 07:17:56.105605 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4cc6afe1-b20e-4a9c-be39-9b34fae99393-config" (OuterVolumeSpecName: "config") pod "4cc6afe1-b20e-4a9c-be39-9b34fae99393" (UID: "4cc6afe1-b20e-4a9c-be39-9b34fae99393"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:17:56 crc kubenswrapper[4933]: I0122 07:17:56.118722 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4cc6afe1-b20e-4a9c-be39-9b34fae99393-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "4cc6afe1-b20e-4a9c-be39-9b34fae99393" (UID: "4cc6afe1-b20e-4a9c-be39-9b34fae99393"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:17:56 crc kubenswrapper[4933]: I0122 07:17:56.162788 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/4cc6afe1-b20e-4a9c-be39-9b34fae99393-config\") on node \"crc\" DevicePath \"\"" Jan 22 07:17:56 crc kubenswrapper[4933]: I0122 07:17:56.162830 4933 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4cc6afe1-b20e-4a9c-be39-9b34fae99393-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 07:17:56 crc kubenswrapper[4933]: I0122 07:17:56.162844 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b894k\" (UniqueName: \"kubernetes.io/projected/4cc6afe1-b20e-4a9c-be39-9b34fae99393-kube-api-access-b894k\") on node \"crc\" DevicePath \"\"" Jan 22 07:17:56 crc kubenswrapper[4933]: I0122 07:17:56.162856 4933 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/4cc6afe1-b20e-4a9c-be39-9b34fae99393-httpd-config\") on node \"crc\" DevicePath \"\"" Jan 22 07:17:56 crc kubenswrapper[4933]: I0122 07:17:56.162868 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4cc6afe1-b20e-4a9c-be39-9b34fae99393-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:17:56 crc kubenswrapper[4933]: I0122 07:17:56.264547 4933 generic.go:334] "Generic (PLEG): container finished" podID="4cc6afe1-b20e-4a9c-be39-9b34fae99393" containerID="ef991805163fcd84e7cd5c6692ac32ad31cbc609bf5328e87df993e12f4b8749" exitCode=0 Jan 22 07:17:56 crc kubenswrapper[4933]: I0122 07:17:56.264604 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7f8795948d-x6pzc" event={"ID":"4cc6afe1-b20e-4a9c-be39-9b34fae99393","Type":"ContainerDied","Data":"ef991805163fcd84e7cd5c6692ac32ad31cbc609bf5328e87df993e12f4b8749"} Jan 22 07:17:56 crc kubenswrapper[4933]: I0122 07:17:56.264609 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7f8795948d-x6pzc" Jan 22 07:17:56 crc kubenswrapper[4933]: I0122 07:17:56.264641 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7f8795948d-x6pzc" event={"ID":"4cc6afe1-b20e-4a9c-be39-9b34fae99393","Type":"ContainerDied","Data":"ae894575878fcb26d47927eb13dd5e745d1f0c4722e4e87202c759f36c4ea9ef"} Jan 22 07:17:56 crc kubenswrapper[4933]: I0122 07:17:56.264665 4933 scope.go:117] "RemoveContainer" containerID="96fb9e115a3f44636c33d5b2bcf1a8f82bae9e160c5ee5a631403461b9a4002b" Jan 22 07:17:56 crc kubenswrapper[4933]: I0122 07:17:56.296976 4933 scope.go:117] "RemoveContainer" containerID="ef991805163fcd84e7cd5c6692ac32ad31cbc609bf5328e87df993e12f4b8749" Jan 22 07:17:56 crc kubenswrapper[4933]: I0122 07:17:56.301784 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-7f8795948d-x6pzc"] Jan 22 07:17:56 crc kubenswrapper[4933]: I0122 07:17:56.309265 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-7f8795948d-x6pzc"] Jan 22 07:17:56 crc kubenswrapper[4933]: I0122 07:17:56.320226 4933 scope.go:117] "RemoveContainer" containerID="96fb9e115a3f44636c33d5b2bcf1a8f82bae9e160c5ee5a631403461b9a4002b" Jan 22 07:17:56 crc kubenswrapper[4933]: E0122 07:17:56.320700 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"96fb9e115a3f44636c33d5b2bcf1a8f82bae9e160c5ee5a631403461b9a4002b\": container with ID starting with 96fb9e115a3f44636c33d5b2bcf1a8f82bae9e160c5ee5a631403461b9a4002b not found: ID does not exist" containerID="96fb9e115a3f44636c33d5b2bcf1a8f82bae9e160c5ee5a631403461b9a4002b" Jan 22 07:17:56 crc kubenswrapper[4933]: I0122 07:17:56.320737 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"96fb9e115a3f44636c33d5b2bcf1a8f82bae9e160c5ee5a631403461b9a4002b"} err="failed to get container status \"96fb9e115a3f44636c33d5b2bcf1a8f82bae9e160c5ee5a631403461b9a4002b\": rpc error: code = NotFound desc = could not find container \"96fb9e115a3f44636c33d5b2bcf1a8f82bae9e160c5ee5a631403461b9a4002b\": container with ID starting with 96fb9e115a3f44636c33d5b2bcf1a8f82bae9e160c5ee5a631403461b9a4002b not found: ID does not exist" Jan 22 07:17:56 crc kubenswrapper[4933]: I0122 07:17:56.320770 4933 scope.go:117] "RemoveContainer" containerID="ef991805163fcd84e7cd5c6692ac32ad31cbc609bf5328e87df993e12f4b8749" Jan 22 07:17:56 crc kubenswrapper[4933]: E0122 07:17:56.321259 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ef991805163fcd84e7cd5c6692ac32ad31cbc609bf5328e87df993e12f4b8749\": container with ID starting with ef991805163fcd84e7cd5c6692ac32ad31cbc609bf5328e87df993e12f4b8749 not found: ID does not exist" containerID="ef991805163fcd84e7cd5c6692ac32ad31cbc609bf5328e87df993e12f4b8749" Jan 22 07:17:56 crc kubenswrapper[4933]: I0122 07:17:56.321288 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef991805163fcd84e7cd5c6692ac32ad31cbc609bf5328e87df993e12f4b8749"} err="failed to get container status \"ef991805163fcd84e7cd5c6692ac32ad31cbc609bf5328e87df993e12f4b8749\": rpc error: code = NotFound desc = could not find container \"ef991805163fcd84e7cd5c6692ac32ad31cbc609bf5328e87df993e12f4b8749\": container with ID starting with ef991805163fcd84e7cd5c6692ac32ad31cbc609bf5328e87df993e12f4b8749 not found: ID does not exist" Jan 22 07:17:56 crc 
kubenswrapper[4933]: I0122 07:17:56.502304 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4cc6afe1-b20e-4a9c-be39-9b34fae99393" path="/var/lib/kubelet/pods/4cc6afe1-b20e-4a9c-be39-9b34fae99393/volumes" Jan 22 07:18:04 crc kubenswrapper[4933]: I0122 07:18:04.371055 4933 scope.go:117] "RemoveContainer" containerID="e6ce589238bebb9b42d774a4092b46b24aa2d1bab9aec7ef5f3f7ebb4c1bd621" Jan 22 07:18:06 crc kubenswrapper[4933]: I0122 07:18:06.491121 4933 scope.go:117] "RemoveContainer" containerID="8d0ff8f158acf8d88510b1fddaf0b35b7a3fb6e84561545d6c60a78c2e36fc92" Jan 22 07:18:06 crc kubenswrapper[4933]: E0122 07:18:06.492005 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:18:17 crc kubenswrapper[4933]: I0122 07:18:17.490917 4933 scope.go:117] "RemoveContainer" containerID="8d0ff8f158acf8d88510b1fddaf0b35b7a3fb6e84561545d6c60a78c2e36fc92" Jan 22 07:18:17 crc kubenswrapper[4933]: E0122 07:18:17.492953 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.112343 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-gtw5n"] Jan 22 07:18:25 crc kubenswrapper[4933]: E0122 07:18:25.113285 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4cc6afe1-b20e-4a9c-be39-9b34fae99393" containerName="neutron-api" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.113305 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="4cc6afe1-b20e-4a9c-be39-9b34fae99393" containerName="neutron-api" Jan 22 07:18:25 crc kubenswrapper[4933]: E0122 07:18:25.113324 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d3635eb-5e80-4d1b-b01b-ec8de50d7357" containerName="init" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.113332 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d3635eb-5e80-4d1b-b01b-ec8de50d7357" containerName="init" Jan 22 07:18:25 crc kubenswrapper[4933]: E0122 07:18:25.113341 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d3635eb-5e80-4d1b-b01b-ec8de50d7357" containerName="dnsmasq-dns" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.113349 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d3635eb-5e80-4d1b-b01b-ec8de50d7357" containerName="dnsmasq-dns" Jan 22 07:18:25 crc kubenswrapper[4933]: E0122 07:18:25.113360 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90bd2b0d-a117-4d38-999a-ac51046bd69c" containerName="extract-content" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.113368 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="90bd2b0d-a117-4d38-999a-ac51046bd69c" containerName="extract-content" Jan 22 07:18:25 crc kubenswrapper[4933]: E0122 07:18:25.113390 4933 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="90bd2b0d-a117-4d38-999a-ac51046bd69c" containerName="registry-server" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.113396 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="90bd2b0d-a117-4d38-999a-ac51046bd69c" containerName="registry-server" Jan 22 07:18:25 crc kubenswrapper[4933]: E0122 07:18:25.113407 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4cc6afe1-b20e-4a9c-be39-9b34fae99393" containerName="neutron-httpd" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.113414 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="4cc6afe1-b20e-4a9c-be39-9b34fae99393" containerName="neutron-httpd" Jan 22 07:18:25 crc kubenswrapper[4933]: E0122 07:18:25.113443 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90bd2b0d-a117-4d38-999a-ac51046bd69c" containerName="extract-utilities" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.113452 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="90bd2b0d-a117-4d38-999a-ac51046bd69c" containerName="extract-utilities" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.113622 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="90bd2b0d-a117-4d38-999a-ac51046bd69c" containerName="registry-server" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.113641 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d3635eb-5e80-4d1b-b01b-ec8de50d7357" containerName="dnsmasq-dns" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.113651 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="4cc6afe1-b20e-4a9c-be39-9b34fae99393" containerName="neutron-httpd" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.113676 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="4cc6afe1-b20e-4a9c-be39-9b34fae99393" containerName="neutron-api" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.114425 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-gtw5n" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.118508 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.118591 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.118720 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.118762 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.119146 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-tbdwm" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.129738 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-gtw5n"] Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.187240 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-ring-data-devices\") pod \"swift-ring-rebalance-gtw5n\" (UID: \"a2c3e8e6-9165-4104-b7d2-3940c4801bb3\") " pod="openstack/swift-ring-rebalance-gtw5n" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.187293 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-swiftconf\") pod \"swift-ring-rebalance-gtw5n\" (UID: \"a2c3e8e6-9165-4104-b7d2-3940c4801bb3\") " pod="openstack/swift-ring-rebalance-gtw5n" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.187315 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-etc-swift\") pod \"swift-ring-rebalance-gtw5n\" (UID: \"a2c3e8e6-9165-4104-b7d2-3940c4801bb3\") " pod="openstack/swift-ring-rebalance-gtw5n" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.187341 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-combined-ca-bundle\") pod \"swift-ring-rebalance-gtw5n\" (UID: \"a2c3e8e6-9165-4104-b7d2-3940c4801bb3\") " pod="openstack/swift-ring-rebalance-gtw5n" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.187368 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b66h4\" (UniqueName: \"kubernetes.io/projected/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-kube-api-access-b66h4\") pod \"swift-ring-rebalance-gtw5n\" (UID: \"a2c3e8e6-9165-4104-b7d2-3940c4801bb3\") " pod="openstack/swift-ring-rebalance-gtw5n" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.187444 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-dispersionconf\") pod \"swift-ring-rebalance-gtw5n\" (UID: \"a2c3e8e6-9165-4104-b7d2-3940c4801bb3\") " pod="openstack/swift-ring-rebalance-gtw5n" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.187471 4933 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-scripts\") pod \"swift-ring-rebalance-gtw5n\" (UID: \"a2c3e8e6-9165-4104-b7d2-3940c4801bb3\") " pod="openstack/swift-ring-rebalance-gtw5n" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.198712 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8486788887-z5njj"] Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.201396 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8486788887-z5njj" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.257906 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8486788887-z5njj"] Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.289370 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f438afbc-73ea-44fc-856c-faf0ae11e192-ovsdbserver-sb\") pod \"dnsmasq-dns-8486788887-z5njj\" (UID: \"f438afbc-73ea-44fc-856c-faf0ae11e192\") " pod="openstack/dnsmasq-dns-8486788887-z5njj" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.289458 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-dispersionconf\") pod \"swift-ring-rebalance-gtw5n\" (UID: \"a2c3e8e6-9165-4104-b7d2-3940c4801bb3\") " pod="openstack/swift-ring-rebalance-gtw5n" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.289526 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f438afbc-73ea-44fc-856c-faf0ae11e192-ovsdbserver-nb\") pod \"dnsmasq-dns-8486788887-z5njj\" (UID: \"f438afbc-73ea-44fc-856c-faf0ae11e192\") " pod="openstack/dnsmasq-dns-8486788887-z5njj" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.289556 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-scripts\") pod \"swift-ring-rebalance-gtw5n\" (UID: \"a2c3e8e6-9165-4104-b7d2-3940c4801bb3\") " pod="openstack/swift-ring-rebalance-gtw5n" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.289635 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-ring-data-devices\") pod \"swift-ring-rebalance-gtw5n\" (UID: \"a2c3e8e6-9165-4104-b7d2-3940c4801bb3\") " pod="openstack/swift-ring-rebalance-gtw5n" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.289689 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-swiftconf\") pod \"swift-ring-rebalance-gtw5n\" (UID: \"a2c3e8e6-9165-4104-b7d2-3940c4801bb3\") " pod="openstack/swift-ring-rebalance-gtw5n" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.289719 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-etc-swift\") pod \"swift-ring-rebalance-gtw5n\" (UID: \"a2c3e8e6-9165-4104-b7d2-3940c4801bb3\") " pod="openstack/swift-ring-rebalance-gtw5n" Jan 22 07:18:25 
crc kubenswrapper[4933]: I0122 07:18:25.289775 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-combined-ca-bundle\") pod \"swift-ring-rebalance-gtw5n\" (UID: \"a2c3e8e6-9165-4104-b7d2-3940c4801bb3\") " pod="openstack/swift-ring-rebalance-gtw5n" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.289799 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f438afbc-73ea-44fc-856c-faf0ae11e192-config\") pod \"dnsmasq-dns-8486788887-z5njj\" (UID: \"f438afbc-73ea-44fc-856c-faf0ae11e192\") " pod="openstack/dnsmasq-dns-8486788887-z5njj" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.289842 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b66h4\" (UniqueName: \"kubernetes.io/projected/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-kube-api-access-b66h4\") pod \"swift-ring-rebalance-gtw5n\" (UID: \"a2c3e8e6-9165-4104-b7d2-3940c4801bb3\") " pod="openstack/swift-ring-rebalance-gtw5n" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.289875 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8lz5n\" (UniqueName: \"kubernetes.io/projected/f438afbc-73ea-44fc-856c-faf0ae11e192-kube-api-access-8lz5n\") pod \"dnsmasq-dns-8486788887-z5njj\" (UID: \"f438afbc-73ea-44fc-856c-faf0ae11e192\") " pod="openstack/dnsmasq-dns-8486788887-z5njj" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.289952 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f438afbc-73ea-44fc-856c-faf0ae11e192-dns-svc\") pod \"dnsmasq-dns-8486788887-z5njj\" (UID: \"f438afbc-73ea-44fc-856c-faf0ae11e192\") " pod="openstack/dnsmasq-dns-8486788887-z5njj" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.292821 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-etc-swift\") pod \"swift-ring-rebalance-gtw5n\" (UID: \"a2c3e8e6-9165-4104-b7d2-3940c4801bb3\") " pod="openstack/swift-ring-rebalance-gtw5n" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.293537 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-scripts\") pod \"swift-ring-rebalance-gtw5n\" (UID: \"a2c3e8e6-9165-4104-b7d2-3940c4801bb3\") " pod="openstack/swift-ring-rebalance-gtw5n" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.293766 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-ring-data-devices\") pod \"swift-ring-rebalance-gtw5n\" (UID: \"a2c3e8e6-9165-4104-b7d2-3940c4801bb3\") " pod="openstack/swift-ring-rebalance-gtw5n" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.321273 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-combined-ca-bundle\") pod \"swift-ring-rebalance-gtw5n\" (UID: \"a2c3e8e6-9165-4104-b7d2-3940c4801bb3\") " pod="openstack/swift-ring-rebalance-gtw5n" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.322034 4933 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-swiftconf\") pod \"swift-ring-rebalance-gtw5n\" (UID: \"a2c3e8e6-9165-4104-b7d2-3940c4801bb3\") " pod="openstack/swift-ring-rebalance-gtw5n" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.322440 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-dispersionconf\") pod \"swift-ring-rebalance-gtw5n\" (UID: \"a2c3e8e6-9165-4104-b7d2-3940c4801bb3\") " pod="openstack/swift-ring-rebalance-gtw5n" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.340141 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b66h4\" (UniqueName: \"kubernetes.io/projected/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-kube-api-access-b66h4\") pod \"swift-ring-rebalance-gtw5n\" (UID: \"a2c3e8e6-9165-4104-b7d2-3940c4801bb3\") " pod="openstack/swift-ring-rebalance-gtw5n" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.391839 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f438afbc-73ea-44fc-856c-faf0ae11e192-ovsdbserver-nb\") pod \"dnsmasq-dns-8486788887-z5njj\" (UID: \"f438afbc-73ea-44fc-856c-faf0ae11e192\") " pod="openstack/dnsmasq-dns-8486788887-z5njj" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.391952 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f438afbc-73ea-44fc-856c-faf0ae11e192-config\") pod \"dnsmasq-dns-8486788887-z5njj\" (UID: \"f438afbc-73ea-44fc-856c-faf0ae11e192\") " pod="openstack/dnsmasq-dns-8486788887-z5njj" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.391997 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8lz5n\" (UniqueName: \"kubernetes.io/projected/f438afbc-73ea-44fc-856c-faf0ae11e192-kube-api-access-8lz5n\") pod \"dnsmasq-dns-8486788887-z5njj\" (UID: \"f438afbc-73ea-44fc-856c-faf0ae11e192\") " pod="openstack/dnsmasq-dns-8486788887-z5njj" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.392046 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f438afbc-73ea-44fc-856c-faf0ae11e192-dns-svc\") pod \"dnsmasq-dns-8486788887-z5njj\" (UID: \"f438afbc-73ea-44fc-856c-faf0ae11e192\") " pod="openstack/dnsmasq-dns-8486788887-z5njj" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.392128 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f438afbc-73ea-44fc-856c-faf0ae11e192-ovsdbserver-sb\") pod \"dnsmasq-dns-8486788887-z5njj\" (UID: \"f438afbc-73ea-44fc-856c-faf0ae11e192\") " pod="openstack/dnsmasq-dns-8486788887-z5njj" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.392754 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f438afbc-73ea-44fc-856c-faf0ae11e192-ovsdbserver-nb\") pod \"dnsmasq-dns-8486788887-z5njj\" (UID: \"f438afbc-73ea-44fc-856c-faf0ae11e192\") " pod="openstack/dnsmasq-dns-8486788887-z5njj" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.392992 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/f438afbc-73ea-44fc-856c-faf0ae11e192-ovsdbserver-sb\") pod \"dnsmasq-dns-8486788887-z5njj\" (UID: \"f438afbc-73ea-44fc-856c-faf0ae11e192\") " pod="openstack/dnsmasq-dns-8486788887-z5njj" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.393562 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f438afbc-73ea-44fc-856c-faf0ae11e192-config\") pod \"dnsmasq-dns-8486788887-z5njj\" (UID: \"f438afbc-73ea-44fc-856c-faf0ae11e192\") " pod="openstack/dnsmasq-dns-8486788887-z5njj" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.394097 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f438afbc-73ea-44fc-856c-faf0ae11e192-dns-svc\") pod \"dnsmasq-dns-8486788887-z5njj\" (UID: \"f438afbc-73ea-44fc-856c-faf0ae11e192\") " pod="openstack/dnsmasq-dns-8486788887-z5njj" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.410868 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8lz5n\" (UniqueName: \"kubernetes.io/projected/f438afbc-73ea-44fc-856c-faf0ae11e192-kube-api-access-8lz5n\") pod \"dnsmasq-dns-8486788887-z5njj\" (UID: \"f438afbc-73ea-44fc-856c-faf0ae11e192\") " pod="openstack/dnsmasq-dns-8486788887-z5njj" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.452572 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-gtw5n" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.519869 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8486788887-z5njj" Jan 22 07:18:25 crc kubenswrapper[4933]: I0122 07:18:25.925615 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-gtw5n"] Jan 22 07:18:26 crc kubenswrapper[4933]: W0122 07:18:26.064941 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf438afbc_73ea_44fc_856c_faf0ae11e192.slice/crio-923227d0c4fa5ffe78917f885161d24cf6d4750f0f6267cf9c12b5e2bc2c096d WatchSource:0}: Error finding container 923227d0c4fa5ffe78917f885161d24cf6d4750f0f6267cf9c12b5e2bc2c096d: Status 404 returned error can't find the container with id 923227d0c4fa5ffe78917f885161d24cf6d4750f0f6267cf9c12b5e2bc2c096d Jan 22 07:18:26 crc kubenswrapper[4933]: I0122 07:18:26.065880 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8486788887-z5njj"] Jan 22 07:18:26 crc kubenswrapper[4933]: I0122 07:18:26.526877 4933 generic.go:334] "Generic (PLEG): container finished" podID="f438afbc-73ea-44fc-856c-faf0ae11e192" containerID="87eb03b0b7f322a1c2108b351a99636e927634210653b05810f1d11ead559cc5" exitCode=0 Jan 22 07:18:26 crc kubenswrapper[4933]: I0122 07:18:26.527203 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8486788887-z5njj" event={"ID":"f438afbc-73ea-44fc-856c-faf0ae11e192","Type":"ContainerDied","Data":"87eb03b0b7f322a1c2108b351a99636e927634210653b05810f1d11ead559cc5"} Jan 22 07:18:26 crc kubenswrapper[4933]: I0122 07:18:26.527361 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8486788887-z5njj" event={"ID":"f438afbc-73ea-44fc-856c-faf0ae11e192","Type":"ContainerStarted","Data":"923227d0c4fa5ffe78917f885161d24cf6d4750f0f6267cf9c12b5e2bc2c096d"} Jan 22 07:18:26 crc kubenswrapper[4933]: I0122 07:18:26.528803 4933 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/swift-ring-rebalance-gtw5n" event={"ID":"a2c3e8e6-9165-4104-b7d2-3940c4801bb3","Type":"ContainerStarted","Data":"9de03f2fd6ff20b1253a37573c8282bd0942fa4a5956a56bf332dc0219abcef8"} Jan 22 07:18:26 crc kubenswrapper[4933]: I0122 07:18:26.528838 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-gtw5n" event={"ID":"a2c3e8e6-9165-4104-b7d2-3940c4801bb3","Type":"ContainerStarted","Data":"f4fc308a31a2c3418740cc65f16c53fec4e5782e58e6484e366a947e0344086b"} Jan 22 07:18:26 crc kubenswrapper[4933]: I0122 07:18:26.567897 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-gtw5n" podStartSLOduration=1.5678728020000001 podStartE2EDuration="1.567872802s" podCreationTimestamp="2026-01-22 07:18:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:18:26.567483213 +0000 UTC m=+5554.404608596" watchObservedRunningTime="2026-01-22 07:18:26.567872802 +0000 UTC m=+5554.404998195" Jan 22 07:18:27 crc kubenswrapper[4933]: I0122 07:18:27.539751 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8486788887-z5njj" event={"ID":"f438afbc-73ea-44fc-856c-faf0ae11e192","Type":"ContainerStarted","Data":"3f5c96235c4a2933af26512896b90574e1936ab976c41c7652a2226bcf9daf35"} Jan 22 07:18:27 crc kubenswrapper[4933]: I0122 07:18:27.575581 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8486788887-z5njj" podStartSLOduration=2.575552129 podStartE2EDuration="2.575552129s" podCreationTimestamp="2026-01-22 07:18:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:18:27.562015669 +0000 UTC m=+5555.399141062" watchObservedRunningTime="2026-01-22 07:18:27.575552129 +0000 UTC m=+5555.412677502" Jan 22 07:18:27 crc kubenswrapper[4933]: I0122 07:18:27.896615 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-689f4477cd-5248v"] Jan 22 07:18:27 crc kubenswrapper[4933]: I0122 07:18:27.899035 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-689f4477cd-5248v" Jan 22 07:18:27 crc kubenswrapper[4933]: I0122 07:18:27.902768 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Jan 22 07:18:27 crc kubenswrapper[4933]: I0122 07:18:27.928727 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-689f4477cd-5248v"] Jan 22 07:18:27 crc kubenswrapper[4933]: I0122 07:18:27.972463 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1ff49dac-4a23-4973-9473-dbcb0b6dff4f-etc-swift\") pod \"swift-proxy-689f4477cd-5248v\" (UID: \"1ff49dac-4a23-4973-9473-dbcb0b6dff4f\") " pod="openstack/swift-proxy-689f4477cd-5248v" Jan 22 07:18:27 crc kubenswrapper[4933]: I0122 07:18:27.972540 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ff49dac-4a23-4973-9473-dbcb0b6dff4f-config-data\") pod \"swift-proxy-689f4477cd-5248v\" (UID: \"1ff49dac-4a23-4973-9473-dbcb0b6dff4f\") " pod="openstack/swift-proxy-689f4477cd-5248v" Jan 22 07:18:27 crc kubenswrapper[4933]: I0122 07:18:27.972586 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1ff49dac-4a23-4973-9473-dbcb0b6dff4f-run-httpd\") pod \"swift-proxy-689f4477cd-5248v\" (UID: \"1ff49dac-4a23-4973-9473-dbcb0b6dff4f\") " pod="openstack/swift-proxy-689f4477cd-5248v" Jan 22 07:18:27 crc kubenswrapper[4933]: I0122 07:18:27.972613 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1ff49dac-4a23-4973-9473-dbcb0b6dff4f-log-httpd\") pod \"swift-proxy-689f4477cd-5248v\" (UID: \"1ff49dac-4a23-4973-9473-dbcb0b6dff4f\") " pod="openstack/swift-proxy-689f4477cd-5248v" Jan 22 07:18:27 crc kubenswrapper[4933]: I0122 07:18:27.972638 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5fshp\" (UniqueName: \"kubernetes.io/projected/1ff49dac-4a23-4973-9473-dbcb0b6dff4f-kube-api-access-5fshp\") pod \"swift-proxy-689f4477cd-5248v\" (UID: \"1ff49dac-4a23-4973-9473-dbcb0b6dff4f\") " pod="openstack/swift-proxy-689f4477cd-5248v" Jan 22 07:18:27 crc kubenswrapper[4933]: I0122 07:18:27.972678 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ff49dac-4a23-4973-9473-dbcb0b6dff4f-combined-ca-bundle\") pod \"swift-proxy-689f4477cd-5248v\" (UID: \"1ff49dac-4a23-4973-9473-dbcb0b6dff4f\") " pod="openstack/swift-proxy-689f4477cd-5248v" Jan 22 07:18:28 crc kubenswrapper[4933]: I0122 07:18:28.073904 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1ff49dac-4a23-4973-9473-dbcb0b6dff4f-run-httpd\") pod \"swift-proxy-689f4477cd-5248v\" (UID: \"1ff49dac-4a23-4973-9473-dbcb0b6dff4f\") " pod="openstack/swift-proxy-689f4477cd-5248v" Jan 22 07:18:28 crc kubenswrapper[4933]: I0122 07:18:28.073965 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1ff49dac-4a23-4973-9473-dbcb0b6dff4f-log-httpd\") pod \"swift-proxy-689f4477cd-5248v\" (UID: \"1ff49dac-4a23-4973-9473-dbcb0b6dff4f\") " 
pod="openstack/swift-proxy-689f4477cd-5248v" Jan 22 07:18:28 crc kubenswrapper[4933]: I0122 07:18:28.073996 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5fshp\" (UniqueName: \"kubernetes.io/projected/1ff49dac-4a23-4973-9473-dbcb0b6dff4f-kube-api-access-5fshp\") pod \"swift-proxy-689f4477cd-5248v\" (UID: \"1ff49dac-4a23-4973-9473-dbcb0b6dff4f\") " pod="openstack/swift-proxy-689f4477cd-5248v" Jan 22 07:18:28 crc kubenswrapper[4933]: I0122 07:18:28.074041 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ff49dac-4a23-4973-9473-dbcb0b6dff4f-combined-ca-bundle\") pod \"swift-proxy-689f4477cd-5248v\" (UID: \"1ff49dac-4a23-4973-9473-dbcb0b6dff4f\") " pod="openstack/swift-proxy-689f4477cd-5248v" Jan 22 07:18:28 crc kubenswrapper[4933]: I0122 07:18:28.074063 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1ff49dac-4a23-4973-9473-dbcb0b6dff4f-etc-swift\") pod \"swift-proxy-689f4477cd-5248v\" (UID: \"1ff49dac-4a23-4973-9473-dbcb0b6dff4f\") " pod="openstack/swift-proxy-689f4477cd-5248v" Jan 22 07:18:28 crc kubenswrapper[4933]: I0122 07:18:28.074132 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ff49dac-4a23-4973-9473-dbcb0b6dff4f-config-data\") pod \"swift-proxy-689f4477cd-5248v\" (UID: \"1ff49dac-4a23-4973-9473-dbcb0b6dff4f\") " pod="openstack/swift-proxy-689f4477cd-5248v" Jan 22 07:18:28 crc kubenswrapper[4933]: I0122 07:18:28.074470 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1ff49dac-4a23-4973-9473-dbcb0b6dff4f-log-httpd\") pod \"swift-proxy-689f4477cd-5248v\" (UID: \"1ff49dac-4a23-4973-9473-dbcb0b6dff4f\") " pod="openstack/swift-proxy-689f4477cd-5248v" Jan 22 07:18:28 crc kubenswrapper[4933]: I0122 07:18:28.074536 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1ff49dac-4a23-4973-9473-dbcb0b6dff4f-run-httpd\") pod \"swift-proxy-689f4477cd-5248v\" (UID: \"1ff49dac-4a23-4973-9473-dbcb0b6dff4f\") " pod="openstack/swift-proxy-689f4477cd-5248v" Jan 22 07:18:28 crc kubenswrapper[4933]: I0122 07:18:28.086562 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1ff49dac-4a23-4973-9473-dbcb0b6dff4f-etc-swift\") pod \"swift-proxy-689f4477cd-5248v\" (UID: \"1ff49dac-4a23-4973-9473-dbcb0b6dff4f\") " pod="openstack/swift-proxy-689f4477cd-5248v" Jan 22 07:18:28 crc kubenswrapper[4933]: I0122 07:18:28.086915 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ff49dac-4a23-4973-9473-dbcb0b6dff4f-config-data\") pod \"swift-proxy-689f4477cd-5248v\" (UID: \"1ff49dac-4a23-4973-9473-dbcb0b6dff4f\") " pod="openstack/swift-proxy-689f4477cd-5248v" Jan 22 07:18:28 crc kubenswrapper[4933]: I0122 07:18:28.087774 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ff49dac-4a23-4973-9473-dbcb0b6dff4f-combined-ca-bundle\") pod \"swift-proxy-689f4477cd-5248v\" (UID: \"1ff49dac-4a23-4973-9473-dbcb0b6dff4f\") " pod="openstack/swift-proxy-689f4477cd-5248v" Jan 22 07:18:28 crc kubenswrapper[4933]: I0122 07:18:28.090696 4933 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5fshp\" (UniqueName: \"kubernetes.io/projected/1ff49dac-4a23-4973-9473-dbcb0b6dff4f-kube-api-access-5fshp\") pod \"swift-proxy-689f4477cd-5248v\" (UID: \"1ff49dac-4a23-4973-9473-dbcb0b6dff4f\") " pod="openstack/swift-proxy-689f4477cd-5248v" Jan 22 07:18:28 crc kubenswrapper[4933]: I0122 07:18:28.231432 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-689f4477cd-5248v" Jan 22 07:18:28 crc kubenswrapper[4933]: I0122 07:18:28.548161 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8486788887-z5njj" Jan 22 07:18:28 crc kubenswrapper[4933]: W0122 07:18:28.671369 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1ff49dac_4a23_4973_9473_dbcb0b6dff4f.slice/crio-308726674e44c75534ade929bf14a505a4b3d758c999c25ae6a66bb61adc997f WatchSource:0}: Error finding container 308726674e44c75534ade929bf14a505a4b3d758c999c25ae6a66bb61adc997f: Status 404 returned error can't find the container with id 308726674e44c75534ade929bf14a505a4b3d758c999c25ae6a66bb61adc997f Jan 22 07:18:28 crc kubenswrapper[4933]: I0122 07:18:28.673356 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-689f4477cd-5248v"] Jan 22 07:18:29 crc kubenswrapper[4933]: I0122 07:18:29.490771 4933 scope.go:117] "RemoveContainer" containerID="8d0ff8f158acf8d88510b1fddaf0b35b7a3fb6e84561545d6c60a78c2e36fc92" Jan 22 07:18:29 crc kubenswrapper[4933]: E0122 07:18:29.492072 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:18:29 crc kubenswrapper[4933]: I0122 07:18:29.558185 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-689f4477cd-5248v" event={"ID":"1ff49dac-4a23-4973-9473-dbcb0b6dff4f","Type":"ContainerStarted","Data":"4bc2420c4976e77e919852914fe6c63d2fbfed869b1266afd0723ce18e5f18a9"} Jan 22 07:18:29 crc kubenswrapper[4933]: I0122 07:18:29.559153 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-689f4477cd-5248v" event={"ID":"1ff49dac-4a23-4973-9473-dbcb0b6dff4f","Type":"ContainerStarted","Data":"6fc4d3bb6128ce57c3ec56aafb111be09f0eb92b0aa49686abbb27229cf831dc"} Jan 22 07:18:29 crc kubenswrapper[4933]: I0122 07:18:29.559276 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-689f4477cd-5248v" event={"ID":"1ff49dac-4a23-4973-9473-dbcb0b6dff4f","Type":"ContainerStarted","Data":"308726674e44c75534ade929bf14a505a4b3d758c999c25ae6a66bb61adc997f"} Jan 22 07:18:29 crc kubenswrapper[4933]: I0122 07:18:29.559349 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-689f4477cd-5248v" Jan 22 07:18:29 crc kubenswrapper[4933]: I0122 07:18:29.559416 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-689f4477cd-5248v" Jan 22 07:18:29 crc kubenswrapper[4933]: I0122 07:18:29.580036 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-689f4477cd-5248v" 
podStartSLOduration=2.580016186 podStartE2EDuration="2.580016186s" podCreationTimestamp="2026-01-22 07:18:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:18:29.576547282 +0000 UTC m=+5557.413672635" watchObservedRunningTime="2026-01-22 07:18:29.580016186 +0000 UTC m=+5557.417141539" Jan 22 07:18:30 crc kubenswrapper[4933]: I0122 07:18:30.569267 4933 generic.go:334] "Generic (PLEG): container finished" podID="a2c3e8e6-9165-4104-b7d2-3940c4801bb3" containerID="9de03f2fd6ff20b1253a37573c8282bd0942fa4a5956a56bf332dc0219abcef8" exitCode=0 Jan 22 07:18:30 crc kubenswrapper[4933]: I0122 07:18:30.569342 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-gtw5n" event={"ID":"a2c3e8e6-9165-4104-b7d2-3940c4801bb3","Type":"ContainerDied","Data":"9de03f2fd6ff20b1253a37573c8282bd0942fa4a5956a56bf332dc0219abcef8"} Jan 22 07:18:30 crc kubenswrapper[4933]: I0122 07:18:30.630923 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-65dd78db8b-67tnw"] Jan 22 07:18:30 crc kubenswrapper[4933]: I0122 07:18:30.632741 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-65dd78db8b-67tnw" Jan 22 07:18:30 crc kubenswrapper[4933]: I0122 07:18:30.635857 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Jan 22 07:18:30 crc kubenswrapper[4933]: I0122 07:18:30.636046 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Jan 22 07:18:30 crc kubenswrapper[4933]: I0122 07:18:30.638466 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-65dd78db8b-67tnw"] Jan 22 07:18:30 crc kubenswrapper[4933]: I0122 07:18:30.826389 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/37f95090-a891-4563-adcf-7aa34d7ff34c-public-tls-certs\") pod \"swift-proxy-65dd78db8b-67tnw\" (UID: \"37f95090-a891-4563-adcf-7aa34d7ff34c\") " pod="openstack/swift-proxy-65dd78db8b-67tnw" Jan 22 07:18:30 crc kubenswrapper[4933]: I0122 07:18:30.826456 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/37f95090-a891-4563-adcf-7aa34d7ff34c-log-httpd\") pod \"swift-proxy-65dd78db8b-67tnw\" (UID: \"37f95090-a891-4563-adcf-7aa34d7ff34c\") " pod="openstack/swift-proxy-65dd78db8b-67tnw" Jan 22 07:18:30 crc kubenswrapper[4933]: I0122 07:18:30.826489 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/37f95090-a891-4563-adcf-7aa34d7ff34c-internal-tls-certs\") pod \"swift-proxy-65dd78db8b-67tnw\" (UID: \"37f95090-a891-4563-adcf-7aa34d7ff34c\") " pod="openstack/swift-proxy-65dd78db8b-67tnw" Jan 22 07:18:30 crc kubenswrapper[4933]: I0122 07:18:30.826514 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/37f95090-a891-4563-adcf-7aa34d7ff34c-run-httpd\") pod \"swift-proxy-65dd78db8b-67tnw\" (UID: \"37f95090-a891-4563-adcf-7aa34d7ff34c\") " pod="openstack/swift-proxy-65dd78db8b-67tnw" Jan 22 07:18:30 crc kubenswrapper[4933]: I0122 07:18:30.826584 4933 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37f95090-a891-4563-adcf-7aa34d7ff34c-config-data\") pod \"swift-proxy-65dd78db8b-67tnw\" (UID: \"37f95090-a891-4563-adcf-7aa34d7ff34c\") " pod="openstack/swift-proxy-65dd78db8b-67tnw" Jan 22 07:18:30 crc kubenswrapper[4933]: I0122 07:18:30.826618 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/37f95090-a891-4563-adcf-7aa34d7ff34c-etc-swift\") pod \"swift-proxy-65dd78db8b-67tnw\" (UID: \"37f95090-a891-4563-adcf-7aa34d7ff34c\") " pod="openstack/swift-proxy-65dd78db8b-67tnw" Jan 22 07:18:30 crc kubenswrapper[4933]: I0122 07:18:30.826663 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wr295\" (UniqueName: \"kubernetes.io/projected/37f95090-a891-4563-adcf-7aa34d7ff34c-kube-api-access-wr295\") pod \"swift-proxy-65dd78db8b-67tnw\" (UID: \"37f95090-a891-4563-adcf-7aa34d7ff34c\") " pod="openstack/swift-proxy-65dd78db8b-67tnw" Jan 22 07:18:30 crc kubenswrapper[4933]: I0122 07:18:30.826751 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37f95090-a891-4563-adcf-7aa34d7ff34c-combined-ca-bundle\") pod \"swift-proxy-65dd78db8b-67tnw\" (UID: \"37f95090-a891-4563-adcf-7aa34d7ff34c\") " pod="openstack/swift-proxy-65dd78db8b-67tnw" Jan 22 07:18:30 crc kubenswrapper[4933]: I0122 07:18:30.928579 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/37f95090-a891-4563-adcf-7aa34d7ff34c-etc-swift\") pod \"swift-proxy-65dd78db8b-67tnw\" (UID: \"37f95090-a891-4563-adcf-7aa34d7ff34c\") " pod="openstack/swift-proxy-65dd78db8b-67tnw" Jan 22 07:18:30 crc kubenswrapper[4933]: I0122 07:18:30.928663 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wr295\" (UniqueName: \"kubernetes.io/projected/37f95090-a891-4563-adcf-7aa34d7ff34c-kube-api-access-wr295\") pod \"swift-proxy-65dd78db8b-67tnw\" (UID: \"37f95090-a891-4563-adcf-7aa34d7ff34c\") " pod="openstack/swift-proxy-65dd78db8b-67tnw" Jan 22 07:18:30 crc kubenswrapper[4933]: I0122 07:18:30.928744 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37f95090-a891-4563-adcf-7aa34d7ff34c-combined-ca-bundle\") pod \"swift-proxy-65dd78db8b-67tnw\" (UID: \"37f95090-a891-4563-adcf-7aa34d7ff34c\") " pod="openstack/swift-proxy-65dd78db8b-67tnw" Jan 22 07:18:30 crc kubenswrapper[4933]: I0122 07:18:30.928788 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/37f95090-a891-4563-adcf-7aa34d7ff34c-public-tls-certs\") pod \"swift-proxy-65dd78db8b-67tnw\" (UID: \"37f95090-a891-4563-adcf-7aa34d7ff34c\") " pod="openstack/swift-proxy-65dd78db8b-67tnw" Jan 22 07:18:30 crc kubenswrapper[4933]: I0122 07:18:30.928810 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/37f95090-a891-4563-adcf-7aa34d7ff34c-log-httpd\") pod \"swift-proxy-65dd78db8b-67tnw\" (UID: \"37f95090-a891-4563-adcf-7aa34d7ff34c\") " pod="openstack/swift-proxy-65dd78db8b-67tnw" Jan 22 07:18:30 crc kubenswrapper[4933]: I0122 07:18:30.928830 4933 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/37f95090-a891-4563-adcf-7aa34d7ff34c-internal-tls-certs\") pod \"swift-proxy-65dd78db8b-67tnw\" (UID: \"37f95090-a891-4563-adcf-7aa34d7ff34c\") " pod="openstack/swift-proxy-65dd78db8b-67tnw" Jan 22 07:18:30 crc kubenswrapper[4933]: I0122 07:18:30.928865 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/37f95090-a891-4563-adcf-7aa34d7ff34c-run-httpd\") pod \"swift-proxy-65dd78db8b-67tnw\" (UID: \"37f95090-a891-4563-adcf-7aa34d7ff34c\") " pod="openstack/swift-proxy-65dd78db8b-67tnw" Jan 22 07:18:30 crc kubenswrapper[4933]: I0122 07:18:30.928923 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37f95090-a891-4563-adcf-7aa34d7ff34c-config-data\") pod \"swift-proxy-65dd78db8b-67tnw\" (UID: \"37f95090-a891-4563-adcf-7aa34d7ff34c\") " pod="openstack/swift-proxy-65dd78db8b-67tnw" Jan 22 07:18:30 crc kubenswrapper[4933]: I0122 07:18:30.929521 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/37f95090-a891-4563-adcf-7aa34d7ff34c-run-httpd\") pod \"swift-proxy-65dd78db8b-67tnw\" (UID: \"37f95090-a891-4563-adcf-7aa34d7ff34c\") " pod="openstack/swift-proxy-65dd78db8b-67tnw" Jan 22 07:18:30 crc kubenswrapper[4933]: I0122 07:18:30.929852 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/37f95090-a891-4563-adcf-7aa34d7ff34c-log-httpd\") pod \"swift-proxy-65dd78db8b-67tnw\" (UID: \"37f95090-a891-4563-adcf-7aa34d7ff34c\") " pod="openstack/swift-proxy-65dd78db8b-67tnw" Jan 22 07:18:30 crc kubenswrapper[4933]: I0122 07:18:30.934294 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37f95090-a891-4563-adcf-7aa34d7ff34c-combined-ca-bundle\") pod \"swift-proxy-65dd78db8b-67tnw\" (UID: \"37f95090-a891-4563-adcf-7aa34d7ff34c\") " pod="openstack/swift-proxy-65dd78db8b-67tnw" Jan 22 07:18:30 crc kubenswrapper[4933]: I0122 07:18:30.934589 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/37f95090-a891-4563-adcf-7aa34d7ff34c-public-tls-certs\") pod \"swift-proxy-65dd78db8b-67tnw\" (UID: \"37f95090-a891-4563-adcf-7aa34d7ff34c\") " pod="openstack/swift-proxy-65dd78db8b-67tnw" Jan 22 07:18:30 crc kubenswrapper[4933]: I0122 07:18:30.934628 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/37f95090-a891-4563-adcf-7aa34d7ff34c-internal-tls-certs\") pod \"swift-proxy-65dd78db8b-67tnw\" (UID: \"37f95090-a891-4563-adcf-7aa34d7ff34c\") " pod="openstack/swift-proxy-65dd78db8b-67tnw" Jan 22 07:18:30 crc kubenswrapper[4933]: I0122 07:18:30.939218 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/37f95090-a891-4563-adcf-7aa34d7ff34c-etc-swift\") pod \"swift-proxy-65dd78db8b-67tnw\" (UID: \"37f95090-a891-4563-adcf-7aa34d7ff34c\") " pod="openstack/swift-proxy-65dd78db8b-67tnw" Jan 22 07:18:30 crc kubenswrapper[4933]: I0122 07:18:30.946880 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/37f95090-a891-4563-adcf-7aa34d7ff34c-config-data\") pod \"swift-proxy-65dd78db8b-67tnw\" (UID: \"37f95090-a891-4563-adcf-7aa34d7ff34c\") " pod="openstack/swift-proxy-65dd78db8b-67tnw" Jan 22 07:18:30 crc kubenswrapper[4933]: I0122 07:18:30.947859 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wr295\" (UniqueName: \"kubernetes.io/projected/37f95090-a891-4563-adcf-7aa34d7ff34c-kube-api-access-wr295\") pod \"swift-proxy-65dd78db8b-67tnw\" (UID: \"37f95090-a891-4563-adcf-7aa34d7ff34c\") " pod="openstack/swift-proxy-65dd78db8b-67tnw" Jan 22 07:18:30 crc kubenswrapper[4933]: I0122 07:18:30.948282 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-65dd78db8b-67tnw" Jan 22 07:18:31 crc kubenswrapper[4933]: I0122 07:18:31.597461 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-65dd78db8b-67tnw"] Jan 22 07:18:31 crc kubenswrapper[4933]: I0122 07:18:31.853853 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-gtw5n" Jan 22 07:18:32 crc kubenswrapper[4933]: I0122 07:18:32.045973 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b66h4\" (UniqueName: \"kubernetes.io/projected/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-kube-api-access-b66h4\") pod \"a2c3e8e6-9165-4104-b7d2-3940c4801bb3\" (UID: \"a2c3e8e6-9165-4104-b7d2-3940c4801bb3\") " Jan 22 07:18:32 crc kubenswrapper[4933]: I0122 07:18:32.046046 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-etc-swift\") pod \"a2c3e8e6-9165-4104-b7d2-3940c4801bb3\" (UID: \"a2c3e8e6-9165-4104-b7d2-3940c4801bb3\") " Jan 22 07:18:32 crc kubenswrapper[4933]: I0122 07:18:32.046108 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-scripts\") pod \"a2c3e8e6-9165-4104-b7d2-3940c4801bb3\" (UID: \"a2c3e8e6-9165-4104-b7d2-3940c4801bb3\") " Jan 22 07:18:32 crc kubenswrapper[4933]: I0122 07:18:32.046133 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-swiftconf\") pod \"a2c3e8e6-9165-4104-b7d2-3940c4801bb3\" (UID: \"a2c3e8e6-9165-4104-b7d2-3940c4801bb3\") " Jan 22 07:18:32 crc kubenswrapper[4933]: I0122 07:18:32.046150 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-combined-ca-bundle\") pod \"a2c3e8e6-9165-4104-b7d2-3940c4801bb3\" (UID: \"a2c3e8e6-9165-4104-b7d2-3940c4801bb3\") " Jan 22 07:18:32 crc kubenswrapper[4933]: I0122 07:18:32.046200 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-dispersionconf\") pod \"a2c3e8e6-9165-4104-b7d2-3940c4801bb3\" (UID: \"a2c3e8e6-9165-4104-b7d2-3940c4801bb3\") " Jan 22 07:18:32 crc kubenswrapper[4933]: I0122 07:18:32.046242 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-ring-data-devices\") pod \"a2c3e8e6-9165-4104-b7d2-3940c4801bb3\" 
(UID: \"a2c3e8e6-9165-4104-b7d2-3940c4801bb3\") " Jan 22 07:18:32 crc kubenswrapper[4933]: I0122 07:18:32.047387 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "a2c3e8e6-9165-4104-b7d2-3940c4801bb3" (UID: "a2c3e8e6-9165-4104-b7d2-3940c4801bb3"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:18:32 crc kubenswrapper[4933]: I0122 07:18:32.047443 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "a2c3e8e6-9165-4104-b7d2-3940c4801bb3" (UID: "a2c3e8e6-9165-4104-b7d2-3940c4801bb3"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:18:32 crc kubenswrapper[4933]: I0122 07:18:32.063892 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-kube-api-access-b66h4" (OuterVolumeSpecName: "kube-api-access-b66h4") pod "a2c3e8e6-9165-4104-b7d2-3940c4801bb3" (UID: "a2c3e8e6-9165-4104-b7d2-3940c4801bb3"). InnerVolumeSpecName "kube-api-access-b66h4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:18:32 crc kubenswrapper[4933]: I0122 07:18:32.068140 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "a2c3e8e6-9165-4104-b7d2-3940c4801bb3" (UID: "a2c3e8e6-9165-4104-b7d2-3940c4801bb3"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:18:32 crc kubenswrapper[4933]: I0122 07:18:32.071531 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-scripts" (OuterVolumeSpecName: "scripts") pod "a2c3e8e6-9165-4104-b7d2-3940c4801bb3" (UID: "a2c3e8e6-9165-4104-b7d2-3940c4801bb3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:18:32 crc kubenswrapper[4933]: I0122 07:18:32.072094 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a2c3e8e6-9165-4104-b7d2-3940c4801bb3" (UID: "a2c3e8e6-9165-4104-b7d2-3940c4801bb3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:18:32 crc kubenswrapper[4933]: I0122 07:18:32.074399 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "a2c3e8e6-9165-4104-b7d2-3940c4801bb3" (UID: "a2c3e8e6-9165-4104-b7d2-3940c4801bb3"). InnerVolumeSpecName "swiftconf". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:18:32 crc kubenswrapper[4933]: I0122 07:18:32.147950 4933 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-ring-data-devices\") on node \"crc\" DevicePath \"\"" Jan 22 07:18:32 crc kubenswrapper[4933]: I0122 07:18:32.147986 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b66h4\" (UniqueName: \"kubernetes.io/projected/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-kube-api-access-b66h4\") on node \"crc\" DevicePath \"\"" Jan 22 07:18:32 crc kubenswrapper[4933]: I0122 07:18:32.147999 4933 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 22 07:18:32 crc kubenswrapper[4933]: I0122 07:18:32.148009 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:18:32 crc kubenswrapper[4933]: I0122 07:18:32.148018 4933 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-swiftconf\") on node \"crc\" DevicePath \"\"" Jan 22 07:18:32 crc kubenswrapper[4933]: I0122 07:18:32.148026 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:18:32 crc kubenswrapper[4933]: I0122 07:18:32.148034 4933 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/a2c3e8e6-9165-4104-b7d2-3940c4801bb3-dispersionconf\") on node \"crc\" DevicePath \"\"" Jan 22 07:18:32 crc kubenswrapper[4933]: I0122 07:18:32.585555 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-gtw5n" Jan 22 07:18:32 crc kubenswrapper[4933]: I0122 07:18:32.585555 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-gtw5n" event={"ID":"a2c3e8e6-9165-4104-b7d2-3940c4801bb3","Type":"ContainerDied","Data":"f4fc308a31a2c3418740cc65f16c53fec4e5782e58e6484e366a947e0344086b"} Jan 22 07:18:32 crc kubenswrapper[4933]: I0122 07:18:32.585948 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f4fc308a31a2c3418740cc65f16c53fec4e5782e58e6484e366a947e0344086b" Jan 22 07:18:32 crc kubenswrapper[4933]: I0122 07:18:32.587228 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-65dd78db8b-67tnw" event={"ID":"37f95090-a891-4563-adcf-7aa34d7ff34c","Type":"ContainerStarted","Data":"bd6c82c824bce9742e63e813b6bc767fb737f8d98a2aed936d7e55f9a9dc5a36"} Jan 22 07:18:32 crc kubenswrapper[4933]: I0122 07:18:32.587256 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-65dd78db8b-67tnw" event={"ID":"37f95090-a891-4563-adcf-7aa34d7ff34c","Type":"ContainerStarted","Data":"08fd5d435c78be109ad03deb60d8e6739820d90ac9c8e81b2d9576b7c006c2b0"} Jan 22 07:18:32 crc kubenswrapper[4933]: I0122 07:18:32.587266 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-65dd78db8b-67tnw" event={"ID":"37f95090-a891-4563-adcf-7aa34d7ff34c","Type":"ContainerStarted","Data":"49106d0e52a32881781e7bd3077bd548c063cbbfe85577d50f7f2337718c93d6"} Jan 22 07:18:32 crc kubenswrapper[4933]: I0122 07:18:32.587409 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-65dd78db8b-67tnw" Jan 22 07:18:32 crc kubenswrapper[4933]: I0122 07:18:32.611973 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-65dd78db8b-67tnw" podStartSLOduration=2.611955012 podStartE2EDuration="2.611955012s" podCreationTimestamp="2026-01-22 07:18:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:18:32.609500912 +0000 UTC m=+5560.446626275" watchObservedRunningTime="2026-01-22 07:18:32.611955012 +0000 UTC m=+5560.449080365" Jan 22 07:18:33 crc kubenswrapper[4933]: I0122 07:18:33.252611 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-689f4477cd-5248v" Jan 22 07:18:33 crc kubenswrapper[4933]: I0122 07:18:33.595838 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-65dd78db8b-67tnw" Jan 22 07:18:35 crc kubenswrapper[4933]: I0122 07:18:35.522340 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-8486788887-z5njj" Jan 22 07:18:35 crc kubenswrapper[4933]: I0122 07:18:35.594659 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6847fd6c7c-m4jnx"] Jan 22 07:18:35 crc kubenswrapper[4933]: I0122 07:18:35.595227 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6847fd6c7c-m4jnx" podUID="34e8f6f8-ea53-4956-9820-41ffb518d03d" containerName="dnsmasq-dns" containerID="cri-o://4c377764b65fc991ea343e30b1110eb624a78bf9195fcddbc4472f3309c3edca" gracePeriod=10 Jan 22 07:18:36 crc kubenswrapper[4933]: I0122 07:18:36.051443 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6847fd6c7c-m4jnx" Jan 22 07:18:36 crc kubenswrapper[4933]: I0122 07:18:36.221647 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rh2qt\" (UniqueName: \"kubernetes.io/projected/34e8f6f8-ea53-4956-9820-41ffb518d03d-kube-api-access-rh2qt\") pod \"34e8f6f8-ea53-4956-9820-41ffb518d03d\" (UID: \"34e8f6f8-ea53-4956-9820-41ffb518d03d\") " Jan 22 07:18:36 crc kubenswrapper[4933]: I0122 07:18:36.221738 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/34e8f6f8-ea53-4956-9820-41ffb518d03d-dns-svc\") pod \"34e8f6f8-ea53-4956-9820-41ffb518d03d\" (UID: \"34e8f6f8-ea53-4956-9820-41ffb518d03d\") " Jan 22 07:18:36 crc kubenswrapper[4933]: I0122 07:18:36.221786 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/34e8f6f8-ea53-4956-9820-41ffb518d03d-ovsdbserver-sb\") pod \"34e8f6f8-ea53-4956-9820-41ffb518d03d\" (UID: \"34e8f6f8-ea53-4956-9820-41ffb518d03d\") " Jan 22 07:18:36 crc kubenswrapper[4933]: I0122 07:18:36.221850 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/34e8f6f8-ea53-4956-9820-41ffb518d03d-config\") pod \"34e8f6f8-ea53-4956-9820-41ffb518d03d\" (UID: \"34e8f6f8-ea53-4956-9820-41ffb518d03d\") " Jan 22 07:18:36 crc kubenswrapper[4933]: I0122 07:18:36.221882 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/34e8f6f8-ea53-4956-9820-41ffb518d03d-ovsdbserver-nb\") pod \"34e8f6f8-ea53-4956-9820-41ffb518d03d\" (UID: \"34e8f6f8-ea53-4956-9820-41ffb518d03d\") " Jan 22 07:18:36 crc kubenswrapper[4933]: I0122 07:18:36.238619 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34e8f6f8-ea53-4956-9820-41ffb518d03d-kube-api-access-rh2qt" (OuterVolumeSpecName: "kube-api-access-rh2qt") pod "34e8f6f8-ea53-4956-9820-41ffb518d03d" (UID: "34e8f6f8-ea53-4956-9820-41ffb518d03d"). InnerVolumeSpecName "kube-api-access-rh2qt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:18:36 crc kubenswrapper[4933]: I0122 07:18:36.271857 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/34e8f6f8-ea53-4956-9820-41ffb518d03d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "34e8f6f8-ea53-4956-9820-41ffb518d03d" (UID: "34e8f6f8-ea53-4956-9820-41ffb518d03d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:18:36 crc kubenswrapper[4933]: I0122 07:18:36.278934 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/34e8f6f8-ea53-4956-9820-41ffb518d03d-config" (OuterVolumeSpecName: "config") pod "34e8f6f8-ea53-4956-9820-41ffb518d03d" (UID: "34e8f6f8-ea53-4956-9820-41ffb518d03d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:18:36 crc kubenswrapper[4933]: I0122 07:18:36.281108 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/34e8f6f8-ea53-4956-9820-41ffb518d03d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "34e8f6f8-ea53-4956-9820-41ffb518d03d" (UID: "34e8f6f8-ea53-4956-9820-41ffb518d03d"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:18:36 crc kubenswrapper[4933]: I0122 07:18:36.289289 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/34e8f6f8-ea53-4956-9820-41ffb518d03d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "34e8f6f8-ea53-4956-9820-41ffb518d03d" (UID: "34e8f6f8-ea53-4956-9820-41ffb518d03d"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:18:36 crc kubenswrapper[4933]: I0122 07:18:36.324144 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rh2qt\" (UniqueName: \"kubernetes.io/projected/34e8f6f8-ea53-4956-9820-41ffb518d03d-kube-api-access-rh2qt\") on node \"crc\" DevicePath \"\"" Jan 22 07:18:36 crc kubenswrapper[4933]: I0122 07:18:36.324186 4933 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/34e8f6f8-ea53-4956-9820-41ffb518d03d-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 07:18:36 crc kubenswrapper[4933]: I0122 07:18:36.324201 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/34e8f6f8-ea53-4956-9820-41ffb518d03d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 07:18:36 crc kubenswrapper[4933]: I0122 07:18:36.324213 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/34e8f6f8-ea53-4956-9820-41ffb518d03d-config\") on node \"crc\" DevicePath \"\"" Jan 22 07:18:36 crc kubenswrapper[4933]: I0122 07:18:36.324226 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/34e8f6f8-ea53-4956-9820-41ffb518d03d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 07:18:36 crc kubenswrapper[4933]: I0122 07:18:36.626484 4933 generic.go:334] "Generic (PLEG): container finished" podID="34e8f6f8-ea53-4956-9820-41ffb518d03d" containerID="4c377764b65fc991ea343e30b1110eb624a78bf9195fcddbc4472f3309c3edca" exitCode=0 Jan 22 07:18:36 crc kubenswrapper[4933]: I0122 07:18:36.626580 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6847fd6c7c-m4jnx" event={"ID":"34e8f6f8-ea53-4956-9820-41ffb518d03d","Type":"ContainerDied","Data":"4c377764b65fc991ea343e30b1110eb624a78bf9195fcddbc4472f3309c3edca"} Jan 22 07:18:36 crc kubenswrapper[4933]: I0122 07:18:36.626595 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6847fd6c7c-m4jnx" Jan 22 07:18:36 crc kubenswrapper[4933]: I0122 07:18:36.626633 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6847fd6c7c-m4jnx" event={"ID":"34e8f6f8-ea53-4956-9820-41ffb518d03d","Type":"ContainerDied","Data":"778e6403c225e84737746ca08f371bf229bbacc03792b23d53a708facb54f44b"} Jan 22 07:18:36 crc kubenswrapper[4933]: I0122 07:18:36.626698 4933 scope.go:117] "RemoveContainer" containerID="4c377764b65fc991ea343e30b1110eb624a78bf9195fcddbc4472f3309c3edca" Jan 22 07:18:36 crc kubenswrapper[4933]: I0122 07:18:36.654618 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6847fd6c7c-m4jnx"] Jan 22 07:18:36 crc kubenswrapper[4933]: I0122 07:18:36.661225 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6847fd6c7c-m4jnx"] Jan 22 07:18:36 crc kubenswrapper[4933]: I0122 07:18:36.664258 4933 scope.go:117] "RemoveContainer" containerID="8103d2603b2a8553d944224164a70fcc82b00b3c60a19a9cb1fdff48b0c5f86b" Jan 22 07:18:36 crc kubenswrapper[4933]: I0122 07:18:36.691360 4933 scope.go:117] "RemoveContainer" containerID="4c377764b65fc991ea343e30b1110eb624a78bf9195fcddbc4472f3309c3edca" Jan 22 07:18:36 crc kubenswrapper[4933]: E0122 07:18:36.691835 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c377764b65fc991ea343e30b1110eb624a78bf9195fcddbc4472f3309c3edca\": container with ID starting with 4c377764b65fc991ea343e30b1110eb624a78bf9195fcddbc4472f3309c3edca not found: ID does not exist" containerID="4c377764b65fc991ea343e30b1110eb624a78bf9195fcddbc4472f3309c3edca" Jan 22 07:18:36 crc kubenswrapper[4933]: I0122 07:18:36.691869 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c377764b65fc991ea343e30b1110eb624a78bf9195fcddbc4472f3309c3edca"} err="failed to get container status \"4c377764b65fc991ea343e30b1110eb624a78bf9195fcddbc4472f3309c3edca\": rpc error: code = NotFound desc = could not find container \"4c377764b65fc991ea343e30b1110eb624a78bf9195fcddbc4472f3309c3edca\": container with ID starting with 4c377764b65fc991ea343e30b1110eb624a78bf9195fcddbc4472f3309c3edca not found: ID does not exist" Jan 22 07:18:36 crc kubenswrapper[4933]: I0122 07:18:36.691891 4933 scope.go:117] "RemoveContainer" containerID="8103d2603b2a8553d944224164a70fcc82b00b3c60a19a9cb1fdff48b0c5f86b" Jan 22 07:18:36 crc kubenswrapper[4933]: E0122 07:18:36.692216 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8103d2603b2a8553d944224164a70fcc82b00b3c60a19a9cb1fdff48b0c5f86b\": container with ID starting with 8103d2603b2a8553d944224164a70fcc82b00b3c60a19a9cb1fdff48b0c5f86b not found: ID does not exist" containerID="8103d2603b2a8553d944224164a70fcc82b00b3c60a19a9cb1fdff48b0c5f86b" Jan 22 07:18:36 crc kubenswrapper[4933]: I0122 07:18:36.692257 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8103d2603b2a8553d944224164a70fcc82b00b3c60a19a9cb1fdff48b0c5f86b"} err="failed to get container status \"8103d2603b2a8553d944224164a70fcc82b00b3c60a19a9cb1fdff48b0c5f86b\": rpc error: code = NotFound desc = could not find container \"8103d2603b2a8553d944224164a70fcc82b00b3c60a19a9cb1fdff48b0c5f86b\": container with ID starting with 8103d2603b2a8553d944224164a70fcc82b00b3c60a19a9cb1fdff48b0c5f86b not found: ID does not exist" Jan 22 
07:18:38 crc kubenswrapper[4933]: I0122 07:18:38.282610 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-689f4477cd-5248v" Jan 22 07:18:38 crc kubenswrapper[4933]: I0122 07:18:38.499744 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="34e8f6f8-ea53-4956-9820-41ffb518d03d" path="/var/lib/kubelet/pods/34e8f6f8-ea53-4956-9820-41ffb518d03d/volumes" Jan 22 07:18:40 crc kubenswrapper[4933]: I0122 07:18:40.955920 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-65dd78db8b-67tnw" Jan 22 07:18:40 crc kubenswrapper[4933]: I0122 07:18:40.959734 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-65dd78db8b-67tnw" Jan 22 07:18:41 crc kubenswrapper[4933]: I0122 07:18:41.045037 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-689f4477cd-5248v"] Jan 22 07:18:41 crc kubenswrapper[4933]: I0122 07:18:41.048056 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-689f4477cd-5248v" podUID="1ff49dac-4a23-4973-9473-dbcb0b6dff4f" containerName="proxy-httpd" containerID="cri-o://6fc4d3bb6128ce57c3ec56aafb111be09f0eb92b0aa49686abbb27229cf831dc" gracePeriod=30 Jan 22 07:18:41 crc kubenswrapper[4933]: I0122 07:18:41.048517 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-689f4477cd-5248v" podUID="1ff49dac-4a23-4973-9473-dbcb0b6dff4f" containerName="proxy-server" containerID="cri-o://4bc2420c4976e77e919852914fe6c63d2fbfed869b1266afd0723ce18e5f18a9" gracePeriod=30 Jan 22 07:18:41 crc kubenswrapper[4933]: I0122 07:18:41.676410 4933 generic.go:334] "Generic (PLEG): container finished" podID="1ff49dac-4a23-4973-9473-dbcb0b6dff4f" containerID="6fc4d3bb6128ce57c3ec56aafb111be09f0eb92b0aa49686abbb27229cf831dc" exitCode=0 Jan 22 07:18:41 crc kubenswrapper[4933]: I0122 07:18:41.677767 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-689f4477cd-5248v" event={"ID":"1ff49dac-4a23-4973-9473-dbcb0b6dff4f","Type":"ContainerDied","Data":"6fc4d3bb6128ce57c3ec56aafb111be09f0eb92b0aa49686abbb27229cf831dc"} Jan 22 07:18:42 crc kubenswrapper[4933]: I0122 07:18:42.375807 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-689f4477cd-5248v" Jan 22 07:18:42 crc kubenswrapper[4933]: I0122 07:18:42.531225 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1ff49dac-4a23-4973-9473-dbcb0b6dff4f-run-httpd\") pod \"1ff49dac-4a23-4973-9473-dbcb0b6dff4f\" (UID: \"1ff49dac-4a23-4973-9473-dbcb0b6dff4f\") " Jan 22 07:18:42 crc kubenswrapper[4933]: I0122 07:18:42.531263 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ff49dac-4a23-4973-9473-dbcb0b6dff4f-config-data\") pod \"1ff49dac-4a23-4973-9473-dbcb0b6dff4f\" (UID: \"1ff49dac-4a23-4973-9473-dbcb0b6dff4f\") " Jan 22 07:18:42 crc kubenswrapper[4933]: I0122 07:18:42.531626 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1ff49dac-4a23-4973-9473-dbcb0b6dff4f-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "1ff49dac-4a23-4973-9473-dbcb0b6dff4f" (UID: "1ff49dac-4a23-4973-9473-dbcb0b6dff4f"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:18:42 crc kubenswrapper[4933]: I0122 07:18:42.532186 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1ff49dac-4a23-4973-9473-dbcb0b6dff4f-log-httpd\") pod \"1ff49dac-4a23-4973-9473-dbcb0b6dff4f\" (UID: \"1ff49dac-4a23-4973-9473-dbcb0b6dff4f\") " Jan 22 07:18:42 crc kubenswrapper[4933]: I0122 07:18:42.532246 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ff49dac-4a23-4973-9473-dbcb0b6dff4f-combined-ca-bundle\") pod \"1ff49dac-4a23-4973-9473-dbcb0b6dff4f\" (UID: \"1ff49dac-4a23-4973-9473-dbcb0b6dff4f\") " Jan 22 07:18:42 crc kubenswrapper[4933]: I0122 07:18:42.532275 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5fshp\" (UniqueName: \"kubernetes.io/projected/1ff49dac-4a23-4973-9473-dbcb0b6dff4f-kube-api-access-5fshp\") pod \"1ff49dac-4a23-4973-9473-dbcb0b6dff4f\" (UID: \"1ff49dac-4a23-4973-9473-dbcb0b6dff4f\") " Jan 22 07:18:42 crc kubenswrapper[4933]: I0122 07:18:42.532322 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1ff49dac-4a23-4973-9473-dbcb0b6dff4f-etc-swift\") pod \"1ff49dac-4a23-4973-9473-dbcb0b6dff4f\" (UID: \"1ff49dac-4a23-4973-9473-dbcb0b6dff4f\") " Jan 22 07:18:42 crc kubenswrapper[4933]: I0122 07:18:42.532625 4933 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1ff49dac-4a23-4973-9473-dbcb0b6dff4f-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 07:18:42 crc kubenswrapper[4933]: I0122 07:18:42.532680 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1ff49dac-4a23-4973-9473-dbcb0b6dff4f-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "1ff49dac-4a23-4973-9473-dbcb0b6dff4f" (UID: "1ff49dac-4a23-4973-9473-dbcb0b6dff4f"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:18:42 crc kubenswrapper[4933]: I0122 07:18:42.536693 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ff49dac-4a23-4973-9473-dbcb0b6dff4f-kube-api-access-5fshp" (OuterVolumeSpecName: "kube-api-access-5fshp") pod "1ff49dac-4a23-4973-9473-dbcb0b6dff4f" (UID: "1ff49dac-4a23-4973-9473-dbcb0b6dff4f"). InnerVolumeSpecName "kube-api-access-5fshp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:18:42 crc kubenswrapper[4933]: I0122 07:18:42.536844 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ff49dac-4a23-4973-9473-dbcb0b6dff4f-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "1ff49dac-4a23-4973-9473-dbcb0b6dff4f" (UID: "1ff49dac-4a23-4973-9473-dbcb0b6dff4f"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:18:42 crc kubenswrapper[4933]: I0122 07:18:42.588154 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ff49dac-4a23-4973-9473-dbcb0b6dff4f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1ff49dac-4a23-4973-9473-dbcb0b6dff4f" (UID: "1ff49dac-4a23-4973-9473-dbcb0b6dff4f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:18:42 crc kubenswrapper[4933]: I0122 07:18:42.593936 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ff49dac-4a23-4973-9473-dbcb0b6dff4f-config-data" (OuterVolumeSpecName: "config-data") pod "1ff49dac-4a23-4973-9473-dbcb0b6dff4f" (UID: "1ff49dac-4a23-4973-9473-dbcb0b6dff4f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:18:42 crc kubenswrapper[4933]: I0122 07:18:42.633696 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ff49dac-4a23-4973-9473-dbcb0b6dff4f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:18:42 crc kubenswrapper[4933]: I0122 07:18:42.633728 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5fshp\" (UniqueName: \"kubernetes.io/projected/1ff49dac-4a23-4973-9473-dbcb0b6dff4f-kube-api-access-5fshp\") on node \"crc\" DevicePath \"\"" Jan 22 07:18:42 crc kubenswrapper[4933]: I0122 07:18:42.633739 4933 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1ff49dac-4a23-4973-9473-dbcb0b6dff4f-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 22 07:18:42 crc kubenswrapper[4933]: I0122 07:18:42.633748 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ff49dac-4a23-4973-9473-dbcb0b6dff4f-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 07:18:42 crc kubenswrapper[4933]: I0122 07:18:42.633756 4933 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1ff49dac-4a23-4973-9473-dbcb0b6dff4f-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 07:18:42 crc kubenswrapper[4933]: I0122 07:18:42.684697 4933 generic.go:334] "Generic (PLEG): container finished" podID="1ff49dac-4a23-4973-9473-dbcb0b6dff4f" containerID="4bc2420c4976e77e919852914fe6c63d2fbfed869b1266afd0723ce18e5f18a9" exitCode=0 Jan 22 07:18:42 crc kubenswrapper[4933]: I0122 07:18:42.684750 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-689f4477cd-5248v" event={"ID":"1ff49dac-4a23-4973-9473-dbcb0b6dff4f","Type":"ContainerDied","Data":"4bc2420c4976e77e919852914fe6c63d2fbfed869b1266afd0723ce18e5f18a9"} Jan 22 07:18:42 crc kubenswrapper[4933]: I0122 07:18:42.684782 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-689f4477cd-5248v" event={"ID":"1ff49dac-4a23-4973-9473-dbcb0b6dff4f","Type":"ContainerDied","Data":"308726674e44c75534ade929bf14a505a4b3d758c999c25ae6a66bb61adc997f"} Jan 22 07:18:42 crc kubenswrapper[4933]: I0122 07:18:42.684801 4933 scope.go:117] "RemoveContainer" containerID="4bc2420c4976e77e919852914fe6c63d2fbfed869b1266afd0723ce18e5f18a9" Jan 22 07:18:42 crc kubenswrapper[4933]: I0122 07:18:42.684951 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-689f4477cd-5248v" Jan 22 07:18:42 crc kubenswrapper[4933]: I0122 07:18:42.707346 4933 scope.go:117] "RemoveContainer" containerID="6fc4d3bb6128ce57c3ec56aafb111be09f0eb92b0aa49686abbb27229cf831dc" Jan 22 07:18:42 crc kubenswrapper[4933]: I0122 07:18:42.718754 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-689f4477cd-5248v"] Jan 22 07:18:42 crc kubenswrapper[4933]: I0122 07:18:42.725713 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-proxy-689f4477cd-5248v"] Jan 22 07:18:42 crc kubenswrapper[4933]: I0122 07:18:42.730234 4933 scope.go:117] "RemoveContainer" containerID="4bc2420c4976e77e919852914fe6c63d2fbfed869b1266afd0723ce18e5f18a9" Jan 22 07:18:42 crc kubenswrapper[4933]: E0122 07:18:42.731499 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4bc2420c4976e77e919852914fe6c63d2fbfed869b1266afd0723ce18e5f18a9\": container with ID starting with 4bc2420c4976e77e919852914fe6c63d2fbfed869b1266afd0723ce18e5f18a9 not found: ID does not exist" containerID="4bc2420c4976e77e919852914fe6c63d2fbfed869b1266afd0723ce18e5f18a9" Jan 22 07:18:42 crc kubenswrapper[4933]: I0122 07:18:42.731540 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4bc2420c4976e77e919852914fe6c63d2fbfed869b1266afd0723ce18e5f18a9"} err="failed to get container status \"4bc2420c4976e77e919852914fe6c63d2fbfed869b1266afd0723ce18e5f18a9\": rpc error: code = NotFound desc = could not find container \"4bc2420c4976e77e919852914fe6c63d2fbfed869b1266afd0723ce18e5f18a9\": container with ID starting with 4bc2420c4976e77e919852914fe6c63d2fbfed869b1266afd0723ce18e5f18a9 not found: ID does not exist" Jan 22 07:18:42 crc kubenswrapper[4933]: I0122 07:18:42.731567 4933 scope.go:117] "RemoveContainer" containerID="6fc4d3bb6128ce57c3ec56aafb111be09f0eb92b0aa49686abbb27229cf831dc" Jan 22 07:18:42 crc kubenswrapper[4933]: E0122 07:18:42.731897 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6fc4d3bb6128ce57c3ec56aafb111be09f0eb92b0aa49686abbb27229cf831dc\": container with ID starting with 6fc4d3bb6128ce57c3ec56aafb111be09f0eb92b0aa49686abbb27229cf831dc not found: ID does not exist" containerID="6fc4d3bb6128ce57c3ec56aafb111be09f0eb92b0aa49686abbb27229cf831dc" Jan 22 07:18:42 crc kubenswrapper[4933]: I0122 07:18:42.731942 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6fc4d3bb6128ce57c3ec56aafb111be09f0eb92b0aa49686abbb27229cf831dc"} err="failed to get container status \"6fc4d3bb6128ce57c3ec56aafb111be09f0eb92b0aa49686abbb27229cf831dc\": rpc error: code = NotFound desc = could not find container \"6fc4d3bb6128ce57c3ec56aafb111be09f0eb92b0aa49686abbb27229cf831dc\": container with ID starting with 6fc4d3bb6128ce57c3ec56aafb111be09f0eb92b0aa49686abbb27229cf831dc not found: ID does not exist" Jan 22 07:18:44 crc kubenswrapper[4933]: I0122 07:18:44.490525 4933 scope.go:117] "RemoveContainer" containerID="8d0ff8f158acf8d88510b1fddaf0b35b7a3fb6e84561545d6c60a78c2e36fc92" Jan 22 07:18:44 crc kubenswrapper[4933]: E0122 07:18:44.491034 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:18:44 crc kubenswrapper[4933]: I0122 07:18:44.509802 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1ff49dac-4a23-4973-9473-dbcb0b6dff4f" path="/var/lib/kubelet/pods/1ff49dac-4a23-4973-9473-dbcb0b6dff4f/volumes" Jan 22 07:18:59 crc kubenswrapper[4933]: I0122 07:18:59.491200 4933 scope.go:117] "RemoveContainer" containerID="8d0ff8f158acf8d88510b1fddaf0b35b7a3fb6e84561545d6c60a78c2e36fc92" Jan 22 07:18:59 crc kubenswrapper[4933]: E0122 07:18:59.491963 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:19:04 crc kubenswrapper[4933]: I0122 07:19:04.466183 4933 scope.go:117] "RemoveContainer" containerID="45f34f6b3c271c0eb074fa62ad30f465b0f43458294c117d97e75e350c4dbdb3" Jan 22 07:19:04 crc kubenswrapper[4933]: I0122 07:19:04.505673 4933 scope.go:117] "RemoveContainer" containerID="ec113587b5bfed50751f8fa7295d74522bfa44dcde3e2c5a134daf030b7d26d3" Jan 22 07:19:10 crc kubenswrapper[4933]: I0122 07:19:10.490891 4933 scope.go:117] "RemoveContainer" containerID="8d0ff8f158acf8d88510b1fddaf0b35b7a3fb6e84561545d6c60a78c2e36fc92" Jan 22 07:19:10 crc kubenswrapper[4933]: E0122 07:19:10.491605 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:19:13 crc kubenswrapper[4933]: I0122 07:19:13.443003 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-lgf6v"] Jan 22 07:19:13 crc kubenswrapper[4933]: E0122 07:19:13.443679 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ff49dac-4a23-4973-9473-dbcb0b6dff4f" containerName="proxy-httpd" Jan 22 07:19:13 crc kubenswrapper[4933]: I0122 07:19:13.443695 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ff49dac-4a23-4973-9473-dbcb0b6dff4f" containerName="proxy-httpd" Jan 22 07:19:13 crc kubenswrapper[4933]: E0122 07:19:13.443712 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34e8f6f8-ea53-4956-9820-41ffb518d03d" containerName="dnsmasq-dns" Jan 22 07:19:13 crc kubenswrapper[4933]: I0122 07:19:13.443721 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="34e8f6f8-ea53-4956-9820-41ffb518d03d" containerName="dnsmasq-dns" Jan 22 07:19:13 crc kubenswrapper[4933]: E0122 07:19:13.443735 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ff49dac-4a23-4973-9473-dbcb0b6dff4f" containerName="proxy-server" Jan 22 07:19:13 crc kubenswrapper[4933]: I0122 07:19:13.443743 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ff49dac-4a23-4973-9473-dbcb0b6dff4f" containerName="proxy-server" Jan 22 07:19:13 crc kubenswrapper[4933]: E0122 
07:19:13.443762 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2c3e8e6-9165-4104-b7d2-3940c4801bb3" containerName="swift-ring-rebalance" Jan 22 07:19:13 crc kubenswrapper[4933]: I0122 07:19:13.443770 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2c3e8e6-9165-4104-b7d2-3940c4801bb3" containerName="swift-ring-rebalance" Jan 22 07:19:13 crc kubenswrapper[4933]: E0122 07:19:13.443802 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34e8f6f8-ea53-4956-9820-41ffb518d03d" containerName="init" Jan 22 07:19:13 crc kubenswrapper[4933]: I0122 07:19:13.443811 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="34e8f6f8-ea53-4956-9820-41ffb518d03d" containerName="init" Jan 22 07:19:13 crc kubenswrapper[4933]: I0122 07:19:13.444015 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2c3e8e6-9165-4104-b7d2-3940c4801bb3" containerName="swift-ring-rebalance" Jan 22 07:19:13 crc kubenswrapper[4933]: I0122 07:19:13.444037 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ff49dac-4a23-4973-9473-dbcb0b6dff4f" containerName="proxy-httpd" Jan 22 07:19:13 crc kubenswrapper[4933]: I0122 07:19:13.444054 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="34e8f6f8-ea53-4956-9820-41ffb518d03d" containerName="dnsmasq-dns" Jan 22 07:19:13 crc kubenswrapper[4933]: I0122 07:19:13.444095 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ff49dac-4a23-4973-9473-dbcb0b6dff4f" containerName="proxy-server" Jan 22 07:19:13 crc kubenswrapper[4933]: I0122 07:19:13.444764 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-lgf6v" Jan 22 07:19:13 crc kubenswrapper[4933]: I0122 07:19:13.465030 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-lgf6v"] Jan 22 07:19:13 crc kubenswrapper[4933]: I0122 07:19:13.520006 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/265ce84e-0c9e-4e32-9eaa-8821b19bc29d-operator-scripts\") pod \"cinder-db-create-lgf6v\" (UID: \"265ce84e-0c9e-4e32-9eaa-8821b19bc29d\") " pod="openstack/cinder-db-create-lgf6v" Jan 22 07:19:13 crc kubenswrapper[4933]: I0122 07:19:13.520310 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4p25j\" (UniqueName: \"kubernetes.io/projected/265ce84e-0c9e-4e32-9eaa-8821b19bc29d-kube-api-access-4p25j\") pod \"cinder-db-create-lgf6v\" (UID: \"265ce84e-0c9e-4e32-9eaa-8821b19bc29d\") " pod="openstack/cinder-db-create-lgf6v" Jan 22 07:19:13 crc kubenswrapper[4933]: I0122 07:19:13.541106 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-5e60-account-create-update-jwn9r"] Jan 22 07:19:13 crc kubenswrapper[4933]: I0122 07:19:13.542618 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-5e60-account-create-update-jwn9r" Jan 22 07:19:13 crc kubenswrapper[4933]: I0122 07:19:13.545885 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Jan 22 07:19:13 crc kubenswrapper[4933]: I0122 07:19:13.550538 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-5e60-account-create-update-jwn9r"] Jan 22 07:19:13 crc kubenswrapper[4933]: I0122 07:19:13.622210 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4p25j\" (UniqueName: \"kubernetes.io/projected/265ce84e-0c9e-4e32-9eaa-8821b19bc29d-kube-api-access-4p25j\") pod \"cinder-db-create-lgf6v\" (UID: \"265ce84e-0c9e-4e32-9eaa-8821b19bc29d\") " pod="openstack/cinder-db-create-lgf6v" Jan 22 07:19:13 crc kubenswrapper[4933]: I0122 07:19:13.622282 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6ns6h\" (UniqueName: \"kubernetes.io/projected/79198723-552a-4002-8ac2-f66a008a26ae-kube-api-access-6ns6h\") pod \"cinder-5e60-account-create-update-jwn9r\" (UID: \"79198723-552a-4002-8ac2-f66a008a26ae\") " pod="openstack/cinder-5e60-account-create-update-jwn9r" Jan 22 07:19:13 crc kubenswrapper[4933]: I0122 07:19:13.622370 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/265ce84e-0c9e-4e32-9eaa-8821b19bc29d-operator-scripts\") pod \"cinder-db-create-lgf6v\" (UID: \"265ce84e-0c9e-4e32-9eaa-8821b19bc29d\") " pod="openstack/cinder-db-create-lgf6v" Jan 22 07:19:13 crc kubenswrapper[4933]: I0122 07:19:13.622455 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/79198723-552a-4002-8ac2-f66a008a26ae-operator-scripts\") pod \"cinder-5e60-account-create-update-jwn9r\" (UID: \"79198723-552a-4002-8ac2-f66a008a26ae\") " pod="openstack/cinder-5e60-account-create-update-jwn9r" Jan 22 07:19:13 crc kubenswrapper[4933]: I0122 07:19:13.623103 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/265ce84e-0c9e-4e32-9eaa-8821b19bc29d-operator-scripts\") pod \"cinder-db-create-lgf6v\" (UID: \"265ce84e-0c9e-4e32-9eaa-8821b19bc29d\") " pod="openstack/cinder-db-create-lgf6v" Jan 22 07:19:13 crc kubenswrapper[4933]: I0122 07:19:13.646097 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4p25j\" (UniqueName: \"kubernetes.io/projected/265ce84e-0c9e-4e32-9eaa-8821b19bc29d-kube-api-access-4p25j\") pod \"cinder-db-create-lgf6v\" (UID: \"265ce84e-0c9e-4e32-9eaa-8821b19bc29d\") " pod="openstack/cinder-db-create-lgf6v" Jan 22 07:19:13 crc kubenswrapper[4933]: I0122 07:19:13.723975 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/79198723-552a-4002-8ac2-f66a008a26ae-operator-scripts\") pod \"cinder-5e60-account-create-update-jwn9r\" (UID: \"79198723-552a-4002-8ac2-f66a008a26ae\") " pod="openstack/cinder-5e60-account-create-update-jwn9r" Jan 22 07:19:13 crc kubenswrapper[4933]: I0122 07:19:13.724439 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6ns6h\" (UniqueName: \"kubernetes.io/projected/79198723-552a-4002-8ac2-f66a008a26ae-kube-api-access-6ns6h\") pod 
\"cinder-5e60-account-create-update-jwn9r\" (UID: \"79198723-552a-4002-8ac2-f66a008a26ae\") " pod="openstack/cinder-5e60-account-create-update-jwn9r" Jan 22 07:19:13 crc kubenswrapper[4933]: I0122 07:19:13.724785 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/79198723-552a-4002-8ac2-f66a008a26ae-operator-scripts\") pod \"cinder-5e60-account-create-update-jwn9r\" (UID: \"79198723-552a-4002-8ac2-f66a008a26ae\") " pod="openstack/cinder-5e60-account-create-update-jwn9r" Jan 22 07:19:13 crc kubenswrapper[4933]: I0122 07:19:13.741970 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6ns6h\" (UniqueName: \"kubernetes.io/projected/79198723-552a-4002-8ac2-f66a008a26ae-kube-api-access-6ns6h\") pod \"cinder-5e60-account-create-update-jwn9r\" (UID: \"79198723-552a-4002-8ac2-f66a008a26ae\") " pod="openstack/cinder-5e60-account-create-update-jwn9r" Jan 22 07:19:13 crc kubenswrapper[4933]: I0122 07:19:13.773226 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-lgf6v" Jan 22 07:19:13 crc kubenswrapper[4933]: I0122 07:19:13.866047 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-5e60-account-create-update-jwn9r" Jan 22 07:19:14 crc kubenswrapper[4933]: I0122 07:19:14.180917 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-5e60-account-create-update-jwn9r"] Jan 22 07:19:14 crc kubenswrapper[4933]: I0122 07:19:14.300364 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-lgf6v"] Jan 22 07:19:14 crc kubenswrapper[4933]: W0122 07:19:14.302984 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod265ce84e_0c9e_4e32_9eaa_8821b19bc29d.slice/crio-7df6e1a38fca64d04e47f70e7cbf71b73c4f3950b4f8a703324a7171f8c12bd3 WatchSource:0}: Error finding container 7df6e1a38fca64d04e47f70e7cbf71b73c4f3950b4f8a703324a7171f8c12bd3: Status 404 returned error can't find the container with id 7df6e1a38fca64d04e47f70e7cbf71b73c4f3950b4f8a703324a7171f8c12bd3 Jan 22 07:19:14 crc kubenswrapper[4933]: I0122 07:19:14.987520 4933 generic.go:334] "Generic (PLEG): container finished" podID="265ce84e-0c9e-4e32-9eaa-8821b19bc29d" containerID="5b73a1e83346b1af58e93d754aad46f652bd9a279b47cb32e2c19b8e6d8f9c17" exitCode=0 Jan 22 07:19:14 crc kubenswrapper[4933]: I0122 07:19:14.987555 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-lgf6v" event={"ID":"265ce84e-0c9e-4e32-9eaa-8821b19bc29d","Type":"ContainerDied","Data":"5b73a1e83346b1af58e93d754aad46f652bd9a279b47cb32e2c19b8e6d8f9c17"} Jan 22 07:19:14 crc kubenswrapper[4933]: I0122 07:19:14.987923 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-lgf6v" event={"ID":"265ce84e-0c9e-4e32-9eaa-8821b19bc29d","Type":"ContainerStarted","Data":"7df6e1a38fca64d04e47f70e7cbf71b73c4f3950b4f8a703324a7171f8c12bd3"} Jan 22 07:19:14 crc kubenswrapper[4933]: I0122 07:19:14.992447 4933 generic.go:334] "Generic (PLEG): container finished" podID="79198723-552a-4002-8ac2-f66a008a26ae" containerID="012d065a57804f23df9a5ceb4e2d90f446ec846602b84adb74e9c66dfbabc23b" exitCode=0 Jan 22 07:19:14 crc kubenswrapper[4933]: I0122 07:19:14.992490 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-5e60-account-create-update-jwn9r" 
event={"ID":"79198723-552a-4002-8ac2-f66a008a26ae","Type":"ContainerDied","Data":"012d065a57804f23df9a5ceb4e2d90f446ec846602b84adb74e9c66dfbabc23b"} Jan 22 07:19:14 crc kubenswrapper[4933]: I0122 07:19:14.992514 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-5e60-account-create-update-jwn9r" event={"ID":"79198723-552a-4002-8ac2-f66a008a26ae","Type":"ContainerStarted","Data":"387c07cf8d73348d095842dae19f642d292ff0c5bd3b4609dafb2eb49f8039e7"} Jan 22 07:19:16 crc kubenswrapper[4933]: I0122 07:19:16.392306 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-lgf6v" Jan 22 07:19:16 crc kubenswrapper[4933]: I0122 07:19:16.397590 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-5e60-account-create-update-jwn9r" Jan 22 07:19:16 crc kubenswrapper[4933]: I0122 07:19:16.478990 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ns6h\" (UniqueName: \"kubernetes.io/projected/79198723-552a-4002-8ac2-f66a008a26ae-kube-api-access-6ns6h\") pod \"79198723-552a-4002-8ac2-f66a008a26ae\" (UID: \"79198723-552a-4002-8ac2-f66a008a26ae\") " Jan 22 07:19:16 crc kubenswrapper[4933]: I0122 07:19:16.479197 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4p25j\" (UniqueName: \"kubernetes.io/projected/265ce84e-0c9e-4e32-9eaa-8821b19bc29d-kube-api-access-4p25j\") pod \"265ce84e-0c9e-4e32-9eaa-8821b19bc29d\" (UID: \"265ce84e-0c9e-4e32-9eaa-8821b19bc29d\") " Jan 22 07:19:16 crc kubenswrapper[4933]: I0122 07:19:16.479272 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/265ce84e-0c9e-4e32-9eaa-8821b19bc29d-operator-scripts\") pod \"265ce84e-0c9e-4e32-9eaa-8821b19bc29d\" (UID: \"265ce84e-0c9e-4e32-9eaa-8821b19bc29d\") " Jan 22 07:19:16 crc kubenswrapper[4933]: I0122 07:19:16.479300 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/79198723-552a-4002-8ac2-f66a008a26ae-operator-scripts\") pod \"79198723-552a-4002-8ac2-f66a008a26ae\" (UID: \"79198723-552a-4002-8ac2-f66a008a26ae\") " Jan 22 07:19:16 crc kubenswrapper[4933]: I0122 07:19:16.479745 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/265ce84e-0c9e-4e32-9eaa-8821b19bc29d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "265ce84e-0c9e-4e32-9eaa-8821b19bc29d" (UID: "265ce84e-0c9e-4e32-9eaa-8821b19bc29d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:19:16 crc kubenswrapper[4933]: I0122 07:19:16.479803 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/79198723-552a-4002-8ac2-f66a008a26ae-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "79198723-552a-4002-8ac2-f66a008a26ae" (UID: "79198723-552a-4002-8ac2-f66a008a26ae"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:19:16 crc kubenswrapper[4933]: I0122 07:19:16.484301 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/265ce84e-0c9e-4e32-9eaa-8821b19bc29d-kube-api-access-4p25j" (OuterVolumeSpecName: "kube-api-access-4p25j") pod "265ce84e-0c9e-4e32-9eaa-8821b19bc29d" (UID: "265ce84e-0c9e-4e32-9eaa-8821b19bc29d"). InnerVolumeSpecName "kube-api-access-4p25j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:19:16 crc kubenswrapper[4933]: I0122 07:19:16.484634 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79198723-552a-4002-8ac2-f66a008a26ae-kube-api-access-6ns6h" (OuterVolumeSpecName: "kube-api-access-6ns6h") pod "79198723-552a-4002-8ac2-f66a008a26ae" (UID: "79198723-552a-4002-8ac2-f66a008a26ae"). InnerVolumeSpecName "kube-api-access-6ns6h". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:19:16 crc kubenswrapper[4933]: I0122 07:19:16.582307 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4p25j\" (UniqueName: \"kubernetes.io/projected/265ce84e-0c9e-4e32-9eaa-8821b19bc29d-kube-api-access-4p25j\") on node \"crc\" DevicePath \"\"" Jan 22 07:19:16 crc kubenswrapper[4933]: I0122 07:19:16.582344 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/265ce84e-0c9e-4e32-9eaa-8821b19bc29d-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:19:16 crc kubenswrapper[4933]: I0122 07:19:16.582354 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/79198723-552a-4002-8ac2-f66a008a26ae-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:19:16 crc kubenswrapper[4933]: I0122 07:19:16.582364 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ns6h\" (UniqueName: \"kubernetes.io/projected/79198723-552a-4002-8ac2-f66a008a26ae-kube-api-access-6ns6h\") on node \"crc\" DevicePath \"\"" Jan 22 07:19:17 crc kubenswrapper[4933]: I0122 07:19:17.010012 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-5e60-account-create-update-jwn9r" event={"ID":"79198723-552a-4002-8ac2-f66a008a26ae","Type":"ContainerDied","Data":"387c07cf8d73348d095842dae19f642d292ff0c5bd3b4609dafb2eb49f8039e7"} Jan 22 07:19:17 crc kubenswrapper[4933]: I0122 07:19:17.010523 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="387c07cf8d73348d095842dae19f642d292ff0c5bd3b4609dafb2eb49f8039e7" Jan 22 07:19:17 crc kubenswrapper[4933]: I0122 07:19:17.010028 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-5e60-account-create-update-jwn9r" Jan 22 07:19:17 crc kubenswrapper[4933]: I0122 07:19:17.012213 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-lgf6v" event={"ID":"265ce84e-0c9e-4e32-9eaa-8821b19bc29d","Type":"ContainerDied","Data":"7df6e1a38fca64d04e47f70e7cbf71b73c4f3950b4f8a703324a7171f8c12bd3"} Jan 22 07:19:17 crc kubenswrapper[4933]: I0122 07:19:17.012254 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7df6e1a38fca64d04e47f70e7cbf71b73c4f3950b4f8a703324a7171f8c12bd3" Jan 22 07:19:17 crc kubenswrapper[4933]: I0122 07:19:17.012640 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-lgf6v" Jan 22 07:19:18 crc kubenswrapper[4933]: I0122 07:19:18.786109 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-x2t8l"] Jan 22 07:19:18 crc kubenswrapper[4933]: E0122 07:19:18.787476 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="265ce84e-0c9e-4e32-9eaa-8821b19bc29d" containerName="mariadb-database-create" Jan 22 07:19:18 crc kubenswrapper[4933]: I0122 07:19:18.787590 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="265ce84e-0c9e-4e32-9eaa-8821b19bc29d" containerName="mariadb-database-create" Jan 22 07:19:18 crc kubenswrapper[4933]: E0122 07:19:18.787683 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79198723-552a-4002-8ac2-f66a008a26ae" containerName="mariadb-account-create-update" Jan 22 07:19:18 crc kubenswrapper[4933]: I0122 07:19:18.787777 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="79198723-552a-4002-8ac2-f66a008a26ae" containerName="mariadb-account-create-update" Jan 22 07:19:18 crc kubenswrapper[4933]: I0122 07:19:18.788010 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="79198723-552a-4002-8ac2-f66a008a26ae" containerName="mariadb-account-create-update" Jan 22 07:19:18 crc kubenswrapper[4933]: I0122 07:19:18.788110 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="265ce84e-0c9e-4e32-9eaa-8821b19bc29d" containerName="mariadb-database-create" Jan 22 07:19:18 crc kubenswrapper[4933]: I0122 07:19:18.788783 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-x2t8l" Jan 22 07:19:18 crc kubenswrapper[4933]: I0122 07:19:18.791290 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 22 07:19:18 crc kubenswrapper[4933]: I0122 07:19:18.791502 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-4h4bl" Jan 22 07:19:18 crc kubenswrapper[4933]: I0122 07:19:18.797381 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-x2t8l"] Jan 22 07:19:18 crc kubenswrapper[4933]: I0122 07:19:18.808542 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Jan 22 07:19:18 crc kubenswrapper[4933]: I0122 07:19:18.926323 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a51acc4-f217-4041-8538-dd03b67531a1-config-data\") pod \"cinder-db-sync-x2t8l\" (UID: \"3a51acc4-f217-4041-8538-dd03b67531a1\") " pod="openstack/cinder-db-sync-x2t8l" Jan 22 07:19:18 crc kubenswrapper[4933]: I0122 07:19:18.926668 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3a51acc4-f217-4041-8538-dd03b67531a1-scripts\") pod \"cinder-db-sync-x2t8l\" (UID: \"3a51acc4-f217-4041-8538-dd03b67531a1\") " pod="openstack/cinder-db-sync-x2t8l" Jan 22 07:19:18 crc kubenswrapper[4933]: I0122 07:19:18.926811 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a51acc4-f217-4041-8538-dd03b67531a1-combined-ca-bundle\") pod \"cinder-db-sync-x2t8l\" (UID: \"3a51acc4-f217-4041-8538-dd03b67531a1\") " pod="openstack/cinder-db-sync-x2t8l" Jan 22 07:19:18 crc kubenswrapper[4933]: I0122 07:19:18.926961 4933 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-96zjh\" (UniqueName: \"kubernetes.io/projected/3a51acc4-f217-4041-8538-dd03b67531a1-kube-api-access-96zjh\") pod \"cinder-db-sync-x2t8l\" (UID: \"3a51acc4-f217-4041-8538-dd03b67531a1\") " pod="openstack/cinder-db-sync-x2t8l" Jan 22 07:19:18 crc kubenswrapper[4933]: I0122 07:19:18.927112 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3a51acc4-f217-4041-8538-dd03b67531a1-etc-machine-id\") pod \"cinder-db-sync-x2t8l\" (UID: \"3a51acc4-f217-4041-8538-dd03b67531a1\") " pod="openstack/cinder-db-sync-x2t8l" Jan 22 07:19:18 crc kubenswrapper[4933]: I0122 07:19:18.927252 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/3a51acc4-f217-4041-8538-dd03b67531a1-db-sync-config-data\") pod \"cinder-db-sync-x2t8l\" (UID: \"3a51acc4-f217-4041-8538-dd03b67531a1\") " pod="openstack/cinder-db-sync-x2t8l" Jan 22 07:19:19 crc kubenswrapper[4933]: I0122 07:19:19.029524 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-96zjh\" (UniqueName: \"kubernetes.io/projected/3a51acc4-f217-4041-8538-dd03b67531a1-kube-api-access-96zjh\") pod \"cinder-db-sync-x2t8l\" (UID: \"3a51acc4-f217-4041-8538-dd03b67531a1\") " pod="openstack/cinder-db-sync-x2t8l" Jan 22 07:19:19 crc kubenswrapper[4933]: I0122 07:19:19.029605 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3a51acc4-f217-4041-8538-dd03b67531a1-etc-machine-id\") pod \"cinder-db-sync-x2t8l\" (UID: \"3a51acc4-f217-4041-8538-dd03b67531a1\") " pod="openstack/cinder-db-sync-x2t8l" Jan 22 07:19:19 crc kubenswrapper[4933]: I0122 07:19:19.029653 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/3a51acc4-f217-4041-8538-dd03b67531a1-db-sync-config-data\") pod \"cinder-db-sync-x2t8l\" (UID: \"3a51acc4-f217-4041-8538-dd03b67531a1\") " pod="openstack/cinder-db-sync-x2t8l" Jan 22 07:19:19 crc kubenswrapper[4933]: I0122 07:19:19.029720 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a51acc4-f217-4041-8538-dd03b67531a1-config-data\") pod \"cinder-db-sync-x2t8l\" (UID: \"3a51acc4-f217-4041-8538-dd03b67531a1\") " pod="openstack/cinder-db-sync-x2t8l" Jan 22 07:19:19 crc kubenswrapper[4933]: I0122 07:19:19.029770 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3a51acc4-f217-4041-8538-dd03b67531a1-scripts\") pod \"cinder-db-sync-x2t8l\" (UID: \"3a51acc4-f217-4041-8538-dd03b67531a1\") " pod="openstack/cinder-db-sync-x2t8l" Jan 22 07:19:19 crc kubenswrapper[4933]: I0122 07:19:19.029792 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3a51acc4-f217-4041-8538-dd03b67531a1-etc-machine-id\") pod \"cinder-db-sync-x2t8l\" (UID: \"3a51acc4-f217-4041-8538-dd03b67531a1\") " pod="openstack/cinder-db-sync-x2t8l" Jan 22 07:19:19 crc kubenswrapper[4933]: I0122 07:19:19.029812 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/3a51acc4-f217-4041-8538-dd03b67531a1-combined-ca-bundle\") pod \"cinder-db-sync-x2t8l\" (UID: \"3a51acc4-f217-4041-8538-dd03b67531a1\") " pod="openstack/cinder-db-sync-x2t8l" Jan 22 07:19:19 crc kubenswrapper[4933]: I0122 07:19:19.035509 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a51acc4-f217-4041-8538-dd03b67531a1-config-data\") pod \"cinder-db-sync-x2t8l\" (UID: \"3a51acc4-f217-4041-8538-dd03b67531a1\") " pod="openstack/cinder-db-sync-x2t8l" Jan 22 07:19:19 crc kubenswrapper[4933]: I0122 07:19:19.035544 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a51acc4-f217-4041-8538-dd03b67531a1-combined-ca-bundle\") pod \"cinder-db-sync-x2t8l\" (UID: \"3a51acc4-f217-4041-8538-dd03b67531a1\") " pod="openstack/cinder-db-sync-x2t8l" Jan 22 07:19:19 crc kubenswrapper[4933]: I0122 07:19:19.035596 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3a51acc4-f217-4041-8538-dd03b67531a1-scripts\") pod \"cinder-db-sync-x2t8l\" (UID: \"3a51acc4-f217-4041-8538-dd03b67531a1\") " pod="openstack/cinder-db-sync-x2t8l" Jan 22 07:19:19 crc kubenswrapper[4933]: I0122 07:19:19.035654 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/3a51acc4-f217-4041-8538-dd03b67531a1-db-sync-config-data\") pod \"cinder-db-sync-x2t8l\" (UID: \"3a51acc4-f217-4041-8538-dd03b67531a1\") " pod="openstack/cinder-db-sync-x2t8l" Jan 22 07:19:19 crc kubenswrapper[4933]: I0122 07:19:19.050749 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-96zjh\" (UniqueName: \"kubernetes.io/projected/3a51acc4-f217-4041-8538-dd03b67531a1-kube-api-access-96zjh\") pod \"cinder-db-sync-x2t8l\" (UID: \"3a51acc4-f217-4041-8538-dd03b67531a1\") " pod="openstack/cinder-db-sync-x2t8l" Jan 22 07:19:19 crc kubenswrapper[4933]: I0122 07:19:19.119634 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-x2t8l" Jan 22 07:19:19 crc kubenswrapper[4933]: I0122 07:19:19.584428 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-x2t8l"] Jan 22 07:19:20 crc kubenswrapper[4933]: I0122 07:19:20.037185 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-x2t8l" event={"ID":"3a51acc4-f217-4041-8538-dd03b67531a1","Type":"ContainerStarted","Data":"7827692e333ea8691f4fba0b0103804cede24511402eb7c56a0016f0a952b90f"} Jan 22 07:19:21 crc kubenswrapper[4933]: I0122 07:19:21.051600 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-x2t8l" event={"ID":"3a51acc4-f217-4041-8538-dd03b67531a1","Type":"ContainerStarted","Data":"375fda8391f5b73ad69840756f1f1e095578a60dc7d58d1370d16cb7acb33b37"} Jan 22 07:19:21 crc kubenswrapper[4933]: I0122 07:19:21.088832 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-x2t8l" podStartSLOduration=3.088803703 podStartE2EDuration="3.088803703s" podCreationTimestamp="2026-01-22 07:19:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:19:21.074551406 +0000 UTC m=+5608.911676799" watchObservedRunningTime="2026-01-22 07:19:21.088803703 +0000 UTC m=+5608.925929086" Jan 22 07:19:23 crc kubenswrapper[4933]: I0122 07:19:23.067424 4933 generic.go:334] "Generic (PLEG): container finished" podID="3a51acc4-f217-4041-8538-dd03b67531a1" containerID="375fda8391f5b73ad69840756f1f1e095578a60dc7d58d1370d16cb7acb33b37" exitCode=0 Jan 22 07:19:23 crc kubenswrapper[4933]: I0122 07:19:23.067465 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-x2t8l" event={"ID":"3a51acc4-f217-4041-8538-dd03b67531a1","Type":"ContainerDied","Data":"375fda8391f5b73ad69840756f1f1e095578a60dc7d58d1370d16cb7acb33b37"} Jan 22 07:19:23 crc kubenswrapper[4933]: I0122 07:19:23.490528 4933 scope.go:117] "RemoveContainer" containerID="8d0ff8f158acf8d88510b1fddaf0b35b7a3fb6e84561545d6c60a78c2e36fc92" Jan 22 07:19:24 crc kubenswrapper[4933]: I0122 07:19:24.080032 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerStarted","Data":"df77120a276e772bee7dad3e476bcf8749c071d28246be8da71e670976f60157"} Jan 22 07:19:24 crc kubenswrapper[4933]: I0122 07:19:24.416754 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-x2t8l" Jan 22 07:19:24 crc kubenswrapper[4933]: I0122 07:19:24.451400 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a51acc4-f217-4041-8538-dd03b67531a1-config-data\") pod \"3a51acc4-f217-4041-8538-dd03b67531a1\" (UID: \"3a51acc4-f217-4041-8538-dd03b67531a1\") " Jan 22 07:19:24 crc kubenswrapper[4933]: I0122 07:19:24.451469 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3a51acc4-f217-4041-8538-dd03b67531a1-etc-machine-id\") pod \"3a51acc4-f217-4041-8538-dd03b67531a1\" (UID: \"3a51acc4-f217-4041-8538-dd03b67531a1\") " Jan 22 07:19:24 crc kubenswrapper[4933]: I0122 07:19:24.451506 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-96zjh\" (UniqueName: \"kubernetes.io/projected/3a51acc4-f217-4041-8538-dd03b67531a1-kube-api-access-96zjh\") pod \"3a51acc4-f217-4041-8538-dd03b67531a1\" (UID: \"3a51acc4-f217-4041-8538-dd03b67531a1\") " Jan 22 07:19:24 crc kubenswrapper[4933]: I0122 07:19:24.451994 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3a51acc4-f217-4041-8538-dd03b67531a1-scripts\") pod \"3a51acc4-f217-4041-8538-dd03b67531a1\" (UID: \"3a51acc4-f217-4041-8538-dd03b67531a1\") " Jan 22 07:19:24 crc kubenswrapper[4933]: I0122 07:19:24.451998 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3a51acc4-f217-4041-8538-dd03b67531a1-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "3a51acc4-f217-4041-8538-dd03b67531a1" (UID: "3a51acc4-f217-4041-8538-dd03b67531a1"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 07:19:24 crc kubenswrapper[4933]: I0122 07:19:24.454191 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/3a51acc4-f217-4041-8538-dd03b67531a1-db-sync-config-data\") pod \"3a51acc4-f217-4041-8538-dd03b67531a1\" (UID: \"3a51acc4-f217-4041-8538-dd03b67531a1\") " Jan 22 07:19:24 crc kubenswrapper[4933]: I0122 07:19:24.454252 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a51acc4-f217-4041-8538-dd03b67531a1-combined-ca-bundle\") pod \"3a51acc4-f217-4041-8538-dd03b67531a1\" (UID: \"3a51acc4-f217-4041-8538-dd03b67531a1\") " Jan 22 07:19:24 crc kubenswrapper[4933]: I0122 07:19:24.455058 4933 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3a51acc4-f217-4041-8538-dd03b67531a1-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 22 07:19:24 crc kubenswrapper[4933]: I0122 07:19:24.471613 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a51acc4-f217-4041-8538-dd03b67531a1-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "3a51acc4-f217-4041-8538-dd03b67531a1" (UID: "3a51acc4-f217-4041-8538-dd03b67531a1"). InnerVolumeSpecName "db-sync-config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:19:24 crc kubenswrapper[4933]: I0122 07:19:24.471873 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a51acc4-f217-4041-8538-dd03b67531a1-kube-api-access-96zjh" (OuterVolumeSpecName: "kube-api-access-96zjh") pod "3a51acc4-f217-4041-8538-dd03b67531a1" (UID: "3a51acc4-f217-4041-8538-dd03b67531a1"). InnerVolumeSpecName "kube-api-access-96zjh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:19:24 crc kubenswrapper[4933]: I0122 07:19:24.482273 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a51acc4-f217-4041-8538-dd03b67531a1-scripts" (OuterVolumeSpecName: "scripts") pod "3a51acc4-f217-4041-8538-dd03b67531a1" (UID: "3a51acc4-f217-4041-8538-dd03b67531a1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:19:24 crc kubenswrapper[4933]: I0122 07:19:24.490222 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a51acc4-f217-4041-8538-dd03b67531a1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3a51acc4-f217-4041-8538-dd03b67531a1" (UID: "3a51acc4-f217-4041-8538-dd03b67531a1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:19:24 crc kubenswrapper[4933]: I0122 07:19:24.502434 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a51acc4-f217-4041-8538-dd03b67531a1-config-data" (OuterVolumeSpecName: "config-data") pod "3a51acc4-f217-4041-8538-dd03b67531a1" (UID: "3a51acc4-f217-4041-8538-dd03b67531a1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:19:24 crc kubenswrapper[4933]: I0122 07:19:24.556648 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3a51acc4-f217-4041-8538-dd03b67531a1-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:19:24 crc kubenswrapper[4933]: I0122 07:19:24.556681 4933 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/3a51acc4-f217-4041-8538-dd03b67531a1-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 07:19:24 crc kubenswrapper[4933]: I0122 07:19:24.556693 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a51acc4-f217-4041-8538-dd03b67531a1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:19:24 crc kubenswrapper[4933]: I0122 07:19:24.556705 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a51acc4-f217-4041-8538-dd03b67531a1-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 07:19:24 crc kubenswrapper[4933]: I0122 07:19:24.556716 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-96zjh\" (UniqueName: \"kubernetes.io/projected/3a51acc4-f217-4041-8538-dd03b67531a1-kube-api-access-96zjh\") on node \"crc\" DevicePath \"\"" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.090410 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-x2t8l" event={"ID":"3a51acc4-f217-4041-8538-dd03b67531a1","Type":"ContainerDied","Data":"7827692e333ea8691f4fba0b0103804cede24511402eb7c56a0016f0a952b90f"} Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.090457 4933 pod_container_deletor.go:80] 
"Container not found in pod's containers" containerID="7827692e333ea8691f4fba0b0103804cede24511402eb7c56a0016f0a952b90f" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.090463 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-x2t8l" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.404690 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-69c775f4cc-wqbrt"] Jan 22 07:19:25 crc kubenswrapper[4933]: E0122 07:19:25.405048 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a51acc4-f217-4041-8538-dd03b67531a1" containerName="cinder-db-sync" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.405064 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a51acc4-f217-4041-8538-dd03b67531a1" containerName="cinder-db-sync" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.405258 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a51acc4-f217-4041-8538-dd03b67531a1" containerName="cinder-db-sync" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.406172 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-69c775f4cc-wqbrt" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.416484 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-69c775f4cc-wqbrt"] Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.469613 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f2e0cadd-0b02-471b-bf57-0ae43b550014-ovsdbserver-sb\") pod \"dnsmasq-dns-69c775f4cc-wqbrt\" (UID: \"f2e0cadd-0b02-471b-bf57-0ae43b550014\") " pod="openstack/dnsmasq-dns-69c775f4cc-wqbrt" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.469708 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f2e0cadd-0b02-471b-bf57-0ae43b550014-dns-svc\") pod \"dnsmasq-dns-69c775f4cc-wqbrt\" (UID: \"f2e0cadd-0b02-471b-bf57-0ae43b550014\") " pod="openstack/dnsmasq-dns-69c775f4cc-wqbrt" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.469744 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f2e0cadd-0b02-471b-bf57-0ae43b550014-config\") pod \"dnsmasq-dns-69c775f4cc-wqbrt\" (UID: \"f2e0cadd-0b02-471b-bf57-0ae43b550014\") " pod="openstack/dnsmasq-dns-69c775f4cc-wqbrt" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.469792 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f2e0cadd-0b02-471b-bf57-0ae43b550014-ovsdbserver-nb\") pod \"dnsmasq-dns-69c775f4cc-wqbrt\" (UID: \"f2e0cadd-0b02-471b-bf57-0ae43b550014\") " pod="openstack/dnsmasq-dns-69c775f4cc-wqbrt" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.469821 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9gdzt\" (UniqueName: \"kubernetes.io/projected/f2e0cadd-0b02-471b-bf57-0ae43b550014-kube-api-access-9gdzt\") pod \"dnsmasq-dns-69c775f4cc-wqbrt\" (UID: \"f2e0cadd-0b02-471b-bf57-0ae43b550014\") " pod="openstack/dnsmasq-dns-69c775f4cc-wqbrt" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.470855 4933 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/cinder-api-0"] Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.472725 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.475750 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-4h4bl" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.475779 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.476101 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.476232 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.483786 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.572027 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f2e0cadd-0b02-471b-bf57-0ae43b550014-ovsdbserver-sb\") pod \"dnsmasq-dns-69c775f4cc-wqbrt\" (UID: \"f2e0cadd-0b02-471b-bf57-0ae43b550014\") " pod="openstack/dnsmasq-dns-69c775f4cc-wqbrt" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.572466 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a17c6b58-5756-49ae-86a9-1f7919f137af-etc-machine-id\") pod \"cinder-api-0\" (UID: \"a17c6b58-5756-49ae-86a9-1f7919f137af\") " pod="openstack/cinder-api-0" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.572554 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a17c6b58-5756-49ae-86a9-1f7919f137af-config-data-custom\") pod \"cinder-api-0\" (UID: \"a17c6b58-5756-49ae-86a9-1f7919f137af\") " pod="openstack/cinder-api-0" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.572584 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a17c6b58-5756-49ae-86a9-1f7919f137af-scripts\") pod \"cinder-api-0\" (UID: \"a17c6b58-5756-49ae-86a9-1f7919f137af\") " pod="openstack/cinder-api-0" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.572613 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a17c6b58-5756-49ae-86a9-1f7919f137af-config-data\") pod \"cinder-api-0\" (UID: \"a17c6b58-5756-49ae-86a9-1f7919f137af\") " pod="openstack/cinder-api-0" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.572638 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a17c6b58-5756-49ae-86a9-1f7919f137af-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"a17c6b58-5756-49ae-86a9-1f7919f137af\") " pod="openstack/cinder-api-0" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.572677 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f2e0cadd-0b02-471b-bf57-0ae43b550014-dns-svc\") pod \"dnsmasq-dns-69c775f4cc-wqbrt\" (UID: 
\"f2e0cadd-0b02-471b-bf57-0ae43b550014\") " pod="openstack/dnsmasq-dns-69c775f4cc-wqbrt" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.572717 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f2e0cadd-0b02-471b-bf57-0ae43b550014-config\") pod \"dnsmasq-dns-69c775f4cc-wqbrt\" (UID: \"f2e0cadd-0b02-471b-bf57-0ae43b550014\") " pod="openstack/dnsmasq-dns-69c775f4cc-wqbrt" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.572778 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f2e0cadd-0b02-471b-bf57-0ae43b550014-ovsdbserver-nb\") pod \"dnsmasq-dns-69c775f4cc-wqbrt\" (UID: \"f2e0cadd-0b02-471b-bf57-0ae43b550014\") " pod="openstack/dnsmasq-dns-69c775f4cc-wqbrt" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.572797 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9gdzt\" (UniqueName: \"kubernetes.io/projected/f2e0cadd-0b02-471b-bf57-0ae43b550014-kube-api-access-9gdzt\") pod \"dnsmasq-dns-69c775f4cc-wqbrt\" (UID: \"f2e0cadd-0b02-471b-bf57-0ae43b550014\") " pod="openstack/dnsmasq-dns-69c775f4cc-wqbrt" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.572832 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xdb5n\" (UniqueName: \"kubernetes.io/projected/a17c6b58-5756-49ae-86a9-1f7919f137af-kube-api-access-xdb5n\") pod \"cinder-api-0\" (UID: \"a17c6b58-5756-49ae-86a9-1f7919f137af\") " pod="openstack/cinder-api-0" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.572906 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a17c6b58-5756-49ae-86a9-1f7919f137af-logs\") pod \"cinder-api-0\" (UID: \"a17c6b58-5756-49ae-86a9-1f7919f137af\") " pod="openstack/cinder-api-0" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.573097 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f2e0cadd-0b02-471b-bf57-0ae43b550014-ovsdbserver-sb\") pod \"dnsmasq-dns-69c775f4cc-wqbrt\" (UID: \"f2e0cadd-0b02-471b-bf57-0ae43b550014\") " pod="openstack/dnsmasq-dns-69c775f4cc-wqbrt" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.574039 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f2e0cadd-0b02-471b-bf57-0ae43b550014-dns-svc\") pod \"dnsmasq-dns-69c775f4cc-wqbrt\" (UID: \"f2e0cadd-0b02-471b-bf57-0ae43b550014\") " pod="openstack/dnsmasq-dns-69c775f4cc-wqbrt" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.574423 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f2e0cadd-0b02-471b-bf57-0ae43b550014-config\") pod \"dnsmasq-dns-69c775f4cc-wqbrt\" (UID: \"f2e0cadd-0b02-471b-bf57-0ae43b550014\") " pod="openstack/dnsmasq-dns-69c775f4cc-wqbrt" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.574965 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f2e0cadd-0b02-471b-bf57-0ae43b550014-ovsdbserver-nb\") pod \"dnsmasq-dns-69c775f4cc-wqbrt\" (UID: \"f2e0cadd-0b02-471b-bf57-0ae43b550014\") " pod="openstack/dnsmasq-dns-69c775f4cc-wqbrt" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 
07:19:25.600911 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9gdzt\" (UniqueName: \"kubernetes.io/projected/f2e0cadd-0b02-471b-bf57-0ae43b550014-kube-api-access-9gdzt\") pod \"dnsmasq-dns-69c775f4cc-wqbrt\" (UID: \"f2e0cadd-0b02-471b-bf57-0ae43b550014\") " pod="openstack/dnsmasq-dns-69c775f4cc-wqbrt" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.674862 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a17c6b58-5756-49ae-86a9-1f7919f137af-config-data-custom\") pod \"cinder-api-0\" (UID: \"a17c6b58-5756-49ae-86a9-1f7919f137af\") " pod="openstack/cinder-api-0" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.674912 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a17c6b58-5756-49ae-86a9-1f7919f137af-scripts\") pod \"cinder-api-0\" (UID: \"a17c6b58-5756-49ae-86a9-1f7919f137af\") " pod="openstack/cinder-api-0" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.674942 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a17c6b58-5756-49ae-86a9-1f7919f137af-config-data\") pod \"cinder-api-0\" (UID: \"a17c6b58-5756-49ae-86a9-1f7919f137af\") " pod="openstack/cinder-api-0" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.674966 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a17c6b58-5756-49ae-86a9-1f7919f137af-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"a17c6b58-5756-49ae-86a9-1f7919f137af\") " pod="openstack/cinder-api-0" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.675089 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xdb5n\" (UniqueName: \"kubernetes.io/projected/a17c6b58-5756-49ae-86a9-1f7919f137af-kube-api-access-xdb5n\") pod \"cinder-api-0\" (UID: \"a17c6b58-5756-49ae-86a9-1f7919f137af\") " pod="openstack/cinder-api-0" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.675142 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a17c6b58-5756-49ae-86a9-1f7919f137af-logs\") pod \"cinder-api-0\" (UID: \"a17c6b58-5756-49ae-86a9-1f7919f137af\") " pod="openstack/cinder-api-0" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.675200 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a17c6b58-5756-49ae-86a9-1f7919f137af-etc-machine-id\") pod \"cinder-api-0\" (UID: \"a17c6b58-5756-49ae-86a9-1f7919f137af\") " pod="openstack/cinder-api-0" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.675816 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a17c6b58-5756-49ae-86a9-1f7919f137af-etc-machine-id\") pod \"cinder-api-0\" (UID: \"a17c6b58-5756-49ae-86a9-1f7919f137af\") " pod="openstack/cinder-api-0" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.675890 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a17c6b58-5756-49ae-86a9-1f7919f137af-logs\") pod \"cinder-api-0\" (UID: \"a17c6b58-5756-49ae-86a9-1f7919f137af\") " pod="openstack/cinder-api-0" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 
07:19:25.678835 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a17c6b58-5756-49ae-86a9-1f7919f137af-scripts\") pod \"cinder-api-0\" (UID: \"a17c6b58-5756-49ae-86a9-1f7919f137af\") " pod="openstack/cinder-api-0" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.679565 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a17c6b58-5756-49ae-86a9-1f7919f137af-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"a17c6b58-5756-49ae-86a9-1f7919f137af\") " pod="openstack/cinder-api-0" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.686388 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a17c6b58-5756-49ae-86a9-1f7919f137af-config-data-custom\") pod \"cinder-api-0\" (UID: \"a17c6b58-5756-49ae-86a9-1f7919f137af\") " pod="openstack/cinder-api-0" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.687306 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a17c6b58-5756-49ae-86a9-1f7919f137af-config-data\") pod \"cinder-api-0\" (UID: \"a17c6b58-5756-49ae-86a9-1f7919f137af\") " pod="openstack/cinder-api-0" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.695468 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xdb5n\" (UniqueName: \"kubernetes.io/projected/a17c6b58-5756-49ae-86a9-1f7919f137af-kube-api-access-xdb5n\") pod \"cinder-api-0\" (UID: \"a17c6b58-5756-49ae-86a9-1f7919f137af\") " pod="openstack/cinder-api-0" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.720964 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-69c775f4cc-wqbrt" Jan 22 07:19:25 crc kubenswrapper[4933]: I0122 07:19:25.789489 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 22 07:19:26 crc kubenswrapper[4933]: I0122 07:19:26.233693 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-69c775f4cc-wqbrt"] Jan 22 07:19:26 crc kubenswrapper[4933]: I0122 07:19:26.321168 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 22 07:19:26 crc kubenswrapper[4933]: W0122 07:19:26.334457 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda17c6b58_5756_49ae_86a9_1f7919f137af.slice/crio-700f1660d6906255306754c2a4d67510d7e45e4d51e1d881c16cd20604df34da WatchSource:0}: Error finding container 700f1660d6906255306754c2a4d67510d7e45e4d51e1d881c16cd20604df34da: Status 404 returned error can't find the container with id 700f1660d6906255306754c2a4d67510d7e45e4d51e1d881c16cd20604df34da Jan 22 07:19:27 crc kubenswrapper[4933]: I0122 07:19:27.108108 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a17c6b58-5756-49ae-86a9-1f7919f137af","Type":"ContainerStarted","Data":"0b8bae7746dc410417356de0e3186e5c26f14477857826f2de4078f871b6547f"} Jan 22 07:19:27 crc kubenswrapper[4933]: I0122 07:19:27.108625 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a17c6b58-5756-49ae-86a9-1f7919f137af","Type":"ContainerStarted","Data":"700f1660d6906255306754c2a4d67510d7e45e4d51e1d881c16cd20604df34da"} Jan 22 07:19:27 crc kubenswrapper[4933]: I0122 07:19:27.110005 4933 generic.go:334] "Generic (PLEG): container finished" podID="f2e0cadd-0b02-471b-bf57-0ae43b550014" containerID="edcdf10ab69b051dfe880a8adbe12c8c2234f419402a6352ae33ebec38b130f3" exitCode=0 Jan 22 07:19:27 crc kubenswrapper[4933]: I0122 07:19:27.110175 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69c775f4cc-wqbrt" event={"ID":"f2e0cadd-0b02-471b-bf57-0ae43b550014","Type":"ContainerDied","Data":"edcdf10ab69b051dfe880a8adbe12c8c2234f419402a6352ae33ebec38b130f3"} Jan 22 07:19:27 crc kubenswrapper[4933]: I0122 07:19:27.110291 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69c775f4cc-wqbrt" event={"ID":"f2e0cadd-0b02-471b-bf57-0ae43b550014","Type":"ContainerStarted","Data":"886e1f19b6945cc076cdeeb1744382b10f6d1c0b636a8bb38723a167b290fa40"} Jan 22 07:19:27 crc kubenswrapper[4933]: I0122 07:19:27.688670 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 22 07:19:28 crc kubenswrapper[4933]: I0122 07:19:28.121977 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a17c6b58-5756-49ae-86a9-1f7919f137af","Type":"ContainerStarted","Data":"e96346b582b2e428b34ec3736f0b34fc1b16d281ecf3dc7a79949a4e0f4005df"} Jan 22 07:19:28 crc kubenswrapper[4933]: I0122 07:19:28.122436 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="a17c6b58-5756-49ae-86a9-1f7919f137af" containerName="cinder-api-log" containerID="cri-o://0b8bae7746dc410417356de0e3186e5c26f14477857826f2de4078f871b6547f" gracePeriod=30 Jan 22 07:19:28 crc kubenswrapper[4933]: I0122 07:19:28.122785 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Jan 22 07:19:28 crc kubenswrapper[4933]: I0122 07:19:28.123115 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="a17c6b58-5756-49ae-86a9-1f7919f137af" 
containerName="cinder-api" containerID="cri-o://e96346b582b2e428b34ec3736f0b34fc1b16d281ecf3dc7a79949a4e0f4005df" gracePeriod=30 Jan 22 07:19:28 crc kubenswrapper[4933]: I0122 07:19:28.135150 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69c775f4cc-wqbrt" event={"ID":"f2e0cadd-0b02-471b-bf57-0ae43b550014","Type":"ContainerStarted","Data":"5c4367cccf6c2afd88eba9adaad30a63283e8640287805e2c1333fac918bbd7b"} Jan 22 07:19:28 crc kubenswrapper[4933]: I0122 07:19:28.135412 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-69c775f4cc-wqbrt" Jan 22 07:19:28 crc kubenswrapper[4933]: I0122 07:19:28.144999 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.144979756 podStartE2EDuration="3.144979756s" podCreationTimestamp="2026-01-22 07:19:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:19:28.138194331 +0000 UTC m=+5615.975319694" watchObservedRunningTime="2026-01-22 07:19:28.144979756 +0000 UTC m=+5615.982105109" Jan 22 07:19:28 crc kubenswrapper[4933]: I0122 07:19:28.161184 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-69c775f4cc-wqbrt" podStartSLOduration=3.16116204 podStartE2EDuration="3.16116204s" podCreationTimestamp="2026-01-22 07:19:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:19:28.160585915 +0000 UTC m=+5615.997711268" watchObservedRunningTime="2026-01-22 07:19:28.16116204 +0000 UTC m=+5615.998287403" Jan 22 07:19:29 crc kubenswrapper[4933]: I0122 07:19:29.144245 4933 generic.go:334] "Generic (PLEG): container finished" podID="a17c6b58-5756-49ae-86a9-1f7919f137af" containerID="0b8bae7746dc410417356de0e3186e5c26f14477857826f2de4078f871b6547f" exitCode=143 Jan 22 07:19:29 crc kubenswrapper[4933]: I0122 07:19:29.144338 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a17c6b58-5756-49ae-86a9-1f7919f137af","Type":"ContainerDied","Data":"0b8bae7746dc410417356de0e3186e5c26f14477857826f2de4078f871b6547f"} Jan 22 07:19:32 crc kubenswrapper[4933]: I0122 07:19:32.031703 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-dwzhf"] Jan 22 07:19:32 crc kubenswrapper[4933]: I0122 07:19:32.034155 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dwzhf" Jan 22 07:19:32 crc kubenswrapper[4933]: I0122 07:19:32.050632 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dwzhf"] Jan 22 07:19:32 crc kubenswrapper[4933]: I0122 07:19:32.077842 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/23fb5c4e-7288-4d2e-b4bc-881936d93e4e-utilities\") pod \"redhat-marketplace-dwzhf\" (UID: \"23fb5c4e-7288-4d2e-b4bc-881936d93e4e\") " pod="openshift-marketplace/redhat-marketplace-dwzhf" Jan 22 07:19:32 crc kubenswrapper[4933]: I0122 07:19:32.077916 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/23fb5c4e-7288-4d2e-b4bc-881936d93e4e-catalog-content\") pod \"redhat-marketplace-dwzhf\" (UID: \"23fb5c4e-7288-4d2e-b4bc-881936d93e4e\") " pod="openshift-marketplace/redhat-marketplace-dwzhf" Jan 22 07:19:32 crc kubenswrapper[4933]: I0122 07:19:32.078003 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xrbcd\" (UniqueName: \"kubernetes.io/projected/23fb5c4e-7288-4d2e-b4bc-881936d93e4e-kube-api-access-xrbcd\") pod \"redhat-marketplace-dwzhf\" (UID: \"23fb5c4e-7288-4d2e-b4bc-881936d93e4e\") " pod="openshift-marketplace/redhat-marketplace-dwzhf" Jan 22 07:19:32 crc kubenswrapper[4933]: I0122 07:19:32.178838 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xrbcd\" (UniqueName: \"kubernetes.io/projected/23fb5c4e-7288-4d2e-b4bc-881936d93e4e-kube-api-access-xrbcd\") pod \"redhat-marketplace-dwzhf\" (UID: \"23fb5c4e-7288-4d2e-b4bc-881936d93e4e\") " pod="openshift-marketplace/redhat-marketplace-dwzhf" Jan 22 07:19:32 crc kubenswrapper[4933]: I0122 07:19:32.178941 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/23fb5c4e-7288-4d2e-b4bc-881936d93e4e-utilities\") pod \"redhat-marketplace-dwzhf\" (UID: \"23fb5c4e-7288-4d2e-b4bc-881936d93e4e\") " pod="openshift-marketplace/redhat-marketplace-dwzhf" Jan 22 07:19:32 crc kubenswrapper[4933]: I0122 07:19:32.178974 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/23fb5c4e-7288-4d2e-b4bc-881936d93e4e-catalog-content\") pod \"redhat-marketplace-dwzhf\" (UID: \"23fb5c4e-7288-4d2e-b4bc-881936d93e4e\") " pod="openshift-marketplace/redhat-marketplace-dwzhf" Jan 22 07:19:32 crc kubenswrapper[4933]: I0122 07:19:32.179417 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/23fb5c4e-7288-4d2e-b4bc-881936d93e4e-catalog-content\") pod \"redhat-marketplace-dwzhf\" (UID: \"23fb5c4e-7288-4d2e-b4bc-881936d93e4e\") " pod="openshift-marketplace/redhat-marketplace-dwzhf" Jan 22 07:19:32 crc kubenswrapper[4933]: I0122 07:19:32.179488 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/23fb5c4e-7288-4d2e-b4bc-881936d93e4e-utilities\") pod \"redhat-marketplace-dwzhf\" (UID: \"23fb5c4e-7288-4d2e-b4bc-881936d93e4e\") " pod="openshift-marketplace/redhat-marketplace-dwzhf" Jan 22 07:19:32 crc kubenswrapper[4933]: I0122 07:19:32.200358 4933 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-xrbcd\" (UniqueName: \"kubernetes.io/projected/23fb5c4e-7288-4d2e-b4bc-881936d93e4e-kube-api-access-xrbcd\") pod \"redhat-marketplace-dwzhf\" (UID: \"23fb5c4e-7288-4d2e-b4bc-881936d93e4e\") " pod="openshift-marketplace/redhat-marketplace-dwzhf" Jan 22 07:19:32 crc kubenswrapper[4933]: I0122 07:19:32.371629 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dwzhf" Jan 22 07:19:32 crc kubenswrapper[4933]: I0122 07:19:32.850069 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dwzhf"] Jan 22 07:19:33 crc kubenswrapper[4933]: I0122 07:19:33.180320 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dwzhf" event={"ID":"23fb5c4e-7288-4d2e-b4bc-881936d93e4e","Type":"ContainerStarted","Data":"3aa2cf376fa98729abb0e8529647bc3ae999e8e48e9b6be1eef430c293bde94a"} Jan 22 07:19:34 crc kubenswrapper[4933]: I0122 07:19:34.194248 4933 generic.go:334] "Generic (PLEG): container finished" podID="23fb5c4e-7288-4d2e-b4bc-881936d93e4e" containerID="e4570f6a21b60d1634006fad467127b5b2cfd96ac3b8c6b2ddb6a81b01936d59" exitCode=0 Jan 22 07:19:34 crc kubenswrapper[4933]: I0122 07:19:34.194354 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dwzhf" event={"ID":"23fb5c4e-7288-4d2e-b4bc-881936d93e4e","Type":"ContainerDied","Data":"e4570f6a21b60d1634006fad467127b5b2cfd96ac3b8c6b2ddb6a81b01936d59"} Jan 22 07:19:34 crc kubenswrapper[4933]: I0122 07:19:34.196993 4933 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 07:19:35 crc kubenswrapper[4933]: I0122 07:19:35.207814 4933 generic.go:334] "Generic (PLEG): container finished" podID="23fb5c4e-7288-4d2e-b4bc-881936d93e4e" containerID="6c089dea6b50c60d000db8b006afc6f3f99ac8bf8a587152aa6df6678e8ef2b7" exitCode=0 Jan 22 07:19:35 crc kubenswrapper[4933]: I0122 07:19:35.207852 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dwzhf" event={"ID":"23fb5c4e-7288-4d2e-b4bc-881936d93e4e","Type":"ContainerDied","Data":"6c089dea6b50c60d000db8b006afc6f3f99ac8bf8a587152aa6df6678e8ef2b7"} Jan 22 07:19:35 crc kubenswrapper[4933]: I0122 07:19:35.723160 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-69c775f4cc-wqbrt" Jan 22 07:19:35 crc kubenswrapper[4933]: I0122 07:19:35.802825 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8486788887-z5njj"] Jan 22 07:19:35 crc kubenswrapper[4933]: I0122 07:19:35.803058 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8486788887-z5njj" podUID="f438afbc-73ea-44fc-856c-faf0ae11e192" containerName="dnsmasq-dns" containerID="cri-o://3f5c96235c4a2933af26512896b90574e1936ab976c41c7652a2226bcf9daf35" gracePeriod=10 Jan 22 07:19:36 crc kubenswrapper[4933]: I0122 07:19:36.231975 4933 generic.go:334] "Generic (PLEG): container finished" podID="f438afbc-73ea-44fc-856c-faf0ae11e192" containerID="3f5c96235c4a2933af26512896b90574e1936ab976c41c7652a2226bcf9daf35" exitCode=0 Jan 22 07:19:36 crc kubenswrapper[4933]: I0122 07:19:36.232172 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8486788887-z5njj" 
event={"ID":"f438afbc-73ea-44fc-856c-faf0ae11e192","Type":"ContainerDied","Data":"3f5c96235c4a2933af26512896b90574e1936ab976c41c7652a2226bcf9daf35"} Jan 22 07:19:36 crc kubenswrapper[4933]: I0122 07:19:36.236361 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dwzhf" event={"ID":"23fb5c4e-7288-4d2e-b4bc-881936d93e4e","Type":"ContainerStarted","Data":"2a274f33b29843d2ba351c2ba1db8ae013cd71027b6ed59135ca5a3a5def9943"} Jan 22 07:19:36 crc kubenswrapper[4933]: I0122 07:19:36.260756 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-dwzhf" podStartSLOduration=2.738590755 podStartE2EDuration="4.260739209s" podCreationTimestamp="2026-01-22 07:19:32 +0000 UTC" firstStartedPulling="2026-01-22 07:19:34.196753173 +0000 UTC m=+5622.033878526" lastFinishedPulling="2026-01-22 07:19:35.718901627 +0000 UTC m=+5623.556026980" observedRunningTime="2026-01-22 07:19:36.25589627 +0000 UTC m=+5624.093021633" watchObservedRunningTime="2026-01-22 07:19:36.260739209 +0000 UTC m=+5624.097864562" Jan 22 07:19:36 crc kubenswrapper[4933]: I0122 07:19:36.373790 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8486788887-z5njj" Jan 22 07:19:36 crc kubenswrapper[4933]: I0122 07:19:36.460018 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8lz5n\" (UniqueName: \"kubernetes.io/projected/f438afbc-73ea-44fc-856c-faf0ae11e192-kube-api-access-8lz5n\") pod \"f438afbc-73ea-44fc-856c-faf0ae11e192\" (UID: \"f438afbc-73ea-44fc-856c-faf0ae11e192\") " Jan 22 07:19:36 crc kubenswrapper[4933]: I0122 07:19:36.460248 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f438afbc-73ea-44fc-856c-faf0ae11e192-ovsdbserver-nb\") pod \"f438afbc-73ea-44fc-856c-faf0ae11e192\" (UID: \"f438afbc-73ea-44fc-856c-faf0ae11e192\") " Jan 22 07:19:36 crc kubenswrapper[4933]: I0122 07:19:36.460284 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f438afbc-73ea-44fc-856c-faf0ae11e192-config\") pod \"f438afbc-73ea-44fc-856c-faf0ae11e192\" (UID: \"f438afbc-73ea-44fc-856c-faf0ae11e192\") " Jan 22 07:19:36 crc kubenswrapper[4933]: I0122 07:19:36.460328 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f438afbc-73ea-44fc-856c-faf0ae11e192-dns-svc\") pod \"f438afbc-73ea-44fc-856c-faf0ae11e192\" (UID: \"f438afbc-73ea-44fc-856c-faf0ae11e192\") " Jan 22 07:19:36 crc kubenswrapper[4933]: I0122 07:19:36.461066 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f438afbc-73ea-44fc-856c-faf0ae11e192-ovsdbserver-sb\") pod \"f438afbc-73ea-44fc-856c-faf0ae11e192\" (UID: \"f438afbc-73ea-44fc-856c-faf0ae11e192\") " Jan 22 07:19:36 crc kubenswrapper[4933]: I0122 07:19:36.467007 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f438afbc-73ea-44fc-856c-faf0ae11e192-kube-api-access-8lz5n" (OuterVolumeSpecName: "kube-api-access-8lz5n") pod "f438afbc-73ea-44fc-856c-faf0ae11e192" (UID: "f438afbc-73ea-44fc-856c-faf0ae11e192"). InnerVolumeSpecName "kube-api-access-8lz5n". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:19:36 crc kubenswrapper[4933]: I0122 07:19:36.507065 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f438afbc-73ea-44fc-856c-faf0ae11e192-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f438afbc-73ea-44fc-856c-faf0ae11e192" (UID: "f438afbc-73ea-44fc-856c-faf0ae11e192"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:19:36 crc kubenswrapper[4933]: I0122 07:19:36.512092 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f438afbc-73ea-44fc-856c-faf0ae11e192-config" (OuterVolumeSpecName: "config") pod "f438afbc-73ea-44fc-856c-faf0ae11e192" (UID: "f438afbc-73ea-44fc-856c-faf0ae11e192"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:19:36 crc kubenswrapper[4933]: I0122 07:19:36.518461 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f438afbc-73ea-44fc-856c-faf0ae11e192-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "f438afbc-73ea-44fc-856c-faf0ae11e192" (UID: "f438afbc-73ea-44fc-856c-faf0ae11e192"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:19:36 crc kubenswrapper[4933]: I0122 07:19:36.535928 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f438afbc-73ea-44fc-856c-faf0ae11e192-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f438afbc-73ea-44fc-856c-faf0ae11e192" (UID: "f438afbc-73ea-44fc-856c-faf0ae11e192"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:19:36 crc kubenswrapper[4933]: I0122 07:19:36.562709 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f438afbc-73ea-44fc-856c-faf0ae11e192-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 07:19:36 crc kubenswrapper[4933]: I0122 07:19:36.562741 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f438afbc-73ea-44fc-856c-faf0ae11e192-config\") on node \"crc\" DevicePath \"\"" Jan 22 07:19:36 crc kubenswrapper[4933]: I0122 07:19:36.562751 4933 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f438afbc-73ea-44fc-856c-faf0ae11e192-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 07:19:36 crc kubenswrapper[4933]: I0122 07:19:36.562760 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f438afbc-73ea-44fc-856c-faf0ae11e192-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 07:19:36 crc kubenswrapper[4933]: I0122 07:19:36.562769 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8lz5n\" (UniqueName: \"kubernetes.io/projected/f438afbc-73ea-44fc-856c-faf0ae11e192-kube-api-access-8lz5n\") on node \"crc\" DevicePath \"\"" Jan 22 07:19:37 crc kubenswrapper[4933]: I0122 07:19:37.245747 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8486788887-z5njj" Jan 22 07:19:37 crc kubenswrapper[4933]: I0122 07:19:37.245753 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8486788887-z5njj" event={"ID":"f438afbc-73ea-44fc-856c-faf0ae11e192","Type":"ContainerDied","Data":"923227d0c4fa5ffe78917f885161d24cf6d4750f0f6267cf9c12b5e2bc2c096d"} Jan 22 07:19:37 crc kubenswrapper[4933]: I0122 07:19:37.246232 4933 scope.go:117] "RemoveContainer" containerID="3f5c96235c4a2933af26512896b90574e1936ab976c41c7652a2226bcf9daf35" Jan 22 07:19:37 crc kubenswrapper[4933]: I0122 07:19:37.284698 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8486788887-z5njj"] Jan 22 07:19:37 crc kubenswrapper[4933]: I0122 07:19:37.285636 4933 scope.go:117] "RemoveContainer" containerID="87eb03b0b7f322a1c2108b351a99636e927634210653b05810f1d11ead559cc5" Jan 22 07:19:37 crc kubenswrapper[4933]: I0122 07:19:37.294285 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8486788887-z5njj"] Jan 22 07:19:37 crc kubenswrapper[4933]: I0122 07:19:37.797316 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Jan 22 07:19:38 crc kubenswrapper[4933]: I0122 07:19:38.514031 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f438afbc-73ea-44fc-856c-faf0ae11e192" path="/var/lib/kubelet/pods/f438afbc-73ea-44fc-856c-faf0ae11e192/volumes" Jan 22 07:19:42 crc kubenswrapper[4933]: I0122 07:19:42.371986 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-dwzhf" Jan 22 07:19:42 crc kubenswrapper[4933]: I0122 07:19:42.372381 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-dwzhf" Jan 22 07:19:42 crc kubenswrapper[4933]: I0122 07:19:42.417898 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-dwzhf" Jan 22 07:19:43 crc kubenswrapper[4933]: I0122 07:19:43.357103 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-dwzhf" Jan 22 07:19:43 crc kubenswrapper[4933]: I0122 07:19:43.419252 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dwzhf"] Jan 22 07:19:45 crc kubenswrapper[4933]: I0122 07:19:45.313397 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-dwzhf" podUID="23fb5c4e-7288-4d2e-b4bc-881936d93e4e" containerName="registry-server" containerID="cri-o://2a274f33b29843d2ba351c2ba1db8ae013cd71027b6ed59135ca5a3a5def9943" gracePeriod=2 Jan 22 07:19:46 crc kubenswrapper[4933]: I0122 07:19:46.328827 4933 generic.go:334] "Generic (PLEG): container finished" podID="23fb5c4e-7288-4d2e-b4bc-881936d93e4e" containerID="2a274f33b29843d2ba351c2ba1db8ae013cd71027b6ed59135ca5a3a5def9943" exitCode=0 Jan 22 07:19:46 crc kubenswrapper[4933]: I0122 07:19:46.328875 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dwzhf" event={"ID":"23fb5c4e-7288-4d2e-b4bc-881936d93e4e","Type":"ContainerDied","Data":"2a274f33b29843d2ba351c2ba1db8ae013cd71027b6ed59135ca5a3a5def9943"} Jan 22 07:19:46 crc kubenswrapper[4933]: I0122 07:19:46.882143 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dwzhf" Jan 22 07:19:46 crc kubenswrapper[4933]: I0122 07:19:46.960699 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/23fb5c4e-7288-4d2e-b4bc-881936d93e4e-utilities\") pod \"23fb5c4e-7288-4d2e-b4bc-881936d93e4e\" (UID: \"23fb5c4e-7288-4d2e-b4bc-881936d93e4e\") " Jan 22 07:19:46 crc kubenswrapper[4933]: I0122 07:19:46.960756 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/23fb5c4e-7288-4d2e-b4bc-881936d93e4e-catalog-content\") pod \"23fb5c4e-7288-4d2e-b4bc-881936d93e4e\" (UID: \"23fb5c4e-7288-4d2e-b4bc-881936d93e4e\") " Jan 22 07:19:46 crc kubenswrapper[4933]: I0122 07:19:46.960874 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xrbcd\" (UniqueName: \"kubernetes.io/projected/23fb5c4e-7288-4d2e-b4bc-881936d93e4e-kube-api-access-xrbcd\") pod \"23fb5c4e-7288-4d2e-b4bc-881936d93e4e\" (UID: \"23fb5c4e-7288-4d2e-b4bc-881936d93e4e\") " Jan 22 07:19:46 crc kubenswrapper[4933]: I0122 07:19:46.962039 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/23fb5c4e-7288-4d2e-b4bc-881936d93e4e-utilities" (OuterVolumeSpecName: "utilities") pod "23fb5c4e-7288-4d2e-b4bc-881936d93e4e" (UID: "23fb5c4e-7288-4d2e-b4bc-881936d93e4e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:19:46 crc kubenswrapper[4933]: I0122 07:19:46.969381 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/23fb5c4e-7288-4d2e-b4bc-881936d93e4e-kube-api-access-xrbcd" (OuterVolumeSpecName: "kube-api-access-xrbcd") pod "23fb5c4e-7288-4d2e-b4bc-881936d93e4e" (UID: "23fb5c4e-7288-4d2e-b4bc-881936d93e4e"). InnerVolumeSpecName "kube-api-access-xrbcd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:19:46 crc kubenswrapper[4933]: I0122 07:19:46.985189 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/23fb5c4e-7288-4d2e-b4bc-881936d93e4e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "23fb5c4e-7288-4d2e-b4bc-881936d93e4e" (UID: "23fb5c4e-7288-4d2e-b4bc-881936d93e4e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:19:47 crc kubenswrapper[4933]: I0122 07:19:47.062857 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/23fb5c4e-7288-4d2e-b4bc-881936d93e4e-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 07:19:47 crc kubenswrapper[4933]: I0122 07:19:47.062899 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/23fb5c4e-7288-4d2e-b4bc-881936d93e4e-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 07:19:47 crc kubenswrapper[4933]: I0122 07:19:47.062913 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xrbcd\" (UniqueName: \"kubernetes.io/projected/23fb5c4e-7288-4d2e-b4bc-881936d93e4e-kube-api-access-xrbcd\") on node \"crc\" DevicePath \"\"" Jan 22 07:19:47 crc kubenswrapper[4933]: I0122 07:19:47.340775 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dwzhf" event={"ID":"23fb5c4e-7288-4d2e-b4bc-881936d93e4e","Type":"ContainerDied","Data":"3aa2cf376fa98729abb0e8529647bc3ae999e8e48e9b6be1eef430c293bde94a"} Jan 22 07:19:47 crc kubenswrapper[4933]: I0122 07:19:47.340832 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dwzhf" Jan 22 07:19:47 crc kubenswrapper[4933]: I0122 07:19:47.341105 4933 scope.go:117] "RemoveContainer" containerID="2a274f33b29843d2ba351c2ba1db8ae013cd71027b6ed59135ca5a3a5def9943" Jan 22 07:19:47 crc kubenswrapper[4933]: I0122 07:19:47.361377 4933 scope.go:117] "RemoveContainer" containerID="6c089dea6b50c60d000db8b006afc6f3f99ac8bf8a587152aa6df6678e8ef2b7" Jan 22 07:19:47 crc kubenswrapper[4933]: I0122 07:19:47.393548 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dwzhf"] Jan 22 07:19:47 crc kubenswrapper[4933]: I0122 07:19:47.402775 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-dwzhf"] Jan 22 07:19:47 crc kubenswrapper[4933]: I0122 07:19:47.404769 4933 scope.go:117] "RemoveContainer" containerID="e4570f6a21b60d1634006fad467127b5b2cfd96ac3b8c6b2ddb6a81b01936d59" Jan 22 07:19:48 crc kubenswrapper[4933]: I0122 07:19:48.504966 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="23fb5c4e-7288-4d2e-b4bc-881936d93e4e" path="/var/lib/kubelet/pods/23fb5c4e-7288-4d2e-b4bc-881936d93e4e/volumes" Jan 22 07:19:51 crc kubenswrapper[4933]: E0122 07:19:51.273185 4933 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.163:59384->38.102.83.163:45627: write tcp 38.102.83.163:59384->38.102.83.163:45627: write: broken pipe Jan 22 07:19:58 crc kubenswrapper[4933]: I0122 07:19:58.461395 4933 generic.go:334] "Generic (PLEG): container finished" podID="a17c6b58-5756-49ae-86a9-1f7919f137af" containerID="e96346b582b2e428b34ec3736f0b34fc1b16d281ecf3dc7a79949a4e0f4005df" exitCode=137 Jan 22 07:19:58 crc kubenswrapper[4933]: I0122 07:19:58.461504 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a17c6b58-5756-49ae-86a9-1f7919f137af","Type":"ContainerDied","Data":"e96346b582b2e428b34ec3736f0b34fc1b16d281ecf3dc7a79949a4e0f4005df"} Jan 22 07:19:58 crc kubenswrapper[4933]: I0122 07:19:58.641189 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 22 07:19:58 crc kubenswrapper[4933]: I0122 07:19:58.776611 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xdb5n\" (UniqueName: \"kubernetes.io/projected/a17c6b58-5756-49ae-86a9-1f7919f137af-kube-api-access-xdb5n\") pod \"a17c6b58-5756-49ae-86a9-1f7919f137af\" (UID: \"a17c6b58-5756-49ae-86a9-1f7919f137af\") " Jan 22 07:19:58 crc kubenswrapper[4933]: I0122 07:19:58.776758 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a17c6b58-5756-49ae-86a9-1f7919f137af-logs\") pod \"a17c6b58-5756-49ae-86a9-1f7919f137af\" (UID: \"a17c6b58-5756-49ae-86a9-1f7919f137af\") " Jan 22 07:19:58 crc kubenswrapper[4933]: I0122 07:19:58.776793 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a17c6b58-5756-49ae-86a9-1f7919f137af-config-data\") pod \"a17c6b58-5756-49ae-86a9-1f7919f137af\" (UID: \"a17c6b58-5756-49ae-86a9-1f7919f137af\") " Jan 22 07:19:58 crc kubenswrapper[4933]: I0122 07:19:58.776813 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a17c6b58-5756-49ae-86a9-1f7919f137af-config-data-custom\") pod \"a17c6b58-5756-49ae-86a9-1f7919f137af\" (UID: \"a17c6b58-5756-49ae-86a9-1f7919f137af\") " Jan 22 07:19:58 crc kubenswrapper[4933]: I0122 07:19:58.776852 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a17c6b58-5756-49ae-86a9-1f7919f137af-scripts\") pod \"a17c6b58-5756-49ae-86a9-1f7919f137af\" (UID: \"a17c6b58-5756-49ae-86a9-1f7919f137af\") " Jan 22 07:19:58 crc kubenswrapper[4933]: I0122 07:19:58.776887 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a17c6b58-5756-49ae-86a9-1f7919f137af-etc-machine-id\") pod \"a17c6b58-5756-49ae-86a9-1f7919f137af\" (UID: \"a17c6b58-5756-49ae-86a9-1f7919f137af\") " Jan 22 07:19:58 crc kubenswrapper[4933]: I0122 07:19:58.777020 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a17c6b58-5756-49ae-86a9-1f7919f137af-combined-ca-bundle\") pod \"a17c6b58-5756-49ae-86a9-1f7919f137af\" (UID: \"a17c6b58-5756-49ae-86a9-1f7919f137af\") " Jan 22 07:19:58 crc kubenswrapper[4933]: I0122 07:19:58.779326 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a17c6b58-5756-49ae-86a9-1f7919f137af-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "a17c6b58-5756-49ae-86a9-1f7919f137af" (UID: "a17c6b58-5756-49ae-86a9-1f7919f137af"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 07:19:58 crc kubenswrapper[4933]: I0122 07:19:58.780147 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a17c6b58-5756-49ae-86a9-1f7919f137af-logs" (OuterVolumeSpecName: "logs") pod "a17c6b58-5756-49ae-86a9-1f7919f137af" (UID: "a17c6b58-5756-49ae-86a9-1f7919f137af"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:19:58 crc kubenswrapper[4933]: I0122 07:19:58.783105 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a17c6b58-5756-49ae-86a9-1f7919f137af-scripts" (OuterVolumeSpecName: "scripts") pod "a17c6b58-5756-49ae-86a9-1f7919f137af" (UID: "a17c6b58-5756-49ae-86a9-1f7919f137af"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:19:58 crc kubenswrapper[4933]: I0122 07:19:58.783172 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a17c6b58-5756-49ae-86a9-1f7919f137af-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "a17c6b58-5756-49ae-86a9-1f7919f137af" (UID: "a17c6b58-5756-49ae-86a9-1f7919f137af"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:19:58 crc kubenswrapper[4933]: I0122 07:19:58.789765 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a17c6b58-5756-49ae-86a9-1f7919f137af-kube-api-access-xdb5n" (OuterVolumeSpecName: "kube-api-access-xdb5n") pod "a17c6b58-5756-49ae-86a9-1f7919f137af" (UID: "a17c6b58-5756-49ae-86a9-1f7919f137af"). InnerVolumeSpecName "kube-api-access-xdb5n". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:19:58 crc kubenswrapper[4933]: I0122 07:19:58.812208 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a17c6b58-5756-49ae-86a9-1f7919f137af-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a17c6b58-5756-49ae-86a9-1f7919f137af" (UID: "a17c6b58-5756-49ae-86a9-1f7919f137af"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:19:58 crc kubenswrapper[4933]: I0122 07:19:58.829364 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a17c6b58-5756-49ae-86a9-1f7919f137af-config-data" (OuterVolumeSpecName: "config-data") pod "a17c6b58-5756-49ae-86a9-1f7919f137af" (UID: "a17c6b58-5756-49ae-86a9-1f7919f137af"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:19:58 crc kubenswrapper[4933]: I0122 07:19:58.881456 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a17c6b58-5756-49ae-86a9-1f7919f137af-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:19:58 crc kubenswrapper[4933]: I0122 07:19:58.881493 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xdb5n\" (UniqueName: \"kubernetes.io/projected/a17c6b58-5756-49ae-86a9-1f7919f137af-kube-api-access-xdb5n\") on node \"crc\" DevicePath \"\"" Jan 22 07:19:58 crc kubenswrapper[4933]: I0122 07:19:58.881543 4933 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a17c6b58-5756-49ae-86a9-1f7919f137af-logs\") on node \"crc\" DevicePath \"\"" Jan 22 07:19:58 crc kubenswrapper[4933]: I0122 07:19:58.881597 4933 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a17c6b58-5756-49ae-86a9-1f7919f137af-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 22 07:19:58 crc kubenswrapper[4933]: I0122 07:19:58.881607 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a17c6b58-5756-49ae-86a9-1f7919f137af-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 07:19:58 crc kubenswrapper[4933]: I0122 07:19:58.881616 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a17c6b58-5756-49ae-86a9-1f7919f137af-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:19:58 crc kubenswrapper[4933]: I0122 07:19:58.881648 4933 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a17c6b58-5756-49ae-86a9-1f7919f137af-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.474115 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a17c6b58-5756-49ae-86a9-1f7919f137af","Type":"ContainerDied","Data":"700f1660d6906255306754c2a4d67510d7e45e4d51e1d881c16cd20604df34da"} Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.474196 4933 scope.go:117] "RemoveContainer" containerID="e96346b582b2e428b34ec3736f0b34fc1b16d281ecf3dc7a79949a4e0f4005df" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.474272 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.506954 4933 scope.go:117] "RemoveContainer" containerID="0b8bae7746dc410417356de0e3186e5c26f14477857826f2de4078f871b6547f" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.533031 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.551594 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.562201 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Jan 22 07:19:59 crc kubenswrapper[4933]: E0122 07:19:59.562547 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23fb5c4e-7288-4d2e-b4bc-881936d93e4e" containerName="registry-server" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.562565 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="23fb5c4e-7288-4d2e-b4bc-881936d93e4e" containerName="registry-server" Jan 22 07:19:59 crc kubenswrapper[4933]: E0122 07:19:59.562576 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f438afbc-73ea-44fc-856c-faf0ae11e192" containerName="dnsmasq-dns" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.562614 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="f438afbc-73ea-44fc-856c-faf0ae11e192" containerName="dnsmasq-dns" Jan 22 07:19:59 crc kubenswrapper[4933]: E0122 07:19:59.562624 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23fb5c4e-7288-4d2e-b4bc-881936d93e4e" containerName="extract-content" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.562630 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="23fb5c4e-7288-4d2e-b4bc-881936d93e4e" containerName="extract-content" Jan 22 07:19:59 crc kubenswrapper[4933]: E0122 07:19:59.562644 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f438afbc-73ea-44fc-856c-faf0ae11e192" containerName="init" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.562650 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="f438afbc-73ea-44fc-856c-faf0ae11e192" containerName="init" Jan 22 07:19:59 crc kubenswrapper[4933]: E0122 07:19:59.562669 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a17c6b58-5756-49ae-86a9-1f7919f137af" containerName="cinder-api-log" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.562676 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="a17c6b58-5756-49ae-86a9-1f7919f137af" containerName="cinder-api-log" Jan 22 07:19:59 crc kubenswrapper[4933]: E0122 07:19:59.562683 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23fb5c4e-7288-4d2e-b4bc-881936d93e4e" containerName="extract-utilities" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.562689 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="23fb5c4e-7288-4d2e-b4bc-881936d93e4e" containerName="extract-utilities" Jan 22 07:19:59 crc kubenswrapper[4933]: E0122 07:19:59.562708 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a17c6b58-5756-49ae-86a9-1f7919f137af" containerName="cinder-api" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.562714 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="a17c6b58-5756-49ae-86a9-1f7919f137af" containerName="cinder-api" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.562854 4933 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="a17c6b58-5756-49ae-86a9-1f7919f137af" containerName="cinder-api-log" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.562868 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="a17c6b58-5756-49ae-86a9-1f7919f137af" containerName="cinder-api" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.562895 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="f438afbc-73ea-44fc-856c-faf0ae11e192" containerName="dnsmasq-dns" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.562908 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="23fb5c4e-7288-4d2e-b4bc-881936d93e4e" containerName="registry-server" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.563934 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.567579 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.567596 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.567633 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.567991 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.568015 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-4h4bl" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.568064 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.576211 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.693488 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qw8wg\" (UniqueName: \"kubernetes.io/projected/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-kube-api-access-qw8wg\") pod \"cinder-api-0\" (UID: \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\") " pod="openstack/cinder-api-0" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.693559 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\") " pod="openstack/cinder-api-0" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.693586 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-config-data\") pod \"cinder-api-0\" (UID: \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\") " pod="openstack/cinder-api-0" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.693629 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-config-data-custom\") pod \"cinder-api-0\" (UID: \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\") " pod="openstack/cinder-api-0" Jan 22 07:19:59 crc kubenswrapper[4933]: 
I0122 07:19:59.693661 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\") " pod="openstack/cinder-api-0" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.693703 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-etc-machine-id\") pod \"cinder-api-0\" (UID: \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\") " pod="openstack/cinder-api-0" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.693722 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-scripts\") pod \"cinder-api-0\" (UID: \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\") " pod="openstack/cinder-api-0" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.693826 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-public-tls-certs\") pod \"cinder-api-0\" (UID: \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\") " pod="openstack/cinder-api-0" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.693871 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-logs\") pod \"cinder-api-0\" (UID: \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\") " pod="openstack/cinder-api-0" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.795999 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-etc-machine-id\") pod \"cinder-api-0\" (UID: \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\") " pod="openstack/cinder-api-0" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.796053 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-scripts\") pod \"cinder-api-0\" (UID: \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\") " pod="openstack/cinder-api-0" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.796140 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-etc-machine-id\") pod \"cinder-api-0\" (UID: \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\") " pod="openstack/cinder-api-0" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.796150 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-public-tls-certs\") pod \"cinder-api-0\" (UID: \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\") " pod="openstack/cinder-api-0" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.796214 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-logs\") pod \"cinder-api-0\" (UID: \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\") " pod="openstack/cinder-api-0" Jan 
22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.796274 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qw8wg\" (UniqueName: \"kubernetes.io/projected/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-kube-api-access-qw8wg\") pod \"cinder-api-0\" (UID: \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\") " pod="openstack/cinder-api-0" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.796335 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\") " pod="openstack/cinder-api-0" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.796366 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-config-data\") pod \"cinder-api-0\" (UID: \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\") " pod="openstack/cinder-api-0" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.796395 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-config-data-custom\") pod \"cinder-api-0\" (UID: \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\") " pod="openstack/cinder-api-0" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.796415 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\") " pod="openstack/cinder-api-0" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.796929 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-logs\") pod \"cinder-api-0\" (UID: \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\") " pod="openstack/cinder-api-0" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.801981 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\") " pod="openstack/cinder-api-0" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.801982 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\") " pod="openstack/cinder-api-0" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.803099 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-config-data-custom\") pod \"cinder-api-0\" (UID: \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\") " pod="openstack/cinder-api-0" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.809108 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-config-data\") pod \"cinder-api-0\" (UID: \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\") " pod="openstack/cinder-api-0" Jan 22 07:19:59 crc 
kubenswrapper[4933]: I0122 07:19:59.813656 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-scripts\") pod \"cinder-api-0\" (UID: \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\") " pod="openstack/cinder-api-0" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.814687 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-public-tls-certs\") pod \"cinder-api-0\" (UID: \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\") " pod="openstack/cinder-api-0" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.815985 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qw8wg\" (UniqueName: \"kubernetes.io/projected/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-kube-api-access-qw8wg\") pod \"cinder-api-0\" (UID: \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\") " pod="openstack/cinder-api-0" Jan 22 07:19:59 crc kubenswrapper[4933]: I0122 07:19:59.900991 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 22 07:20:00 crc kubenswrapper[4933]: I0122 07:20:00.323906 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 22 07:20:00 crc kubenswrapper[4933]: W0122 07:20:00.328581 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddf93fee5_3dbb_42ac_bf69_8cc6270fd6b1.slice/crio-fa4e086e3a87f171f6e5c4324f4dcbbe408c3744d5474ee736619f168a8f7f7e WatchSource:0}: Error finding container fa4e086e3a87f171f6e5c4324f4dcbbe408c3744d5474ee736619f168a8f7f7e: Status 404 returned error can't find the container with id fa4e086e3a87f171f6e5c4324f4dcbbe408c3744d5474ee736619f168a8f7f7e Jan 22 07:20:00 crc kubenswrapper[4933]: I0122 07:20:00.485086 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1","Type":"ContainerStarted","Data":"fa4e086e3a87f171f6e5c4324f4dcbbe408c3744d5474ee736619f168a8f7f7e"} Jan 22 07:20:00 crc kubenswrapper[4933]: I0122 07:20:00.501035 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a17c6b58-5756-49ae-86a9-1f7919f137af" path="/var/lib/kubelet/pods/a17c6b58-5756-49ae-86a9-1f7919f137af/volumes" Jan 22 07:20:01 crc kubenswrapper[4933]: I0122 07:20:01.504203 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1","Type":"ContainerStarted","Data":"d910aee000113c72735e01a4cc84d174be3c925f57ab25bcc1a9e496447a43d2"} Jan 22 07:20:02 crc kubenswrapper[4933]: I0122 07:20:02.521424 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1","Type":"ContainerStarted","Data":"6ab5b195b2a79d5aca3da2b89b5b634f51225b92b9594ce993107c2ee54625e2"} Jan 22 07:20:02 crc kubenswrapper[4933]: I0122 07:20:02.521796 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Jan 22 07:20:02 crc kubenswrapper[4933]: I0122 07:20:02.544276 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.544252775 podStartE2EDuration="3.544252775s" podCreationTimestamp="2026-01-22 07:19:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:20:02.540658027 +0000 UTC m=+5650.377783410" watchObservedRunningTime="2026-01-22 07:20:02.544252775 +0000 UTC m=+5650.381378138" Jan 22 07:20:11 crc kubenswrapper[4933]: I0122 07:20:11.683399 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Jan 22 07:20:30 crc kubenswrapper[4933]: I0122 07:20:30.601430 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Jan 22 07:20:30 crc kubenswrapper[4933]: I0122 07:20:30.603763 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 22 07:20:30 crc kubenswrapper[4933]: I0122 07:20:30.605468 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Jan 22 07:20:30 crc kubenswrapper[4933]: I0122 07:20:30.615726 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 22 07:20:30 crc kubenswrapper[4933]: I0122 07:20:30.701430 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1290d724-64e5-4bb7-890d-971660668122-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"1290d724-64e5-4bb7-890d-971660668122\") " pod="openstack/cinder-scheduler-0" Jan 22 07:20:30 crc kubenswrapper[4933]: I0122 07:20:30.701510 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sld9h\" (UniqueName: \"kubernetes.io/projected/1290d724-64e5-4bb7-890d-971660668122-kube-api-access-sld9h\") pod \"cinder-scheduler-0\" (UID: \"1290d724-64e5-4bb7-890d-971660668122\") " pod="openstack/cinder-scheduler-0" Jan 22 07:20:30 crc kubenswrapper[4933]: I0122 07:20:30.701681 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1290d724-64e5-4bb7-890d-971660668122-scripts\") pod \"cinder-scheduler-0\" (UID: \"1290d724-64e5-4bb7-890d-971660668122\") " pod="openstack/cinder-scheduler-0" Jan 22 07:20:30 crc kubenswrapper[4933]: I0122 07:20:30.701995 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1290d724-64e5-4bb7-890d-971660668122-config-data\") pod \"cinder-scheduler-0\" (UID: \"1290d724-64e5-4bb7-890d-971660668122\") " pod="openstack/cinder-scheduler-0" Jan 22 07:20:30 crc kubenswrapper[4933]: I0122 07:20:30.702188 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1290d724-64e5-4bb7-890d-971660668122-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"1290d724-64e5-4bb7-890d-971660668122\") " pod="openstack/cinder-scheduler-0" Jan 22 07:20:30 crc kubenswrapper[4933]: I0122 07:20:30.702219 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1290d724-64e5-4bb7-890d-971660668122-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"1290d724-64e5-4bb7-890d-971660668122\") " pod="openstack/cinder-scheduler-0" Jan 22 07:20:30 crc kubenswrapper[4933]: I0122 07:20:30.803780 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/1290d724-64e5-4bb7-890d-971660668122-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"1290d724-64e5-4bb7-890d-971660668122\") " pod="openstack/cinder-scheduler-0" Jan 22 07:20:30 crc kubenswrapper[4933]: I0122 07:20:30.803846 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1290d724-64e5-4bb7-890d-971660668122-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"1290d724-64e5-4bb7-890d-971660668122\") " pod="openstack/cinder-scheduler-0" Jan 22 07:20:30 crc kubenswrapper[4933]: I0122 07:20:30.803936 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1290d724-64e5-4bb7-890d-971660668122-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"1290d724-64e5-4bb7-890d-971660668122\") " pod="openstack/cinder-scheduler-0" Jan 22 07:20:30 crc kubenswrapper[4933]: I0122 07:20:30.804025 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sld9h\" (UniqueName: \"kubernetes.io/projected/1290d724-64e5-4bb7-890d-971660668122-kube-api-access-sld9h\") pod \"cinder-scheduler-0\" (UID: \"1290d724-64e5-4bb7-890d-971660668122\") " pod="openstack/cinder-scheduler-0" Jan 22 07:20:30 crc kubenswrapper[4933]: I0122 07:20:30.804126 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1290d724-64e5-4bb7-890d-971660668122-scripts\") pod \"cinder-scheduler-0\" (UID: \"1290d724-64e5-4bb7-890d-971660668122\") " pod="openstack/cinder-scheduler-0" Jan 22 07:20:30 crc kubenswrapper[4933]: I0122 07:20:30.803927 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1290d724-64e5-4bb7-890d-971660668122-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"1290d724-64e5-4bb7-890d-971660668122\") " pod="openstack/cinder-scheduler-0" Jan 22 07:20:30 crc kubenswrapper[4933]: I0122 07:20:30.804237 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1290d724-64e5-4bb7-890d-971660668122-config-data\") pod \"cinder-scheduler-0\" (UID: \"1290d724-64e5-4bb7-890d-971660668122\") " pod="openstack/cinder-scheduler-0" Jan 22 07:20:30 crc kubenswrapper[4933]: I0122 07:20:30.811947 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1290d724-64e5-4bb7-890d-971660668122-config-data\") pod \"cinder-scheduler-0\" (UID: \"1290d724-64e5-4bb7-890d-971660668122\") " pod="openstack/cinder-scheduler-0" Jan 22 07:20:30 crc kubenswrapper[4933]: I0122 07:20:30.812228 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1290d724-64e5-4bb7-890d-971660668122-scripts\") pod \"cinder-scheduler-0\" (UID: \"1290d724-64e5-4bb7-890d-971660668122\") " pod="openstack/cinder-scheduler-0" Jan 22 07:20:30 crc kubenswrapper[4933]: I0122 07:20:30.813307 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1290d724-64e5-4bb7-890d-971660668122-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"1290d724-64e5-4bb7-890d-971660668122\") " pod="openstack/cinder-scheduler-0" Jan 22 07:20:30 crc kubenswrapper[4933]: I0122 07:20:30.821819 4933 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1290d724-64e5-4bb7-890d-971660668122-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"1290d724-64e5-4bb7-890d-971660668122\") " pod="openstack/cinder-scheduler-0" Jan 22 07:20:30 crc kubenswrapper[4933]: I0122 07:20:30.829411 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sld9h\" (UniqueName: \"kubernetes.io/projected/1290d724-64e5-4bb7-890d-971660668122-kube-api-access-sld9h\") pod \"cinder-scheduler-0\" (UID: \"1290d724-64e5-4bb7-890d-971660668122\") " pod="openstack/cinder-scheduler-0" Jan 22 07:20:30 crc kubenswrapper[4933]: I0122 07:20:30.921162 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 22 07:20:31 crc kubenswrapper[4933]: I0122 07:20:31.353188 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 22 07:20:31 crc kubenswrapper[4933]: I0122 07:20:31.780290 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"1290d724-64e5-4bb7-890d-971660668122","Type":"ContainerStarted","Data":"1aee764575b79262c93ed8cd1d9c58b98bbb54373bb7857b5ad63ed5b20dd40d"} Jan 22 07:20:31 crc kubenswrapper[4933]: I0122 07:20:31.842060 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 22 07:20:31 crc kubenswrapper[4933]: I0122 07:20:31.842319 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="df93fee5-3dbb-42ac-bf69-8cc6270fd6b1" containerName="cinder-api-log" containerID="cri-o://d910aee000113c72735e01a4cc84d174be3c925f57ab25bcc1a9e496447a43d2" gracePeriod=30 Jan 22 07:20:31 crc kubenswrapper[4933]: I0122 07:20:31.842381 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="df93fee5-3dbb-42ac-bf69-8cc6270fd6b1" containerName="cinder-api" containerID="cri-o://6ab5b195b2a79d5aca3da2b89b5b634f51225b92b9594ce993107c2ee54625e2" gracePeriod=30 Jan 22 07:20:32 crc kubenswrapper[4933]: I0122 07:20:32.796345 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"1290d724-64e5-4bb7-890d-971660668122","Type":"ContainerStarted","Data":"b56655f873282b23738867afd098237604c256a3a88bf4c3bc29944cf97a023e"} Jan 22 07:20:32 crc kubenswrapper[4933]: I0122 07:20:32.796753 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"1290d724-64e5-4bb7-890d-971660668122","Type":"ContainerStarted","Data":"359953db659ebac2d746a63c6061c237179febd7a9e4156a5dc1793df4f272c1"} Jan 22 07:20:32 crc kubenswrapper[4933]: I0122 07:20:32.831085 4933 generic.go:334] "Generic (PLEG): container finished" podID="df93fee5-3dbb-42ac-bf69-8cc6270fd6b1" containerID="d910aee000113c72735e01a4cc84d174be3c925f57ab25bcc1a9e496447a43d2" exitCode=143 Jan 22 07:20:32 crc kubenswrapper[4933]: I0122 07:20:32.831145 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1","Type":"ContainerDied","Data":"d910aee000113c72735e01a4cc84d174be3c925f57ab25bcc1a9e496447a43d2"} Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.016597 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="df93fee5-3dbb-42ac-bf69-8cc6270fd6b1" containerName="cinder-api" probeResult="failure" output="Get 
\"https://10.217.1.53:8776/healthcheck\": read tcp 10.217.0.2:57940->10.217.1.53:8776: read: connection reset by peer" Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.437806 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.461485 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=5.461462055 podStartE2EDuration="5.461462055s" podCreationTimestamp="2026-01-22 07:20:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:20:32.835388165 +0000 UTC m=+5680.672513518" watchObservedRunningTime="2026-01-22 07:20:35.461462055 +0000 UTC m=+5683.298587408" Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.500356 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-public-tls-certs\") pod \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\" (UID: \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\") " Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.500657 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-logs\") pod \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\" (UID: \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\") " Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.500706 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-config-data-custom\") pod \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\" (UID: \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\") " Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.500766 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-etc-machine-id\") pod \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\" (UID: \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\") " Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.500800 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-scripts\") pod \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\" (UID: \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\") " Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.500826 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-internal-tls-certs\") pod \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\" (UID: \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\") " Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.500845 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-config-data\") pod \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\" (UID: \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\") " Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.500885 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-combined-ca-bundle\") pod \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\" (UID: \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\") " Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.500944 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qw8wg\" (UniqueName: \"kubernetes.io/projected/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-kube-api-access-qw8wg\") pod \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\" (UID: \"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1\") " Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.501973 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-logs" (OuterVolumeSpecName: "logs") pod "df93fee5-3dbb-42ac-bf69-8cc6270fd6b1" (UID: "df93fee5-3dbb-42ac-bf69-8cc6270fd6b1"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.505911 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "df93fee5-3dbb-42ac-bf69-8cc6270fd6b1" (UID: "df93fee5-3dbb-42ac-bf69-8cc6270fd6b1"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.506220 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "df93fee5-3dbb-42ac-bf69-8cc6270fd6b1" (UID: "df93fee5-3dbb-42ac-bf69-8cc6270fd6b1"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.509951 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-scripts" (OuterVolumeSpecName: "scripts") pod "df93fee5-3dbb-42ac-bf69-8cc6270fd6b1" (UID: "df93fee5-3dbb-42ac-bf69-8cc6270fd6b1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.523046 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-kube-api-access-qw8wg" (OuterVolumeSpecName: "kube-api-access-qw8wg") pod "df93fee5-3dbb-42ac-bf69-8cc6270fd6b1" (UID: "df93fee5-3dbb-42ac-bf69-8cc6270fd6b1"). InnerVolumeSpecName "kube-api-access-qw8wg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.540381 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "df93fee5-3dbb-42ac-bf69-8cc6270fd6b1" (UID: "df93fee5-3dbb-42ac-bf69-8cc6270fd6b1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.553895 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "df93fee5-3dbb-42ac-bf69-8cc6270fd6b1" (UID: "df93fee5-3dbb-42ac-bf69-8cc6270fd6b1"). 
InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.559835 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "df93fee5-3dbb-42ac-bf69-8cc6270fd6b1" (UID: "df93fee5-3dbb-42ac-bf69-8cc6270fd6b1"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.562290 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-config-data" (OuterVolumeSpecName: "config-data") pod "df93fee5-3dbb-42ac-bf69-8cc6270fd6b1" (UID: "df93fee5-3dbb-42ac-bf69-8cc6270fd6b1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.603141 4933 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.603171 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.603181 4933 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.603189 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.603197 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.603206 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qw8wg\" (UniqueName: \"kubernetes.io/projected/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-kube-api-access-qw8wg\") on node \"crc\" DevicePath \"\"" Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.603217 4933 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.603225 4933 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-logs\") on node \"crc\" DevicePath \"\"" Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.603232 4933 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.861124 4933 generic.go:334] "Generic (PLEG): container finished" podID="df93fee5-3dbb-42ac-bf69-8cc6270fd6b1" 
containerID="6ab5b195b2a79d5aca3da2b89b5b634f51225b92b9594ce993107c2ee54625e2" exitCode=0 Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.861176 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1","Type":"ContainerDied","Data":"6ab5b195b2a79d5aca3da2b89b5b634f51225b92b9594ce993107c2ee54625e2"} Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.861207 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"df93fee5-3dbb-42ac-bf69-8cc6270fd6b1","Type":"ContainerDied","Data":"fa4e086e3a87f171f6e5c4324f4dcbbe408c3744d5474ee736619f168a8f7f7e"} Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.861227 4933 scope.go:117] "RemoveContainer" containerID="6ab5b195b2a79d5aca3da2b89b5b634f51225b92b9594ce993107c2ee54625e2" Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.861297 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.890732 4933 scope.go:117] "RemoveContainer" containerID="d910aee000113c72735e01a4cc84d174be3c925f57ab25bcc1a9e496447a43d2" Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.912750 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.922657 4933 scope.go:117] "RemoveContainer" containerID="6ab5b195b2a79d5aca3da2b89b5b634f51225b92b9594ce993107c2ee54625e2" Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.922809 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 22 07:20:35 crc kubenswrapper[4933]: E0122 07:20:35.923262 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6ab5b195b2a79d5aca3da2b89b5b634f51225b92b9594ce993107c2ee54625e2\": container with ID starting with 6ab5b195b2a79d5aca3da2b89b5b634f51225b92b9594ce993107c2ee54625e2 not found: ID does not exist" containerID="6ab5b195b2a79d5aca3da2b89b5b634f51225b92b9594ce993107c2ee54625e2" Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.923334 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ab5b195b2a79d5aca3da2b89b5b634f51225b92b9594ce993107c2ee54625e2"} err="failed to get container status \"6ab5b195b2a79d5aca3da2b89b5b634f51225b92b9594ce993107c2ee54625e2\": rpc error: code = NotFound desc = could not find container \"6ab5b195b2a79d5aca3da2b89b5b634f51225b92b9594ce993107c2ee54625e2\": container with ID starting with 6ab5b195b2a79d5aca3da2b89b5b634f51225b92b9594ce993107c2ee54625e2 not found: ID does not exist" Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.923375 4933 scope.go:117] "RemoveContainer" containerID="d910aee000113c72735e01a4cc84d174be3c925f57ab25bcc1a9e496447a43d2" Jan 22 07:20:35 crc kubenswrapper[4933]: E0122 07:20:35.923842 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d910aee000113c72735e01a4cc84d174be3c925f57ab25bcc1a9e496447a43d2\": container with ID starting with d910aee000113c72735e01a4cc84d174be3c925f57ab25bcc1a9e496447a43d2 not found: ID does not exist" containerID="d910aee000113c72735e01a4cc84d174be3c925f57ab25bcc1a9e496447a43d2" Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.923882 4933 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"d910aee000113c72735e01a4cc84d174be3c925f57ab25bcc1a9e496447a43d2"} err="failed to get container status \"d910aee000113c72735e01a4cc84d174be3c925f57ab25bcc1a9e496447a43d2\": rpc error: code = NotFound desc = could not find container \"d910aee000113c72735e01a4cc84d174be3c925f57ab25bcc1a9e496447a43d2\": container with ID starting with d910aee000113c72735e01a4cc84d174be3c925f57ab25bcc1a9e496447a43d2 not found: ID does not exist" Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.926841 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.943619 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Jan 22 07:20:35 crc kubenswrapper[4933]: E0122 07:20:35.944037 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df93fee5-3dbb-42ac-bf69-8cc6270fd6b1" containerName="cinder-api" Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.944058 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="df93fee5-3dbb-42ac-bf69-8cc6270fd6b1" containerName="cinder-api" Jan 22 07:20:35 crc kubenswrapper[4933]: E0122 07:20:35.944105 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df93fee5-3dbb-42ac-bf69-8cc6270fd6b1" containerName="cinder-api-log" Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.944114 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="df93fee5-3dbb-42ac-bf69-8cc6270fd6b1" containerName="cinder-api-log" Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.944352 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="df93fee5-3dbb-42ac-bf69-8cc6270fd6b1" containerName="cinder-api-log" Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.944375 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="df93fee5-3dbb-42ac-bf69-8cc6270fd6b1" containerName="cinder-api" Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.945431 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.950171 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.950324 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.950404 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Jan 22 07:20:35 crc kubenswrapper[4933]: I0122 07:20:35.953589 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 22 07:20:36 crc kubenswrapper[4933]: I0122 07:20:36.009756 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca-logs\") pod \"cinder-api-0\" (UID: \"b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca\") " pod="openstack/cinder-api-0" Jan 22 07:20:36 crc kubenswrapper[4933]: I0122 07:20:36.009800 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca\") " pod="openstack/cinder-api-0" Jan 22 07:20:36 crc kubenswrapper[4933]: I0122 07:20:36.009823 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca\") " pod="openstack/cinder-api-0" Jan 22 07:20:36 crc kubenswrapper[4933]: I0122 07:20:36.009908 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca-config-data-custom\") pod \"cinder-api-0\" (UID: \"b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca\") " pod="openstack/cinder-api-0" Jan 22 07:20:36 crc kubenswrapper[4933]: I0122 07:20:36.009966 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6phgf\" (UniqueName: \"kubernetes.io/projected/b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca-kube-api-access-6phgf\") pod \"cinder-api-0\" (UID: \"b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca\") " pod="openstack/cinder-api-0" Jan 22 07:20:36 crc kubenswrapper[4933]: I0122 07:20:36.009990 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca-public-tls-certs\") pod \"cinder-api-0\" (UID: \"b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca\") " pod="openstack/cinder-api-0" Jan 22 07:20:36 crc kubenswrapper[4933]: I0122 07:20:36.010025 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca-scripts\") pod \"cinder-api-0\" (UID: \"b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca\") " pod="openstack/cinder-api-0" Jan 22 07:20:36 crc kubenswrapper[4933]: I0122 07:20:36.010044 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca-config-data\") pod \"cinder-api-0\" (UID: \"b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca\") " pod="openstack/cinder-api-0" Jan 22 07:20:36 crc kubenswrapper[4933]: I0122 07:20:36.010174 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca-etc-machine-id\") pod \"cinder-api-0\" (UID: \"b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca\") " pod="openstack/cinder-api-0" Jan 22 07:20:36 crc kubenswrapper[4933]: I0122 07:20:36.111465 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6phgf\" (UniqueName: \"kubernetes.io/projected/b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca-kube-api-access-6phgf\") pod \"cinder-api-0\" (UID: \"b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca\") " pod="openstack/cinder-api-0" Jan 22 07:20:36 crc kubenswrapper[4933]: I0122 07:20:36.111513 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca-public-tls-certs\") pod \"cinder-api-0\" (UID: \"b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca\") " pod="openstack/cinder-api-0" Jan 22 07:20:36 crc kubenswrapper[4933]: I0122 07:20:36.111544 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca-scripts\") pod \"cinder-api-0\" (UID: \"b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca\") " pod="openstack/cinder-api-0" Jan 22 07:20:36 crc kubenswrapper[4933]: I0122 07:20:36.111566 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca-config-data\") pod \"cinder-api-0\" (UID: \"b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca\") " pod="openstack/cinder-api-0" Jan 22 07:20:36 crc kubenswrapper[4933]: I0122 07:20:36.111618 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca-etc-machine-id\") pod \"cinder-api-0\" (UID: \"b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca\") " pod="openstack/cinder-api-0" Jan 22 07:20:36 crc kubenswrapper[4933]: I0122 07:20:36.111689 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca-logs\") pod \"cinder-api-0\" (UID: \"b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca\") " pod="openstack/cinder-api-0" Jan 22 07:20:36 crc kubenswrapper[4933]: I0122 07:20:36.111714 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca\") " pod="openstack/cinder-api-0" Jan 22 07:20:36 crc kubenswrapper[4933]: I0122 07:20:36.111734 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca\") " pod="openstack/cinder-api-0" Jan 22 07:20:36 crc kubenswrapper[4933]: I0122 07:20:36.111775 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca-etc-machine-id\") pod \"cinder-api-0\" (UID: \"b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca\") " pod="openstack/cinder-api-0" Jan 22 07:20:36 crc kubenswrapper[4933]: I0122 07:20:36.111793 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca-config-data-custom\") pod \"cinder-api-0\" (UID: \"b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca\") " pod="openstack/cinder-api-0" Jan 22 07:20:36 crc kubenswrapper[4933]: I0122 07:20:36.112592 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca-logs\") pod \"cinder-api-0\" (UID: \"b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca\") " pod="openstack/cinder-api-0" Jan 22 07:20:36 crc kubenswrapper[4933]: I0122 07:20:36.116897 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca-public-tls-certs\") pod \"cinder-api-0\" (UID: \"b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca\") " pod="openstack/cinder-api-0" Jan 22 07:20:36 crc kubenswrapper[4933]: I0122 07:20:36.118187 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca\") " pod="openstack/cinder-api-0" Jan 22 07:20:36 crc kubenswrapper[4933]: I0122 07:20:36.118420 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca-config-data-custom\") pod \"cinder-api-0\" (UID: \"b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca\") " pod="openstack/cinder-api-0" Jan 22 07:20:36 crc kubenswrapper[4933]: I0122 07:20:36.118500 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca-scripts\") pod \"cinder-api-0\" (UID: \"b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca\") " pod="openstack/cinder-api-0" Jan 22 07:20:36 crc kubenswrapper[4933]: I0122 07:20:36.121197 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca\") " pod="openstack/cinder-api-0" Jan 22 07:20:36 crc kubenswrapper[4933]: I0122 07:20:36.121180 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca-config-data\") pod \"cinder-api-0\" (UID: \"b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca\") " pod="openstack/cinder-api-0" Jan 22 07:20:36 crc kubenswrapper[4933]: I0122 07:20:36.134758 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6phgf\" (UniqueName: \"kubernetes.io/projected/b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca-kube-api-access-6phgf\") pod \"cinder-api-0\" (UID: \"b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca\") " pod="openstack/cinder-api-0" Jan 22 07:20:36 crc kubenswrapper[4933]: I0122 07:20:36.274672 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 22 07:20:36 crc kubenswrapper[4933]: I0122 07:20:36.501872 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="df93fee5-3dbb-42ac-bf69-8cc6270fd6b1" path="/var/lib/kubelet/pods/df93fee5-3dbb-42ac-bf69-8cc6270fd6b1/volumes" Jan 22 07:20:36 crc kubenswrapper[4933]: I0122 07:20:36.540252 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 22 07:20:36 crc kubenswrapper[4933]: W0122 07:20:36.544771 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb3d8f772_24e9_4f04_ba84_6f8f1c36d9ca.slice/crio-76bf6e2f2bb6cadd05a15f15c88d250bd48e59084a7169de64aa5fe77a5d3b39 WatchSource:0}: Error finding container 76bf6e2f2bb6cadd05a15f15c88d250bd48e59084a7169de64aa5fe77a5d3b39: Status 404 returned error can't find the container with id 76bf6e2f2bb6cadd05a15f15c88d250bd48e59084a7169de64aa5fe77a5d3b39 Jan 22 07:20:36 crc kubenswrapper[4933]: I0122 07:20:36.871210 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca","Type":"ContainerStarted","Data":"76bf6e2f2bb6cadd05a15f15c88d250bd48e59084a7169de64aa5fe77a5d3b39"} Jan 22 07:20:37 crc kubenswrapper[4933]: I0122 07:20:37.883504 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca","Type":"ContainerStarted","Data":"1a62de58351884f37496485a4a56a13eb3cfce778ea722447df7644fcffb532d"} Jan 22 07:20:37 crc kubenswrapper[4933]: I0122 07:20:37.883849 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Jan 22 07:20:37 crc kubenswrapper[4933]: I0122 07:20:37.883863 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca","Type":"ContainerStarted","Data":"e11606f042565fe31564a9b1623d55efc6faf5ce79be59484522593c65b8135a"} Jan 22 07:20:37 crc kubenswrapper[4933]: I0122 07:20:37.934139 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=2.9341160950000003 podStartE2EDuration="2.934116095s" podCreationTimestamp="2026-01-22 07:20:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:20:37.926993221 +0000 UTC m=+5685.764118594" watchObservedRunningTime="2026-01-22 07:20:37.934116095 +0000 UTC m=+5685.771241448" Jan 22 07:20:41 crc kubenswrapper[4933]: I0122 07:20:41.143768 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 22 07:20:41 crc kubenswrapper[4933]: I0122 07:20:41.200302 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 22 07:20:41 crc kubenswrapper[4933]: I0122 07:20:41.919203 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="1290d724-64e5-4bb7-890d-971660668122" containerName="cinder-scheduler" containerID="cri-o://359953db659ebac2d746a63c6061c237179febd7a9e4156a5dc1793df4f272c1" gracePeriod=30 Jan 22 07:20:41 crc kubenswrapper[4933]: I0122 07:20:41.919297 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="1290d724-64e5-4bb7-890d-971660668122" containerName="probe" 
containerID="cri-o://b56655f873282b23738867afd098237604c256a3a88bf4c3bc29944cf97a023e" gracePeriod=30 Jan 22 07:20:42 crc kubenswrapper[4933]: I0122 07:20:42.930595 4933 generic.go:334] "Generic (PLEG): container finished" podID="1290d724-64e5-4bb7-890d-971660668122" containerID="b56655f873282b23738867afd098237604c256a3a88bf4c3bc29944cf97a023e" exitCode=0 Jan 22 07:20:42 crc kubenswrapper[4933]: I0122 07:20:42.930639 4933 generic.go:334] "Generic (PLEG): container finished" podID="1290d724-64e5-4bb7-890d-971660668122" containerID="359953db659ebac2d746a63c6061c237179febd7a9e4156a5dc1793df4f272c1" exitCode=0 Jan 22 07:20:42 crc kubenswrapper[4933]: I0122 07:20:42.930665 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"1290d724-64e5-4bb7-890d-971660668122","Type":"ContainerDied","Data":"b56655f873282b23738867afd098237604c256a3a88bf4c3bc29944cf97a023e"} Jan 22 07:20:42 crc kubenswrapper[4933]: I0122 07:20:42.930691 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"1290d724-64e5-4bb7-890d-971660668122","Type":"ContainerDied","Data":"359953db659ebac2d746a63c6061c237179febd7a9e4156a5dc1793df4f272c1"} Jan 22 07:20:43 crc kubenswrapper[4933]: I0122 07:20:43.140598 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 22 07:20:43 crc kubenswrapper[4933]: I0122 07:20:43.244288 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1290d724-64e5-4bb7-890d-971660668122-combined-ca-bundle\") pod \"1290d724-64e5-4bb7-890d-971660668122\" (UID: \"1290d724-64e5-4bb7-890d-971660668122\") " Jan 22 07:20:43 crc kubenswrapper[4933]: I0122 07:20:43.244352 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1290d724-64e5-4bb7-890d-971660668122-etc-machine-id\") pod \"1290d724-64e5-4bb7-890d-971660668122\" (UID: \"1290d724-64e5-4bb7-890d-971660668122\") " Jan 22 07:20:43 crc kubenswrapper[4933]: I0122 07:20:43.244429 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1290d724-64e5-4bb7-890d-971660668122-config-data\") pod \"1290d724-64e5-4bb7-890d-971660668122\" (UID: \"1290d724-64e5-4bb7-890d-971660668122\") " Jan 22 07:20:43 crc kubenswrapper[4933]: I0122 07:20:43.244478 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sld9h\" (UniqueName: \"kubernetes.io/projected/1290d724-64e5-4bb7-890d-971660668122-kube-api-access-sld9h\") pod \"1290d724-64e5-4bb7-890d-971660668122\" (UID: \"1290d724-64e5-4bb7-890d-971660668122\") " Jan 22 07:20:43 crc kubenswrapper[4933]: I0122 07:20:43.244553 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1290d724-64e5-4bb7-890d-971660668122-config-data-custom\") pod \"1290d724-64e5-4bb7-890d-971660668122\" (UID: \"1290d724-64e5-4bb7-890d-971660668122\") " Jan 22 07:20:43 crc kubenswrapper[4933]: I0122 07:20:43.244573 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1290d724-64e5-4bb7-890d-971660668122-scripts\") pod \"1290d724-64e5-4bb7-890d-971660668122\" (UID: \"1290d724-64e5-4bb7-890d-971660668122\") " Jan 22 07:20:43 crc 
kubenswrapper[4933]: I0122 07:20:43.244921 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1290d724-64e5-4bb7-890d-971660668122-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "1290d724-64e5-4bb7-890d-971660668122" (UID: "1290d724-64e5-4bb7-890d-971660668122"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 07:20:43 crc kubenswrapper[4933]: I0122 07:20:43.245704 4933 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1290d724-64e5-4bb7-890d-971660668122-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 22 07:20:43 crc kubenswrapper[4933]: I0122 07:20:43.249524 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1290d724-64e5-4bb7-890d-971660668122-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "1290d724-64e5-4bb7-890d-971660668122" (UID: "1290d724-64e5-4bb7-890d-971660668122"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:20:43 crc kubenswrapper[4933]: I0122 07:20:43.249568 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1290d724-64e5-4bb7-890d-971660668122-kube-api-access-sld9h" (OuterVolumeSpecName: "kube-api-access-sld9h") pod "1290d724-64e5-4bb7-890d-971660668122" (UID: "1290d724-64e5-4bb7-890d-971660668122"). InnerVolumeSpecName "kube-api-access-sld9h". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:20:43 crc kubenswrapper[4933]: I0122 07:20:43.249949 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1290d724-64e5-4bb7-890d-971660668122-scripts" (OuterVolumeSpecName: "scripts") pod "1290d724-64e5-4bb7-890d-971660668122" (UID: "1290d724-64e5-4bb7-890d-971660668122"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:20:43 crc kubenswrapper[4933]: I0122 07:20:43.293717 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1290d724-64e5-4bb7-890d-971660668122-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1290d724-64e5-4bb7-890d-971660668122" (UID: "1290d724-64e5-4bb7-890d-971660668122"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:20:43 crc kubenswrapper[4933]: I0122 07:20:43.347311 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1290d724-64e5-4bb7-890d-971660668122-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:20:43 crc kubenswrapper[4933]: I0122 07:20:43.347349 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sld9h\" (UniqueName: \"kubernetes.io/projected/1290d724-64e5-4bb7-890d-971660668122-kube-api-access-sld9h\") on node \"crc\" DevicePath \"\"" Jan 22 07:20:43 crc kubenswrapper[4933]: I0122 07:20:43.347362 4933 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1290d724-64e5-4bb7-890d-971660668122-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 22 07:20:43 crc kubenswrapper[4933]: I0122 07:20:43.347373 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1290d724-64e5-4bb7-890d-971660668122-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:20:43 crc kubenswrapper[4933]: I0122 07:20:43.364987 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1290d724-64e5-4bb7-890d-971660668122-config-data" (OuterVolumeSpecName: "config-data") pod "1290d724-64e5-4bb7-890d-971660668122" (UID: "1290d724-64e5-4bb7-890d-971660668122"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:20:43 crc kubenswrapper[4933]: I0122 07:20:43.449260 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1290d724-64e5-4bb7-890d-971660668122-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 07:20:43 crc kubenswrapper[4933]: I0122 07:20:43.953136 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"1290d724-64e5-4bb7-890d-971660668122","Type":"ContainerDied","Data":"1aee764575b79262c93ed8cd1d9c58b98bbb54373bb7857b5ad63ed5b20dd40d"} Jan 22 07:20:43 crc kubenswrapper[4933]: I0122 07:20:43.953199 4933 scope.go:117] "RemoveContainer" containerID="b56655f873282b23738867afd098237604c256a3a88bf4c3bc29944cf97a023e" Jan 22 07:20:43 crc kubenswrapper[4933]: I0122 07:20:43.953346 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 22 07:20:43 crc kubenswrapper[4933]: I0122 07:20:43.978417 4933 scope.go:117] "RemoveContainer" containerID="359953db659ebac2d746a63c6061c237179febd7a9e4156a5dc1793df4f272c1" Jan 22 07:20:44 crc kubenswrapper[4933]: I0122 07:20:44.001325 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 22 07:20:44 crc kubenswrapper[4933]: I0122 07:20:44.019498 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 22 07:20:44 crc kubenswrapper[4933]: I0122 07:20:44.037434 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Jan 22 07:20:44 crc kubenswrapper[4933]: E0122 07:20:44.037896 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1290d724-64e5-4bb7-890d-971660668122" containerName="probe" Jan 22 07:20:44 crc kubenswrapper[4933]: I0122 07:20:44.037921 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="1290d724-64e5-4bb7-890d-971660668122" containerName="probe" Jan 22 07:20:44 crc kubenswrapper[4933]: E0122 07:20:44.037942 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1290d724-64e5-4bb7-890d-971660668122" containerName="cinder-scheduler" Jan 22 07:20:44 crc kubenswrapper[4933]: I0122 07:20:44.037952 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="1290d724-64e5-4bb7-890d-971660668122" containerName="cinder-scheduler" Jan 22 07:20:44 crc kubenswrapper[4933]: I0122 07:20:44.038205 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="1290d724-64e5-4bb7-890d-971660668122" containerName="probe" Jan 22 07:20:44 crc kubenswrapper[4933]: I0122 07:20:44.038231 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="1290d724-64e5-4bb7-890d-971660668122" containerName="cinder-scheduler" Jan 22 07:20:44 crc kubenswrapper[4933]: I0122 07:20:44.039463 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 22 07:20:44 crc kubenswrapper[4933]: I0122 07:20:44.044192 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Jan 22 07:20:44 crc kubenswrapper[4933]: I0122 07:20:44.047463 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 22 07:20:44 crc kubenswrapper[4933]: I0122 07:20:44.160959 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-98kvh\" (UniqueName: \"kubernetes.io/projected/716d4067-62fa-47c4-9874-078e19672c93-kube-api-access-98kvh\") pod \"cinder-scheduler-0\" (UID: \"716d4067-62fa-47c4-9874-078e19672c93\") " pod="openstack/cinder-scheduler-0" Jan 22 07:20:44 crc kubenswrapper[4933]: I0122 07:20:44.161138 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/716d4067-62fa-47c4-9874-078e19672c93-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"716d4067-62fa-47c4-9874-078e19672c93\") " pod="openstack/cinder-scheduler-0" Jan 22 07:20:44 crc kubenswrapper[4933]: I0122 07:20:44.161287 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/716d4067-62fa-47c4-9874-078e19672c93-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"716d4067-62fa-47c4-9874-078e19672c93\") " pod="openstack/cinder-scheduler-0" Jan 22 07:20:44 crc kubenswrapper[4933]: I0122 07:20:44.161337 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/716d4067-62fa-47c4-9874-078e19672c93-config-data\") pod \"cinder-scheduler-0\" (UID: \"716d4067-62fa-47c4-9874-078e19672c93\") " pod="openstack/cinder-scheduler-0" Jan 22 07:20:44 crc kubenswrapper[4933]: I0122 07:20:44.161420 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/716d4067-62fa-47c4-9874-078e19672c93-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"716d4067-62fa-47c4-9874-078e19672c93\") " pod="openstack/cinder-scheduler-0" Jan 22 07:20:44 crc kubenswrapper[4933]: I0122 07:20:44.161480 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/716d4067-62fa-47c4-9874-078e19672c93-scripts\") pod \"cinder-scheduler-0\" (UID: \"716d4067-62fa-47c4-9874-078e19672c93\") " pod="openstack/cinder-scheduler-0" Jan 22 07:20:44 crc kubenswrapper[4933]: I0122 07:20:44.262828 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/716d4067-62fa-47c4-9874-078e19672c93-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"716d4067-62fa-47c4-9874-078e19672c93\") " pod="openstack/cinder-scheduler-0" Jan 22 07:20:44 crc kubenswrapper[4933]: I0122 07:20:44.262886 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/716d4067-62fa-47c4-9874-078e19672c93-config-data\") pod \"cinder-scheduler-0\" (UID: \"716d4067-62fa-47c4-9874-078e19672c93\") " pod="openstack/cinder-scheduler-0" Jan 22 07:20:44 crc kubenswrapper[4933]: I0122 07:20:44.262943 4933 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/716d4067-62fa-47c4-9874-078e19672c93-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"716d4067-62fa-47c4-9874-078e19672c93\") " pod="openstack/cinder-scheduler-0" Jan 22 07:20:44 crc kubenswrapper[4933]: I0122 07:20:44.262972 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/716d4067-62fa-47c4-9874-078e19672c93-scripts\") pod \"cinder-scheduler-0\" (UID: \"716d4067-62fa-47c4-9874-078e19672c93\") " pod="openstack/cinder-scheduler-0" Jan 22 07:20:44 crc kubenswrapper[4933]: I0122 07:20:44.262994 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/716d4067-62fa-47c4-9874-078e19672c93-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"716d4067-62fa-47c4-9874-078e19672c93\") " pod="openstack/cinder-scheduler-0" Jan 22 07:20:44 crc kubenswrapper[4933]: I0122 07:20:44.263004 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-98kvh\" (UniqueName: \"kubernetes.io/projected/716d4067-62fa-47c4-9874-078e19672c93-kube-api-access-98kvh\") pod \"cinder-scheduler-0\" (UID: \"716d4067-62fa-47c4-9874-078e19672c93\") " pod="openstack/cinder-scheduler-0" Jan 22 07:20:44 crc kubenswrapper[4933]: I0122 07:20:44.263195 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/716d4067-62fa-47c4-9874-078e19672c93-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"716d4067-62fa-47c4-9874-078e19672c93\") " pod="openstack/cinder-scheduler-0" Jan 22 07:20:44 crc kubenswrapper[4933]: I0122 07:20:44.268462 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/716d4067-62fa-47c4-9874-078e19672c93-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"716d4067-62fa-47c4-9874-078e19672c93\") " pod="openstack/cinder-scheduler-0" Jan 22 07:20:44 crc kubenswrapper[4933]: I0122 07:20:44.268572 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/716d4067-62fa-47c4-9874-078e19672c93-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"716d4067-62fa-47c4-9874-078e19672c93\") " pod="openstack/cinder-scheduler-0" Jan 22 07:20:44 crc kubenswrapper[4933]: I0122 07:20:44.268873 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/716d4067-62fa-47c4-9874-078e19672c93-config-data\") pod \"cinder-scheduler-0\" (UID: \"716d4067-62fa-47c4-9874-078e19672c93\") " pod="openstack/cinder-scheduler-0" Jan 22 07:20:44 crc kubenswrapper[4933]: I0122 07:20:44.268923 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/716d4067-62fa-47c4-9874-078e19672c93-scripts\") pod \"cinder-scheduler-0\" (UID: \"716d4067-62fa-47c4-9874-078e19672c93\") " pod="openstack/cinder-scheduler-0" Jan 22 07:20:44 crc kubenswrapper[4933]: I0122 07:20:44.280276 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-98kvh\" (UniqueName: \"kubernetes.io/projected/716d4067-62fa-47c4-9874-078e19672c93-kube-api-access-98kvh\") pod \"cinder-scheduler-0\" (UID: \"716d4067-62fa-47c4-9874-078e19672c93\") " pod="openstack/cinder-scheduler-0" 
Jan 22 07:20:44 crc kubenswrapper[4933]: I0122 07:20:44.364882 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Jan 22 07:20:44 crc kubenswrapper[4933]: I0122 07:20:44.500848 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1290d724-64e5-4bb7-890d-971660668122" path="/var/lib/kubelet/pods/1290d724-64e5-4bb7-890d-971660668122/volumes"
Jan 22 07:20:44 crc kubenswrapper[4933]: I0122 07:20:44.810017 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Jan 22 07:20:44 crc kubenswrapper[4933]: I0122 07:20:44.971532 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"716d4067-62fa-47c4-9874-078e19672c93","Type":"ContainerStarted","Data":"c8df2c3758c4f3377a2ef681b9dcd306a0db076b3b9e43ef1484810a91af31c6"}
Jan 22 07:20:45 crc kubenswrapper[4933]: I0122 07:20:45.983007 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"716d4067-62fa-47c4-9874-078e19672c93","Type":"ContainerStarted","Data":"17440ee79a46ac125842c6084c998324cbc330b31ce391593b189a5bdcb2c396"}
Jan 22 07:20:46 crc kubenswrapper[4933]: I0122 07:20:46.994465 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"716d4067-62fa-47c4-9874-078e19672c93","Type":"ContainerStarted","Data":"fc2cd6a127df17ce201e7911bb2630de536e094cb19245ca87d931e9b3d5a7d6"}
Jan 22 07:20:47 crc kubenswrapper[4933]: I0122 07:20:47.031507 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=4.031480129 podStartE2EDuration="4.031480129s" podCreationTimestamp="2026-01-22 07:20:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:20:47.024974021 +0000 UTC m=+5694.862099404" watchObservedRunningTime="2026-01-22 07:20:47.031480129 +0000 UTC m=+5694.868605512"
Jan 22 07:20:48 crc kubenswrapper[4933]: I0122 07:20:48.080307 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0"
Jan 22 07:20:49 crc kubenswrapper[4933]: I0122 07:20:49.366260 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0"
Jan 22 07:20:54 crc kubenswrapper[4933]: I0122 07:20:54.570300 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0"
Jan 22 07:20:55 crc kubenswrapper[4933]: I0122 07:20:55.072399 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-gzkdg"]
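
The probe transitions above show the startup probe acting as a gate: cinder-scheduler-0 is reported probe="startup" status="unhealthy" at 07:20:49 and status="started" at 07:20:54, and only after that latch do readiness and liveness results take over (early failures are tolerated until the probe's failureThreshold is exhausted). A sketch of the latch (illustrative Go; the result sequence and wording are invented):

package main

import "fmt"

func main() {
	// Successive startup-probe outcomes, loosely mirroring the entries
	// above (unhealthy first, then a success).
	results := []bool{false, false, true}
	started := false

	for i, ok := range results {
		switch {
		case started:
			fmt.Printf("run %d: startup already latched; readiness/liveness active\n", i)
		case ok:
			started = true // latch: the startup probe never runs again
			fmt.Printf("run %d: probe=\"startup\" status=\"started\"\n", i)
		default:
			fmt.Printf("run %d: probe=\"startup\" status=\"unhealthy\" (gate stays closed)\n", i)
		}
	}
}
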
Need to start a new one" pod="openstack/glance-db-create-gzkdg" Jan 22 07:20:55 crc kubenswrapper[4933]: I0122 07:20:55.090867 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-gzkdg"] Jan 22 07:20:55 crc kubenswrapper[4933]: I0122 07:20:55.161101 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-np2vz\" (UniqueName: \"kubernetes.io/projected/9fa99816-1176-4475-ba6d-a910e9008aa1-kube-api-access-np2vz\") pod \"glance-db-create-gzkdg\" (UID: \"9fa99816-1176-4475-ba6d-a910e9008aa1\") " pod="openstack/glance-db-create-gzkdg" Jan 22 07:20:55 crc kubenswrapper[4933]: I0122 07:20:55.161292 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9fa99816-1176-4475-ba6d-a910e9008aa1-operator-scripts\") pod \"glance-db-create-gzkdg\" (UID: \"9fa99816-1176-4475-ba6d-a910e9008aa1\") " pod="openstack/glance-db-create-gzkdg" Jan 22 07:20:55 crc kubenswrapper[4933]: I0122 07:20:55.177830 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-4c14-account-create-update-jprrg"] Jan 22 07:20:55 crc kubenswrapper[4933]: I0122 07:20:55.178868 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-4c14-account-create-update-jprrg" Jan 22 07:20:55 crc kubenswrapper[4933]: I0122 07:20:55.181381 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Jan 22 07:20:55 crc kubenswrapper[4933]: I0122 07:20:55.193736 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-4c14-account-create-update-jprrg"] Jan 22 07:20:55 crc kubenswrapper[4933]: I0122 07:20:55.262534 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s98b4\" (UniqueName: \"kubernetes.io/projected/a9a64653-d223-46b3-9c35-dd91036c310a-kube-api-access-s98b4\") pod \"glance-4c14-account-create-update-jprrg\" (UID: \"a9a64653-d223-46b3-9c35-dd91036c310a\") " pod="openstack/glance-4c14-account-create-update-jprrg" Jan 22 07:20:55 crc kubenswrapper[4933]: I0122 07:20:55.262787 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9fa99816-1176-4475-ba6d-a910e9008aa1-operator-scripts\") pod \"glance-db-create-gzkdg\" (UID: \"9fa99816-1176-4475-ba6d-a910e9008aa1\") " pod="openstack/glance-db-create-gzkdg" Jan 22 07:20:55 crc kubenswrapper[4933]: I0122 07:20:55.262852 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a9a64653-d223-46b3-9c35-dd91036c310a-operator-scripts\") pod \"glance-4c14-account-create-update-jprrg\" (UID: \"a9a64653-d223-46b3-9c35-dd91036c310a\") " pod="openstack/glance-4c14-account-create-update-jprrg" Jan 22 07:20:55 crc kubenswrapper[4933]: I0122 07:20:55.262976 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-np2vz\" (UniqueName: \"kubernetes.io/projected/9fa99816-1176-4475-ba6d-a910e9008aa1-kube-api-access-np2vz\") pod \"glance-db-create-gzkdg\" (UID: \"9fa99816-1176-4475-ba6d-a910e9008aa1\") " pod="openstack/glance-db-create-gzkdg" Jan 22 07:20:55 crc kubenswrapper[4933]: I0122 07:20:55.263460 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/9fa99816-1176-4475-ba6d-a910e9008aa1-operator-scripts\") pod \"glance-db-create-gzkdg\" (UID: \"9fa99816-1176-4475-ba6d-a910e9008aa1\") " pod="openstack/glance-db-create-gzkdg" Jan 22 07:20:55 crc kubenswrapper[4933]: I0122 07:20:55.279943 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-np2vz\" (UniqueName: \"kubernetes.io/projected/9fa99816-1176-4475-ba6d-a910e9008aa1-kube-api-access-np2vz\") pod \"glance-db-create-gzkdg\" (UID: \"9fa99816-1176-4475-ba6d-a910e9008aa1\") " pod="openstack/glance-db-create-gzkdg" Jan 22 07:20:55 crc kubenswrapper[4933]: I0122 07:20:55.364519 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a9a64653-d223-46b3-9c35-dd91036c310a-operator-scripts\") pod \"glance-4c14-account-create-update-jprrg\" (UID: \"a9a64653-d223-46b3-9c35-dd91036c310a\") " pod="openstack/glance-4c14-account-create-update-jprrg" Jan 22 07:20:55 crc kubenswrapper[4933]: I0122 07:20:55.364607 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s98b4\" (UniqueName: \"kubernetes.io/projected/a9a64653-d223-46b3-9c35-dd91036c310a-kube-api-access-s98b4\") pod \"glance-4c14-account-create-update-jprrg\" (UID: \"a9a64653-d223-46b3-9c35-dd91036c310a\") " pod="openstack/glance-4c14-account-create-update-jprrg" Jan 22 07:20:55 crc kubenswrapper[4933]: I0122 07:20:55.365405 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a9a64653-d223-46b3-9c35-dd91036c310a-operator-scripts\") pod \"glance-4c14-account-create-update-jprrg\" (UID: \"a9a64653-d223-46b3-9c35-dd91036c310a\") " pod="openstack/glance-4c14-account-create-update-jprrg" Jan 22 07:20:55 crc kubenswrapper[4933]: I0122 07:20:55.381553 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s98b4\" (UniqueName: \"kubernetes.io/projected/a9a64653-d223-46b3-9c35-dd91036c310a-kube-api-access-s98b4\") pod \"glance-4c14-account-create-update-jprrg\" (UID: \"a9a64653-d223-46b3-9c35-dd91036c310a\") " pod="openstack/glance-4c14-account-create-update-jprrg" Jan 22 07:20:55 crc kubenswrapper[4933]: I0122 07:20:55.393048 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-gzkdg" Jan 22 07:20:55 crc kubenswrapper[4933]: I0122 07:20:55.498943 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-4c14-account-create-update-jprrg" Jan 22 07:20:55 crc kubenswrapper[4933]: I0122 07:20:55.831994 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-gzkdg"] Jan 22 07:20:55 crc kubenswrapper[4933]: I0122 07:20:55.923457 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-4c14-account-create-update-jprrg"] Jan 22 07:20:55 crc kubenswrapper[4933]: W0122 07:20:55.943674 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda9a64653_d223_46b3_9c35_dd91036c310a.slice/crio-9e9647497edc5f97190b43029a46de3856d7160af10e31504c4256a2547efcd8 WatchSource:0}: Error finding container 9e9647497edc5f97190b43029a46de3856d7160af10e31504c4256a2547efcd8: Status 404 returned error can't find the container with id 9e9647497edc5f97190b43029a46de3856d7160af10e31504c4256a2547efcd8 Jan 22 07:20:56 crc kubenswrapper[4933]: I0122 07:20:56.070626 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-4c14-account-create-update-jprrg" event={"ID":"a9a64653-d223-46b3-9c35-dd91036c310a","Type":"ContainerStarted","Data":"9e9647497edc5f97190b43029a46de3856d7160af10e31504c4256a2547efcd8"} Jan 22 07:20:56 crc kubenswrapper[4933]: I0122 07:20:56.072203 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-gzkdg" event={"ID":"9fa99816-1176-4475-ba6d-a910e9008aa1","Type":"ContainerStarted","Data":"04e389c31c38bd2ffd009fd698864582de82b945539be83e06ff7b6f5f3b106e"} Jan 22 07:20:56 crc kubenswrapper[4933]: I0122 07:20:56.072228 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-gzkdg" event={"ID":"9fa99816-1176-4475-ba6d-a910e9008aa1","Type":"ContainerStarted","Data":"9a13ddf0318067703a5db4da17f9138efd146a00523adc3afbc61a82b53c2d0a"} Jan 22 07:20:56 crc kubenswrapper[4933]: I0122 07:20:56.094595 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-create-gzkdg" podStartSLOduration=1.094574271 podStartE2EDuration="1.094574271s" podCreationTimestamp="2026-01-22 07:20:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:20:56.087865228 +0000 UTC m=+5703.924990581" watchObservedRunningTime="2026-01-22 07:20:56.094574271 +0000 UTC m=+5703.931699624" Jan 22 07:20:57 crc kubenswrapper[4933]: I0122 07:20:57.083743 4933 generic.go:334] "Generic (PLEG): container finished" podID="9fa99816-1176-4475-ba6d-a910e9008aa1" containerID="04e389c31c38bd2ffd009fd698864582de82b945539be83e06ff7b6f5f3b106e" exitCode=0 Jan 22 07:20:57 crc kubenswrapper[4933]: I0122 07:20:57.083828 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-gzkdg" event={"ID":"9fa99816-1176-4475-ba6d-a910e9008aa1","Type":"ContainerDied","Data":"04e389c31c38bd2ffd009fd698864582de82b945539be83e06ff7b6f5f3b106e"} Jan 22 07:20:57 crc kubenswrapper[4933]: I0122 07:20:57.085856 4933 generic.go:334] "Generic (PLEG): container finished" podID="a9a64653-d223-46b3-9c35-dd91036c310a" containerID="92fbc4d1d10495a04e56e0b82627140050fc1c17732f59379de7dd97ecfa4be9" exitCode=0 Jan 22 07:20:57 crc kubenswrapper[4933]: I0122 07:20:57.085895 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-4c14-account-create-update-jprrg" 
event={"ID":"a9a64653-d223-46b3-9c35-dd91036c310a","Type":"ContainerDied","Data":"92fbc4d1d10495a04e56e0b82627140050fc1c17732f59379de7dd97ecfa4be9"} Jan 22 07:20:58 crc kubenswrapper[4933]: I0122 07:20:58.484977 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-gzkdg" Jan 22 07:20:58 crc kubenswrapper[4933]: I0122 07:20:58.492386 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-4c14-account-create-update-jprrg" Jan 22 07:20:58 crc kubenswrapper[4933]: I0122 07:20:58.528507 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-np2vz\" (UniqueName: \"kubernetes.io/projected/9fa99816-1176-4475-ba6d-a910e9008aa1-kube-api-access-np2vz\") pod \"9fa99816-1176-4475-ba6d-a910e9008aa1\" (UID: \"9fa99816-1176-4475-ba6d-a910e9008aa1\") " Jan 22 07:20:58 crc kubenswrapper[4933]: I0122 07:20:58.529858 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9fa99816-1176-4475-ba6d-a910e9008aa1-operator-scripts\") pod \"9fa99816-1176-4475-ba6d-a910e9008aa1\" (UID: \"9fa99816-1176-4475-ba6d-a910e9008aa1\") " Jan 22 07:20:58 crc kubenswrapper[4933]: I0122 07:20:58.529898 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a9a64653-d223-46b3-9c35-dd91036c310a-operator-scripts\") pod \"a9a64653-d223-46b3-9c35-dd91036c310a\" (UID: \"a9a64653-d223-46b3-9c35-dd91036c310a\") " Jan 22 07:20:58 crc kubenswrapper[4933]: I0122 07:20:58.529948 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s98b4\" (UniqueName: \"kubernetes.io/projected/a9a64653-d223-46b3-9c35-dd91036c310a-kube-api-access-s98b4\") pod \"a9a64653-d223-46b3-9c35-dd91036c310a\" (UID: \"a9a64653-d223-46b3-9c35-dd91036c310a\") " Jan 22 07:20:58 crc kubenswrapper[4933]: I0122 07:20:58.532567 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a9a64653-d223-46b3-9c35-dd91036c310a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a9a64653-d223-46b3-9c35-dd91036c310a" (UID: "a9a64653-d223-46b3-9c35-dd91036c310a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:20:58 crc kubenswrapper[4933]: I0122 07:20:58.534533 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9fa99816-1176-4475-ba6d-a910e9008aa1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9fa99816-1176-4475-ba6d-a910e9008aa1" (UID: "9fa99816-1176-4475-ba6d-a910e9008aa1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:20:58 crc kubenswrapper[4933]: I0122 07:20:58.538764 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a9a64653-d223-46b3-9c35-dd91036c310a-kube-api-access-s98b4" (OuterVolumeSpecName: "kube-api-access-s98b4") pod "a9a64653-d223-46b3-9c35-dd91036c310a" (UID: "a9a64653-d223-46b3-9c35-dd91036c310a"). InnerVolumeSpecName "kube-api-access-s98b4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:20:58 crc kubenswrapper[4933]: I0122 07:20:58.540145 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9fa99816-1176-4475-ba6d-a910e9008aa1-kube-api-access-np2vz" (OuterVolumeSpecName: "kube-api-access-np2vz") pod "9fa99816-1176-4475-ba6d-a910e9008aa1" (UID: "9fa99816-1176-4475-ba6d-a910e9008aa1"). InnerVolumeSpecName "kube-api-access-np2vz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:20:58 crc kubenswrapper[4933]: I0122 07:20:58.632335 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-np2vz\" (UniqueName: \"kubernetes.io/projected/9fa99816-1176-4475-ba6d-a910e9008aa1-kube-api-access-np2vz\") on node \"crc\" DevicePath \"\"" Jan 22 07:20:58 crc kubenswrapper[4933]: I0122 07:20:58.632397 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9fa99816-1176-4475-ba6d-a910e9008aa1-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:20:58 crc kubenswrapper[4933]: I0122 07:20:58.632406 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a9a64653-d223-46b3-9c35-dd91036c310a-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:20:58 crc kubenswrapper[4933]: I0122 07:20:58.632415 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s98b4\" (UniqueName: \"kubernetes.io/projected/a9a64653-d223-46b3-9c35-dd91036c310a-kube-api-access-s98b4\") on node \"crc\" DevicePath \"\"" Jan 22 07:20:59 crc kubenswrapper[4933]: I0122 07:20:59.103484 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-gzkdg" Jan 22 07:20:59 crc kubenswrapper[4933]: I0122 07:20:59.103484 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-gzkdg" event={"ID":"9fa99816-1176-4475-ba6d-a910e9008aa1","Type":"ContainerDied","Data":"9a13ddf0318067703a5db4da17f9138efd146a00523adc3afbc61a82b53c2d0a"} Jan 22 07:20:59 crc kubenswrapper[4933]: I0122 07:20:59.104121 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9a13ddf0318067703a5db4da17f9138efd146a00523adc3afbc61a82b53c2d0a" Jan 22 07:20:59 crc kubenswrapper[4933]: I0122 07:20:59.105628 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-4c14-account-create-update-jprrg" event={"ID":"a9a64653-d223-46b3-9c35-dd91036c310a","Type":"ContainerDied","Data":"9e9647497edc5f97190b43029a46de3856d7160af10e31504c4256a2547efcd8"} Jan 22 07:20:59 crc kubenswrapper[4933]: I0122 07:20:59.105670 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-4c14-account-create-update-jprrg" Jan 22 07:20:59 crc kubenswrapper[4933]: I0122 07:20:59.105685 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9e9647497edc5f97190b43029a46de3856d7160af10e31504c4256a2547efcd8" Jan 22 07:21:00 crc kubenswrapper[4933]: I0122 07:21:00.250559 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-5n6cg"] Jan 22 07:21:00 crc kubenswrapper[4933]: E0122 07:21:00.253652 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9fa99816-1176-4475-ba6d-a910e9008aa1" containerName="mariadb-database-create" Jan 22 07:21:00 crc kubenswrapper[4933]: I0122 07:21:00.253686 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="9fa99816-1176-4475-ba6d-a910e9008aa1" containerName="mariadb-database-create" Jan 22 07:21:00 crc kubenswrapper[4933]: E0122 07:21:00.253716 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9a64653-d223-46b3-9c35-dd91036c310a" containerName="mariadb-account-create-update" Jan 22 07:21:00 crc kubenswrapper[4933]: I0122 07:21:00.253725 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9a64653-d223-46b3-9c35-dd91036c310a" containerName="mariadb-account-create-update" Jan 22 07:21:00 crc kubenswrapper[4933]: I0122 07:21:00.254019 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9a64653-d223-46b3-9c35-dd91036c310a" containerName="mariadb-account-create-update" Jan 22 07:21:00 crc kubenswrapper[4933]: I0122 07:21:00.254034 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="9fa99816-1176-4475-ba6d-a910e9008aa1" containerName="mariadb-database-create" Jan 22 07:21:00 crc kubenswrapper[4933]: I0122 07:21:00.254954 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-5n6cg" Jan 22 07:21:00 crc kubenswrapper[4933]: I0122 07:21:00.257136 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Jan 22 07:21:00 crc kubenswrapper[4933]: I0122 07:21:00.257570 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-2pv9h" Jan 22 07:21:00 crc kubenswrapper[4933]: I0122 07:21:00.264098 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-5n6cg"] Jan 22 07:21:00 crc kubenswrapper[4933]: I0122 07:21:00.359907 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/94de467d-6c82-41bc-bc7c-63cbe5a0aea4-db-sync-config-data\") pod \"glance-db-sync-5n6cg\" (UID: \"94de467d-6c82-41bc-bc7c-63cbe5a0aea4\") " pod="openstack/glance-db-sync-5n6cg" Jan 22 07:21:00 crc kubenswrapper[4933]: I0122 07:21:00.359997 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/94de467d-6c82-41bc-bc7c-63cbe5a0aea4-config-data\") pod \"glance-db-sync-5n6cg\" (UID: \"94de467d-6c82-41bc-bc7c-63cbe5a0aea4\") " pod="openstack/glance-db-sync-5n6cg" Jan 22 07:21:00 crc kubenswrapper[4933]: I0122 07:21:00.360049 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94de467d-6c82-41bc-bc7c-63cbe5a0aea4-combined-ca-bundle\") pod \"glance-db-sync-5n6cg\" (UID: \"94de467d-6c82-41bc-bc7c-63cbe5a0aea4\") " pod="openstack/glance-db-sync-5n6cg" Jan 22 07:21:00 crc kubenswrapper[4933]: I0122 07:21:00.360250 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hqsl8\" (UniqueName: \"kubernetes.io/projected/94de467d-6c82-41bc-bc7c-63cbe5a0aea4-kube-api-access-hqsl8\") pod \"glance-db-sync-5n6cg\" (UID: \"94de467d-6c82-41bc-bc7c-63cbe5a0aea4\") " pod="openstack/glance-db-sync-5n6cg" Jan 22 07:21:00 crc kubenswrapper[4933]: I0122 07:21:00.461749 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/94de467d-6c82-41bc-bc7c-63cbe5a0aea4-config-data\") pod \"glance-db-sync-5n6cg\" (UID: \"94de467d-6c82-41bc-bc7c-63cbe5a0aea4\") " pod="openstack/glance-db-sync-5n6cg" Jan 22 07:21:00 crc kubenswrapper[4933]: I0122 07:21:00.461829 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94de467d-6c82-41bc-bc7c-63cbe5a0aea4-combined-ca-bundle\") pod \"glance-db-sync-5n6cg\" (UID: \"94de467d-6c82-41bc-bc7c-63cbe5a0aea4\") " pod="openstack/glance-db-sync-5n6cg" Jan 22 07:21:00 crc kubenswrapper[4933]: I0122 07:21:00.461882 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hqsl8\" (UniqueName: \"kubernetes.io/projected/94de467d-6c82-41bc-bc7c-63cbe5a0aea4-kube-api-access-hqsl8\") pod \"glance-db-sync-5n6cg\" (UID: \"94de467d-6c82-41bc-bc7c-63cbe5a0aea4\") " pod="openstack/glance-db-sync-5n6cg" Jan 22 07:21:00 crc kubenswrapper[4933]: I0122 07:21:00.461919 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/94de467d-6c82-41bc-bc7c-63cbe5a0aea4-db-sync-config-data\") pod 
\"glance-db-sync-5n6cg\" (UID: \"94de467d-6c82-41bc-bc7c-63cbe5a0aea4\") " pod="openstack/glance-db-sync-5n6cg" Jan 22 07:21:00 crc kubenswrapper[4933]: I0122 07:21:00.466647 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/94de467d-6c82-41bc-bc7c-63cbe5a0aea4-config-data\") pod \"glance-db-sync-5n6cg\" (UID: \"94de467d-6c82-41bc-bc7c-63cbe5a0aea4\") " pod="openstack/glance-db-sync-5n6cg" Jan 22 07:21:00 crc kubenswrapper[4933]: I0122 07:21:00.471599 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/94de467d-6c82-41bc-bc7c-63cbe5a0aea4-db-sync-config-data\") pod \"glance-db-sync-5n6cg\" (UID: \"94de467d-6c82-41bc-bc7c-63cbe5a0aea4\") " pod="openstack/glance-db-sync-5n6cg" Jan 22 07:21:00 crc kubenswrapper[4933]: I0122 07:21:00.495647 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hqsl8\" (UniqueName: \"kubernetes.io/projected/94de467d-6c82-41bc-bc7c-63cbe5a0aea4-kube-api-access-hqsl8\") pod \"glance-db-sync-5n6cg\" (UID: \"94de467d-6c82-41bc-bc7c-63cbe5a0aea4\") " pod="openstack/glance-db-sync-5n6cg" Jan 22 07:21:00 crc kubenswrapper[4933]: I0122 07:21:00.497686 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94de467d-6c82-41bc-bc7c-63cbe5a0aea4-combined-ca-bundle\") pod \"glance-db-sync-5n6cg\" (UID: \"94de467d-6c82-41bc-bc7c-63cbe5a0aea4\") " pod="openstack/glance-db-sync-5n6cg" Jan 22 07:21:00 crc kubenswrapper[4933]: I0122 07:21:00.577559 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-5n6cg" Jan 22 07:21:01 crc kubenswrapper[4933]: I0122 07:21:01.096518 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-5n6cg"] Jan 22 07:21:01 crc kubenswrapper[4933]: I0122 07:21:01.150736 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-5n6cg" event={"ID":"94de467d-6c82-41bc-bc7c-63cbe5a0aea4","Type":"ContainerStarted","Data":"eaa45b428574061f6371c293dbfa43e9d39bf33cb421fb1a0674f61a6533c9aa"} Jan 22 07:21:02 crc kubenswrapper[4933]: I0122 07:21:02.158916 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-5n6cg" event={"ID":"94de467d-6c82-41bc-bc7c-63cbe5a0aea4","Type":"ContainerStarted","Data":"864b928f4ac7e129ad8497ee7751c0b7b4094b4a7fa6e582f926dbf1c0287ba8"} Jan 22 07:21:02 crc kubenswrapper[4933]: I0122 07:21:02.177848 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-5n6cg" podStartSLOduration=2.177833164 podStartE2EDuration="2.177833164s" podCreationTimestamp="2026-01-22 07:21:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:21:02.173712494 +0000 UTC m=+5710.010837837" watchObservedRunningTime="2026-01-22 07:21:02.177833164 +0000 UTC m=+5710.014958517" Jan 22 07:21:04 crc kubenswrapper[4933]: I0122 07:21:04.682260 4933 scope.go:117] "RemoveContainer" containerID="e7ac1bc107ca82175d72b2c56b74ce0436bd44f94fbc67fab854adf4c664fc0c" Jan 22 07:21:05 crc kubenswrapper[4933]: I0122 07:21:05.188824 4933 generic.go:334] "Generic (PLEG): container finished" podID="94de467d-6c82-41bc-bc7c-63cbe5a0aea4" containerID="864b928f4ac7e129ad8497ee7751c0b7b4094b4a7fa6e582f926dbf1c0287ba8" exitCode=0 Jan 22 07:21:05 crc 
Jan 22 07:21:05 crc kubenswrapper[4933]: I0122 07:21:05.188914 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-5n6cg" event={"ID":"94de467d-6c82-41bc-bc7c-63cbe5a0aea4","Type":"ContainerDied","Data":"864b928f4ac7e129ad8497ee7751c0b7b4094b4a7fa6e582f926dbf1c0287ba8"}
Jan 22 07:21:06 crc kubenswrapper[4933]: I0122 07:21:06.579605 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-5n6cg"
Jan 22 07:21:06 crc kubenswrapper[4933]: I0122 07:21:06.775940 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/94de467d-6c82-41bc-bc7c-63cbe5a0aea4-db-sync-config-data\") pod \"94de467d-6c82-41bc-bc7c-63cbe5a0aea4\" (UID: \"94de467d-6c82-41bc-bc7c-63cbe5a0aea4\") "
Jan 22 07:21:06 crc kubenswrapper[4933]: I0122 07:21:06.776010 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/94de467d-6c82-41bc-bc7c-63cbe5a0aea4-config-data\") pod \"94de467d-6c82-41bc-bc7c-63cbe5a0aea4\" (UID: \"94de467d-6c82-41bc-bc7c-63cbe5a0aea4\") "
Jan 22 07:21:06 crc kubenswrapper[4933]: I0122 07:21:06.776158 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94de467d-6c82-41bc-bc7c-63cbe5a0aea4-combined-ca-bundle\") pod \"94de467d-6c82-41bc-bc7c-63cbe5a0aea4\" (UID: \"94de467d-6c82-41bc-bc7c-63cbe5a0aea4\") "
Jan 22 07:21:06 crc kubenswrapper[4933]: I0122 07:21:06.776313 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hqsl8\" (UniqueName: \"kubernetes.io/projected/94de467d-6c82-41bc-bc7c-63cbe5a0aea4-kube-api-access-hqsl8\") pod \"94de467d-6c82-41bc-bc7c-63cbe5a0aea4\" (UID: \"94de467d-6c82-41bc-bc7c-63cbe5a0aea4\") "
Jan 22 07:21:06 crc kubenswrapper[4933]: I0122 07:21:06.782555 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/94de467d-6c82-41bc-bc7c-63cbe5a0aea4-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "94de467d-6c82-41bc-bc7c-63cbe5a0aea4" (UID: "94de467d-6c82-41bc-bc7c-63cbe5a0aea4"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 07:21:06 crc kubenswrapper[4933]: I0122 07:21:06.783252 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/94de467d-6c82-41bc-bc7c-63cbe5a0aea4-kube-api-access-hqsl8" (OuterVolumeSpecName: "kube-api-access-hqsl8") pod "94de467d-6c82-41bc-bc7c-63cbe5a0aea4" (UID: "94de467d-6c82-41bc-bc7c-63cbe5a0aea4"). InnerVolumeSpecName "kube-api-access-hqsl8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 07:21:06 crc kubenswrapper[4933]: I0122 07:21:06.807975 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/94de467d-6c82-41bc-bc7c-63cbe5a0aea4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "94de467d-6c82-41bc-bc7c-63cbe5a0aea4" (UID: "94de467d-6c82-41bc-bc7c-63cbe5a0aea4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 07:21:06 crc kubenswrapper[4933]: I0122 07:21:06.833670 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/94de467d-6c82-41bc-bc7c-63cbe5a0aea4-config-data" (OuterVolumeSpecName: "config-data") pod "94de467d-6c82-41bc-bc7c-63cbe5a0aea4" (UID: "94de467d-6c82-41bc-bc7c-63cbe5a0aea4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 07:21:06 crc kubenswrapper[4933]: I0122 07:21:06.878045 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hqsl8\" (UniqueName: \"kubernetes.io/projected/94de467d-6c82-41bc-bc7c-63cbe5a0aea4-kube-api-access-hqsl8\") on node \"crc\" DevicePath \"\""
Jan 22 07:21:06 crc kubenswrapper[4933]: I0122 07:21:06.878086 4933 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/94de467d-6c82-41bc-bc7c-63cbe5a0aea4-db-sync-config-data\") on node \"crc\" DevicePath \"\""
Jan 22 07:21:06 crc kubenswrapper[4933]: I0122 07:21:06.878097 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/94de467d-6c82-41bc-bc7c-63cbe5a0aea4-config-data\") on node \"crc\" DevicePath \"\""
Jan 22 07:21:06 crc kubenswrapper[4933]: I0122 07:21:06.878105 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94de467d-6c82-41bc-bc7c-63cbe5a0aea4-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.208116 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-5n6cg" event={"ID":"94de467d-6c82-41bc-bc7c-63cbe5a0aea4","Type":"ContainerDied","Data":"eaa45b428574061f6371c293dbfa43e9d39bf33cb421fb1a0674f61a6533c9aa"}
Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.208154 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eaa45b428574061f6371c293dbfa43e9d39bf33cb421fb1a0674f61a6533c9aa"
Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.208180 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-5n6cg"
Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.559331 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-77b79d864c-9jrkg"]
Jan 22 07:21:07 crc kubenswrapper[4933]: E0122 07:21:07.559732 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94de467d-6c82-41bc-bc7c-63cbe5a0aea4" containerName="glance-db-sync"
Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.559747 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="94de467d-6c82-41bc-bc7c-63cbe5a0aea4" containerName="glance-db-sync"
Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.559996 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="94de467d-6c82-41bc-bc7c-63cbe5a0aea4" containerName="glance-db-sync"
Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.560901 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77b79d864c-9jrkg"
Need to start a new one" pod="openstack/dnsmasq-dns-77b79d864c-9jrkg" Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.576972 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-77b79d864c-9jrkg"] Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.660638 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.662449 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.667595 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.667634 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-2pv9h" Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.667785 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.683690 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.695857 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bt8bv\" (UniqueName: \"kubernetes.io/projected/31f9bca1-a923-47bb-948e-10191d7f05f8-kube-api-access-bt8bv\") pod \"dnsmasq-dns-77b79d864c-9jrkg\" (UID: \"31f9bca1-a923-47bb-948e-10191d7f05f8\") " pod="openstack/dnsmasq-dns-77b79d864c-9jrkg" Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.695990 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/31f9bca1-a923-47bb-948e-10191d7f05f8-ovsdbserver-sb\") pod \"dnsmasq-dns-77b79d864c-9jrkg\" (UID: \"31f9bca1-a923-47bb-948e-10191d7f05f8\") " pod="openstack/dnsmasq-dns-77b79d864c-9jrkg" Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.696021 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/31f9bca1-a923-47bb-948e-10191d7f05f8-dns-svc\") pod \"dnsmasq-dns-77b79d864c-9jrkg\" (UID: \"31f9bca1-a923-47bb-948e-10191d7f05f8\") " pod="openstack/dnsmasq-dns-77b79d864c-9jrkg" Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.696054 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/31f9bca1-a923-47bb-948e-10191d7f05f8-ovsdbserver-nb\") pod \"dnsmasq-dns-77b79d864c-9jrkg\" (UID: \"31f9bca1-a923-47bb-948e-10191d7f05f8\") " pod="openstack/dnsmasq-dns-77b79d864c-9jrkg" Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.696119 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/31f9bca1-a923-47bb-948e-10191d7f05f8-config\") pod \"dnsmasq-dns-77b79d864c-9jrkg\" (UID: \"31f9bca1-a923-47bb-948e-10191d7f05f8\") " pod="openstack/dnsmasq-dns-77b79d864c-9jrkg" Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.799162 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/79b58375-0218-465e-b2d3-7bd94d33a5f9-httpd-run\") pod 
\"glance-default-external-api-0\" (UID: \"79b58375-0218-465e-b2d3-7bd94d33a5f9\") " pod="openstack/glance-default-external-api-0" Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.799565 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/79b58375-0218-465e-b2d3-7bd94d33a5f9-logs\") pod \"glance-default-external-api-0\" (UID: \"79b58375-0218-465e-b2d3-7bd94d33a5f9\") " pod="openstack/glance-default-external-api-0" Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.799606 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79b58375-0218-465e-b2d3-7bd94d33a5f9-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"79b58375-0218-465e-b2d3-7bd94d33a5f9\") " pod="openstack/glance-default-external-api-0" Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.799651 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/31f9bca1-a923-47bb-948e-10191d7f05f8-ovsdbserver-sb\") pod \"dnsmasq-dns-77b79d864c-9jrkg\" (UID: \"31f9bca1-a923-47bb-948e-10191d7f05f8\") " pod="openstack/dnsmasq-dns-77b79d864c-9jrkg" Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.799678 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/31f9bca1-a923-47bb-948e-10191d7f05f8-dns-svc\") pod \"dnsmasq-dns-77b79d864c-9jrkg\" (UID: \"31f9bca1-a923-47bb-948e-10191d7f05f8\") " pod="openstack/dnsmasq-dns-77b79d864c-9jrkg" Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.799713 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/31f9bca1-a923-47bb-948e-10191d7f05f8-ovsdbserver-nb\") pod \"dnsmasq-dns-77b79d864c-9jrkg\" (UID: \"31f9bca1-a923-47bb-948e-10191d7f05f8\") " pod="openstack/dnsmasq-dns-77b79d864c-9jrkg" Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.799749 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79b58375-0218-465e-b2d3-7bd94d33a5f9-config-data\") pod \"glance-default-external-api-0\" (UID: \"79b58375-0218-465e-b2d3-7bd94d33a5f9\") " pod="openstack/glance-default-external-api-0" Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.799802 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/31f9bca1-a923-47bb-948e-10191d7f05f8-config\") pod \"dnsmasq-dns-77b79d864c-9jrkg\" (UID: \"31f9bca1-a923-47bb-948e-10191d7f05f8\") " pod="openstack/dnsmasq-dns-77b79d864c-9jrkg" Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.799836 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6wf7k\" (UniqueName: \"kubernetes.io/projected/79b58375-0218-465e-b2d3-7bd94d33a5f9-kube-api-access-6wf7k\") pod \"glance-default-external-api-0\" (UID: \"79b58375-0218-465e-b2d3-7bd94d33a5f9\") " pod="openstack/glance-default-external-api-0" Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.799881 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/79b58375-0218-465e-b2d3-7bd94d33a5f9-scripts\") pod 
\"glance-default-external-api-0\" (UID: \"79b58375-0218-465e-b2d3-7bd94d33a5f9\") " pod="openstack/glance-default-external-api-0" Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.799938 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bt8bv\" (UniqueName: \"kubernetes.io/projected/31f9bca1-a923-47bb-948e-10191d7f05f8-kube-api-access-bt8bv\") pod \"dnsmasq-dns-77b79d864c-9jrkg\" (UID: \"31f9bca1-a923-47bb-948e-10191d7f05f8\") " pod="openstack/dnsmasq-dns-77b79d864c-9jrkg" Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.801277 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/31f9bca1-a923-47bb-948e-10191d7f05f8-ovsdbserver-sb\") pod \"dnsmasq-dns-77b79d864c-9jrkg\" (UID: \"31f9bca1-a923-47bb-948e-10191d7f05f8\") " pod="openstack/dnsmasq-dns-77b79d864c-9jrkg" Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.801836 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/31f9bca1-a923-47bb-948e-10191d7f05f8-dns-svc\") pod \"dnsmasq-dns-77b79d864c-9jrkg\" (UID: \"31f9bca1-a923-47bb-948e-10191d7f05f8\") " pod="openstack/dnsmasq-dns-77b79d864c-9jrkg" Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.802377 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/31f9bca1-a923-47bb-948e-10191d7f05f8-config\") pod \"dnsmasq-dns-77b79d864c-9jrkg\" (UID: \"31f9bca1-a923-47bb-948e-10191d7f05f8\") " pod="openstack/dnsmasq-dns-77b79d864c-9jrkg" Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.802752 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/31f9bca1-a923-47bb-948e-10191d7f05f8-ovsdbserver-nb\") pod \"dnsmasq-dns-77b79d864c-9jrkg\" (UID: \"31f9bca1-a923-47bb-948e-10191d7f05f8\") " pod="openstack/dnsmasq-dns-77b79d864c-9jrkg" Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.845910 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bt8bv\" (UniqueName: \"kubernetes.io/projected/31f9bca1-a923-47bb-948e-10191d7f05f8-kube-api-access-bt8bv\") pod \"dnsmasq-dns-77b79d864c-9jrkg\" (UID: \"31f9bca1-a923-47bb-948e-10191d7f05f8\") " pod="openstack/dnsmasq-dns-77b79d864c-9jrkg" Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.869145 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.871255 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.874522 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.889603 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.889975 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-77b79d864c-9jrkg" Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.902519 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/79b58375-0218-465e-b2d3-7bd94d33a5f9-logs\") pod \"glance-default-external-api-0\" (UID: \"79b58375-0218-465e-b2d3-7bd94d33a5f9\") " pod="openstack/glance-default-external-api-0" Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.902578 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79b58375-0218-465e-b2d3-7bd94d33a5f9-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"79b58375-0218-465e-b2d3-7bd94d33a5f9\") " pod="openstack/glance-default-external-api-0" Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.902641 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79b58375-0218-465e-b2d3-7bd94d33a5f9-config-data\") pod \"glance-default-external-api-0\" (UID: \"79b58375-0218-465e-b2d3-7bd94d33a5f9\") " pod="openstack/glance-default-external-api-0" Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.902692 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6wf7k\" (UniqueName: \"kubernetes.io/projected/79b58375-0218-465e-b2d3-7bd94d33a5f9-kube-api-access-6wf7k\") pod \"glance-default-external-api-0\" (UID: \"79b58375-0218-465e-b2d3-7bd94d33a5f9\") " pod="openstack/glance-default-external-api-0" Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.902730 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/79b58375-0218-465e-b2d3-7bd94d33a5f9-scripts\") pod \"glance-default-external-api-0\" (UID: \"79b58375-0218-465e-b2d3-7bd94d33a5f9\") " pod="openstack/glance-default-external-api-0" Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.902796 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/79b58375-0218-465e-b2d3-7bd94d33a5f9-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"79b58375-0218-465e-b2d3-7bd94d33a5f9\") " pod="openstack/glance-default-external-api-0" Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.903300 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/79b58375-0218-465e-b2d3-7bd94d33a5f9-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"79b58375-0218-465e-b2d3-7bd94d33a5f9\") " pod="openstack/glance-default-external-api-0" Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.904887 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/79b58375-0218-465e-b2d3-7bd94d33a5f9-logs\") pod \"glance-default-external-api-0\" (UID: \"79b58375-0218-465e-b2d3-7bd94d33a5f9\") " pod="openstack/glance-default-external-api-0" Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.910291 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79b58375-0218-465e-b2d3-7bd94d33a5f9-config-data\") pod \"glance-default-external-api-0\" (UID: \"79b58375-0218-465e-b2d3-7bd94d33a5f9\") " pod="openstack/glance-default-external-api-0" Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.910342 
4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/79b58375-0218-465e-b2d3-7bd94d33a5f9-scripts\") pod \"glance-default-external-api-0\" (UID: \"79b58375-0218-465e-b2d3-7bd94d33a5f9\") " pod="openstack/glance-default-external-api-0" Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.913881 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79b58375-0218-465e-b2d3-7bd94d33a5f9-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"79b58375-0218-465e-b2d3-7bd94d33a5f9\") " pod="openstack/glance-default-external-api-0" Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.934843 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6wf7k\" (UniqueName: \"kubernetes.io/projected/79b58375-0218-465e-b2d3-7bd94d33a5f9-kube-api-access-6wf7k\") pod \"glance-default-external-api-0\" (UID: \"79b58375-0218-465e-b2d3-7bd94d33a5f9\") " pod="openstack/glance-default-external-api-0" Jan 22 07:21:07 crc kubenswrapper[4933]: I0122 07:21:07.995038 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 22 07:21:08 crc kubenswrapper[4933]: I0122 07:21:08.014735 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f3513e89-39be-45bd-8e6c-ce53c3d85eed-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"f3513e89-39be-45bd-8e6c-ce53c3d85eed\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:21:08 crc kubenswrapper[4933]: I0122 07:21:08.014823 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f3513e89-39be-45bd-8e6c-ce53c3d85eed-scripts\") pod \"glance-default-internal-api-0\" (UID: \"f3513e89-39be-45bd-8e6c-ce53c3d85eed\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:21:08 crc kubenswrapper[4933]: I0122 07:21:08.014868 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3513e89-39be-45bd-8e6c-ce53c3d85eed-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"f3513e89-39be-45bd-8e6c-ce53c3d85eed\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:21:08 crc kubenswrapper[4933]: I0122 07:21:08.014889 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3513e89-39be-45bd-8e6c-ce53c3d85eed-config-data\") pod \"glance-default-internal-api-0\" (UID: \"f3513e89-39be-45bd-8e6c-ce53c3d85eed\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:21:08 crc kubenswrapper[4933]: I0122 07:21:08.014991 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8thvs\" (UniqueName: \"kubernetes.io/projected/f3513e89-39be-45bd-8e6c-ce53c3d85eed-kube-api-access-8thvs\") pod \"glance-default-internal-api-0\" (UID: \"f3513e89-39be-45bd-8e6c-ce53c3d85eed\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:21:08 crc kubenswrapper[4933]: I0122 07:21:08.015178 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/f3513e89-39be-45bd-8e6c-ce53c3d85eed-logs\") pod \"glance-default-internal-api-0\" (UID: \"f3513e89-39be-45bd-8e6c-ce53c3d85eed\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:21:08 crc kubenswrapper[4933]: I0122 07:21:08.116351 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f3513e89-39be-45bd-8e6c-ce53c3d85eed-scripts\") pod \"glance-default-internal-api-0\" (UID: \"f3513e89-39be-45bd-8e6c-ce53c3d85eed\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:21:08 crc kubenswrapper[4933]: I0122 07:21:08.116759 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3513e89-39be-45bd-8e6c-ce53c3d85eed-config-data\") pod \"glance-default-internal-api-0\" (UID: \"f3513e89-39be-45bd-8e6c-ce53c3d85eed\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:21:08 crc kubenswrapper[4933]: I0122 07:21:08.116782 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3513e89-39be-45bd-8e6c-ce53c3d85eed-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"f3513e89-39be-45bd-8e6c-ce53c3d85eed\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:21:08 crc kubenswrapper[4933]: I0122 07:21:08.116865 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8thvs\" (UniqueName: \"kubernetes.io/projected/f3513e89-39be-45bd-8e6c-ce53c3d85eed-kube-api-access-8thvs\") pod \"glance-default-internal-api-0\" (UID: \"f3513e89-39be-45bd-8e6c-ce53c3d85eed\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:21:08 crc kubenswrapper[4933]: I0122 07:21:08.116917 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f3513e89-39be-45bd-8e6c-ce53c3d85eed-logs\") pod \"glance-default-internal-api-0\" (UID: \"f3513e89-39be-45bd-8e6c-ce53c3d85eed\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:21:08 crc kubenswrapper[4933]: I0122 07:21:08.116964 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f3513e89-39be-45bd-8e6c-ce53c3d85eed-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"f3513e89-39be-45bd-8e6c-ce53c3d85eed\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:21:08 crc kubenswrapper[4933]: I0122 07:21:08.117460 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f3513e89-39be-45bd-8e6c-ce53c3d85eed-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"f3513e89-39be-45bd-8e6c-ce53c3d85eed\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:21:08 crc kubenswrapper[4933]: I0122 07:21:08.120618 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f3513e89-39be-45bd-8e6c-ce53c3d85eed-logs\") pod \"glance-default-internal-api-0\" (UID: \"f3513e89-39be-45bd-8e6c-ce53c3d85eed\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:21:08 crc kubenswrapper[4933]: I0122 07:21:08.125542 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f3513e89-39be-45bd-8e6c-ce53c3d85eed-scripts\") pod \"glance-default-internal-api-0\" (UID: 
\"f3513e89-39be-45bd-8e6c-ce53c3d85eed\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:21:08 crc kubenswrapper[4933]: I0122 07:21:08.126281 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3513e89-39be-45bd-8e6c-ce53c3d85eed-config-data\") pod \"glance-default-internal-api-0\" (UID: \"f3513e89-39be-45bd-8e6c-ce53c3d85eed\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:21:08 crc kubenswrapper[4933]: I0122 07:21:08.137129 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3513e89-39be-45bd-8e6c-ce53c3d85eed-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"f3513e89-39be-45bd-8e6c-ce53c3d85eed\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:21:08 crc kubenswrapper[4933]: I0122 07:21:08.141490 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8thvs\" (UniqueName: \"kubernetes.io/projected/f3513e89-39be-45bd-8e6c-ce53c3d85eed-kube-api-access-8thvs\") pod \"glance-default-internal-api-0\" (UID: \"f3513e89-39be-45bd-8e6c-ce53c3d85eed\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:21:08 crc kubenswrapper[4933]: I0122 07:21:08.205553 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 22 07:21:08 crc kubenswrapper[4933]: I0122 07:21:08.416837 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-77b79d864c-9jrkg"] Jan 22 07:21:08 crc kubenswrapper[4933]: W0122 07:21:08.426607 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod31f9bca1_a923_47bb_948e_10191d7f05f8.slice/crio-5194c9ee47ee31d84630089e4ff5275a2d9557c69f955c84f8352e0e65233d24 WatchSource:0}: Error finding container 5194c9ee47ee31d84630089e4ff5275a2d9557c69f955c84f8352e0e65233d24: Status 404 returned error can't find the container with id 5194c9ee47ee31d84630089e4ff5275a2d9557c69f955c84f8352e0e65233d24 Jan 22 07:21:08 crc kubenswrapper[4933]: I0122 07:21:08.730327 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 07:21:08 crc kubenswrapper[4933]: I0122 07:21:08.745282 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 07:21:08 crc kubenswrapper[4933]: I0122 07:21:08.831732 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 07:21:08 crc kubenswrapper[4933]: W0122 07:21:08.851315 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf3513e89_39be_45bd_8e6c_ce53c3d85eed.slice/crio-1faf700cd9887e7714693ae3fa93f5adfa9f0831d6f8217a76909ace7f89d185 WatchSource:0}: Error finding container 1faf700cd9887e7714693ae3fa93f5adfa9f0831d6f8217a76909ace7f89d185: Status 404 returned error can't find the container with id 1faf700cd9887e7714693ae3fa93f5adfa9f0831d6f8217a76909ace7f89d185 Jan 22 07:21:09 crc kubenswrapper[4933]: I0122 07:21:09.238285 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f3513e89-39be-45bd-8e6c-ce53c3d85eed","Type":"ContainerStarted","Data":"1faf700cd9887e7714693ae3fa93f5adfa9f0831d6f8217a76909ace7f89d185"} Jan 22 07:21:09 crc kubenswrapper[4933]: I0122 07:21:09.241939 
4933 generic.go:334] "Generic (PLEG): container finished" podID="31f9bca1-a923-47bb-948e-10191d7f05f8" containerID="69deb8ecabb767fe1a56c94fcf88ccd53fd8ebbf1c701dac93c6ea7dae10f8aa" exitCode=0 Jan 22 07:21:09 crc kubenswrapper[4933]: I0122 07:21:09.241996 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77b79d864c-9jrkg" event={"ID":"31f9bca1-a923-47bb-948e-10191d7f05f8","Type":"ContainerDied","Data":"69deb8ecabb767fe1a56c94fcf88ccd53fd8ebbf1c701dac93c6ea7dae10f8aa"} Jan 22 07:21:09 crc kubenswrapper[4933]: I0122 07:21:09.242018 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77b79d864c-9jrkg" event={"ID":"31f9bca1-a923-47bb-948e-10191d7f05f8","Type":"ContainerStarted","Data":"5194c9ee47ee31d84630089e4ff5275a2d9557c69f955c84f8352e0e65233d24"} Jan 22 07:21:09 crc kubenswrapper[4933]: I0122 07:21:09.245348 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"79b58375-0218-465e-b2d3-7bd94d33a5f9","Type":"ContainerStarted","Data":"e8b3411fd7816dfcc0b36e9d5d1954633d57868f8a17927ae18a7ed6efbdec52"} Jan 22 07:21:10 crc kubenswrapper[4933]: I0122 07:21:10.181104 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 07:21:10 crc kubenswrapper[4933]: I0122 07:21:10.254351 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f3513e89-39be-45bd-8e6c-ce53c3d85eed","Type":"ContainerStarted","Data":"04e8df97da4496b2574aac7338bf3e3d16da7d37f27cf97f3cb6dc214a4016c9"} Jan 22 07:21:10 crc kubenswrapper[4933]: I0122 07:21:10.254406 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f3513e89-39be-45bd-8e6c-ce53c3d85eed","Type":"ContainerStarted","Data":"fe1dea5a00ed2726c5c8a2628ee212aeea971b780e6ae72ae4c33eb0046db98f"} Jan 22 07:21:10 crc kubenswrapper[4933]: I0122 07:21:10.256390 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77b79d864c-9jrkg" event={"ID":"31f9bca1-a923-47bb-948e-10191d7f05f8","Type":"ContainerStarted","Data":"ec9e06d8f8bfa760f366368f5598245487201f3a1f497c757fd46b32110f06ae"} Jan 22 07:21:10 crc kubenswrapper[4933]: I0122 07:21:10.256549 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-77b79d864c-9jrkg" Jan 22 07:21:10 crc kubenswrapper[4933]: I0122 07:21:10.258170 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"79b58375-0218-465e-b2d3-7bd94d33a5f9","Type":"ContainerStarted","Data":"c4e9d4f9be3ca2bb479d7821c07fc200df3234e98f98915b73e8e25bab17c24c"} Jan 22 07:21:10 crc kubenswrapper[4933]: I0122 07:21:10.258227 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"79b58375-0218-465e-b2d3-7bd94d33a5f9","Type":"ContainerStarted","Data":"a17095c9881d597fcd69702ccd90ef59bb650775880b9f2a531c77253c81838e"} Jan 22 07:21:10 crc kubenswrapper[4933]: I0122 07:21:10.258286 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="79b58375-0218-465e-b2d3-7bd94d33a5f9" containerName="glance-httpd" containerID="cri-o://c4e9d4f9be3ca2bb479d7821c07fc200df3234e98f98915b73e8e25bab17c24c" gracePeriod=30 Jan 22 07:21:10 crc kubenswrapper[4933]: I0122 07:21:10.258269 4933 kuberuntime_container.go:808] "Killing container with a grace 
period" pod="openstack/glance-default-external-api-0" podUID="79b58375-0218-465e-b2d3-7bd94d33a5f9" containerName="glance-log" containerID="cri-o://a17095c9881d597fcd69702ccd90ef59bb650775880b9f2a531c77253c81838e" gracePeriod=30 Jan 22 07:21:10 crc kubenswrapper[4933]: I0122 07:21:10.284983 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.284960356 podStartE2EDuration="3.284960356s" podCreationTimestamp="2026-01-22 07:21:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:21:10.273431436 +0000 UTC m=+5718.110556789" watchObservedRunningTime="2026-01-22 07:21:10.284960356 +0000 UTC m=+5718.122085709" Jan 22 07:21:10 crc kubenswrapper[4933]: I0122 07:21:10.296730 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-77b79d864c-9jrkg" podStartSLOduration=3.296714202 podStartE2EDuration="3.296714202s" podCreationTimestamp="2026-01-22 07:21:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:21:10.292383427 +0000 UTC m=+5718.129508790" watchObservedRunningTime="2026-01-22 07:21:10.296714202 +0000 UTC m=+5718.133839555" Jan 22 07:21:10 crc kubenswrapper[4933]: I0122 07:21:10.324586 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.324561629 podStartE2EDuration="3.324561629s" podCreationTimestamp="2026-01-22 07:21:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:21:10.317504507 +0000 UTC m=+5718.154629870" watchObservedRunningTime="2026-01-22 07:21:10.324561629 +0000 UTC m=+5718.161686982" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.013324 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.180384 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/79b58375-0218-465e-b2d3-7bd94d33a5f9-httpd-run\") pod \"79b58375-0218-465e-b2d3-7bd94d33a5f9\" (UID: \"79b58375-0218-465e-b2d3-7bd94d33a5f9\") " Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.180481 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/79b58375-0218-465e-b2d3-7bd94d33a5f9-scripts\") pod \"79b58375-0218-465e-b2d3-7bd94d33a5f9\" (UID: \"79b58375-0218-465e-b2d3-7bd94d33a5f9\") " Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.180552 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79b58375-0218-465e-b2d3-7bd94d33a5f9-config-data\") pod \"79b58375-0218-465e-b2d3-7bd94d33a5f9\" (UID: \"79b58375-0218-465e-b2d3-7bd94d33a5f9\") " Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.180596 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6wf7k\" (UniqueName: \"kubernetes.io/projected/79b58375-0218-465e-b2d3-7bd94d33a5f9-kube-api-access-6wf7k\") pod \"79b58375-0218-465e-b2d3-7bd94d33a5f9\" (UID: \"79b58375-0218-465e-b2d3-7bd94d33a5f9\") " Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.180703 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/79b58375-0218-465e-b2d3-7bd94d33a5f9-logs\") pod \"79b58375-0218-465e-b2d3-7bd94d33a5f9\" (UID: \"79b58375-0218-465e-b2d3-7bd94d33a5f9\") " Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.180742 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79b58375-0218-465e-b2d3-7bd94d33a5f9-combined-ca-bundle\") pod \"79b58375-0218-465e-b2d3-7bd94d33a5f9\" (UID: \"79b58375-0218-465e-b2d3-7bd94d33a5f9\") " Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.180920 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/79b58375-0218-465e-b2d3-7bd94d33a5f9-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "79b58375-0218-465e-b2d3-7bd94d33a5f9" (UID: "79b58375-0218-465e-b2d3-7bd94d33a5f9"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.181114 4933 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/79b58375-0218-465e-b2d3-7bd94d33a5f9-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.181260 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/79b58375-0218-465e-b2d3-7bd94d33a5f9-logs" (OuterVolumeSpecName: "logs") pod "79b58375-0218-465e-b2d3-7bd94d33a5f9" (UID: "79b58375-0218-465e-b2d3-7bd94d33a5f9"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.188864 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79b58375-0218-465e-b2d3-7bd94d33a5f9-scripts" (OuterVolumeSpecName: "scripts") pod "79b58375-0218-465e-b2d3-7bd94d33a5f9" (UID: "79b58375-0218-465e-b2d3-7bd94d33a5f9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.189812 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79b58375-0218-465e-b2d3-7bd94d33a5f9-kube-api-access-6wf7k" (OuterVolumeSpecName: "kube-api-access-6wf7k") pod "79b58375-0218-465e-b2d3-7bd94d33a5f9" (UID: "79b58375-0218-465e-b2d3-7bd94d33a5f9"). InnerVolumeSpecName "kube-api-access-6wf7k". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.214173 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79b58375-0218-465e-b2d3-7bd94d33a5f9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "79b58375-0218-465e-b2d3-7bd94d33a5f9" (UID: "79b58375-0218-465e-b2d3-7bd94d33a5f9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.236402 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79b58375-0218-465e-b2d3-7bd94d33a5f9-config-data" (OuterVolumeSpecName: "config-data") pod "79b58375-0218-465e-b2d3-7bd94d33a5f9" (UID: "79b58375-0218-465e-b2d3-7bd94d33a5f9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.267430 4933 generic.go:334] "Generic (PLEG): container finished" podID="79b58375-0218-465e-b2d3-7bd94d33a5f9" containerID="c4e9d4f9be3ca2bb479d7821c07fc200df3234e98f98915b73e8e25bab17c24c" exitCode=143 Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.267471 4933 generic.go:334] "Generic (PLEG): container finished" podID="79b58375-0218-465e-b2d3-7bd94d33a5f9" containerID="a17095c9881d597fcd69702ccd90ef59bb650775880b9f2a531c77253c81838e" exitCode=143 Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.268609 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.271248 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"79b58375-0218-465e-b2d3-7bd94d33a5f9","Type":"ContainerDied","Data":"c4e9d4f9be3ca2bb479d7821c07fc200df3234e98f98915b73e8e25bab17c24c"} Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.271314 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"79b58375-0218-465e-b2d3-7bd94d33a5f9","Type":"ContainerDied","Data":"a17095c9881d597fcd69702ccd90ef59bb650775880b9f2a531c77253c81838e"} Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.271330 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"79b58375-0218-465e-b2d3-7bd94d33a5f9","Type":"ContainerDied","Data":"e8b3411fd7816dfcc0b36e9d5d1954633d57868f8a17927ae18a7ed6efbdec52"} Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.271351 4933 scope.go:117] "RemoveContainer" containerID="c4e9d4f9be3ca2bb479d7821c07fc200df3234e98f98915b73e8e25bab17c24c" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.271691 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="f3513e89-39be-45bd-8e6c-ce53c3d85eed" containerName="glance-log" containerID="cri-o://fe1dea5a00ed2726c5c8a2628ee212aeea971b780e6ae72ae4c33eb0046db98f" gracePeriod=30 Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.272016 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="f3513e89-39be-45bd-8e6c-ce53c3d85eed" containerName="glance-httpd" containerID="cri-o://04e8df97da4496b2574aac7338bf3e3d16da7d37f27cf97f3cb6dc214a4016c9" gracePeriod=30 Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.282733 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/79b58375-0218-465e-b2d3-7bd94d33a5f9-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.282763 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79b58375-0218-465e-b2d3-7bd94d33a5f9-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.282775 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6wf7k\" (UniqueName: \"kubernetes.io/projected/79b58375-0218-465e-b2d3-7bd94d33a5f9-kube-api-access-6wf7k\") on node \"crc\" DevicePath \"\"" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.282786 4933 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/79b58375-0218-465e-b2d3-7bd94d33a5f9-logs\") on node \"crc\" DevicePath \"\"" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.282796 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79b58375-0218-465e-b2d3-7bd94d33a5f9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.321710 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.325269 4933 scope.go:117] "RemoveContainer" 
containerID="a17095c9881d597fcd69702ccd90ef59bb650775880b9f2a531c77253c81838e" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.331228 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.355976 4933 scope.go:117] "RemoveContainer" containerID="c4e9d4f9be3ca2bb479d7821c07fc200df3234e98f98915b73e8e25bab17c24c" Jan 22 07:21:11 crc kubenswrapper[4933]: E0122 07:21:11.356725 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c4e9d4f9be3ca2bb479d7821c07fc200df3234e98f98915b73e8e25bab17c24c\": container with ID starting with c4e9d4f9be3ca2bb479d7821c07fc200df3234e98f98915b73e8e25bab17c24c not found: ID does not exist" containerID="c4e9d4f9be3ca2bb479d7821c07fc200df3234e98f98915b73e8e25bab17c24c" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.356756 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c4e9d4f9be3ca2bb479d7821c07fc200df3234e98f98915b73e8e25bab17c24c"} err="failed to get container status \"c4e9d4f9be3ca2bb479d7821c07fc200df3234e98f98915b73e8e25bab17c24c\": rpc error: code = NotFound desc = could not find container \"c4e9d4f9be3ca2bb479d7821c07fc200df3234e98f98915b73e8e25bab17c24c\": container with ID starting with c4e9d4f9be3ca2bb479d7821c07fc200df3234e98f98915b73e8e25bab17c24c not found: ID does not exist" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.356777 4933 scope.go:117] "RemoveContainer" containerID="a17095c9881d597fcd69702ccd90ef59bb650775880b9f2a531c77253c81838e" Jan 22 07:21:11 crc kubenswrapper[4933]: E0122 07:21:11.359469 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a17095c9881d597fcd69702ccd90ef59bb650775880b9f2a531c77253c81838e\": container with ID starting with a17095c9881d597fcd69702ccd90ef59bb650775880b9f2a531c77253c81838e not found: ID does not exist" containerID="a17095c9881d597fcd69702ccd90ef59bb650775880b9f2a531c77253c81838e" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.359494 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a17095c9881d597fcd69702ccd90ef59bb650775880b9f2a531c77253c81838e"} err="failed to get container status \"a17095c9881d597fcd69702ccd90ef59bb650775880b9f2a531c77253c81838e\": rpc error: code = NotFound desc = could not find container \"a17095c9881d597fcd69702ccd90ef59bb650775880b9f2a531c77253c81838e\": container with ID starting with a17095c9881d597fcd69702ccd90ef59bb650775880b9f2a531c77253c81838e not found: ID does not exist" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.359510 4933 scope.go:117] "RemoveContainer" containerID="c4e9d4f9be3ca2bb479d7821c07fc200df3234e98f98915b73e8e25bab17c24c" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.365382 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 07:21:11 crc kubenswrapper[4933]: E0122 07:21:11.365743 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79b58375-0218-465e-b2d3-7bd94d33a5f9" containerName="glance-log" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.365760 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="79b58375-0218-465e-b2d3-7bd94d33a5f9" containerName="glance-log" Jan 22 07:21:11 crc kubenswrapper[4933]: E0122 07:21:11.365786 4933 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="79b58375-0218-465e-b2d3-7bd94d33a5f9" containerName="glance-httpd" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.365793 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="79b58375-0218-465e-b2d3-7bd94d33a5f9" containerName="glance-httpd" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.365945 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="79b58375-0218-465e-b2d3-7bd94d33a5f9" containerName="glance-httpd" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.365965 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="79b58375-0218-465e-b2d3-7bd94d33a5f9" containerName="glance-log" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.367011 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.367153 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c4e9d4f9be3ca2bb479d7821c07fc200df3234e98f98915b73e8e25bab17c24c"} err="failed to get container status \"c4e9d4f9be3ca2bb479d7821c07fc200df3234e98f98915b73e8e25bab17c24c\": rpc error: code = NotFound desc = could not find container \"c4e9d4f9be3ca2bb479d7821c07fc200df3234e98f98915b73e8e25bab17c24c\": container with ID starting with c4e9d4f9be3ca2bb479d7821c07fc200df3234e98f98915b73e8e25bab17c24c not found: ID does not exist" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.367194 4933 scope.go:117] "RemoveContainer" containerID="a17095c9881d597fcd69702ccd90ef59bb650775880b9f2a531c77253c81838e" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.371287 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.376497 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.376815 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a17095c9881d597fcd69702ccd90ef59bb650775880b9f2a531c77253c81838e"} err="failed to get container status \"a17095c9881d597fcd69702ccd90ef59bb650775880b9f2a531c77253c81838e\": rpc error: code = NotFound desc = could not find container \"a17095c9881d597fcd69702ccd90ef59bb650775880b9f2a531c77253c81838e\": container with ID starting with a17095c9881d597fcd69702ccd90ef59bb650775880b9f2a531c77253c81838e not found: ID does not exist" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.384242 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d4791b14-4baa-4617-8ed9-be7a99cefa10-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"d4791b14-4baa-4617-8ed9-be7a99cefa10\") " pod="openstack/glance-default-external-api-0" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.384308 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4791b14-4baa-4617-8ed9-be7a99cefa10-config-data\") pod \"glance-default-external-api-0\" (UID: \"d4791b14-4baa-4617-8ed9-be7a99cefa10\") " pod="openstack/glance-default-external-api-0" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.384336 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d4791b14-4baa-4617-8ed9-be7a99cefa10-logs\") pod \"glance-default-external-api-0\" (UID: \"d4791b14-4baa-4617-8ed9-be7a99cefa10\") " pod="openstack/glance-default-external-api-0" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.384369 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4791b14-4baa-4617-8ed9-be7a99cefa10-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"d4791b14-4baa-4617-8ed9-be7a99cefa10\") " pod="openstack/glance-default-external-api-0" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.384458 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6tl5\" (UniqueName: \"kubernetes.io/projected/d4791b14-4baa-4617-8ed9-be7a99cefa10-kube-api-access-w6tl5\") pod \"glance-default-external-api-0\" (UID: \"d4791b14-4baa-4617-8ed9-be7a99cefa10\") " pod="openstack/glance-default-external-api-0" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.384506 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4791b14-4baa-4617-8ed9-be7a99cefa10-scripts\") pod \"glance-default-external-api-0\" (UID: \"d4791b14-4baa-4617-8ed9-be7a99cefa10\") " pod="openstack/glance-default-external-api-0" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.384523 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d4791b14-4baa-4617-8ed9-be7a99cefa10-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"d4791b14-4baa-4617-8ed9-be7a99cefa10\") " pod="openstack/glance-default-external-api-0" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.392278 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.485870 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6tl5\" (UniqueName: \"kubernetes.io/projected/d4791b14-4baa-4617-8ed9-be7a99cefa10-kube-api-access-w6tl5\") pod \"glance-default-external-api-0\" (UID: \"d4791b14-4baa-4617-8ed9-be7a99cefa10\") " pod="openstack/glance-default-external-api-0" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.485980 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4791b14-4baa-4617-8ed9-be7a99cefa10-scripts\") pod \"glance-default-external-api-0\" (UID: \"d4791b14-4baa-4617-8ed9-be7a99cefa10\") " pod="openstack/glance-default-external-api-0" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.486184 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d4791b14-4baa-4617-8ed9-be7a99cefa10-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"d4791b14-4baa-4617-8ed9-be7a99cefa10\") " pod="openstack/glance-default-external-api-0" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.486237 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d4791b14-4baa-4617-8ed9-be7a99cefa10-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"d4791b14-4baa-4617-8ed9-be7a99cefa10\") " 
pod="openstack/glance-default-external-api-0" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.486324 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4791b14-4baa-4617-8ed9-be7a99cefa10-config-data\") pod \"glance-default-external-api-0\" (UID: \"d4791b14-4baa-4617-8ed9-be7a99cefa10\") " pod="openstack/glance-default-external-api-0" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.486382 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d4791b14-4baa-4617-8ed9-be7a99cefa10-logs\") pod \"glance-default-external-api-0\" (UID: \"d4791b14-4baa-4617-8ed9-be7a99cefa10\") " pod="openstack/glance-default-external-api-0" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.486464 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4791b14-4baa-4617-8ed9-be7a99cefa10-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"d4791b14-4baa-4617-8ed9-be7a99cefa10\") " pod="openstack/glance-default-external-api-0" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.486636 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d4791b14-4baa-4617-8ed9-be7a99cefa10-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"d4791b14-4baa-4617-8ed9-be7a99cefa10\") " pod="openstack/glance-default-external-api-0" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.486897 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d4791b14-4baa-4617-8ed9-be7a99cefa10-logs\") pod \"glance-default-external-api-0\" (UID: \"d4791b14-4baa-4617-8ed9-be7a99cefa10\") " pod="openstack/glance-default-external-api-0" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.489693 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4791b14-4baa-4617-8ed9-be7a99cefa10-scripts\") pod \"glance-default-external-api-0\" (UID: \"d4791b14-4baa-4617-8ed9-be7a99cefa10\") " pod="openstack/glance-default-external-api-0" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.489750 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4791b14-4baa-4617-8ed9-be7a99cefa10-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"d4791b14-4baa-4617-8ed9-be7a99cefa10\") " pod="openstack/glance-default-external-api-0" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.490576 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d4791b14-4baa-4617-8ed9-be7a99cefa10-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"d4791b14-4baa-4617-8ed9-be7a99cefa10\") " pod="openstack/glance-default-external-api-0" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.491632 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4791b14-4baa-4617-8ed9-be7a99cefa10-config-data\") pod \"glance-default-external-api-0\" (UID: \"d4791b14-4baa-4617-8ed9-be7a99cefa10\") " pod="openstack/glance-default-external-api-0" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.510892 4933 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-w6tl5\" (UniqueName: \"kubernetes.io/projected/d4791b14-4baa-4617-8ed9-be7a99cefa10-kube-api-access-w6tl5\") pod \"glance-default-external-api-0\" (UID: \"d4791b14-4baa-4617-8ed9-be7a99cefa10\") " pod="openstack/glance-default-external-api-0" Jan 22 07:21:11 crc kubenswrapper[4933]: I0122 07:21:11.741276 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 22 07:21:12 crc kubenswrapper[4933]: I0122 07:21:12.277615 4933 generic.go:334] "Generic (PLEG): container finished" podID="f3513e89-39be-45bd-8e6c-ce53c3d85eed" containerID="04e8df97da4496b2574aac7338bf3e3d16da7d37f27cf97f3cb6dc214a4016c9" exitCode=0 Jan 22 07:21:12 crc kubenswrapper[4933]: I0122 07:21:12.277900 4933 generic.go:334] "Generic (PLEG): container finished" podID="f3513e89-39be-45bd-8e6c-ce53c3d85eed" containerID="fe1dea5a00ed2726c5c8a2628ee212aeea971b780e6ae72ae4c33eb0046db98f" exitCode=143 Jan 22 07:21:12 crc kubenswrapper[4933]: I0122 07:21:12.277689 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f3513e89-39be-45bd-8e6c-ce53c3d85eed","Type":"ContainerDied","Data":"04e8df97da4496b2574aac7338bf3e3d16da7d37f27cf97f3cb6dc214a4016c9"} Jan 22 07:21:12 crc kubenswrapper[4933]: I0122 07:21:12.277968 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f3513e89-39be-45bd-8e6c-ce53c3d85eed","Type":"ContainerDied","Data":"fe1dea5a00ed2726c5c8a2628ee212aeea971b780e6ae72ae4c33eb0046db98f"} Jan 22 07:21:12 crc kubenswrapper[4933]: I0122 07:21:12.357570 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 07:21:12 crc kubenswrapper[4933]: W0122 07:21:12.365930 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd4791b14_4baa_4617_8ed9_be7a99cefa10.slice/crio-811a8828696100867ca7897585fb5b058e70a0ca53671a6abfb71d039a0fa5a1 WatchSource:0}: Error finding container 811a8828696100867ca7897585fb5b058e70a0ca53671a6abfb71d039a0fa5a1: Status 404 returned error can't find the container with id 811a8828696100867ca7897585fb5b058e70a0ca53671a6abfb71d039a0fa5a1 Jan 22 07:21:12 crc kubenswrapper[4933]: I0122 07:21:12.510778 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="79b58375-0218-465e-b2d3-7bd94d33a5f9" path="/var/lib/kubelet/pods/79b58375-0218-465e-b2d3-7bd94d33a5f9/volumes" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.171986 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.226536 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f3513e89-39be-45bd-8e6c-ce53c3d85eed-scripts\") pod \"f3513e89-39be-45bd-8e6c-ce53c3d85eed\" (UID: \"f3513e89-39be-45bd-8e6c-ce53c3d85eed\") " Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.226901 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8thvs\" (UniqueName: \"kubernetes.io/projected/f3513e89-39be-45bd-8e6c-ce53c3d85eed-kube-api-access-8thvs\") pod \"f3513e89-39be-45bd-8e6c-ce53c3d85eed\" (UID: \"f3513e89-39be-45bd-8e6c-ce53c3d85eed\") " Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.226957 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f3513e89-39be-45bd-8e6c-ce53c3d85eed-logs\") pod \"f3513e89-39be-45bd-8e6c-ce53c3d85eed\" (UID: \"f3513e89-39be-45bd-8e6c-ce53c3d85eed\") " Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.227037 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3513e89-39be-45bd-8e6c-ce53c3d85eed-config-data\") pod \"f3513e89-39be-45bd-8e6c-ce53c3d85eed\" (UID: \"f3513e89-39be-45bd-8e6c-ce53c3d85eed\") " Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.227109 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f3513e89-39be-45bd-8e6c-ce53c3d85eed-httpd-run\") pod \"f3513e89-39be-45bd-8e6c-ce53c3d85eed\" (UID: \"f3513e89-39be-45bd-8e6c-ce53c3d85eed\") " Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.227204 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3513e89-39be-45bd-8e6c-ce53c3d85eed-combined-ca-bundle\") pod \"f3513e89-39be-45bd-8e6c-ce53c3d85eed\" (UID: \"f3513e89-39be-45bd-8e6c-ce53c3d85eed\") " Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.227819 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f3513e89-39be-45bd-8e6c-ce53c3d85eed-logs" (OuterVolumeSpecName: "logs") pod "f3513e89-39be-45bd-8e6c-ce53c3d85eed" (UID: "f3513e89-39be-45bd-8e6c-ce53c3d85eed"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.228458 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f3513e89-39be-45bd-8e6c-ce53c3d85eed-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "f3513e89-39be-45bd-8e6c-ce53c3d85eed" (UID: "f3513e89-39be-45bd-8e6c-ce53c3d85eed"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.233446 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f3513e89-39be-45bd-8e6c-ce53c3d85eed-kube-api-access-8thvs" (OuterVolumeSpecName: "kube-api-access-8thvs") pod "f3513e89-39be-45bd-8e6c-ce53c3d85eed" (UID: "f3513e89-39be-45bd-8e6c-ce53c3d85eed"). InnerVolumeSpecName "kube-api-access-8thvs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.235144 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f3513e89-39be-45bd-8e6c-ce53c3d85eed-scripts" (OuterVolumeSpecName: "scripts") pod "f3513e89-39be-45bd-8e6c-ce53c3d85eed" (UID: "f3513e89-39be-45bd-8e6c-ce53c3d85eed"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.265799 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f3513e89-39be-45bd-8e6c-ce53c3d85eed-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f3513e89-39be-45bd-8e6c-ce53c3d85eed" (UID: "f3513e89-39be-45bd-8e6c-ce53c3d85eed"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.277622 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f3513e89-39be-45bd-8e6c-ce53c3d85eed-config-data" (OuterVolumeSpecName: "config-data") pod "f3513e89-39be-45bd-8e6c-ce53c3d85eed" (UID: "f3513e89-39be-45bd-8e6c-ce53c3d85eed"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.291951 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f3513e89-39be-45bd-8e6c-ce53c3d85eed","Type":"ContainerDied","Data":"1faf700cd9887e7714693ae3fa93f5adfa9f0831d6f8217a76909ace7f89d185"} Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.292014 4933 scope.go:117] "RemoveContainer" containerID="04e8df97da4496b2574aac7338bf3e3d16da7d37f27cf97f3cb6dc214a4016c9" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.292136 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.306481 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"d4791b14-4baa-4617-8ed9-be7a99cefa10","Type":"ContainerStarted","Data":"c9596897b78fcef7e14f3af766be7bf3274b03cc4d6e81701c5db05b4038497f"} Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.306522 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"d4791b14-4baa-4617-8ed9-be7a99cefa10","Type":"ContainerStarted","Data":"811a8828696100867ca7897585fb5b058e70a0ca53671a6abfb71d039a0fa5a1"} Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.328466 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.329238 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f3513e89-39be-45bd-8e6c-ce53c3d85eed-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.329278 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8thvs\" (UniqueName: \"kubernetes.io/projected/f3513e89-39be-45bd-8e6c-ce53c3d85eed-kube-api-access-8thvs\") on node \"crc\" DevicePath \"\"" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.329293 4933 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f3513e89-39be-45bd-8e6c-ce53c3d85eed-logs\") on node \"crc\" DevicePath \"\"" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.329306 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3513e89-39be-45bd-8e6c-ce53c3d85eed-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.329318 4933 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f3513e89-39be-45bd-8e6c-ce53c3d85eed-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.329328 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3513e89-39be-45bd-8e6c-ce53c3d85eed-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.346681 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.355966 4933 scope.go:117] "RemoveContainer" containerID="fe1dea5a00ed2726c5c8a2628ee212aeea971b780e6ae72ae4c33eb0046db98f" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.359750 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 07:21:13 crc kubenswrapper[4933]: E0122 07:21:13.360180 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3513e89-39be-45bd-8e6c-ce53c3d85eed" containerName="glance-log" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.360201 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3513e89-39be-45bd-8e6c-ce53c3d85eed" containerName="glance-log" Jan 22 07:21:13 crc kubenswrapper[4933]: E0122 07:21:13.360238 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3513e89-39be-45bd-8e6c-ce53c3d85eed" containerName="glance-httpd" Jan 22 07:21:13 crc 
kubenswrapper[4933]: I0122 07:21:13.360246 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3513e89-39be-45bd-8e6c-ce53c3d85eed" containerName="glance-httpd" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.360476 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="f3513e89-39be-45bd-8e6c-ce53c3d85eed" containerName="glance-httpd" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.360499 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="f3513e89-39be-45bd-8e6c-ce53c3d85eed" containerName="glance-log" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.361589 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.364767 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.364889 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.371651 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.431551 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ft9n6\" (UniqueName: \"kubernetes.io/projected/092e7306-126b-461f-926e-40eb750fe16c-kube-api-access-ft9n6\") pod \"glance-default-internal-api-0\" (UID: \"092e7306-126b-461f-926e-40eb750fe16c\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.431644 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/092e7306-126b-461f-926e-40eb750fe16c-scripts\") pod \"glance-default-internal-api-0\" (UID: \"092e7306-126b-461f-926e-40eb750fe16c\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.431671 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/092e7306-126b-461f-926e-40eb750fe16c-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"092e7306-126b-461f-926e-40eb750fe16c\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.431703 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/092e7306-126b-461f-926e-40eb750fe16c-config-data\") pod \"glance-default-internal-api-0\" (UID: \"092e7306-126b-461f-926e-40eb750fe16c\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.431729 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/092e7306-126b-461f-926e-40eb750fe16c-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"092e7306-126b-461f-926e-40eb750fe16c\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.431803 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/092e7306-126b-461f-926e-40eb750fe16c-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"092e7306-126b-461f-926e-40eb750fe16c\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.432980 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/092e7306-126b-461f-926e-40eb750fe16c-logs\") pod \"glance-default-internal-api-0\" (UID: \"092e7306-126b-461f-926e-40eb750fe16c\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.534026 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/092e7306-126b-461f-926e-40eb750fe16c-scripts\") pod \"glance-default-internal-api-0\" (UID: \"092e7306-126b-461f-926e-40eb750fe16c\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.534303 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/092e7306-126b-461f-926e-40eb750fe16c-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"092e7306-126b-461f-926e-40eb750fe16c\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.534517 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/092e7306-126b-461f-926e-40eb750fe16c-config-data\") pod \"glance-default-internal-api-0\" (UID: \"092e7306-126b-461f-926e-40eb750fe16c\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.534631 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/092e7306-126b-461f-926e-40eb750fe16c-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"092e7306-126b-461f-926e-40eb750fe16c\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.534843 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/092e7306-126b-461f-926e-40eb750fe16c-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"092e7306-126b-461f-926e-40eb750fe16c\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.534986 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/092e7306-126b-461f-926e-40eb750fe16c-logs\") pod \"glance-default-internal-api-0\" (UID: \"092e7306-126b-461f-926e-40eb750fe16c\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.535290 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ft9n6\" (UniqueName: \"kubernetes.io/projected/092e7306-126b-461f-926e-40eb750fe16c-kube-api-access-ft9n6\") pod \"glance-default-internal-api-0\" (UID: \"092e7306-126b-461f-926e-40eb750fe16c\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.537528 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/092e7306-126b-461f-926e-40eb750fe16c-logs\") pod 
\"glance-default-internal-api-0\" (UID: \"092e7306-126b-461f-926e-40eb750fe16c\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.538053 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/092e7306-126b-461f-926e-40eb750fe16c-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"092e7306-126b-461f-926e-40eb750fe16c\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.538661 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/092e7306-126b-461f-926e-40eb750fe16c-scripts\") pod \"glance-default-internal-api-0\" (UID: \"092e7306-126b-461f-926e-40eb750fe16c\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.541679 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/092e7306-126b-461f-926e-40eb750fe16c-config-data\") pod \"glance-default-internal-api-0\" (UID: \"092e7306-126b-461f-926e-40eb750fe16c\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.541795 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/092e7306-126b-461f-926e-40eb750fe16c-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"092e7306-126b-461f-926e-40eb750fe16c\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.542377 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/092e7306-126b-461f-926e-40eb750fe16c-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"092e7306-126b-461f-926e-40eb750fe16c\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.555495 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ft9n6\" (UniqueName: \"kubernetes.io/projected/092e7306-126b-461f-926e-40eb750fe16c-kube-api-access-ft9n6\") pod \"glance-default-internal-api-0\" (UID: \"092e7306-126b-461f-926e-40eb750fe16c\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:21:13 crc kubenswrapper[4933]: I0122 07:21:13.692707 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 22 07:21:14 crc kubenswrapper[4933]: I0122 07:21:14.231013 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 07:21:14 crc kubenswrapper[4933]: I0122 07:21:14.320920 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"d4791b14-4baa-4617-8ed9-be7a99cefa10","Type":"ContainerStarted","Data":"7734da35a424a967b843c3fd0e2e954fb00ef47bc9ccae1413f16767fba617d8"} Jan 22 07:21:14 crc kubenswrapper[4933]: I0122 07:21:14.322680 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"092e7306-126b-461f-926e-40eb750fe16c","Type":"ContainerStarted","Data":"4c9097d9029abafe88ee5edd24b87bf642c77c9d425e8557bfa28c539728f750"} Jan 22 07:21:14 crc kubenswrapper[4933]: I0122 07:21:14.347392 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.347368322 podStartE2EDuration="3.347368322s" podCreationTimestamp="2026-01-22 07:21:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:21:14.340591758 +0000 UTC m=+5722.177717111" watchObservedRunningTime="2026-01-22 07:21:14.347368322 +0000 UTC m=+5722.184493675" Jan 22 07:21:14 crc kubenswrapper[4933]: I0122 07:21:14.505394 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f3513e89-39be-45bd-8e6c-ce53c3d85eed" path="/var/lib/kubelet/pods/f3513e89-39be-45bd-8e6c-ce53c3d85eed/volumes" Jan 22 07:21:15 crc kubenswrapper[4933]: I0122 07:21:15.335212 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"092e7306-126b-461f-926e-40eb750fe16c","Type":"ContainerStarted","Data":"286a9e012307ad919613e2363acff2d6b5283f8ea4ba44cfa8a37bebd45ea682"} Jan 22 07:21:16 crc kubenswrapper[4933]: I0122 07:21:16.347884 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"092e7306-126b-461f-926e-40eb750fe16c","Type":"ContainerStarted","Data":"bbf8107eab3719f42cb265adf56d054bc8486cb67784a51193d0a5839262e8d6"} Jan 22 07:21:16 crc kubenswrapper[4933]: I0122 07:21:16.381663 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.381640855 podStartE2EDuration="3.381640855s" podCreationTimestamp="2026-01-22 07:21:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:21:16.370228088 +0000 UTC m=+5724.207353481" watchObservedRunningTime="2026-01-22 07:21:16.381640855 +0000 UTC m=+5724.218766208" Jan 22 07:21:17 crc kubenswrapper[4933]: I0122 07:21:17.892252 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-77b79d864c-9jrkg" Jan 22 07:21:17 crc kubenswrapper[4933]: I0122 07:21:17.966616 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-69c775f4cc-wqbrt"] Jan 22 07:21:17 crc kubenswrapper[4933]: I0122 07:21:17.966967 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-69c775f4cc-wqbrt" podUID="f2e0cadd-0b02-471b-bf57-0ae43b550014" containerName="dnsmasq-dns" 
containerID="cri-o://5c4367cccf6c2afd88eba9adaad30a63283e8640287805e2c1333fac918bbd7b" gracePeriod=10 Jan 22 07:21:18 crc kubenswrapper[4933]: I0122 07:21:18.367611 4933 generic.go:334] "Generic (PLEG): container finished" podID="f2e0cadd-0b02-471b-bf57-0ae43b550014" containerID="5c4367cccf6c2afd88eba9adaad30a63283e8640287805e2c1333fac918bbd7b" exitCode=0 Jan 22 07:21:18 crc kubenswrapper[4933]: I0122 07:21:18.367659 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69c775f4cc-wqbrt" event={"ID":"f2e0cadd-0b02-471b-bf57-0ae43b550014","Type":"ContainerDied","Data":"5c4367cccf6c2afd88eba9adaad30a63283e8640287805e2c1333fac918bbd7b"} Jan 22 07:21:19 crc kubenswrapper[4933]: I0122 07:21:19.018938 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-69c775f4cc-wqbrt" Jan 22 07:21:19 crc kubenswrapper[4933]: I0122 07:21:19.129187 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f2e0cadd-0b02-471b-bf57-0ae43b550014-ovsdbserver-sb\") pod \"f2e0cadd-0b02-471b-bf57-0ae43b550014\" (UID: \"f2e0cadd-0b02-471b-bf57-0ae43b550014\") " Jan 22 07:21:19 crc kubenswrapper[4933]: I0122 07:21:19.129320 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9gdzt\" (UniqueName: \"kubernetes.io/projected/f2e0cadd-0b02-471b-bf57-0ae43b550014-kube-api-access-9gdzt\") pod \"f2e0cadd-0b02-471b-bf57-0ae43b550014\" (UID: \"f2e0cadd-0b02-471b-bf57-0ae43b550014\") " Jan 22 07:21:19 crc kubenswrapper[4933]: I0122 07:21:19.129355 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f2e0cadd-0b02-471b-bf57-0ae43b550014-ovsdbserver-nb\") pod \"f2e0cadd-0b02-471b-bf57-0ae43b550014\" (UID: \"f2e0cadd-0b02-471b-bf57-0ae43b550014\") " Jan 22 07:21:19 crc kubenswrapper[4933]: I0122 07:21:19.129411 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f2e0cadd-0b02-471b-bf57-0ae43b550014-dns-svc\") pod \"f2e0cadd-0b02-471b-bf57-0ae43b550014\" (UID: \"f2e0cadd-0b02-471b-bf57-0ae43b550014\") " Jan 22 07:21:19 crc kubenswrapper[4933]: I0122 07:21:19.129498 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f2e0cadd-0b02-471b-bf57-0ae43b550014-config\") pod \"f2e0cadd-0b02-471b-bf57-0ae43b550014\" (UID: \"f2e0cadd-0b02-471b-bf57-0ae43b550014\") " Jan 22 07:21:19 crc kubenswrapper[4933]: I0122 07:21:19.135803 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f2e0cadd-0b02-471b-bf57-0ae43b550014-kube-api-access-9gdzt" (OuterVolumeSpecName: "kube-api-access-9gdzt") pod "f2e0cadd-0b02-471b-bf57-0ae43b550014" (UID: "f2e0cadd-0b02-471b-bf57-0ae43b550014"). InnerVolumeSpecName "kube-api-access-9gdzt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:21:19 crc kubenswrapper[4933]: I0122 07:21:19.175430 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f2e0cadd-0b02-471b-bf57-0ae43b550014-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f2e0cadd-0b02-471b-bf57-0ae43b550014" (UID: "f2e0cadd-0b02-471b-bf57-0ae43b550014"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:21:19 crc kubenswrapper[4933]: I0122 07:21:19.185913 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f2e0cadd-0b02-471b-bf57-0ae43b550014-config" (OuterVolumeSpecName: "config") pod "f2e0cadd-0b02-471b-bf57-0ae43b550014" (UID: "f2e0cadd-0b02-471b-bf57-0ae43b550014"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:21:19 crc kubenswrapper[4933]: I0122 07:21:19.187589 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f2e0cadd-0b02-471b-bf57-0ae43b550014-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f2e0cadd-0b02-471b-bf57-0ae43b550014" (UID: "f2e0cadd-0b02-471b-bf57-0ae43b550014"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:21:19 crc kubenswrapper[4933]: I0122 07:21:19.188756 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f2e0cadd-0b02-471b-bf57-0ae43b550014-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "f2e0cadd-0b02-471b-bf57-0ae43b550014" (UID: "f2e0cadd-0b02-471b-bf57-0ae43b550014"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:21:19 crc kubenswrapper[4933]: I0122 07:21:19.231795 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f2e0cadd-0b02-471b-bf57-0ae43b550014-config\") on node \"crc\" DevicePath \"\"" Jan 22 07:21:19 crc kubenswrapper[4933]: I0122 07:21:19.232109 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f2e0cadd-0b02-471b-bf57-0ae43b550014-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 07:21:19 crc kubenswrapper[4933]: I0122 07:21:19.232228 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9gdzt\" (UniqueName: \"kubernetes.io/projected/f2e0cadd-0b02-471b-bf57-0ae43b550014-kube-api-access-9gdzt\") on node \"crc\" DevicePath \"\"" Jan 22 07:21:19 crc kubenswrapper[4933]: I0122 07:21:19.232369 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f2e0cadd-0b02-471b-bf57-0ae43b550014-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 07:21:19 crc kubenswrapper[4933]: I0122 07:21:19.232476 4933 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f2e0cadd-0b02-471b-bf57-0ae43b550014-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 07:21:19 crc kubenswrapper[4933]: I0122 07:21:19.380200 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69c775f4cc-wqbrt" event={"ID":"f2e0cadd-0b02-471b-bf57-0ae43b550014","Type":"ContainerDied","Data":"886e1f19b6945cc076cdeeb1744382b10f6d1c0b636a8bb38723a167b290fa40"} Jan 22 07:21:19 crc kubenswrapper[4933]: I0122 07:21:19.380265 4933 scope.go:117] "RemoveContainer" containerID="5c4367cccf6c2afd88eba9adaad30a63283e8640287805e2c1333fac918bbd7b" Jan 22 07:21:19 crc kubenswrapper[4933]: I0122 07:21:19.380294 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-69c775f4cc-wqbrt" Jan 22 07:21:19 crc kubenswrapper[4933]: I0122 07:21:19.413286 4933 scope.go:117] "RemoveContainer" containerID="edcdf10ab69b051dfe880a8adbe12c8c2234f419402a6352ae33ebec38b130f3" Jan 22 07:21:19 crc kubenswrapper[4933]: I0122 07:21:19.421395 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-69c775f4cc-wqbrt"] Jan 22 07:21:19 crc kubenswrapper[4933]: I0122 07:21:19.432422 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-69c775f4cc-wqbrt"] Jan 22 07:21:20 crc kubenswrapper[4933]: I0122 07:21:20.502729 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f2e0cadd-0b02-471b-bf57-0ae43b550014" path="/var/lib/kubelet/pods/f2e0cadd-0b02-471b-bf57-0ae43b550014/volumes" Jan 22 07:21:21 crc kubenswrapper[4933]: I0122 07:21:21.742413 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 22 07:21:21 crc kubenswrapper[4933]: I0122 07:21:21.742775 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 22 07:21:21 crc kubenswrapper[4933]: I0122 07:21:21.775538 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 22 07:21:21 crc kubenswrapper[4933]: I0122 07:21:21.788228 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 22 07:21:22 crc kubenswrapper[4933]: I0122 07:21:22.410001 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 22 07:21:22 crc kubenswrapper[4933]: I0122 07:21:22.410054 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 22 07:21:23 crc kubenswrapper[4933]: I0122 07:21:23.693137 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 22 07:21:23 crc kubenswrapper[4933]: I0122 07:21:23.694212 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 22 07:21:23 crc kubenswrapper[4933]: I0122 07:21:23.723264 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 22 07:21:23 crc kubenswrapper[4933]: I0122 07:21:23.739281 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 22 07:21:24 crc kubenswrapper[4933]: I0122 07:21:24.343421 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 22 07:21:24 crc kubenswrapper[4933]: I0122 07:21:24.367365 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 22 07:21:24 crc kubenswrapper[4933]: I0122 07:21:24.424950 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 22 07:21:24 crc kubenswrapper[4933]: I0122 07:21:24.424990 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 22 07:21:26 crc kubenswrapper[4933]: I0122 07:21:26.272211 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/glance-default-internal-api-0" Jan 22 07:21:26 crc kubenswrapper[4933]: I0122 07:21:26.438968 4933 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 22 07:21:26 crc kubenswrapper[4933]: I0122 07:21:26.473483 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 22 07:21:32 crc kubenswrapper[4933]: I0122 07:21:32.110759 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-vvkc9"] Jan 22 07:21:32 crc kubenswrapper[4933]: E0122 07:21:32.111786 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2e0cadd-0b02-471b-bf57-0ae43b550014" containerName="dnsmasq-dns" Jan 22 07:21:32 crc kubenswrapper[4933]: I0122 07:21:32.111802 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2e0cadd-0b02-471b-bf57-0ae43b550014" containerName="dnsmasq-dns" Jan 22 07:21:32 crc kubenswrapper[4933]: E0122 07:21:32.111823 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2e0cadd-0b02-471b-bf57-0ae43b550014" containerName="init" Jan 22 07:21:32 crc kubenswrapper[4933]: I0122 07:21:32.111831 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2e0cadd-0b02-471b-bf57-0ae43b550014" containerName="init" Jan 22 07:21:32 crc kubenswrapper[4933]: I0122 07:21:32.112090 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2e0cadd-0b02-471b-bf57-0ae43b550014" containerName="dnsmasq-dns" Jan 22 07:21:32 crc kubenswrapper[4933]: I0122 07:21:32.128477 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vvkc9" Jan 22 07:21:32 crc kubenswrapper[4933]: I0122 07:21:32.140364 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vvkc9"] Jan 22 07:21:32 crc kubenswrapper[4933]: I0122 07:21:32.232532 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/965afeae-5409-40b6-8826-b159a46d87cd-utilities\") pod \"redhat-operators-vvkc9\" (UID: \"965afeae-5409-40b6-8826-b159a46d87cd\") " pod="openshift-marketplace/redhat-operators-vvkc9" Jan 22 07:21:32 crc kubenswrapper[4933]: I0122 07:21:32.232642 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h7mrd\" (UniqueName: \"kubernetes.io/projected/965afeae-5409-40b6-8826-b159a46d87cd-kube-api-access-h7mrd\") pod \"redhat-operators-vvkc9\" (UID: \"965afeae-5409-40b6-8826-b159a46d87cd\") " pod="openshift-marketplace/redhat-operators-vvkc9" Jan 22 07:21:32 crc kubenswrapper[4933]: I0122 07:21:32.232672 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/965afeae-5409-40b6-8826-b159a46d87cd-catalog-content\") pod \"redhat-operators-vvkc9\" (UID: \"965afeae-5409-40b6-8826-b159a46d87cd\") " pod="openshift-marketplace/redhat-operators-vvkc9" Jan 22 07:21:32 crc kubenswrapper[4933]: I0122 07:21:32.303624 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-6szwq"] Jan 22 07:21:32 crc kubenswrapper[4933]: I0122 07:21:32.305302 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-6szwq" Jan 22 07:21:32 crc kubenswrapper[4933]: I0122 07:21:32.329207 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6szwq"] Jan 22 07:21:32 crc kubenswrapper[4933]: I0122 07:21:32.333786 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h7mrd\" (UniqueName: \"kubernetes.io/projected/965afeae-5409-40b6-8826-b159a46d87cd-kube-api-access-h7mrd\") pod \"redhat-operators-vvkc9\" (UID: \"965afeae-5409-40b6-8826-b159a46d87cd\") " pod="openshift-marketplace/redhat-operators-vvkc9" Jan 22 07:21:32 crc kubenswrapper[4933]: I0122 07:21:32.333830 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/965afeae-5409-40b6-8826-b159a46d87cd-catalog-content\") pod \"redhat-operators-vvkc9\" (UID: \"965afeae-5409-40b6-8826-b159a46d87cd\") " pod="openshift-marketplace/redhat-operators-vvkc9" Jan 22 07:21:32 crc kubenswrapper[4933]: I0122 07:21:32.333907 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/965afeae-5409-40b6-8826-b159a46d87cd-utilities\") pod \"redhat-operators-vvkc9\" (UID: \"965afeae-5409-40b6-8826-b159a46d87cd\") " pod="openshift-marketplace/redhat-operators-vvkc9" Jan 22 07:21:32 crc kubenswrapper[4933]: I0122 07:21:32.334502 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/965afeae-5409-40b6-8826-b159a46d87cd-catalog-content\") pod \"redhat-operators-vvkc9\" (UID: \"965afeae-5409-40b6-8826-b159a46d87cd\") " pod="openshift-marketplace/redhat-operators-vvkc9" Jan 22 07:21:32 crc kubenswrapper[4933]: I0122 07:21:32.335175 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/965afeae-5409-40b6-8826-b159a46d87cd-utilities\") pod \"redhat-operators-vvkc9\" (UID: \"965afeae-5409-40b6-8826-b159a46d87cd\") " pod="openshift-marketplace/redhat-operators-vvkc9" Jan 22 07:21:32 crc kubenswrapper[4933]: I0122 07:21:32.360641 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h7mrd\" (UniqueName: \"kubernetes.io/projected/965afeae-5409-40b6-8826-b159a46d87cd-kube-api-access-h7mrd\") pod \"redhat-operators-vvkc9\" (UID: \"965afeae-5409-40b6-8826-b159a46d87cd\") " pod="openshift-marketplace/redhat-operators-vvkc9" Jan 22 07:21:32 crc kubenswrapper[4933]: I0122 07:21:32.435049 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a04be798-513a-46bd-9433-d4f6e9ad5d91-utilities\") pod \"community-operators-6szwq\" (UID: \"a04be798-513a-46bd-9433-d4f6e9ad5d91\") " pod="openshift-marketplace/community-operators-6szwq" Jan 22 07:21:32 crc kubenswrapper[4933]: I0122 07:21:32.435452 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a04be798-513a-46bd-9433-d4f6e9ad5d91-catalog-content\") pod \"community-operators-6szwq\" (UID: \"a04be798-513a-46bd-9433-d4f6e9ad5d91\") " pod="openshift-marketplace/community-operators-6szwq" Jan 22 07:21:32 crc kubenswrapper[4933]: I0122 07:21:32.435513 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"kube-api-access-bfzh9\" (UniqueName: \"kubernetes.io/projected/a04be798-513a-46bd-9433-d4f6e9ad5d91-kube-api-access-bfzh9\") pod \"community-operators-6szwq\" (UID: \"a04be798-513a-46bd-9433-d4f6e9ad5d91\") " pod="openshift-marketplace/community-operators-6szwq" Jan 22 07:21:32 crc kubenswrapper[4933]: I0122 07:21:32.460156 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vvkc9" Jan 22 07:21:32 crc kubenswrapper[4933]: I0122 07:21:32.537846 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a04be798-513a-46bd-9433-d4f6e9ad5d91-utilities\") pod \"community-operators-6szwq\" (UID: \"a04be798-513a-46bd-9433-d4f6e9ad5d91\") " pod="openshift-marketplace/community-operators-6szwq" Jan 22 07:21:32 crc kubenswrapper[4933]: I0122 07:21:32.537913 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a04be798-513a-46bd-9433-d4f6e9ad5d91-catalog-content\") pod \"community-operators-6szwq\" (UID: \"a04be798-513a-46bd-9433-d4f6e9ad5d91\") " pod="openshift-marketplace/community-operators-6szwq" Jan 22 07:21:32 crc kubenswrapper[4933]: I0122 07:21:32.537987 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bfzh9\" (UniqueName: \"kubernetes.io/projected/a04be798-513a-46bd-9433-d4f6e9ad5d91-kube-api-access-bfzh9\") pod \"community-operators-6szwq\" (UID: \"a04be798-513a-46bd-9433-d4f6e9ad5d91\") " pod="openshift-marketplace/community-operators-6szwq" Jan 22 07:21:32 crc kubenswrapper[4933]: I0122 07:21:32.539441 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a04be798-513a-46bd-9433-d4f6e9ad5d91-utilities\") pod \"community-operators-6szwq\" (UID: \"a04be798-513a-46bd-9433-d4f6e9ad5d91\") " pod="openshift-marketplace/community-operators-6szwq" Jan 22 07:21:32 crc kubenswrapper[4933]: I0122 07:21:32.540597 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a04be798-513a-46bd-9433-d4f6e9ad5d91-catalog-content\") pod \"community-operators-6szwq\" (UID: \"a04be798-513a-46bd-9433-d4f6e9ad5d91\") " pod="openshift-marketplace/community-operators-6szwq" Jan 22 07:21:32 crc kubenswrapper[4933]: I0122 07:21:32.563243 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bfzh9\" (UniqueName: \"kubernetes.io/projected/a04be798-513a-46bd-9433-d4f6e9ad5d91-kube-api-access-bfzh9\") pod \"community-operators-6szwq\" (UID: \"a04be798-513a-46bd-9433-d4f6e9ad5d91\") " pod="openshift-marketplace/community-operators-6szwq" Jan 22 07:21:32 crc kubenswrapper[4933]: I0122 07:21:32.619109 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6szwq" Jan 22 07:21:35 crc kubenswrapper[4933]: I0122 07:21:32.717213 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-5z6vc"] Jan 22 07:21:35 crc kubenswrapper[4933]: I0122 07:21:32.718835 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-5z6vc" Jan 22 07:21:35 crc kubenswrapper[4933]: I0122 07:21:32.730444 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-5z6vc"] Jan 22 07:21:35 crc kubenswrapper[4933]: I0122 07:21:32.744349 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e0b51acb-e0fb-43c4-a533-bdd60e20b081-operator-scripts\") pod \"placement-db-create-5z6vc\" (UID: \"e0b51acb-e0fb-43c4-a533-bdd60e20b081\") " pod="openstack/placement-db-create-5z6vc" Jan 22 07:21:35 crc kubenswrapper[4933]: I0122 07:21:32.744396 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-swbj5\" (UniqueName: \"kubernetes.io/projected/e0b51acb-e0fb-43c4-a533-bdd60e20b081-kube-api-access-swbj5\") pod \"placement-db-create-5z6vc\" (UID: \"e0b51acb-e0fb-43c4-a533-bdd60e20b081\") " pod="openstack/placement-db-create-5z6vc" Jan 22 07:21:35 crc kubenswrapper[4933]: I0122 07:21:32.817130 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-6182-account-create-update-5kkbv"] Jan 22 07:21:35 crc kubenswrapper[4933]: I0122 07:21:32.819919 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-6182-account-create-update-5kkbv" Jan 22 07:21:35 crc kubenswrapper[4933]: I0122 07:21:32.824398 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Jan 22 07:21:35 crc kubenswrapper[4933]: I0122 07:21:32.846301 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e0b51acb-e0fb-43c4-a533-bdd60e20b081-operator-scripts\") pod \"placement-db-create-5z6vc\" (UID: \"e0b51acb-e0fb-43c4-a533-bdd60e20b081\") " pod="openstack/placement-db-create-5z6vc" Jan 22 07:21:35 crc kubenswrapper[4933]: I0122 07:21:32.846368 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-swbj5\" (UniqueName: \"kubernetes.io/projected/e0b51acb-e0fb-43c4-a533-bdd60e20b081-kube-api-access-swbj5\") pod \"placement-db-create-5z6vc\" (UID: \"e0b51acb-e0fb-43c4-a533-bdd60e20b081\") " pod="openstack/placement-db-create-5z6vc" Jan 22 07:21:35 crc kubenswrapper[4933]: I0122 07:21:32.848250 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e0b51acb-e0fb-43c4-a533-bdd60e20b081-operator-scripts\") pod \"placement-db-create-5z6vc\" (UID: \"e0b51acb-e0fb-43c4-a533-bdd60e20b081\") " pod="openstack/placement-db-create-5z6vc" Jan 22 07:21:35 crc kubenswrapper[4933]: I0122 07:21:32.852478 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-6182-account-create-update-5kkbv"] Jan 22 07:21:35 crc kubenswrapper[4933]: I0122 07:21:32.875881 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-swbj5\" (UniqueName: \"kubernetes.io/projected/e0b51acb-e0fb-43c4-a533-bdd60e20b081-kube-api-access-swbj5\") pod \"placement-db-create-5z6vc\" (UID: \"e0b51acb-e0fb-43c4-a533-bdd60e20b081\") " pod="openstack/placement-db-create-5z6vc" Jan 22 07:21:35 crc kubenswrapper[4933]: I0122 07:21:32.948584 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/00647053-41cd-46da-8ca5-c85caba22fa1-operator-scripts\") pod \"placement-6182-account-create-update-5kkbv\" (UID: \"00647053-41cd-46da-8ca5-c85caba22fa1\") " pod="openstack/placement-6182-account-create-update-5kkbv" Jan 22 07:21:35 crc kubenswrapper[4933]: I0122 07:21:32.948654 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lgcsx\" (UniqueName: \"kubernetes.io/projected/00647053-41cd-46da-8ca5-c85caba22fa1-kube-api-access-lgcsx\") pod \"placement-6182-account-create-update-5kkbv\" (UID: \"00647053-41cd-46da-8ca5-c85caba22fa1\") " pod="openstack/placement-6182-account-create-update-5kkbv" Jan 22 07:21:35 crc kubenswrapper[4933]: I0122 07:21:33.050039 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/00647053-41cd-46da-8ca5-c85caba22fa1-operator-scripts\") pod \"placement-6182-account-create-update-5kkbv\" (UID: \"00647053-41cd-46da-8ca5-c85caba22fa1\") " pod="openstack/placement-6182-account-create-update-5kkbv" Jan 22 07:21:35 crc kubenswrapper[4933]: I0122 07:21:33.050134 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lgcsx\" (UniqueName: \"kubernetes.io/projected/00647053-41cd-46da-8ca5-c85caba22fa1-kube-api-access-lgcsx\") pod \"placement-6182-account-create-update-5kkbv\" (UID: \"00647053-41cd-46da-8ca5-c85caba22fa1\") " pod="openstack/placement-6182-account-create-update-5kkbv" Jan 22 07:21:35 crc kubenswrapper[4933]: I0122 07:21:33.051028 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/00647053-41cd-46da-8ca5-c85caba22fa1-operator-scripts\") pod \"placement-6182-account-create-update-5kkbv\" (UID: \"00647053-41cd-46da-8ca5-c85caba22fa1\") " pod="openstack/placement-6182-account-create-update-5kkbv" Jan 22 07:21:35 crc kubenswrapper[4933]: I0122 07:21:33.086613 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-5z6vc" Jan 22 07:21:35 crc kubenswrapper[4933]: I0122 07:21:33.086837 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lgcsx\" (UniqueName: \"kubernetes.io/projected/00647053-41cd-46da-8ca5-c85caba22fa1-kube-api-access-lgcsx\") pod \"placement-6182-account-create-update-5kkbv\" (UID: \"00647053-41cd-46da-8ca5-c85caba22fa1\") " pod="openstack/placement-6182-account-create-update-5kkbv" Jan 22 07:21:35 crc kubenswrapper[4933]: I0122 07:21:33.179460 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-6182-account-create-update-5kkbv" Jan 22 07:21:35 crc kubenswrapper[4933]: I0122 07:21:35.362189 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vvkc9"] Jan 22 07:21:35 crc kubenswrapper[4933]: I0122 07:21:35.453770 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-5z6vc"] Jan 22 07:21:35 crc kubenswrapper[4933]: W0122 07:21:35.454788 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode0b51acb_e0fb_43c4_a533_bdd60e20b081.slice/crio-c72620312c235f6d03b9b6ffebfb275fa886d09df87a11efbb1016141b20f01f WatchSource:0}: Error finding container c72620312c235f6d03b9b6ffebfb275fa886d09df87a11efbb1016141b20f01f: Status 404 returned error can't find the container with id c72620312c235f6d03b9b6ffebfb275fa886d09df87a11efbb1016141b20f01f Jan 22 07:21:35 crc kubenswrapper[4933]: I0122 07:21:35.471482 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6szwq"] Jan 22 07:21:35 crc kubenswrapper[4933]: I0122 07:21:35.483908 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-6182-account-create-update-5kkbv"] Jan 22 07:21:35 crc kubenswrapper[4933]: I0122 07:21:35.526431 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-5z6vc" event={"ID":"e0b51acb-e0fb-43c4-a533-bdd60e20b081","Type":"ContainerStarted","Data":"c72620312c235f6d03b9b6ffebfb275fa886d09df87a11efbb1016141b20f01f"} Jan 22 07:21:35 crc kubenswrapper[4933]: I0122 07:21:35.532621 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-6182-account-create-update-5kkbv" event={"ID":"00647053-41cd-46da-8ca5-c85caba22fa1","Type":"ContainerStarted","Data":"9a2d4dfd8e83536706263e4729bbbc039c1af8322172c3f8d07d3121d0dd1d1e"} Jan 22 07:21:35 crc kubenswrapper[4933]: I0122 07:21:35.534281 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vvkc9" event={"ID":"965afeae-5409-40b6-8826-b159a46d87cd","Type":"ContainerStarted","Data":"43e32dc758f0b92c3de2900df438676f9285fee6be67ef05406d65b779521a0d"} Jan 22 07:21:35 crc kubenswrapper[4933]: I0122 07:21:35.535720 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6szwq" event={"ID":"a04be798-513a-46bd-9433-d4f6e9ad5d91","Type":"ContainerStarted","Data":"3e29f22b1b2128bdfc0463a481b87af7aa784396f97e579f62279bbbfee9b160"} Jan 22 07:21:36 crc kubenswrapper[4933]: I0122 07:21:36.550551 4933 generic.go:334] "Generic (PLEG): container finished" podID="a04be798-513a-46bd-9433-d4f6e9ad5d91" containerID="e1e6be2678d931627596caeab4786a5caae914ce2d9100dcfee538adcc245190" exitCode=0 Jan 22 07:21:36 crc kubenswrapper[4933]: I0122 07:21:36.550674 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6szwq" event={"ID":"a04be798-513a-46bd-9433-d4f6e9ad5d91","Type":"ContainerDied","Data":"e1e6be2678d931627596caeab4786a5caae914ce2d9100dcfee538adcc245190"} Jan 22 07:21:36 crc kubenswrapper[4933]: I0122 07:21:36.553457 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-5z6vc" event={"ID":"e0b51acb-e0fb-43c4-a533-bdd60e20b081","Type":"ContainerStarted","Data":"1ad721049ec2aa2dd596c4a27d5a9689bf2452ffb9cc7625611aabd67f515f00"} Jan 22 07:21:36 crc kubenswrapper[4933]: I0122 07:21:36.555396 4933 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-6182-account-create-update-5kkbv" event={"ID":"00647053-41cd-46da-8ca5-c85caba22fa1","Type":"ContainerStarted","Data":"77b387574fb21c0b4f690e5c746ffd27323bb7355a884140f938fe4da4b9a95c"} Jan 22 07:21:36 crc kubenswrapper[4933]: I0122 07:21:36.558366 4933 generic.go:334] "Generic (PLEG): container finished" podID="965afeae-5409-40b6-8826-b159a46d87cd" containerID="ebdb1207f6c10c2fa8a403ace8b344a0c8ed48ed3218c4859d2f03dc4cad1889" exitCode=0 Jan 22 07:21:36 crc kubenswrapper[4933]: I0122 07:21:36.558410 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vvkc9" event={"ID":"965afeae-5409-40b6-8826-b159a46d87cd","Type":"ContainerDied","Data":"ebdb1207f6c10c2fa8a403ace8b344a0c8ed48ed3218c4859d2f03dc4cad1889"} Jan 22 07:21:36 crc kubenswrapper[4933]: I0122 07:21:36.597356 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-6182-account-create-update-5kkbv" podStartSLOduration=4.597339864 podStartE2EDuration="4.597339864s" podCreationTimestamp="2026-01-22 07:21:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:21:36.590911518 +0000 UTC m=+5744.428036891" watchObservedRunningTime="2026-01-22 07:21:36.597339864 +0000 UTC m=+5744.434465217" Jan 22 07:21:36 crc kubenswrapper[4933]: I0122 07:21:36.617756 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-create-5z6vc" podStartSLOduration=4.6177387 podStartE2EDuration="4.6177387s" podCreationTimestamp="2026-01-22 07:21:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:21:36.605775059 +0000 UTC m=+5744.442900412" watchObservedRunningTime="2026-01-22 07:21:36.6177387 +0000 UTC m=+5744.454864043" Jan 22 07:21:37 crc kubenswrapper[4933]: I0122 07:21:37.567321 4933 generic.go:334] "Generic (PLEG): container finished" podID="00647053-41cd-46da-8ca5-c85caba22fa1" containerID="77b387574fb21c0b4f690e5c746ffd27323bb7355a884140f938fe4da4b9a95c" exitCode=0 Jan 22 07:21:37 crc kubenswrapper[4933]: I0122 07:21:37.567397 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-6182-account-create-update-5kkbv" event={"ID":"00647053-41cd-46da-8ca5-c85caba22fa1","Type":"ContainerDied","Data":"77b387574fb21c0b4f690e5c746ffd27323bb7355a884140f938fe4da4b9a95c"} Jan 22 07:21:37 crc kubenswrapper[4933]: I0122 07:21:37.568861 4933 generic.go:334] "Generic (PLEG): container finished" podID="e0b51acb-e0fb-43c4-a533-bdd60e20b081" containerID="1ad721049ec2aa2dd596c4a27d5a9689bf2452ffb9cc7625611aabd67f515f00" exitCode=0 Jan 22 07:21:37 crc kubenswrapper[4933]: I0122 07:21:37.568896 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-5z6vc" event={"ID":"e0b51acb-e0fb-43c4-a533-bdd60e20b081","Type":"ContainerDied","Data":"1ad721049ec2aa2dd596c4a27d5a9689bf2452ffb9cc7625611aabd67f515f00"} Jan 22 07:21:39 crc kubenswrapper[4933]: I0122 07:21:39.039354 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-6182-account-create-update-5kkbv" Jan 22 07:21:39 crc kubenswrapper[4933]: I0122 07:21:39.045030 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-5z6vc" Jan 22 07:21:39 crc kubenswrapper[4933]: I0122 07:21:39.058588 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-swbj5\" (UniqueName: \"kubernetes.io/projected/e0b51acb-e0fb-43c4-a533-bdd60e20b081-kube-api-access-swbj5\") pod \"e0b51acb-e0fb-43c4-a533-bdd60e20b081\" (UID: \"e0b51acb-e0fb-43c4-a533-bdd60e20b081\") " Jan 22 07:21:39 crc kubenswrapper[4933]: I0122 07:21:39.058790 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e0b51acb-e0fb-43c4-a533-bdd60e20b081-operator-scripts\") pod \"e0b51acb-e0fb-43c4-a533-bdd60e20b081\" (UID: \"e0b51acb-e0fb-43c4-a533-bdd60e20b081\") " Jan 22 07:21:39 crc kubenswrapper[4933]: I0122 07:21:39.059003 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/00647053-41cd-46da-8ca5-c85caba22fa1-operator-scripts\") pod \"00647053-41cd-46da-8ca5-c85caba22fa1\" (UID: \"00647053-41cd-46da-8ca5-c85caba22fa1\") " Jan 22 07:21:39 crc kubenswrapper[4933]: I0122 07:21:39.059316 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lgcsx\" (UniqueName: \"kubernetes.io/projected/00647053-41cd-46da-8ca5-c85caba22fa1-kube-api-access-lgcsx\") pod \"00647053-41cd-46da-8ca5-c85caba22fa1\" (UID: \"00647053-41cd-46da-8ca5-c85caba22fa1\") " Jan 22 07:21:39 crc kubenswrapper[4933]: I0122 07:21:39.059488 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e0b51acb-e0fb-43c4-a533-bdd60e20b081-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e0b51acb-e0fb-43c4-a533-bdd60e20b081" (UID: "e0b51acb-e0fb-43c4-a533-bdd60e20b081"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:21:39 crc kubenswrapper[4933]: I0122 07:21:39.059633 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/00647053-41cd-46da-8ca5-c85caba22fa1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "00647053-41cd-46da-8ca5-c85caba22fa1" (UID: "00647053-41cd-46da-8ca5-c85caba22fa1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:21:39 crc kubenswrapper[4933]: I0122 07:21:39.060064 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e0b51acb-e0fb-43c4-a533-bdd60e20b081-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:21:39 crc kubenswrapper[4933]: I0122 07:21:39.060114 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/00647053-41cd-46da-8ca5-c85caba22fa1-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:21:39 crc kubenswrapper[4933]: I0122 07:21:39.064122 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e0b51acb-e0fb-43c4-a533-bdd60e20b081-kube-api-access-swbj5" (OuterVolumeSpecName: "kube-api-access-swbj5") pod "e0b51acb-e0fb-43c4-a533-bdd60e20b081" (UID: "e0b51acb-e0fb-43c4-a533-bdd60e20b081"). InnerVolumeSpecName "kube-api-access-swbj5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:21:39 crc kubenswrapper[4933]: I0122 07:21:39.064634 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00647053-41cd-46da-8ca5-c85caba22fa1-kube-api-access-lgcsx" (OuterVolumeSpecName: "kube-api-access-lgcsx") pod "00647053-41cd-46da-8ca5-c85caba22fa1" (UID: "00647053-41cd-46da-8ca5-c85caba22fa1"). InnerVolumeSpecName "kube-api-access-lgcsx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:21:39 crc kubenswrapper[4933]: I0122 07:21:39.162056 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lgcsx\" (UniqueName: \"kubernetes.io/projected/00647053-41cd-46da-8ca5-c85caba22fa1-kube-api-access-lgcsx\") on node \"crc\" DevicePath \"\"" Jan 22 07:21:39 crc kubenswrapper[4933]: I0122 07:21:39.162122 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-swbj5\" (UniqueName: \"kubernetes.io/projected/e0b51acb-e0fb-43c4-a533-bdd60e20b081-kube-api-access-swbj5\") on node \"crc\" DevicePath \"\"" Jan 22 07:21:39 crc kubenswrapper[4933]: I0122 07:21:39.593181 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-5z6vc" Jan 22 07:21:39 crc kubenswrapper[4933]: I0122 07:21:39.593180 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-5z6vc" event={"ID":"e0b51acb-e0fb-43c4-a533-bdd60e20b081","Type":"ContainerDied","Data":"c72620312c235f6d03b9b6ffebfb275fa886d09df87a11efbb1016141b20f01f"} Jan 22 07:21:39 crc kubenswrapper[4933]: I0122 07:21:39.593517 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c72620312c235f6d03b9b6ffebfb275fa886d09df87a11efbb1016141b20f01f" Jan 22 07:21:39 crc kubenswrapper[4933]: I0122 07:21:39.594920 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-6182-account-create-update-5kkbv" Jan 22 07:21:39 crc kubenswrapper[4933]: I0122 07:21:39.595019 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-6182-account-create-update-5kkbv" event={"ID":"00647053-41cd-46da-8ca5-c85caba22fa1","Type":"ContainerDied","Data":"9a2d4dfd8e83536706263e4729bbbc039c1af8322172c3f8d07d3121d0dd1d1e"} Jan 22 07:21:39 crc kubenswrapper[4933]: I0122 07:21:39.595089 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9a2d4dfd8e83536706263e4729bbbc039c1af8322172c3f8d07d3121d0dd1d1e" Jan 22 07:21:39 crc kubenswrapper[4933]: I0122 07:21:39.600047 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vvkc9" event={"ID":"965afeae-5409-40b6-8826-b159a46d87cd","Type":"ContainerStarted","Data":"5a9c6effadefa149fa255fa2cb5e75ad7a527e0a8b7c1f0235a8bc5d52a6b4ee"} Jan 22 07:21:39 crc kubenswrapper[4933]: I0122 07:21:39.602220 4933 generic.go:334] "Generic (PLEG): container finished" podID="a04be798-513a-46bd-9433-d4f6e9ad5d91" containerID="a8998225796e4e79325a84b852097660293be808797760a2e94fb5dbc028dd84" exitCode=0 Jan 22 07:21:39 crc kubenswrapper[4933]: I0122 07:21:39.602265 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6szwq" event={"ID":"a04be798-513a-46bd-9433-d4f6e9ad5d91","Type":"ContainerDied","Data":"a8998225796e4e79325a84b852097660293be808797760a2e94fb5dbc028dd84"} Jan 22 07:21:40 crc kubenswrapper[4933]: I0122 07:21:40.639497 4933 generic.go:334] "Generic (PLEG): container finished" podID="965afeae-5409-40b6-8826-b159a46d87cd" containerID="5a9c6effadefa149fa255fa2cb5e75ad7a527e0a8b7c1f0235a8bc5d52a6b4ee" exitCode=0 Jan 22 07:21:40 crc kubenswrapper[4933]: I0122 07:21:40.639603 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vvkc9" event={"ID":"965afeae-5409-40b6-8826-b159a46d87cd","Type":"ContainerDied","Data":"5a9c6effadefa149fa255fa2cb5e75ad7a527e0a8b7c1f0235a8bc5d52a6b4ee"} Jan 22 07:21:40 crc kubenswrapper[4933]: I0122 07:21:40.642899 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6szwq" event={"ID":"a04be798-513a-46bd-9433-d4f6e9ad5d91","Type":"ContainerStarted","Data":"00a82d3f218d8e2f1a4405b8f6c4ede877973c028bd19cbf918b17220441afe5"} Jan 22 07:21:40 crc kubenswrapper[4933]: I0122 07:21:40.943049 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 07:21:40 crc kubenswrapper[4933]: I0122 07:21:40.943609 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 07:21:41 crc kubenswrapper[4933]: I0122 07:21:41.659285 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vvkc9" event={"ID":"965afeae-5409-40b6-8826-b159a46d87cd","Type":"ContainerStarted","Data":"414d0b928f546c68cc4ec3592b08d5ccd9f0e0a9f9b5f523e638c37bbec1c891"} Jan 22 07:21:41 crc kubenswrapper[4933]: I0122 07:21:41.693553 
4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-6szwq" podStartSLOduration=5.984143007 podStartE2EDuration="9.693531141s" podCreationTimestamp="2026-01-22 07:21:32 +0000 UTC" firstStartedPulling="2026-01-22 07:21:36.552897254 +0000 UTC m=+5744.390022627" lastFinishedPulling="2026-01-22 07:21:40.262285398 +0000 UTC m=+5748.099410761" observedRunningTime="2026-01-22 07:21:41.680518175 +0000 UTC m=+5749.517643538" watchObservedRunningTime="2026-01-22 07:21:41.693531141 +0000 UTC m=+5749.530656494" Jan 22 07:21:41 crc kubenswrapper[4933]: I0122 07:21:41.711874 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-vvkc9" podStartSLOduration=4.911534141 podStartE2EDuration="9.711851556s" podCreationTimestamp="2026-01-22 07:21:32 +0000 UTC" firstStartedPulling="2026-01-22 07:21:36.560308094 +0000 UTC m=+5744.397433457" lastFinishedPulling="2026-01-22 07:21:41.360625519 +0000 UTC m=+5749.197750872" observedRunningTime="2026-01-22 07:21:41.70334083 +0000 UTC m=+5749.540466193" watchObservedRunningTime="2026-01-22 07:21:41.711851556 +0000 UTC m=+5749.548976909" Jan 22 07:21:42 crc kubenswrapper[4933]: I0122 07:21:42.460518 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-vvkc9" Jan 22 07:21:42 crc kubenswrapper[4933]: I0122 07:21:42.460565 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-vvkc9" Jan 22 07:21:42 crc kubenswrapper[4933]: I0122 07:21:42.619497 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-6szwq" Jan 22 07:21:42 crc kubenswrapper[4933]: I0122 07:21:42.620261 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-6szwq" Jan 22 07:21:42 crc kubenswrapper[4933]: I0122 07:21:42.669092 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-6szwq" Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.270487 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-58c6cddd7-ks4fh"] Jan 22 07:21:43 crc kubenswrapper[4933]: E0122 07:21:43.271118 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0b51acb-e0fb-43c4-a533-bdd60e20b081" containerName="mariadb-database-create" Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.271135 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0b51acb-e0fb-43c4-a533-bdd60e20b081" containerName="mariadb-database-create" Jan 22 07:21:43 crc kubenswrapper[4933]: E0122 07:21:43.271156 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00647053-41cd-46da-8ca5-c85caba22fa1" containerName="mariadb-account-create-update" Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.271163 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="00647053-41cd-46da-8ca5-c85caba22fa1" containerName="mariadb-account-create-update" Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.271312 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="e0b51acb-e0fb-43c4-a533-bdd60e20b081" containerName="mariadb-database-create" Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.271327 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="00647053-41cd-46da-8ca5-c85caba22fa1" 
containerName="mariadb-account-create-update" Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.272228 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-58c6cddd7-ks4fh" Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.294153 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-xt5hm"] Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.295449 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-xt5hm" Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.299493 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.299786 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-tp55q" Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.300368 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.327637 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-58c6cddd7-ks4fh"] Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.347334 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-xt5hm"] Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.380018 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dt6ws\" (UniqueName: \"kubernetes.io/projected/f3f3b160-8f16-4d47-9658-893cc951620f-kube-api-access-dt6ws\") pod \"dnsmasq-dns-58c6cddd7-ks4fh\" (UID: \"f3f3b160-8f16-4d47-9658-893cc951620f\") " pod="openstack/dnsmasq-dns-58c6cddd7-ks4fh" Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.380088 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-clxwh\" (UniqueName: \"kubernetes.io/projected/adfa05f5-5e61-4cc6-8879-2d298697becf-kube-api-access-clxwh\") pod \"placement-db-sync-xt5hm\" (UID: \"adfa05f5-5e61-4cc6-8879-2d298697becf\") " pod="openstack/placement-db-sync-xt5hm" Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.380117 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f3f3b160-8f16-4d47-9658-893cc951620f-ovsdbserver-nb\") pod \"dnsmasq-dns-58c6cddd7-ks4fh\" (UID: \"f3f3b160-8f16-4d47-9658-893cc951620f\") " pod="openstack/dnsmasq-dns-58c6cddd7-ks4fh" Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.380138 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/adfa05f5-5e61-4cc6-8879-2d298697becf-config-data\") pod \"placement-db-sync-xt5hm\" (UID: \"adfa05f5-5e61-4cc6-8879-2d298697becf\") " pod="openstack/placement-db-sync-xt5hm" Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.380163 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/adfa05f5-5e61-4cc6-8879-2d298697becf-logs\") pod \"placement-db-sync-xt5hm\" (UID: \"adfa05f5-5e61-4cc6-8879-2d298697becf\") " pod="openstack/placement-db-sync-xt5hm" Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.380242 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/adfa05f5-5e61-4cc6-8879-2d298697becf-combined-ca-bundle\") pod \"placement-db-sync-xt5hm\" (UID: \"adfa05f5-5e61-4cc6-8879-2d298697becf\") " pod="openstack/placement-db-sync-xt5hm" Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.380287 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f3f3b160-8f16-4d47-9658-893cc951620f-dns-svc\") pod \"dnsmasq-dns-58c6cddd7-ks4fh\" (UID: \"f3f3b160-8f16-4d47-9658-893cc951620f\") " pod="openstack/dnsmasq-dns-58c6cddd7-ks4fh" Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.380407 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f3f3b160-8f16-4d47-9658-893cc951620f-ovsdbserver-sb\") pod \"dnsmasq-dns-58c6cddd7-ks4fh\" (UID: \"f3f3b160-8f16-4d47-9658-893cc951620f\") " pod="openstack/dnsmasq-dns-58c6cddd7-ks4fh" Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.380502 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f3f3b160-8f16-4d47-9658-893cc951620f-config\") pod \"dnsmasq-dns-58c6cddd7-ks4fh\" (UID: \"f3f3b160-8f16-4d47-9658-893cc951620f\") " pod="openstack/dnsmasq-dns-58c6cddd7-ks4fh" Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.380554 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/adfa05f5-5e61-4cc6-8879-2d298697becf-scripts\") pod \"placement-db-sync-xt5hm\" (UID: \"adfa05f5-5e61-4cc6-8879-2d298697becf\") " pod="openstack/placement-db-sync-xt5hm" Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.482533 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dt6ws\" (UniqueName: \"kubernetes.io/projected/f3f3b160-8f16-4d47-9658-893cc951620f-kube-api-access-dt6ws\") pod \"dnsmasq-dns-58c6cddd7-ks4fh\" (UID: \"f3f3b160-8f16-4d47-9658-893cc951620f\") " pod="openstack/dnsmasq-dns-58c6cddd7-ks4fh" Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.482606 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-clxwh\" (UniqueName: \"kubernetes.io/projected/adfa05f5-5e61-4cc6-8879-2d298697becf-kube-api-access-clxwh\") pod \"placement-db-sync-xt5hm\" (UID: \"adfa05f5-5e61-4cc6-8879-2d298697becf\") " pod="openstack/placement-db-sync-xt5hm" Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.482636 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f3f3b160-8f16-4d47-9658-893cc951620f-ovsdbserver-nb\") pod \"dnsmasq-dns-58c6cddd7-ks4fh\" (UID: \"f3f3b160-8f16-4d47-9658-893cc951620f\") " pod="openstack/dnsmasq-dns-58c6cddd7-ks4fh" Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.482657 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/adfa05f5-5e61-4cc6-8879-2d298697becf-config-data\") pod \"placement-db-sync-xt5hm\" (UID: \"adfa05f5-5e61-4cc6-8879-2d298697becf\") " pod="openstack/placement-db-sync-xt5hm" Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.482688 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/adfa05f5-5e61-4cc6-8879-2d298697becf-logs\") pod \"placement-db-sync-xt5hm\" (UID: \"adfa05f5-5e61-4cc6-8879-2d298697becf\") " pod="openstack/placement-db-sync-xt5hm" Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.482744 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/adfa05f5-5e61-4cc6-8879-2d298697becf-combined-ca-bundle\") pod \"placement-db-sync-xt5hm\" (UID: \"adfa05f5-5e61-4cc6-8879-2d298697becf\") " pod="openstack/placement-db-sync-xt5hm" Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.482790 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f3f3b160-8f16-4d47-9658-893cc951620f-dns-svc\") pod \"dnsmasq-dns-58c6cddd7-ks4fh\" (UID: \"f3f3b160-8f16-4d47-9658-893cc951620f\") " pod="openstack/dnsmasq-dns-58c6cddd7-ks4fh" Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.482815 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f3f3b160-8f16-4d47-9658-893cc951620f-ovsdbserver-sb\") pod \"dnsmasq-dns-58c6cddd7-ks4fh\" (UID: \"f3f3b160-8f16-4d47-9658-893cc951620f\") " pod="openstack/dnsmasq-dns-58c6cddd7-ks4fh" Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.482841 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f3f3b160-8f16-4d47-9658-893cc951620f-config\") pod \"dnsmasq-dns-58c6cddd7-ks4fh\" (UID: \"f3f3b160-8f16-4d47-9658-893cc951620f\") " pod="openstack/dnsmasq-dns-58c6cddd7-ks4fh" Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.482864 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/adfa05f5-5e61-4cc6-8879-2d298697becf-scripts\") pod \"placement-db-sync-xt5hm\" (UID: \"adfa05f5-5e61-4cc6-8879-2d298697becf\") " pod="openstack/placement-db-sync-xt5hm" Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.483491 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f3f3b160-8f16-4d47-9658-893cc951620f-ovsdbserver-nb\") pod \"dnsmasq-dns-58c6cddd7-ks4fh\" (UID: \"f3f3b160-8f16-4d47-9658-893cc951620f\") " pod="openstack/dnsmasq-dns-58c6cddd7-ks4fh" Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.483486 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/adfa05f5-5e61-4cc6-8879-2d298697becf-logs\") pod \"placement-db-sync-xt5hm\" (UID: \"adfa05f5-5e61-4cc6-8879-2d298697becf\") " pod="openstack/placement-db-sync-xt5hm" Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.484238 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f3f3b160-8f16-4d47-9658-893cc951620f-dns-svc\") pod \"dnsmasq-dns-58c6cddd7-ks4fh\" (UID: \"f3f3b160-8f16-4d47-9658-893cc951620f\") " pod="openstack/dnsmasq-dns-58c6cddd7-ks4fh" Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.484292 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f3f3b160-8f16-4d47-9658-893cc951620f-ovsdbserver-sb\") pod \"dnsmasq-dns-58c6cddd7-ks4fh\" (UID: \"f3f3b160-8f16-4d47-9658-893cc951620f\") " pod="openstack/dnsmasq-dns-58c6cddd7-ks4fh" Jan 22 
07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.484494 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f3f3b160-8f16-4d47-9658-893cc951620f-config\") pod \"dnsmasq-dns-58c6cddd7-ks4fh\" (UID: \"f3f3b160-8f16-4d47-9658-893cc951620f\") " pod="openstack/dnsmasq-dns-58c6cddd7-ks4fh" Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.489286 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/adfa05f5-5e61-4cc6-8879-2d298697becf-config-data\") pod \"placement-db-sync-xt5hm\" (UID: \"adfa05f5-5e61-4cc6-8879-2d298697becf\") " pod="openstack/placement-db-sync-xt5hm" Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.489837 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/adfa05f5-5e61-4cc6-8879-2d298697becf-combined-ca-bundle\") pod \"placement-db-sync-xt5hm\" (UID: \"adfa05f5-5e61-4cc6-8879-2d298697becf\") " pod="openstack/placement-db-sync-xt5hm" Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.499939 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/adfa05f5-5e61-4cc6-8879-2d298697becf-scripts\") pod \"placement-db-sync-xt5hm\" (UID: \"adfa05f5-5e61-4cc6-8879-2d298697becf\") " pod="openstack/placement-db-sync-xt5hm" Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.500601 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dt6ws\" (UniqueName: \"kubernetes.io/projected/f3f3b160-8f16-4d47-9658-893cc951620f-kube-api-access-dt6ws\") pod \"dnsmasq-dns-58c6cddd7-ks4fh\" (UID: \"f3f3b160-8f16-4d47-9658-893cc951620f\") " pod="openstack/dnsmasq-dns-58c6cddd7-ks4fh" Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.501912 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-clxwh\" (UniqueName: \"kubernetes.io/projected/adfa05f5-5e61-4cc6-8879-2d298697becf-kube-api-access-clxwh\") pod \"placement-db-sync-xt5hm\" (UID: \"adfa05f5-5e61-4cc6-8879-2d298697becf\") " pod="openstack/placement-db-sync-xt5hm" Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.521758 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-vvkc9" podUID="965afeae-5409-40b6-8826-b159a46d87cd" containerName="registry-server" probeResult="failure" output=< Jan 22 07:21:43 crc kubenswrapper[4933]: timeout: failed to connect service ":50051" within 1s Jan 22 07:21:43 crc kubenswrapper[4933]: > Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.617932 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-58c6cddd7-ks4fh" Jan 22 07:21:43 crc kubenswrapper[4933]: I0122 07:21:43.658948 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-xt5hm" Jan 22 07:21:44 crc kubenswrapper[4933]: I0122 07:21:44.135635 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-58c6cddd7-ks4fh"] Jan 22 07:21:44 crc kubenswrapper[4933]: W0122 07:21:44.147223 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf3f3b160_8f16_4d47_9658_893cc951620f.slice/crio-3237ce6bba4795f5c7ff53809cc7b7c4efa69f36e2d7b9f7ca71e8838c83f62c WatchSource:0}: Error finding container 3237ce6bba4795f5c7ff53809cc7b7c4efa69f36e2d7b9f7ca71e8838c83f62c: Status 404 returned error can't find the container with id 3237ce6bba4795f5c7ff53809cc7b7c4efa69f36e2d7b9f7ca71e8838c83f62c Jan 22 07:21:44 crc kubenswrapper[4933]: I0122 07:21:44.263777 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-xt5hm"] Jan 22 07:21:44 crc kubenswrapper[4933]: W0122 07:21:44.265623 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podadfa05f5_5e61_4cc6_8879_2d298697becf.slice/crio-573be4b8310d884773708bb84a159c622b93ac53e6e12a82ecd2d1aefd67ea58 WatchSource:0}: Error finding container 573be4b8310d884773708bb84a159c622b93ac53e6e12a82ecd2d1aefd67ea58: Status 404 returned error can't find the container with id 573be4b8310d884773708bb84a159c622b93ac53e6e12a82ecd2d1aefd67ea58 Jan 22 07:21:44 crc kubenswrapper[4933]: I0122 07:21:44.696099 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-xt5hm" event={"ID":"adfa05f5-5e61-4cc6-8879-2d298697becf","Type":"ContainerStarted","Data":"573be4b8310d884773708bb84a159c622b93ac53e6e12a82ecd2d1aefd67ea58"} Jan 22 07:21:44 crc kubenswrapper[4933]: I0122 07:21:44.697567 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58c6cddd7-ks4fh" event={"ID":"f3f3b160-8f16-4d47-9658-893cc951620f","Type":"ContainerStarted","Data":"3237ce6bba4795f5c7ff53809cc7b7c4efa69f36e2d7b9f7ca71e8838c83f62c"} Jan 22 07:21:46 crc kubenswrapper[4933]: I0122 07:21:46.714403 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58c6cddd7-ks4fh" event={"ID":"f3f3b160-8f16-4d47-9658-893cc951620f","Type":"ContainerStarted","Data":"e6aa2a7025bc620a28af97bc40861c665624319305ac963a438363c949aaad6a"} Jan 22 07:21:47 crc kubenswrapper[4933]: I0122 07:21:47.722463 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-xt5hm" event={"ID":"adfa05f5-5e61-4cc6-8879-2d298697becf","Type":"ContainerStarted","Data":"25bd4394692a554f3012bc97b2acde6aed5e4f34547a4eee1d724b8560e9c010"} Jan 22 07:21:47 crc kubenswrapper[4933]: I0122 07:21:47.724904 4933 generic.go:334] "Generic (PLEG): container finished" podID="f3f3b160-8f16-4d47-9658-893cc951620f" containerID="e6aa2a7025bc620a28af97bc40861c665624319305ac963a438363c949aaad6a" exitCode=0 Jan 22 07:21:47 crc kubenswrapper[4933]: I0122 07:21:47.724932 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58c6cddd7-ks4fh" event={"ID":"f3f3b160-8f16-4d47-9658-893cc951620f","Type":"ContainerDied","Data":"e6aa2a7025bc620a28af97bc40861c665624319305ac963a438363c949aaad6a"} Jan 22 07:21:47 crc kubenswrapper[4933]: I0122 07:21:47.756694 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-xt5hm" podStartSLOduration=4.756674006 podStartE2EDuration="4.756674006s" 
podCreationTimestamp="2026-01-22 07:21:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:21:47.749384929 +0000 UTC m=+5755.586510322" watchObservedRunningTime="2026-01-22 07:21:47.756674006 +0000 UTC m=+5755.593799359" Jan 22 07:21:48 crc kubenswrapper[4933]: I0122 07:21:48.734456 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58c6cddd7-ks4fh" event={"ID":"f3f3b160-8f16-4d47-9658-893cc951620f","Type":"ContainerStarted","Data":"9bf50eb554c05342fca0f27b2aea099b3dd6af03e05946538ce521661f68de3b"} Jan 22 07:21:48 crc kubenswrapper[4933]: I0122 07:21:48.759260 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-58c6cddd7-ks4fh" podStartSLOduration=5.759237338 podStartE2EDuration="5.759237338s" podCreationTimestamp="2026-01-22 07:21:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:21:48.75643653 +0000 UTC m=+5756.593561903" watchObservedRunningTime="2026-01-22 07:21:48.759237338 +0000 UTC m=+5756.596362691" Jan 22 07:21:49 crc kubenswrapper[4933]: I0122 07:21:49.745069 4933 generic.go:334] "Generic (PLEG): container finished" podID="adfa05f5-5e61-4cc6-8879-2d298697becf" containerID="25bd4394692a554f3012bc97b2acde6aed5e4f34547a4eee1d724b8560e9c010" exitCode=0 Jan 22 07:21:49 crc kubenswrapper[4933]: I0122 07:21:49.745128 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-xt5hm" event={"ID":"adfa05f5-5e61-4cc6-8879-2d298697becf","Type":"ContainerDied","Data":"25bd4394692a554f3012bc97b2acde6aed5e4f34547a4eee1d724b8560e9c010"} Jan 22 07:21:49 crc kubenswrapper[4933]: I0122 07:21:49.745964 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-58c6cddd7-ks4fh" Jan 22 07:21:51 crc kubenswrapper[4933]: I0122 07:21:51.124127 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-xt5hm" Jan 22 07:21:51 crc kubenswrapper[4933]: I0122 07:21:51.232056 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-clxwh\" (UniqueName: \"kubernetes.io/projected/adfa05f5-5e61-4cc6-8879-2d298697becf-kube-api-access-clxwh\") pod \"adfa05f5-5e61-4cc6-8879-2d298697becf\" (UID: \"adfa05f5-5e61-4cc6-8879-2d298697becf\") " Jan 22 07:21:51 crc kubenswrapper[4933]: I0122 07:21:51.232190 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/adfa05f5-5e61-4cc6-8879-2d298697becf-logs\") pod \"adfa05f5-5e61-4cc6-8879-2d298697becf\" (UID: \"adfa05f5-5e61-4cc6-8879-2d298697becf\") " Jan 22 07:21:51 crc kubenswrapper[4933]: I0122 07:21:51.232367 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/adfa05f5-5e61-4cc6-8879-2d298697becf-config-data\") pod \"adfa05f5-5e61-4cc6-8879-2d298697becf\" (UID: \"adfa05f5-5e61-4cc6-8879-2d298697becf\") " Jan 22 07:21:51 crc kubenswrapper[4933]: I0122 07:21:51.232471 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/adfa05f5-5e61-4cc6-8879-2d298697becf-scripts\") pod \"adfa05f5-5e61-4cc6-8879-2d298697becf\" (UID: \"adfa05f5-5e61-4cc6-8879-2d298697becf\") " Jan 22 07:21:51 crc kubenswrapper[4933]: I0122 07:21:51.232517 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/adfa05f5-5e61-4cc6-8879-2d298697becf-combined-ca-bundle\") pod \"adfa05f5-5e61-4cc6-8879-2d298697becf\" (UID: \"adfa05f5-5e61-4cc6-8879-2d298697becf\") " Jan 22 07:21:51 crc kubenswrapper[4933]: I0122 07:21:51.232616 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/adfa05f5-5e61-4cc6-8879-2d298697becf-logs" (OuterVolumeSpecName: "logs") pod "adfa05f5-5e61-4cc6-8879-2d298697becf" (UID: "adfa05f5-5e61-4cc6-8879-2d298697becf"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:21:51 crc kubenswrapper[4933]: I0122 07:21:51.232861 4933 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/adfa05f5-5e61-4cc6-8879-2d298697becf-logs\") on node \"crc\" DevicePath \"\"" Jan 22 07:21:51 crc kubenswrapper[4933]: I0122 07:21:51.238307 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/adfa05f5-5e61-4cc6-8879-2d298697becf-scripts" (OuterVolumeSpecName: "scripts") pod "adfa05f5-5e61-4cc6-8879-2d298697becf" (UID: "adfa05f5-5e61-4cc6-8879-2d298697becf"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:21:51 crc kubenswrapper[4933]: I0122 07:21:51.238516 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/adfa05f5-5e61-4cc6-8879-2d298697becf-kube-api-access-clxwh" (OuterVolumeSpecName: "kube-api-access-clxwh") pod "adfa05f5-5e61-4cc6-8879-2d298697becf" (UID: "adfa05f5-5e61-4cc6-8879-2d298697becf"). InnerVolumeSpecName "kube-api-access-clxwh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:21:51 crc kubenswrapper[4933]: I0122 07:21:51.263870 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/adfa05f5-5e61-4cc6-8879-2d298697becf-config-data" (OuterVolumeSpecName: "config-data") pod "adfa05f5-5e61-4cc6-8879-2d298697becf" (UID: "adfa05f5-5e61-4cc6-8879-2d298697becf"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:21:51 crc kubenswrapper[4933]: I0122 07:21:51.272895 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/adfa05f5-5e61-4cc6-8879-2d298697becf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "adfa05f5-5e61-4cc6-8879-2d298697becf" (UID: "adfa05f5-5e61-4cc6-8879-2d298697becf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:21:51 crc kubenswrapper[4933]: I0122 07:21:51.334042 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/adfa05f5-5e61-4cc6-8879-2d298697becf-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 07:21:51 crc kubenswrapper[4933]: I0122 07:21:51.334655 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/adfa05f5-5e61-4cc6-8879-2d298697becf-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:21:51 crc kubenswrapper[4933]: I0122 07:21:51.334710 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/adfa05f5-5e61-4cc6-8879-2d298697becf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:21:51 crc kubenswrapper[4933]: I0122 07:21:51.334725 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-clxwh\" (UniqueName: \"kubernetes.io/projected/adfa05f5-5e61-4cc6-8879-2d298697becf-kube-api-access-clxwh\") on node \"crc\" DevicePath \"\"" Jan 22 07:21:51 crc kubenswrapper[4933]: I0122 07:21:51.765396 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-xt5hm" event={"ID":"adfa05f5-5e61-4cc6-8879-2d298697becf","Type":"ContainerDied","Data":"573be4b8310d884773708bb84a159c622b93ac53e6e12a82ecd2d1aefd67ea58"} Jan 22 07:21:51 crc kubenswrapper[4933]: I0122 07:21:51.765447 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="573be4b8310d884773708bb84a159c622b93ac53e6e12a82ecd2d1aefd67ea58" Jan 22 07:21:51 crc kubenswrapper[4933]: I0122 07:21:51.765508 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-xt5hm" Jan 22 07:21:51 crc kubenswrapper[4933]: I0122 07:21:51.851449 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-56899cfc48-xqft6"] Jan 22 07:21:51 crc kubenswrapper[4933]: E0122 07:21:51.852253 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="adfa05f5-5e61-4cc6-8879-2d298697becf" containerName="placement-db-sync" Jan 22 07:21:51 crc kubenswrapper[4933]: I0122 07:21:51.852278 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="adfa05f5-5e61-4cc6-8879-2d298697becf" containerName="placement-db-sync" Jan 22 07:21:51 crc kubenswrapper[4933]: I0122 07:21:51.852516 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="adfa05f5-5e61-4cc6-8879-2d298697becf" containerName="placement-db-sync" Jan 22 07:21:51 crc kubenswrapper[4933]: I0122 07:21:51.854184 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-56899cfc48-xqft6" Jan 22 07:21:51 crc kubenswrapper[4933]: I0122 07:21:51.858267 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Jan 22 07:21:51 crc kubenswrapper[4933]: I0122 07:21:51.860269 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Jan 22 07:21:51 crc kubenswrapper[4933]: I0122 07:21:51.864375 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Jan 22 07:21:51 crc kubenswrapper[4933]: I0122 07:21:51.867301 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-tp55q" Jan 22 07:21:51 crc kubenswrapper[4933]: I0122 07:21:51.867746 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Jan 22 07:21:51 crc kubenswrapper[4933]: I0122 07:21:51.868967 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-56899cfc48-xqft6"] Jan 22 07:21:51 crc kubenswrapper[4933]: I0122 07:21:51.962697 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54e27225-7863-4049-9383-3c57a391d7e0-config-data\") pod \"placement-56899cfc48-xqft6\" (UID: \"54e27225-7863-4049-9383-3c57a391d7e0\") " pod="openstack/placement-56899cfc48-xqft6" Jan 22 07:21:51 crc kubenswrapper[4933]: I0122 07:21:51.962755 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/54e27225-7863-4049-9383-3c57a391d7e0-public-tls-certs\") pod \"placement-56899cfc48-xqft6\" (UID: \"54e27225-7863-4049-9383-3c57a391d7e0\") " pod="openstack/placement-56899cfc48-xqft6" Jan 22 07:21:51 crc kubenswrapper[4933]: I0122 07:21:51.962790 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/54e27225-7863-4049-9383-3c57a391d7e0-logs\") pod \"placement-56899cfc48-xqft6\" (UID: \"54e27225-7863-4049-9383-3c57a391d7e0\") " pod="openstack/placement-56899cfc48-xqft6" Jan 22 07:21:51 crc kubenswrapper[4933]: I0122 07:21:51.962853 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54e27225-7863-4049-9383-3c57a391d7e0-combined-ca-bundle\") pod \"placement-56899cfc48-xqft6\" (UID: 
\"54e27225-7863-4049-9383-3c57a391d7e0\") " pod="openstack/placement-56899cfc48-xqft6" Jan 22 07:21:51 crc kubenswrapper[4933]: I0122 07:21:51.962919 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/54e27225-7863-4049-9383-3c57a391d7e0-internal-tls-certs\") pod \"placement-56899cfc48-xqft6\" (UID: \"54e27225-7863-4049-9383-3c57a391d7e0\") " pod="openstack/placement-56899cfc48-xqft6" Jan 22 07:21:51 crc kubenswrapper[4933]: I0122 07:21:51.963241 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6wp49\" (UniqueName: \"kubernetes.io/projected/54e27225-7863-4049-9383-3c57a391d7e0-kube-api-access-6wp49\") pod \"placement-56899cfc48-xqft6\" (UID: \"54e27225-7863-4049-9383-3c57a391d7e0\") " pod="openstack/placement-56899cfc48-xqft6" Jan 22 07:21:51 crc kubenswrapper[4933]: I0122 07:21:51.963322 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/54e27225-7863-4049-9383-3c57a391d7e0-scripts\") pod \"placement-56899cfc48-xqft6\" (UID: \"54e27225-7863-4049-9383-3c57a391d7e0\") " pod="openstack/placement-56899cfc48-xqft6" Jan 22 07:21:52 crc kubenswrapper[4933]: I0122 07:21:52.065711 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54e27225-7863-4049-9383-3c57a391d7e0-config-data\") pod \"placement-56899cfc48-xqft6\" (UID: \"54e27225-7863-4049-9383-3c57a391d7e0\") " pod="openstack/placement-56899cfc48-xqft6" Jan 22 07:21:52 crc kubenswrapper[4933]: I0122 07:21:52.065771 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/54e27225-7863-4049-9383-3c57a391d7e0-public-tls-certs\") pod \"placement-56899cfc48-xqft6\" (UID: \"54e27225-7863-4049-9383-3c57a391d7e0\") " pod="openstack/placement-56899cfc48-xqft6" Jan 22 07:21:52 crc kubenswrapper[4933]: I0122 07:21:52.066545 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/54e27225-7863-4049-9383-3c57a391d7e0-logs\") pod \"placement-56899cfc48-xqft6\" (UID: \"54e27225-7863-4049-9383-3c57a391d7e0\") " pod="openstack/placement-56899cfc48-xqft6" Jan 22 07:21:52 crc kubenswrapper[4933]: I0122 07:21:52.066622 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54e27225-7863-4049-9383-3c57a391d7e0-combined-ca-bundle\") pod \"placement-56899cfc48-xqft6\" (UID: \"54e27225-7863-4049-9383-3c57a391d7e0\") " pod="openstack/placement-56899cfc48-xqft6" Jan 22 07:21:52 crc kubenswrapper[4933]: I0122 07:21:52.066786 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/54e27225-7863-4049-9383-3c57a391d7e0-internal-tls-certs\") pod \"placement-56899cfc48-xqft6\" (UID: \"54e27225-7863-4049-9383-3c57a391d7e0\") " pod="openstack/placement-56899cfc48-xqft6" Jan 22 07:21:52 crc kubenswrapper[4933]: I0122 07:21:52.066983 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6wp49\" (UniqueName: \"kubernetes.io/projected/54e27225-7863-4049-9383-3c57a391d7e0-kube-api-access-6wp49\") pod \"placement-56899cfc48-xqft6\" (UID: 
\"54e27225-7863-4049-9383-3c57a391d7e0\") " pod="openstack/placement-56899cfc48-xqft6"
Jan 22 07:21:52 crc kubenswrapper[4933]: I0122 07:21:52.067047 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/54e27225-7863-4049-9383-3c57a391d7e0-scripts\") pod \"placement-56899cfc48-xqft6\" (UID: \"54e27225-7863-4049-9383-3c57a391d7e0\") " pod="openstack/placement-56899cfc48-xqft6"
Jan 22 07:21:52 crc kubenswrapper[4933]: I0122 07:21:52.068052 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/54e27225-7863-4049-9383-3c57a391d7e0-logs\") pod \"placement-56899cfc48-xqft6\" (UID: \"54e27225-7863-4049-9383-3c57a391d7e0\") " pod="openstack/placement-56899cfc48-xqft6"
Jan 22 07:21:52 crc kubenswrapper[4933]: I0122 07:21:52.069754 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/54e27225-7863-4049-9383-3c57a391d7e0-public-tls-certs\") pod \"placement-56899cfc48-xqft6\" (UID: \"54e27225-7863-4049-9383-3c57a391d7e0\") " pod="openstack/placement-56899cfc48-xqft6"
Jan 22 07:21:52 crc kubenswrapper[4933]: I0122 07:21:52.070614 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/54e27225-7863-4049-9383-3c57a391d7e0-scripts\") pod \"placement-56899cfc48-xqft6\" (UID: \"54e27225-7863-4049-9383-3c57a391d7e0\") " pod="openstack/placement-56899cfc48-xqft6"
Jan 22 07:21:52 crc kubenswrapper[4933]: I0122 07:21:52.070698 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54e27225-7863-4049-9383-3c57a391d7e0-combined-ca-bundle\") pod \"placement-56899cfc48-xqft6\" (UID: \"54e27225-7863-4049-9383-3c57a391d7e0\") " pod="openstack/placement-56899cfc48-xqft6"
Jan 22 07:21:52 crc kubenswrapper[4933]: I0122 07:21:52.070892 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/54e27225-7863-4049-9383-3c57a391d7e0-internal-tls-certs\") pod \"placement-56899cfc48-xqft6\" (UID: \"54e27225-7863-4049-9383-3c57a391d7e0\") " pod="openstack/placement-56899cfc48-xqft6"
Jan 22 07:21:52 crc kubenswrapper[4933]: I0122 07:21:52.089310 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54e27225-7863-4049-9383-3c57a391d7e0-config-data\") pod \"placement-56899cfc48-xqft6\" (UID: \"54e27225-7863-4049-9383-3c57a391d7e0\") " pod="openstack/placement-56899cfc48-xqft6"
Jan 22 07:21:52 crc kubenswrapper[4933]: I0122 07:21:52.098790 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6wp49\" (UniqueName: \"kubernetes.io/projected/54e27225-7863-4049-9383-3c57a391d7e0-kube-api-access-6wp49\") pod \"placement-56899cfc48-xqft6\" (UID: \"54e27225-7863-4049-9383-3c57a391d7e0\") " pod="openstack/placement-56899cfc48-xqft6"
Jan 22 07:21:52 crc kubenswrapper[4933]: I0122 07:21:52.194320 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-56899cfc48-xqft6"
Jan 22 07:21:52 crc kubenswrapper[4933]: I0122 07:21:52.516138 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-vvkc9"
Jan 22 07:21:52 crc kubenswrapper[4933]: I0122 07:21:52.565402 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-vvkc9"
Jan 22 07:21:52 crc kubenswrapper[4933]: I0122 07:21:52.716943 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-56899cfc48-xqft6"]
Jan 22 07:21:52 crc kubenswrapper[4933]: I0122 07:21:52.765244 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vvkc9"]
Jan 22 07:21:52 crc kubenswrapper[4933]: I0122 07:21:52.781483 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-6szwq"
Jan 22 07:21:52 crc kubenswrapper[4933]: I0122 07:21:52.810206 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-56899cfc48-xqft6" event={"ID":"54e27225-7863-4049-9383-3c57a391d7e0","Type":"ContainerStarted","Data":"490cd96b9577df71c86bfbe73c8f254916c7e1af1346e9d6f3faf4926b6d2fed"}
Jan 22 07:21:53 crc kubenswrapper[4933]: I0122 07:21:53.619283 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-58c6cddd7-ks4fh"
Jan 22 07:21:53 crc kubenswrapper[4933]: I0122 07:21:53.694189 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-77b79d864c-9jrkg"]
Jan 22 07:21:53 crc kubenswrapper[4933]: I0122 07:21:53.694518 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-77b79d864c-9jrkg" podUID="31f9bca1-a923-47bb-948e-10191d7f05f8" containerName="dnsmasq-dns" containerID="cri-o://ec9e06d8f8bfa760f366368f5598245487201f3a1f497c757fd46b32110f06ae" gracePeriod=10
Jan 22 07:21:53 crc kubenswrapper[4933]: I0122 07:21:53.824378 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-vvkc9" podUID="965afeae-5409-40b6-8826-b159a46d87cd" containerName="registry-server" containerID="cri-o://414d0b928f546c68cc4ec3592b08d5ccd9f0e0a9f9b5f523e638c37bbec1c891" gracePeriod=2
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.677014 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77b79d864c-9jrkg"
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.733428 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/31f9bca1-a923-47bb-948e-10191d7f05f8-config\") pod \"31f9bca1-a923-47bb-948e-10191d7f05f8\" (UID: \"31f9bca1-a923-47bb-948e-10191d7f05f8\") "
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.733538 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/31f9bca1-a923-47bb-948e-10191d7f05f8-ovsdbserver-sb\") pod \"31f9bca1-a923-47bb-948e-10191d7f05f8\" (UID: \"31f9bca1-a923-47bb-948e-10191d7f05f8\") "
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.733670 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/31f9bca1-a923-47bb-948e-10191d7f05f8-ovsdbserver-nb\") pod \"31f9bca1-a923-47bb-948e-10191d7f05f8\" (UID: \"31f9bca1-a923-47bb-948e-10191d7f05f8\") "
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.733745 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/31f9bca1-a923-47bb-948e-10191d7f05f8-dns-svc\") pod \"31f9bca1-a923-47bb-948e-10191d7f05f8\" (UID: \"31f9bca1-a923-47bb-948e-10191d7f05f8\") "
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.733794 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bt8bv\" (UniqueName: \"kubernetes.io/projected/31f9bca1-a923-47bb-948e-10191d7f05f8-kube-api-access-bt8bv\") pod \"31f9bca1-a923-47bb-948e-10191d7f05f8\" (UID: \"31f9bca1-a923-47bb-948e-10191d7f05f8\") "
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.739733 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31f9bca1-a923-47bb-948e-10191d7f05f8-kube-api-access-bt8bv" (OuterVolumeSpecName: "kube-api-access-bt8bv") pod "31f9bca1-a923-47bb-948e-10191d7f05f8" (UID: "31f9bca1-a923-47bb-948e-10191d7f05f8"). InnerVolumeSpecName "kube-api-access-bt8bv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.793892 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vvkc9"
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.794852 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31f9bca1-a923-47bb-948e-10191d7f05f8-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "31f9bca1-a923-47bb-948e-10191d7f05f8" (UID: "31f9bca1-a923-47bb-948e-10191d7f05f8"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.797090 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31f9bca1-a923-47bb-948e-10191d7f05f8-config" (OuterVolumeSpecName: "config") pod "31f9bca1-a923-47bb-948e-10191d7f05f8" (UID: "31f9bca1-a923-47bb-948e-10191d7f05f8"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.806500 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31f9bca1-a923-47bb-948e-10191d7f05f8-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "31f9bca1-a923-47bb-948e-10191d7f05f8" (UID: "31f9bca1-a923-47bb-948e-10191d7f05f8"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.818258 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31f9bca1-a923-47bb-948e-10191d7f05f8-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "31f9bca1-a923-47bb-948e-10191d7f05f8" (UID: "31f9bca1-a923-47bb-948e-10191d7f05f8"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.836811 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/965afeae-5409-40b6-8826-b159a46d87cd-catalog-content\") pod \"965afeae-5409-40b6-8826-b159a46d87cd\" (UID: \"965afeae-5409-40b6-8826-b159a46d87cd\") "
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.836905 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/965afeae-5409-40b6-8826-b159a46d87cd-utilities\") pod \"965afeae-5409-40b6-8826-b159a46d87cd\" (UID: \"965afeae-5409-40b6-8826-b159a46d87cd\") "
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.836990 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h7mrd\" (UniqueName: \"kubernetes.io/projected/965afeae-5409-40b6-8826-b159a46d87cd-kube-api-access-h7mrd\") pod \"965afeae-5409-40b6-8826-b159a46d87cd\" (UID: \"965afeae-5409-40b6-8826-b159a46d87cd\") "
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.837435 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/31f9bca1-a923-47bb-948e-10191d7f05f8-config\") on node \"crc\" DevicePath \"\""
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.837459 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/31f9bca1-a923-47bb-948e-10191d7f05f8-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.837476 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/31f9bca1-a923-47bb-948e-10191d7f05f8-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.837489 4933 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/31f9bca1-a923-47bb-948e-10191d7f05f8-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.837500 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bt8bv\" (UniqueName: \"kubernetes.io/projected/31f9bca1-a923-47bb-948e-10191d7f05f8-kube-api-access-bt8bv\") on node \"crc\" DevicePath \"\""
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.838031 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/965afeae-5409-40b6-8826-b159a46d87cd-utilities" (OuterVolumeSpecName: "utilities") pod "965afeae-5409-40b6-8826-b159a46d87cd" (UID: "965afeae-5409-40b6-8826-b159a46d87cd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.840256 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/965afeae-5409-40b6-8826-b159a46d87cd-kube-api-access-h7mrd" (OuterVolumeSpecName: "kube-api-access-h7mrd") pod "965afeae-5409-40b6-8826-b159a46d87cd" (UID: "965afeae-5409-40b6-8826-b159a46d87cd"). InnerVolumeSpecName "kube-api-access-h7mrd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.841590 4933 generic.go:334] "Generic (PLEG): container finished" podID="31f9bca1-a923-47bb-948e-10191d7f05f8" containerID="ec9e06d8f8bfa760f366368f5598245487201f3a1f497c757fd46b32110f06ae" exitCode=0
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.841671 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77b79d864c-9jrkg" event={"ID":"31f9bca1-a923-47bb-948e-10191d7f05f8","Type":"ContainerDied","Data":"ec9e06d8f8bfa760f366368f5598245487201f3a1f497c757fd46b32110f06ae"}
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.841702 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77b79d864c-9jrkg" event={"ID":"31f9bca1-a923-47bb-948e-10191d7f05f8","Type":"ContainerDied","Data":"5194c9ee47ee31d84630089e4ff5275a2d9557c69f955c84f8352e0e65233d24"}
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.841720 4933 scope.go:117] "RemoveContainer" containerID="ec9e06d8f8bfa760f366368f5598245487201f3a1f497c757fd46b32110f06ae"
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.841835 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77b79d864c-9jrkg"
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.864577 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-56899cfc48-xqft6" event={"ID":"54e27225-7863-4049-9383-3c57a391d7e0","Type":"ContainerStarted","Data":"0fd84dba43d14ddbed6497abb0c0c6e63c856cc8abbeab8b2e0f7c912a556269"}
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.864630 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-56899cfc48-xqft6" event={"ID":"54e27225-7863-4049-9383-3c57a391d7e0","Type":"ContainerStarted","Data":"d35afcbd0f6659bf2fc5d10214edef4de55f188443c8559616e4578f0fdb3ad5"}
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.866252 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-56899cfc48-xqft6"
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.866577 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-56899cfc48-xqft6"
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.873504 4933 generic.go:334] "Generic (PLEG): container finished" podID="965afeae-5409-40b6-8826-b159a46d87cd" containerID="414d0b928f546c68cc4ec3592b08d5ccd9f0e0a9f9b5f523e638c37bbec1c891" exitCode=0
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.873546 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vvkc9" event={"ID":"965afeae-5409-40b6-8826-b159a46d87cd","Type":"ContainerDied","Data":"414d0b928f546c68cc4ec3592b08d5ccd9f0e0a9f9b5f523e638c37bbec1c891"}
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.873570 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vvkc9" event={"ID":"965afeae-5409-40b6-8826-b159a46d87cd","Type":"ContainerDied","Data":"43e32dc758f0b92c3de2900df438676f9285fee6be67ef05406d65b779521a0d"}
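[Editor's note, not kubelet output: the entries above all share klog's standard header layout (severity+date, wall-clock time, PID, source file:line, then a structured message). A minimal Go sketch, assuming only the standard library, that splits one of these lines into its fields; the regex and names are illustrative, not kubelet code:

package main

import (
	"fmt"
	"regexp"
)

// klogRe matches headers such as:
//   I0122 07:21:54.841671 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" ...
// Groups: severity, MMDD date, time, PID, source file:line, message.
var klogRe = regexp.MustCompile(`^([IWEF])(\d{4}) (\d{2}:\d{2}:\d{2}\.\d+)\s+(\d+) ([^ \]]+)\] (.*)$`)

func main() {
	line := `I0122 07:21:52.194320 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-56899cfc48-xqft6"`
	if m := klogRe.FindStringSubmatch(line); m != nil {
		fmt.Printf("sev=%s date=%s time=%s pid=%s src=%s\nmsg=%s\n", m[1], m[2], m[3], m[4], m[5], m[6])
	}
}

End of note.]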
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.873738 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vvkc9"
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.888669 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-56899cfc48-xqft6" podStartSLOduration=3.888649446 podStartE2EDuration="3.888649446s" podCreationTimestamp="2026-01-22 07:21:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:21:54.883288126 +0000 UTC m=+5762.720413479" watchObservedRunningTime="2026-01-22 07:21:54.888649446 +0000 UTC m=+5762.725774799"
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.901036 4933 scope.go:117] "RemoveContainer" containerID="69deb8ecabb767fe1a56c94fcf88ccd53fd8ebbf1c701dac93c6ea7dae10f8aa"
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.926150 4933 scope.go:117] "RemoveContainer" containerID="ec9e06d8f8bfa760f366368f5598245487201f3a1f497c757fd46b32110f06ae"
Jan 22 07:21:54 crc kubenswrapper[4933]: E0122 07:21:54.926703 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ec9e06d8f8bfa760f366368f5598245487201f3a1f497c757fd46b32110f06ae\": container with ID starting with ec9e06d8f8bfa760f366368f5598245487201f3a1f497c757fd46b32110f06ae not found: ID does not exist" containerID="ec9e06d8f8bfa760f366368f5598245487201f3a1f497c757fd46b32110f06ae"
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.926758 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ec9e06d8f8bfa760f366368f5598245487201f3a1f497c757fd46b32110f06ae"} err="failed to get container status \"ec9e06d8f8bfa760f366368f5598245487201f3a1f497c757fd46b32110f06ae\": rpc error: code = NotFound desc = could not find container \"ec9e06d8f8bfa760f366368f5598245487201f3a1f497c757fd46b32110f06ae\": container with ID starting with ec9e06d8f8bfa760f366368f5598245487201f3a1f497c757fd46b32110f06ae not found: ID does not exist"
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.926791 4933 scope.go:117] "RemoveContainer" containerID="69deb8ecabb767fe1a56c94fcf88ccd53fd8ebbf1c701dac93c6ea7dae10f8aa"
Jan 22 07:21:54 crc kubenswrapper[4933]: E0122 07:21:54.927245 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"69deb8ecabb767fe1a56c94fcf88ccd53fd8ebbf1c701dac93c6ea7dae10f8aa\": container with ID starting with 69deb8ecabb767fe1a56c94fcf88ccd53fd8ebbf1c701dac93c6ea7dae10f8aa not found: ID does not exist" containerID="69deb8ecabb767fe1a56c94fcf88ccd53fd8ebbf1c701dac93c6ea7dae10f8aa"
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.927378 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"69deb8ecabb767fe1a56c94fcf88ccd53fd8ebbf1c701dac93c6ea7dae10f8aa"} err="failed to get container status \"69deb8ecabb767fe1a56c94fcf88ccd53fd8ebbf1c701dac93c6ea7dae10f8aa\": rpc error: code = NotFound desc = could not find container \"69deb8ecabb767fe1a56c94fcf88ccd53fd8ebbf1c701dac93c6ea7dae10f8aa\": container with ID starting with 69deb8ecabb767fe1a56c94fcf88ccd53fd8ebbf1c701dac93c6ea7dae10f8aa not found: ID does not exist"
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.927398 4933 scope.go:117] "RemoveContainer" containerID="414d0b928f546c68cc4ec3592b08d5ccd9f0e0a9f9b5f523e638c37bbec1c891"
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.931434 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-77b79d864c-9jrkg"]
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.938861 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-77b79d864c-9jrkg"]
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.939289 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/965afeae-5409-40b6-8826-b159a46d87cd-utilities\") on node \"crc\" DevicePath \"\""
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.939335 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h7mrd\" (UniqueName: \"kubernetes.io/projected/965afeae-5409-40b6-8826-b159a46d87cd-kube-api-access-h7mrd\") on node \"crc\" DevicePath \"\""
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.944845 4933 scope.go:117] "RemoveContainer" containerID="5a9c6effadefa149fa255fa2cb5e75ad7a527e0a8b7c1f0235a8bc5d52a6b4ee"
Jan 22 07:21:54 crc kubenswrapper[4933]: I0122 07:21:54.981108 4933 scope.go:117] "RemoveContainer" containerID="ebdb1207f6c10c2fa8a403ace8b344a0c8ed48ed3218c4859d2f03dc4cad1889"
Jan 22 07:21:55 crc kubenswrapper[4933]: I0122 07:21:55.006919 4933 scope.go:117] "RemoveContainer" containerID="414d0b928f546c68cc4ec3592b08d5ccd9f0e0a9f9b5f523e638c37bbec1c891"
Jan 22 07:21:55 crc kubenswrapper[4933]: E0122 07:21:55.008433 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"414d0b928f546c68cc4ec3592b08d5ccd9f0e0a9f9b5f523e638c37bbec1c891\": container with ID starting with 414d0b928f546c68cc4ec3592b08d5ccd9f0e0a9f9b5f523e638c37bbec1c891 not found: ID does not exist" containerID="414d0b928f546c68cc4ec3592b08d5ccd9f0e0a9f9b5f523e638c37bbec1c891"
Jan 22 07:21:55 crc kubenswrapper[4933]: I0122 07:21:55.008465 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"414d0b928f546c68cc4ec3592b08d5ccd9f0e0a9f9b5f523e638c37bbec1c891"} err="failed to get container status \"414d0b928f546c68cc4ec3592b08d5ccd9f0e0a9f9b5f523e638c37bbec1c891\": rpc error: code = NotFound desc = could not find container \"414d0b928f546c68cc4ec3592b08d5ccd9f0e0a9f9b5f523e638c37bbec1c891\": container with ID starting with 414d0b928f546c68cc4ec3592b08d5ccd9f0e0a9f9b5f523e638c37bbec1c891 not found: ID does not exist"
Jan 22 07:21:55 crc kubenswrapper[4933]: I0122 07:21:55.008491 4933 scope.go:117] "RemoveContainer" containerID="5a9c6effadefa149fa255fa2cb5e75ad7a527e0a8b7c1f0235a8bc5d52a6b4ee"
Jan 22 07:21:55 crc kubenswrapper[4933]: E0122 07:21:55.008752 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5a9c6effadefa149fa255fa2cb5e75ad7a527e0a8b7c1f0235a8bc5d52a6b4ee\": container with ID starting with 5a9c6effadefa149fa255fa2cb5e75ad7a527e0a8b7c1f0235a8bc5d52a6b4ee not found: ID does not exist" containerID="5a9c6effadefa149fa255fa2cb5e75ad7a527e0a8b7c1f0235a8bc5d52a6b4ee"
Jan 22 07:21:55 crc kubenswrapper[4933]: I0122 07:21:55.008771 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a9c6effadefa149fa255fa2cb5e75ad7a527e0a8b7c1f0235a8bc5d52a6b4ee"} err="failed to get container status \"5a9c6effadefa149fa255fa2cb5e75ad7a527e0a8b7c1f0235a8bc5d52a6b4ee\": rpc error: code = NotFound desc = could not find container \"5a9c6effadefa149fa255fa2cb5e75ad7a527e0a8b7c1f0235a8bc5d52a6b4ee\": container with ID starting with 5a9c6effadefa149fa255fa2cb5e75ad7a527e0a8b7c1f0235a8bc5d52a6b4ee not found: ID does not exist"
Jan 22 07:21:55 crc kubenswrapper[4933]: I0122 07:21:55.008788 4933 scope.go:117] "RemoveContainer" containerID="ebdb1207f6c10c2fa8a403ace8b344a0c8ed48ed3218c4859d2f03dc4cad1889"
Jan 22 07:21:55 crc kubenswrapper[4933]: E0122 07:21:55.010129 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ebdb1207f6c10c2fa8a403ace8b344a0c8ed48ed3218c4859d2f03dc4cad1889\": container with ID starting with ebdb1207f6c10c2fa8a403ace8b344a0c8ed48ed3218c4859d2f03dc4cad1889 not found: ID does not exist" containerID="ebdb1207f6c10c2fa8a403ace8b344a0c8ed48ed3218c4859d2f03dc4cad1889"
Jan 22 07:21:55 crc kubenswrapper[4933]: I0122 07:21:55.010152 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ebdb1207f6c10c2fa8a403ace8b344a0c8ed48ed3218c4859d2f03dc4cad1889"} err="failed to get container status \"ebdb1207f6c10c2fa8a403ace8b344a0c8ed48ed3218c4859d2f03dc4cad1889\": rpc error: code = NotFound desc = could not find container \"ebdb1207f6c10c2fa8a403ace8b344a0c8ed48ed3218c4859d2f03dc4cad1889\": container with ID starting with ebdb1207f6c10c2fa8a403ace8b344a0c8ed48ed3218c4859d2f03dc4cad1889 not found: ID does not exist"
Jan 22 07:21:55 crc kubenswrapper[4933]: I0122 07:21:55.035220 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/965afeae-5409-40b6-8826-b159a46d87cd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "965afeae-5409-40b6-8826-b159a46d87cd" (UID: "965afeae-5409-40b6-8826-b159a46d87cd"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 07:21:55 crc kubenswrapper[4933]: I0122 07:21:55.041421 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/965afeae-5409-40b6-8826-b159a46d87cd-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 22 07:21:55 crc kubenswrapper[4933]: I0122 07:21:55.159479 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6szwq"]
Jan 22 07:21:55 crc kubenswrapper[4933]: I0122 07:21:55.159737 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-6szwq" podUID="a04be798-513a-46bd-9433-d4f6e9ad5d91" containerName="registry-server" containerID="cri-o://00a82d3f218d8e2f1a4405b8f6c4ede877973c028bd19cbf918b17220441afe5" gracePeriod=2
Jan 22 07:21:55 crc kubenswrapper[4933]: I0122 07:21:55.209137 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vvkc9"]
Jan 22 07:21:55 crc kubenswrapper[4933]: I0122 07:21:55.217718 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-vvkc9"]
Jan 22 07:21:55 crc kubenswrapper[4933]: I0122 07:21:55.737593 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6szwq"
Jan 22 07:21:55 crc kubenswrapper[4933]: I0122 07:21:55.855306 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a04be798-513a-46bd-9433-d4f6e9ad5d91-utilities\") pod \"a04be798-513a-46bd-9433-d4f6e9ad5d91\" (UID: \"a04be798-513a-46bd-9433-d4f6e9ad5d91\") "
Jan 22 07:21:55 crc kubenswrapper[4933]: I0122 07:21:55.855423 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bfzh9\" (UniqueName: \"kubernetes.io/projected/a04be798-513a-46bd-9433-d4f6e9ad5d91-kube-api-access-bfzh9\") pod \"a04be798-513a-46bd-9433-d4f6e9ad5d91\" (UID: \"a04be798-513a-46bd-9433-d4f6e9ad5d91\") "
Jan 22 07:21:55 crc kubenswrapper[4933]: I0122 07:21:55.855533 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a04be798-513a-46bd-9433-d4f6e9ad5d91-catalog-content\") pod \"a04be798-513a-46bd-9433-d4f6e9ad5d91\" (UID: \"a04be798-513a-46bd-9433-d4f6e9ad5d91\") "
Jan 22 07:21:55 crc kubenswrapper[4933]: I0122 07:21:55.856120 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a04be798-513a-46bd-9433-d4f6e9ad5d91-utilities" (OuterVolumeSpecName: "utilities") pod "a04be798-513a-46bd-9433-d4f6e9ad5d91" (UID: "a04be798-513a-46bd-9433-d4f6e9ad5d91"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 07:21:55 crc kubenswrapper[4933]: I0122 07:21:55.864309 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a04be798-513a-46bd-9433-d4f6e9ad5d91-kube-api-access-bfzh9" (OuterVolumeSpecName: "kube-api-access-bfzh9") pod "a04be798-513a-46bd-9433-d4f6e9ad5d91" (UID: "a04be798-513a-46bd-9433-d4f6e9ad5d91"). InnerVolumeSpecName "kube-api-access-bfzh9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 07:21:55 crc kubenswrapper[4933]: I0122 07:21:55.885855 4933 generic.go:334] "Generic (PLEG): container finished" podID="a04be798-513a-46bd-9433-d4f6e9ad5d91" containerID="00a82d3f218d8e2f1a4405b8f6c4ede877973c028bd19cbf918b17220441afe5" exitCode=0
Jan 22 07:21:55 crc kubenswrapper[4933]: I0122 07:21:55.885915 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6szwq"
Jan 22 07:21:55 crc kubenswrapper[4933]: I0122 07:21:55.885934 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6szwq" event={"ID":"a04be798-513a-46bd-9433-d4f6e9ad5d91","Type":"ContainerDied","Data":"00a82d3f218d8e2f1a4405b8f6c4ede877973c028bd19cbf918b17220441afe5"}
Jan 22 07:21:55 crc kubenswrapper[4933]: I0122 07:21:55.886817 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6szwq" event={"ID":"a04be798-513a-46bd-9433-d4f6e9ad5d91","Type":"ContainerDied","Data":"3e29f22b1b2128bdfc0463a481b87af7aa784396f97e579f62279bbbfee9b160"}
Jan 22 07:21:55 crc kubenswrapper[4933]: I0122 07:21:55.886848 4933 scope.go:117] "RemoveContainer" containerID="00a82d3f218d8e2f1a4405b8f6c4ede877973c028bd19cbf918b17220441afe5"
Jan 22 07:21:55 crc kubenswrapper[4933]: I0122 07:21:55.902034 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a04be798-513a-46bd-9433-d4f6e9ad5d91-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a04be798-513a-46bd-9433-d4f6e9ad5d91" (UID: "a04be798-513a-46bd-9433-d4f6e9ad5d91"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 07:21:55 crc kubenswrapper[4933]: I0122 07:21:55.915238 4933 scope.go:117] "RemoveContainer" containerID="a8998225796e4e79325a84b852097660293be808797760a2e94fb5dbc028dd84"
Jan 22 07:21:55 crc kubenswrapper[4933]: I0122 07:21:55.931774 4933 scope.go:117] "RemoveContainer" containerID="e1e6be2678d931627596caeab4786a5caae914ce2d9100dcfee538adcc245190"
Jan 22 07:21:55 crc kubenswrapper[4933]: I0122 07:21:55.948495 4933 scope.go:117] "RemoveContainer" containerID="00a82d3f218d8e2f1a4405b8f6c4ede877973c028bd19cbf918b17220441afe5"
Jan 22 07:21:55 crc kubenswrapper[4933]: E0122 07:21:55.948923 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"00a82d3f218d8e2f1a4405b8f6c4ede877973c028bd19cbf918b17220441afe5\": container with ID starting with 00a82d3f218d8e2f1a4405b8f6c4ede877973c028bd19cbf918b17220441afe5 not found: ID does not exist" containerID="00a82d3f218d8e2f1a4405b8f6c4ede877973c028bd19cbf918b17220441afe5"
Jan 22 07:21:55 crc kubenswrapper[4933]: I0122 07:21:55.948955 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"00a82d3f218d8e2f1a4405b8f6c4ede877973c028bd19cbf918b17220441afe5"} err="failed to get container status \"00a82d3f218d8e2f1a4405b8f6c4ede877973c028bd19cbf918b17220441afe5\": rpc error: code = NotFound desc = could not find container \"00a82d3f218d8e2f1a4405b8f6c4ede877973c028bd19cbf918b17220441afe5\": container with ID starting with 00a82d3f218d8e2f1a4405b8f6c4ede877973c028bd19cbf918b17220441afe5 not found: ID does not exist"
Jan 22 07:21:55 crc kubenswrapper[4933]: I0122 07:21:55.948973 4933 scope.go:117] "RemoveContainer" containerID="a8998225796e4e79325a84b852097660293be808797760a2e94fb5dbc028dd84"
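[Editor's note, not kubelet output: the E-level "ContainerStatus from runtime service failed ... NotFound" entries above are the benign side of a race: the container was already removed by the time the duplicate RemoveContainer/ContainerStatus call reached CRI-O. A hedged Go sketch of the usual idempotent-delete pattern for handling such gRPC NotFound responses; fakeRemove and removeIfPresent are illustrative names, not kubelet's actual code:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// fakeRemove stands in for a CRI call that loses the race: by the time it
// runs, the container is already gone, so the runtime answers NotFound.
func fakeRemove(id string) error {
	return status.Errorf(codes.NotFound, "could not find container %q", id)
}

// removeIfPresent treats NotFound as success, which is why the errors in the
// log above are followed by normal cleanup rather than retries.
func removeIfPresent(id string) error {
	if err := fakeRemove(id); err != nil && status.Code(err) != codes.NotFound {
		return err
	}
	return nil
}

func main() {
	fmt.Println(removeIfPresent("ec9e06d8f8bf")) // prints <nil>
}

End of note.]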
Jan 22 07:21:55 crc kubenswrapper[4933]: E0122 07:21:55.950172 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a8998225796e4e79325a84b852097660293be808797760a2e94fb5dbc028dd84\": container with ID starting with a8998225796e4e79325a84b852097660293be808797760a2e94fb5dbc028dd84 not found: ID does not exist" containerID="a8998225796e4e79325a84b852097660293be808797760a2e94fb5dbc028dd84"
Jan 22 07:21:55 crc kubenswrapper[4933]: I0122 07:21:55.950257 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a8998225796e4e79325a84b852097660293be808797760a2e94fb5dbc028dd84"} err="failed to get container status \"a8998225796e4e79325a84b852097660293be808797760a2e94fb5dbc028dd84\": rpc error: code = NotFound desc = could not find container \"a8998225796e4e79325a84b852097660293be808797760a2e94fb5dbc028dd84\": container with ID starting with a8998225796e4e79325a84b852097660293be808797760a2e94fb5dbc028dd84 not found: ID does not exist"
Jan 22 07:21:55 crc kubenswrapper[4933]: I0122 07:21:55.950321 4933 scope.go:117] "RemoveContainer" containerID="e1e6be2678d931627596caeab4786a5caae914ce2d9100dcfee538adcc245190"
Jan 22 07:21:55 crc kubenswrapper[4933]: E0122 07:21:55.950631 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e1e6be2678d931627596caeab4786a5caae914ce2d9100dcfee538adcc245190\": container with ID starting with e1e6be2678d931627596caeab4786a5caae914ce2d9100dcfee538adcc245190 not found: ID does not exist" containerID="e1e6be2678d931627596caeab4786a5caae914ce2d9100dcfee538adcc245190"
Jan 22 07:21:55 crc kubenswrapper[4933]: I0122 07:21:55.950674 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1e6be2678d931627596caeab4786a5caae914ce2d9100dcfee538adcc245190"} err="failed to get container status \"e1e6be2678d931627596caeab4786a5caae914ce2d9100dcfee538adcc245190\": rpc error: code = NotFound desc = could not find container \"e1e6be2678d931627596caeab4786a5caae914ce2d9100dcfee538adcc245190\": container with ID starting with e1e6be2678d931627596caeab4786a5caae914ce2d9100dcfee538adcc245190 not found: ID does not exist"
Jan 22 07:21:55 crc kubenswrapper[4933]: I0122 07:21:55.957308 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a04be798-513a-46bd-9433-d4f6e9ad5d91-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 22 07:21:55 crc kubenswrapper[4933]: I0122 07:21:55.957329 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a04be798-513a-46bd-9433-d4f6e9ad5d91-utilities\") on node \"crc\" DevicePath \"\""
Jan 22 07:21:55 crc kubenswrapper[4933]: I0122 07:21:55.957339 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bfzh9\" (UniqueName: \"kubernetes.io/projected/a04be798-513a-46bd-9433-d4f6e9ad5d91-kube-api-access-bfzh9\") on node \"crc\" DevicePath \"\""
Jan 22 07:21:56 crc kubenswrapper[4933]: I0122 07:21:56.221511 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6szwq"]
Jan 22 07:21:56 crc kubenswrapper[4933]: I0122 07:21:56.229607 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-6szwq"]
Jan 22 07:21:56 crc kubenswrapper[4933]: I0122 07:21:56.500994 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31f9bca1-a923-47bb-948e-10191d7f05f8" path="/var/lib/kubelet/pods/31f9bca1-a923-47bb-948e-10191d7f05f8/volumes"
Jan 22 07:21:56 crc kubenswrapper[4933]: I0122 07:21:56.501573 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="965afeae-5409-40b6-8826-b159a46d87cd" path="/var/lib/kubelet/pods/965afeae-5409-40b6-8826-b159a46d87cd/volumes"
Jan 22 07:21:56 crc kubenswrapper[4933]: I0122 07:21:56.502170 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a04be798-513a-46bd-9433-d4f6e9ad5d91" path="/var/lib/kubelet/pods/a04be798-513a-46bd-9433-d4f6e9ad5d91/volumes"
Jan 22 07:22:10 crc kubenswrapper[4933]: I0122 07:22:10.943296 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 07:22:10 crc kubenswrapper[4933]: I0122 07:22:10.943840 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 07:22:23 crc kubenswrapper[4933]: I0122 07:22:23.168446 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-56899cfc48-xqft6"
Jan 22 07:22:24 crc kubenswrapper[4933]: I0122 07:22:24.213356 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-56899cfc48-xqft6"
Jan 22 07:22:40 crc kubenswrapper[4933]: I0122 07:22:40.943385 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 07:22:40 crc kubenswrapper[4933]: I0122 07:22:40.944106 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 07:22:40 crc kubenswrapper[4933]: I0122 07:22:40.944185 4933 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx"
Jan 22 07:22:40 crc kubenswrapper[4933]: I0122 07:22:40.944895 4933 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"df77120a276e772bee7dad3e476bcf8749c071d28246be8da71e670976f60157"} pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 22 07:22:40 crc kubenswrapper[4933]: I0122 07:22:40.944956 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" containerID="cri-o://df77120a276e772bee7dad3e476bcf8749c071d28246be8da71e670976f60157" gracePeriod=600
Jan 22 07:22:41 crc kubenswrapper[4933]: I0122 07:22:41.290038 4933 generic.go:334] "Generic (PLEG): container finished" podID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerID="df77120a276e772bee7dad3e476bcf8749c071d28246be8da71e670976f60157" exitCode=0
Jan 22 07:22:41 crc kubenswrapper[4933]: I0122 07:22:41.290144 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerDied","Data":"df77120a276e772bee7dad3e476bcf8749c071d28246be8da71e670976f60157"}
Jan 22 07:22:41 crc kubenswrapper[4933]: I0122 07:22:41.290373 4933 scope.go:117] "RemoveContainer" containerID="8d0ff8f158acf8d88510b1fddaf0b35b7a3fb6e84561545d6c60a78c2e36fc92"
Jan 22 07:22:42 crc kubenswrapper[4933]: I0122 07:22:42.300708 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerStarted","Data":"16fd824342f037f0104eaf6d48b751a02eeec5a84e0c2a32f1a295811edc0336"}
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.005811 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-88n4d"]
Jan 22 07:22:46 crc kubenswrapper[4933]: E0122 07:22:46.006961 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="965afeae-5409-40b6-8826-b159a46d87cd" containerName="registry-server"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.006975 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="965afeae-5409-40b6-8826-b159a46d87cd" containerName="registry-server"
Jan 22 07:22:46 crc kubenswrapper[4933]: E0122 07:22:46.006987 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a04be798-513a-46bd-9433-d4f6e9ad5d91" containerName="extract-content"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.006992 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="a04be798-513a-46bd-9433-d4f6e9ad5d91" containerName="extract-content"
Jan 22 07:22:46 crc kubenswrapper[4933]: E0122 07:22:46.007011 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="965afeae-5409-40b6-8826-b159a46d87cd" containerName="extract-utilities"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.007017 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="965afeae-5409-40b6-8826-b159a46d87cd" containerName="extract-utilities"
Jan 22 07:22:46 crc kubenswrapper[4933]: E0122 07:22:46.007030 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="965afeae-5409-40b6-8826-b159a46d87cd" containerName="extract-content"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.007037 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="965afeae-5409-40b6-8826-b159a46d87cd" containerName="extract-content"
Jan 22 07:22:46 crc kubenswrapper[4933]: E0122 07:22:46.007077 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31f9bca1-a923-47bb-948e-10191d7f05f8" containerName="init"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.007096 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="31f9bca1-a923-47bb-948e-10191d7f05f8" containerName="init"
Jan 22 07:22:46 crc kubenswrapper[4933]: E0122 07:22:46.007113 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31f9bca1-a923-47bb-948e-10191d7f05f8" containerName="dnsmasq-dns"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.007121 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="31f9bca1-a923-47bb-948e-10191d7f05f8" containerName="dnsmasq-dns"
Jan 22 07:22:46 crc kubenswrapper[4933]: E0122 07:22:46.007134 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a04be798-513a-46bd-9433-d4f6e9ad5d91" containerName="registry-server"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.007143 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="a04be798-513a-46bd-9433-d4f6e9ad5d91" containerName="registry-server"
Jan 22 07:22:46 crc kubenswrapper[4933]: E0122 07:22:46.007165 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a04be798-513a-46bd-9433-d4f6e9ad5d91" containerName="extract-utilities"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.007174 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="a04be798-513a-46bd-9433-d4f6e9ad5d91" containerName="extract-utilities"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.007524 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="31f9bca1-a923-47bb-948e-10191d7f05f8" containerName="dnsmasq-dns"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.007550 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="a04be798-513a-46bd-9433-d4f6e9ad5d91" containerName="registry-server"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.007567 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="965afeae-5409-40b6-8826-b159a46d87cd" containerName="registry-server"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.008400 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-88n4d"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.024332 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-88n4d"]
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.089089 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-pzr6g"]
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.090479 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-pzr6g"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.101042 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-a924-account-create-update-z6hhh"]
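[Editor's note, not kubelet output: the RemoveStaleState entries above carry their payload entirely in trailing key="value" attributes (podUID=, containerName=, ...), as do most messages in this log. A small illustrative Go helper, assuming only the standard library, that extracts those attributes; kvRe and attrs are hypothetical names for this sketch:

package main

import (
	"fmt"
	"regexp"
)

// kvRe pulls key="value" attributes; the value alternation tolerates the
// escaped quotes (\") that appear inside some err="..." values.
var kvRe = regexp.MustCompile(`(\w+)="((?:[^"\\]|\\.)*)"`)

func attrs(entry string) map[string]string {
	out := map[string]string{}
	for _, m := range kvRe.FindAllStringSubmatch(entry, -1) {
		out[m[1]] = m[2]
	}
	return out
}

func main() {
	entry := `I0122 07:22:46.007134 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a04be798-513a-46bd-9433-d4f6e9ad5d91" containerName="registry-server"`
	a := attrs(entry)
	fmt.Println(a["podUID"], a["containerName"])
}

End of note.]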
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.102520 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-a924-account-create-update-z6hhh"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.105441 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.110114 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ec4d3e32-6483-4056-aa0c-fe34326a6c07-operator-scripts\") pod \"nova-api-db-create-88n4d\" (UID: \"ec4d3e32-6483-4056-aa0c-fe34326a6c07\") " pod="openstack/nova-api-db-create-88n4d"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.110257 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7tpbh\" (UniqueName: \"kubernetes.io/projected/ec4d3e32-6483-4056-aa0c-fe34326a6c07-kube-api-access-7tpbh\") pod \"nova-api-db-create-88n4d\" (UID: \"ec4d3e32-6483-4056-aa0c-fe34326a6c07\") " pod="openstack/nova-api-db-create-88n4d"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.110724 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-pzr6g"]
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.125773 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-a924-account-create-update-z6hhh"]
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.212254 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7tpbh\" (UniqueName: \"kubernetes.io/projected/ec4d3e32-6483-4056-aa0c-fe34326a6c07-kube-api-access-7tpbh\") pod \"nova-api-db-create-88n4d\" (UID: \"ec4d3e32-6483-4056-aa0c-fe34326a6c07\") " pod="openstack/nova-api-db-create-88n4d"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.212355 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/76c29d0a-c2e2-466b-a3e4-6e26ab04d57c-operator-scripts\") pod \"nova-cell0-db-create-pzr6g\" (UID: \"76c29d0a-c2e2-466b-a3e4-6e26ab04d57c\") " pod="openstack/nova-cell0-db-create-pzr6g"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.212423 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xkkpq\" (UniqueName: \"kubernetes.io/projected/76c29d0a-c2e2-466b-a3e4-6e26ab04d57c-kube-api-access-xkkpq\") pod \"nova-cell0-db-create-pzr6g\" (UID: \"76c29d0a-c2e2-466b-a3e4-6e26ab04d57c\") " pod="openstack/nova-cell0-db-create-pzr6g"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.212485 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ec4d3e32-6483-4056-aa0c-fe34326a6c07-operator-scripts\") pod \"nova-api-db-create-88n4d\" (UID: \"ec4d3e32-6483-4056-aa0c-fe34326a6c07\") " pod="openstack/nova-api-db-create-88n4d"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.212529 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5mbzr\" (UniqueName: \"kubernetes.io/projected/b5263403-5544-4431-a713-f43156c25601-kube-api-access-5mbzr\") pod \"nova-api-a924-account-create-update-z6hhh\" (UID: \"b5263403-5544-4431-a713-f43156c25601\") " pod="openstack/nova-api-a924-account-create-update-z6hhh"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.212570 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b5263403-5544-4431-a713-f43156c25601-operator-scripts\") pod \"nova-api-a924-account-create-update-z6hhh\" (UID: \"b5263403-5544-4431-a713-f43156c25601\") " pod="openstack/nova-api-a924-account-create-update-z6hhh"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.213231 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ec4d3e32-6483-4056-aa0c-fe34326a6c07-operator-scripts\") pod \"nova-api-db-create-88n4d\" (UID: \"ec4d3e32-6483-4056-aa0c-fe34326a6c07\") " pod="openstack/nova-api-db-create-88n4d"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.229836 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7tpbh\" (UniqueName: \"kubernetes.io/projected/ec4d3e32-6483-4056-aa0c-fe34326a6c07-kube-api-access-7tpbh\") pod \"nova-api-db-create-88n4d\" (UID: \"ec4d3e32-6483-4056-aa0c-fe34326a6c07\") " pod="openstack/nova-api-db-create-88n4d"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.292060 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-pmf24"]
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.293525 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-pmf24"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.298962 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-a894-account-create-update-kmtkp"]
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.300331 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-a894-account-create-update-kmtkp"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.302271 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.313925 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/76c29d0a-c2e2-466b-a3e4-6e26ab04d57c-operator-scripts\") pod \"nova-cell0-db-create-pzr6g\" (UID: \"76c29d0a-c2e2-466b-a3e4-6e26ab04d57c\") " pod="openstack/nova-cell0-db-create-pzr6g"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.314006 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xkkpq\" (UniqueName: \"kubernetes.io/projected/76c29d0a-c2e2-466b-a3e4-6e26ab04d57c-kube-api-access-xkkpq\") pod \"nova-cell0-db-create-pzr6g\" (UID: \"76c29d0a-c2e2-466b-a3e4-6e26ab04d57c\") " pod="openstack/nova-cell0-db-create-pzr6g"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.314064 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5mbzr\" (UniqueName: \"kubernetes.io/projected/b5263403-5544-4431-a713-f43156c25601-kube-api-access-5mbzr\") pod \"nova-api-a924-account-create-update-z6hhh\" (UID: \"b5263403-5544-4431-a713-f43156c25601\") " pod="openstack/nova-api-a924-account-create-update-z6hhh"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.314127 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b5263403-5544-4431-a713-f43156c25601-operator-scripts\") pod \"nova-api-a924-account-create-update-z6hhh\" (UID: \"b5263403-5544-4431-a713-f43156c25601\") " pod="openstack/nova-api-a924-account-create-update-z6hhh"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.314794 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b5263403-5544-4431-a713-f43156c25601-operator-scripts\") pod \"nova-api-a924-account-create-update-z6hhh\" (UID: \"b5263403-5544-4431-a713-f43156c25601\") " pod="openstack/nova-api-a924-account-create-update-z6hhh"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.314967 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/76c29d0a-c2e2-466b-a3e4-6e26ab04d57c-operator-scripts\") pod \"nova-cell0-db-create-pzr6g\" (UID: \"76c29d0a-c2e2-466b-a3e4-6e26ab04d57c\") " pod="openstack/nova-cell0-db-create-pzr6g"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.320237 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-a894-account-create-update-kmtkp"]
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.331691 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-pmf24"]
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.334472 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-88n4d"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.335387 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xkkpq\" (UniqueName: \"kubernetes.io/projected/76c29d0a-c2e2-466b-a3e4-6e26ab04d57c-kube-api-access-xkkpq\") pod \"nova-cell0-db-create-pzr6g\" (UID: \"76c29d0a-c2e2-466b-a3e4-6e26ab04d57c\") " pod="openstack/nova-cell0-db-create-pzr6g"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.337976 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5mbzr\" (UniqueName: \"kubernetes.io/projected/b5263403-5544-4431-a713-f43156c25601-kube-api-access-5mbzr\") pod \"nova-api-a924-account-create-update-z6hhh\" (UID: \"b5263403-5544-4431-a713-f43156c25601\") " pod="openstack/nova-api-a924-account-create-update-z6hhh"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.411221 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-pzr6g"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.416430 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/12a6bc5b-33ea-4358-ab8b-17ca45d4e6e5-operator-scripts\") pod \"nova-cell1-db-create-pmf24\" (UID: \"12a6bc5b-33ea-4358-ab8b-17ca45d4e6e5\") " pod="openstack/nova-cell1-db-create-pmf24"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.416582 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0f5c88e1-9441-4f5b-a7d1-a0b90ad0b928-operator-scripts\") pod \"nova-cell0-a894-account-create-update-kmtkp\" (UID: \"0f5c88e1-9441-4f5b-a7d1-a0b90ad0b928\") " pod="openstack/nova-cell0-a894-account-create-update-kmtkp"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.416643 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k2px5\" (UniqueName: \"kubernetes.io/projected/12a6bc5b-33ea-4358-ab8b-17ca45d4e6e5-kube-api-access-k2px5\") pod \"nova-cell1-db-create-pmf24\" (UID: \"12a6bc5b-33ea-4358-ab8b-17ca45d4e6e5\") " pod="openstack/nova-cell1-db-create-pmf24"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.416681 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-br9f8\" (UniqueName: \"kubernetes.io/projected/0f5c88e1-9441-4f5b-a7d1-a0b90ad0b928-kube-api-access-br9f8\") pod \"nova-cell0-a894-account-create-update-kmtkp\" (UID: \"0f5c88e1-9441-4f5b-a7d1-a0b90ad0b928\") " pod="openstack/nova-cell0-a894-account-create-update-kmtkp"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.432387 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-a924-account-create-update-z6hhh"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.515882 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-51cb-account-create-update-4r7vn"]
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.518052 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-51cb-account-create-update-4r7vn"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.518113 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/12a6bc5b-33ea-4358-ab8b-17ca45d4e6e5-operator-scripts\") pod \"nova-cell1-db-create-pmf24\" (UID: \"12a6bc5b-33ea-4358-ab8b-17ca45d4e6e5\") " pod="openstack/nova-cell1-db-create-pmf24"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.518172 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0f5c88e1-9441-4f5b-a7d1-a0b90ad0b928-operator-scripts\") pod \"nova-cell0-a894-account-create-update-kmtkp\" (UID: \"0f5c88e1-9441-4f5b-a7d1-a0b90ad0b928\") " pod="openstack/nova-cell0-a894-account-create-update-kmtkp"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.518210 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k2px5\" (UniqueName: \"kubernetes.io/projected/12a6bc5b-33ea-4358-ab8b-17ca45d4e6e5-kube-api-access-k2px5\") pod \"nova-cell1-db-create-pmf24\" (UID: \"12a6bc5b-33ea-4358-ab8b-17ca45d4e6e5\") " pod="openstack/nova-cell1-db-create-pmf24"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.518246 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-br9f8\" (UniqueName: \"kubernetes.io/projected/0f5c88e1-9441-4f5b-a7d1-a0b90ad0b928-kube-api-access-br9f8\") pod \"nova-cell0-a894-account-create-update-kmtkp\" (UID: \"0f5c88e1-9441-4f5b-a7d1-a0b90ad0b928\") " pod="openstack/nova-cell0-a894-account-create-update-kmtkp"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.519205 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/12a6bc5b-33ea-4358-ab8b-17ca45d4e6e5-operator-scripts\") pod \"nova-cell1-db-create-pmf24\" (UID: \"12a6bc5b-33ea-4358-ab8b-17ca45d4e6e5\") " pod="openstack/nova-cell1-db-create-pmf24"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.519428 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0f5c88e1-9441-4f5b-a7d1-a0b90ad0b928-operator-scripts\") pod \"nova-cell0-a894-account-create-update-kmtkp\" (UID: \"0f5c88e1-9441-4f5b-a7d1-a0b90ad0b928\") " pod="openstack/nova-cell0-a894-account-create-update-kmtkp"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.521230 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.524224 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-51cb-account-create-update-4r7vn"]
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.541485 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k2px5\" (UniqueName: \"kubernetes.io/projected/12a6bc5b-33ea-4358-ab8b-17ca45d4e6e5-kube-api-access-k2px5\") pod \"nova-cell1-db-create-pmf24\" (UID: \"12a6bc5b-33ea-4358-ab8b-17ca45d4e6e5\") " pod="openstack/nova-cell1-db-create-pmf24"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.541490 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-br9f8\" (UniqueName: \"kubernetes.io/projected/0f5c88e1-9441-4f5b-a7d1-a0b90ad0b928-kube-api-access-br9f8\") pod \"nova-cell0-a894-account-create-update-kmtkp\" (UID: \"0f5c88e1-9441-4f5b-a7d1-a0b90ad0b928\") " pod="openstack/nova-cell0-a894-account-create-update-kmtkp"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.620502 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-pmf24"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.621125 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qdccg\" (UniqueName: \"kubernetes.io/projected/b02e2932-3602-40b2-b58d-f4c0f3384ad2-kube-api-access-qdccg\") pod \"nova-cell1-51cb-account-create-update-4r7vn\" (UID: \"b02e2932-3602-40b2-b58d-f4c0f3384ad2\") " pod="openstack/nova-cell1-51cb-account-create-update-4r7vn"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.621413 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b02e2932-3602-40b2-b58d-f4c0f3384ad2-operator-scripts\") pod \"nova-cell1-51cb-account-create-update-4r7vn\" (UID: \"b02e2932-3602-40b2-b58d-f4c0f3384ad2\") " pod="openstack/nova-cell1-51cb-account-create-update-4r7vn"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.723901 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-a894-account-create-update-kmtkp"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.725218 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qdccg\" (UniqueName: \"kubernetes.io/projected/b02e2932-3602-40b2-b58d-f4c0f3384ad2-kube-api-access-qdccg\") pod \"nova-cell1-51cb-account-create-update-4r7vn\" (UID: \"b02e2932-3602-40b2-b58d-f4c0f3384ad2\") " pod="openstack/nova-cell1-51cb-account-create-update-4r7vn"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.725316 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b02e2932-3602-40b2-b58d-f4c0f3384ad2-operator-scripts\") pod \"nova-cell1-51cb-account-create-update-4r7vn\" (UID: \"b02e2932-3602-40b2-b58d-f4c0f3384ad2\") " pod="openstack/nova-cell1-51cb-account-create-update-4r7vn"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.726144 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b02e2932-3602-40b2-b58d-f4c0f3384ad2-operator-scripts\") pod \"nova-cell1-51cb-account-create-update-4r7vn\" (UID: \"b02e2932-3602-40b2-b58d-f4c0f3384ad2\") " pod="openstack/nova-cell1-51cb-account-create-update-4r7vn"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.744680 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qdccg\" (UniqueName: \"kubernetes.io/projected/b02e2932-3602-40b2-b58d-f4c0f3384ad2-kube-api-access-qdccg\") pod \"nova-cell1-51cb-account-create-update-4r7vn\" (UID: \"b02e2932-3602-40b2-b58d-f4c0f3384ad2\") " pod="openstack/nova-cell1-51cb-account-create-update-4r7vn"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.858923 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-51cb-account-create-update-4r7vn"
Jan 22 07:22:46 crc kubenswrapper[4933]: I0122 07:22:46.859985 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-88n4d"]
Jan 22 07:22:47 crc kubenswrapper[4933]: W0122 07:22:47.008004 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb5263403_5544_4431_a713_f43156c25601.slice/crio-ce9509bd29192fb7a8269de5569bdb9c4c2351425e2ae5fbe9ac715d003229f3 WatchSource:0}: Error finding container ce9509bd29192fb7a8269de5569bdb9c4c2351425e2ae5fbe9ac715d003229f3: Status 404 returned error can't find the container with id ce9509bd29192fb7a8269de5569bdb9c4c2351425e2ae5fbe9ac715d003229f3
Jan 22 07:22:47 crc kubenswrapper[4933]: I0122 07:22:47.012778 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-a924-account-create-update-z6hhh"]
Jan 22 07:22:47 crc kubenswrapper[4933]: I0122 07:22:47.023542 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-pzr6g"]
Jan 22 07:22:47 crc kubenswrapper[4933]: W0122 07:22:47.025053 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod76c29d0a_c2e2_466b_a3e4_6e26ab04d57c.slice/crio-953f54d47b086fdd859193303700f61ab6444100e88df8a8cd3d698af427b8c6 WatchSource:0}: Error finding container 953f54d47b086fdd859193303700f61ab6444100e88df8a8cd3d698af427b8c6: Status 404 returned error can't find the container with id 953f54d47b086fdd859193303700f61ab6444100e88df8a8cd3d698af427b8c6
Jan 22 07:22:47 crc kubenswrapper[4933]: I0122 07:22:47.144681 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-pmf24"]
Jan 22 07:22:47 crc kubenswrapper[4933]: W0122 07:22:47.166873 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod12a6bc5b_33ea_4358_ab8b_17ca45d4e6e5.slice/crio-5728d4053b18a413560b863672c89d8760f65f1261cd457d09330ed1a6235e29 WatchSource:0}: Error finding container 5728d4053b18a413560b863672c89d8760f65f1261cd457d09330ed1a6235e29: Status 404 returned error can't find the container with id 5728d4053b18a413560b863672c89d8760f65f1261cd457d09330ed1a6235e29
Jan 22 07:22:47 crc kubenswrapper[4933]: I0122 07:22:47.249671 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-a894-account-create-update-kmtkp"]
Jan 22 07:22:47 crc kubenswrapper[4933]: W0122 07:22:47.264982 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0f5c88e1_9441_4f5b_a7d1_a0b90ad0b928.slice/crio-b0cbdb75ad6c7bd880341e4680b6948fd09598ea5676d59819d3d0e977c8444b WatchSource:0}: Error finding container b0cbdb75ad6c7bd880341e4680b6948fd09598ea5676d59819d3d0e977c8444b: Status 404 returned error can't find the container with id b0cbdb75ad6c7bd880341e4680b6948fd09598ea5676d59819d3d0e977c8444b
Jan 22 07:22:47 crc kubenswrapper[4933]: I0122 07:22:47.350266 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-a924-account-create-update-z6hhh" event={"ID":"b5263403-5544-4431-a713-f43156c25601","Type":"ContainerStarted","Data":"186fbf45413b6b9348b00354eb02c2ecbd9b71e08a6475e01f31b9ef86833b8e"}
Jan 22 07:22:47 crc kubenswrapper[4933]: I0122 07:22:47.350555 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-a924-account-create-update-z6hhh" event={"ID":"b5263403-5544-4431-a713-f43156c25601","Type":"ContainerStarted","Data":"ce9509bd29192fb7a8269de5569bdb9c4c2351425e2ae5fbe9ac715d003229f3"}
Jan 22 07:22:47 crc kubenswrapper[4933]: I0122 07:22:47.351765 4933 generic.go:334] "Generic (PLEG): container finished" podID="ec4d3e32-6483-4056-aa0c-fe34326a6c07" containerID="14cfc24e318f731f9c159ebb3a4806af9209b2c87aac48f0257087412e0c29bf" exitCode=0
Jan 22 07:22:47 crc kubenswrapper[4933]: I0122 07:22:47.351869 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-88n4d" event={"ID":"ec4d3e32-6483-4056-aa0c-fe34326a6c07","Type":"ContainerDied","Data":"14cfc24e318f731f9c159ebb3a4806af9209b2c87aac48f0257087412e0c29bf"}
Jan 22 07:22:47 crc kubenswrapper[4933]: I0122 07:22:47.351910 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-88n4d" event={"ID":"ec4d3e32-6483-4056-aa0c-fe34326a6c07","Type":"ContainerStarted","Data":"a04e6d75a354645a372bb8da7c6f12447a13a42ffc55b4da3e09c1c41044cda6"}
Jan 22 07:22:47 crc kubenswrapper[4933]: I0122 07:22:47.352877 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-pmf24" event={"ID":"12a6bc5b-33ea-4358-ab8b-17ca45d4e6e5","Type":"ContainerStarted","Data":"5728d4053b18a413560b863672c89d8760f65f1261cd457d09330ed1a6235e29"}
Jan 22 07:22:47 crc kubenswrapper[4933]: I0122 07:22:47.354973 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-a894-account-create-update-kmtkp" event={"ID":"0f5c88e1-9441-4f5b-a7d1-a0b90ad0b928","Type":"ContainerStarted","Data":"b0cbdb75ad6c7bd880341e4680b6948fd09598ea5676d59819d3d0e977c8444b"}
Jan 22 07:22:47 crc kubenswrapper[4933]: I0122 07:22:47.357126 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-pzr6g" event={"ID":"76c29d0a-c2e2-466b-a3e4-6e26ab04d57c","Type":"ContainerStarted","Data":"38566bba7fe60f7004b85a512051ce0973fccf829f1c9e4c250b0c97c6064992"}
Jan 22 07:22:47 crc kubenswrapper[4933]: I0122 07:22:47.357252 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-pzr6g" event={"ID":"76c29d0a-c2e2-466b-a3e4-6e26ab04d57c","Type":"ContainerStarted","Data":"953f54d47b086fdd859193303700f61ab6444100e88df8a8cd3d698af427b8c6"}
Jan 22 07:22:47 crc kubenswrapper[4933]: I0122 07:22:47.367439 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-a924-account-create-update-z6hhh" podStartSLOduration=1.36741489 podStartE2EDuration="1.36741489s" podCreationTimestamp="2026-01-22 07:22:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:22:47.36329983 +0000 UTC m=+5815.200425183" watchObservedRunningTime="2026-01-22 07:22:47.36741489 +0000 UTC m=+5815.204540263"
Jan 22 07:22:47 crc kubenswrapper[4933]: I0122 07:22:47.386911 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-51cb-account-create-update-4r7vn"]
Jan 22 07:22:47 crc kubenswrapper[4933]: I0122 07:22:47.403047 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-db-create-pzr6g" podStartSLOduration=1.403028886 podStartE2EDuration="1.403028886s" podCreationTimestamp="2026-01-22 07:22:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22
07:22:47.394018147 +0000 UTC m=+5815.231143500" watchObservedRunningTime="2026-01-22 07:22:47.403028886 +0000 UTC m=+5815.240154239" Jan 22 07:22:47 crc kubenswrapper[4933]: W0122 07:22:47.423322 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb02e2932_3602_40b2_b58d_f4c0f3384ad2.slice/crio-bef165884fcb346c72e908c33b95b96ea7057a44cf4acccfe14019903c69b978 WatchSource:0}: Error finding container bef165884fcb346c72e908c33b95b96ea7057a44cf4acccfe14019903c69b978: Status 404 returned error can't find the container with id bef165884fcb346c72e908c33b95b96ea7057a44cf4acccfe14019903c69b978 Jan 22 07:22:48 crc kubenswrapper[4933]: I0122 07:22:48.371741 4933 generic.go:334] "Generic (PLEG): container finished" podID="b02e2932-3602-40b2-b58d-f4c0f3384ad2" containerID="ce8bb8126c7fa2d71339243af024c9e8ef779d059f1eb80f76566aca8417f6d2" exitCode=0 Jan 22 07:22:48 crc kubenswrapper[4933]: I0122 07:22:48.371855 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-51cb-account-create-update-4r7vn" event={"ID":"b02e2932-3602-40b2-b58d-f4c0f3384ad2","Type":"ContainerDied","Data":"ce8bb8126c7fa2d71339243af024c9e8ef779d059f1eb80f76566aca8417f6d2"} Jan 22 07:22:48 crc kubenswrapper[4933]: I0122 07:22:48.373161 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-51cb-account-create-update-4r7vn" event={"ID":"b02e2932-3602-40b2-b58d-f4c0f3384ad2","Type":"ContainerStarted","Data":"bef165884fcb346c72e908c33b95b96ea7057a44cf4acccfe14019903c69b978"} Jan 22 07:22:48 crc kubenswrapper[4933]: I0122 07:22:48.375550 4933 generic.go:334] "Generic (PLEG): container finished" podID="b5263403-5544-4431-a713-f43156c25601" containerID="186fbf45413b6b9348b00354eb02c2ecbd9b71e08a6475e01f31b9ef86833b8e" exitCode=0 Jan 22 07:22:48 crc kubenswrapper[4933]: I0122 07:22:48.375684 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-a924-account-create-update-z6hhh" event={"ID":"b5263403-5544-4431-a713-f43156c25601","Type":"ContainerDied","Data":"186fbf45413b6b9348b00354eb02c2ecbd9b71e08a6475e01f31b9ef86833b8e"} Jan 22 07:22:48 crc kubenswrapper[4933]: I0122 07:22:48.378210 4933 generic.go:334] "Generic (PLEG): container finished" podID="12a6bc5b-33ea-4358-ab8b-17ca45d4e6e5" containerID="f83c4f0ee0da98516b8642d22b8acef00175376dbe27f61fdabc4350ed34d7dd" exitCode=0 Jan 22 07:22:48 crc kubenswrapper[4933]: I0122 07:22:48.378289 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-pmf24" event={"ID":"12a6bc5b-33ea-4358-ab8b-17ca45d4e6e5","Type":"ContainerDied","Data":"f83c4f0ee0da98516b8642d22b8acef00175376dbe27f61fdabc4350ed34d7dd"} Jan 22 07:22:48 crc kubenswrapper[4933]: I0122 07:22:48.380461 4933 generic.go:334] "Generic (PLEG): container finished" podID="0f5c88e1-9441-4f5b-a7d1-a0b90ad0b928" containerID="fa241839c3a6bf08562dd861f23bd9436c5f15afbcda480f4631b959d2f1137f" exitCode=0 Jan 22 07:22:48 crc kubenswrapper[4933]: I0122 07:22:48.380518 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-a894-account-create-update-kmtkp" event={"ID":"0f5c88e1-9441-4f5b-a7d1-a0b90ad0b928","Type":"ContainerDied","Data":"fa241839c3a6bf08562dd861f23bd9436c5f15afbcda480f4631b959d2f1137f"} Jan 22 07:22:48 crc kubenswrapper[4933]: I0122 07:22:48.382361 4933 generic.go:334] "Generic (PLEG): container finished" podID="76c29d0a-c2e2-466b-a3e4-6e26ab04d57c" 
containerID="38566bba7fe60f7004b85a512051ce0973fccf829f1c9e4c250b0c97c6064992" exitCode=0 Jan 22 07:22:48 crc kubenswrapper[4933]: I0122 07:22:48.382462 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-pzr6g" event={"ID":"76c29d0a-c2e2-466b-a3e4-6e26ab04d57c","Type":"ContainerDied","Data":"38566bba7fe60f7004b85a512051ce0973fccf829f1c9e4c250b0c97c6064992"} Jan 22 07:22:48 crc kubenswrapper[4933]: I0122 07:22:48.751165 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-88n4d" Jan 22 07:22:48 crc kubenswrapper[4933]: I0122 07:22:48.865399 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ec4d3e32-6483-4056-aa0c-fe34326a6c07-operator-scripts\") pod \"ec4d3e32-6483-4056-aa0c-fe34326a6c07\" (UID: \"ec4d3e32-6483-4056-aa0c-fe34326a6c07\") " Jan 22 07:22:48 crc kubenswrapper[4933]: I0122 07:22:48.865479 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7tpbh\" (UniqueName: \"kubernetes.io/projected/ec4d3e32-6483-4056-aa0c-fe34326a6c07-kube-api-access-7tpbh\") pod \"ec4d3e32-6483-4056-aa0c-fe34326a6c07\" (UID: \"ec4d3e32-6483-4056-aa0c-fe34326a6c07\") " Jan 22 07:22:48 crc kubenswrapper[4933]: I0122 07:22:48.866325 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ec4d3e32-6483-4056-aa0c-fe34326a6c07-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ec4d3e32-6483-4056-aa0c-fe34326a6c07" (UID: "ec4d3e32-6483-4056-aa0c-fe34326a6c07"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:22:48 crc kubenswrapper[4933]: I0122 07:22:48.877312 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ec4d3e32-6483-4056-aa0c-fe34326a6c07-kube-api-access-7tpbh" (OuterVolumeSpecName: "kube-api-access-7tpbh") pod "ec4d3e32-6483-4056-aa0c-fe34326a6c07" (UID: "ec4d3e32-6483-4056-aa0c-fe34326a6c07"). InnerVolumeSpecName "kube-api-access-7tpbh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:22:48 crc kubenswrapper[4933]: I0122 07:22:48.968020 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ec4d3e32-6483-4056-aa0c-fe34326a6c07-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:22:48 crc kubenswrapper[4933]: I0122 07:22:48.968053 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7tpbh\" (UniqueName: \"kubernetes.io/projected/ec4d3e32-6483-4056-aa0c-fe34326a6c07-kube-api-access-7tpbh\") on node \"crc\" DevicePath \"\"" Jan 22 07:22:49 crc kubenswrapper[4933]: I0122 07:22:49.395140 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-88n4d" event={"ID":"ec4d3e32-6483-4056-aa0c-fe34326a6c07","Type":"ContainerDied","Data":"a04e6d75a354645a372bb8da7c6f12447a13a42ffc55b4da3e09c1c41044cda6"} Jan 22 07:22:49 crc kubenswrapper[4933]: I0122 07:22:49.395217 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a04e6d75a354645a372bb8da7c6f12447a13a42ffc55b4da3e09c1c41044cda6" Jan 22 07:22:49 crc kubenswrapper[4933]: I0122 07:22:49.395478 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-88n4d" Jan 22 07:22:49 crc kubenswrapper[4933]: I0122 07:22:49.889001 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-51cb-account-create-update-4r7vn" Jan 22 07:22:49 crc kubenswrapper[4933]: I0122 07:22:49.986697 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qdccg\" (UniqueName: \"kubernetes.io/projected/b02e2932-3602-40b2-b58d-f4c0f3384ad2-kube-api-access-qdccg\") pod \"b02e2932-3602-40b2-b58d-f4c0f3384ad2\" (UID: \"b02e2932-3602-40b2-b58d-f4c0f3384ad2\") " Jan 22 07:22:49 crc kubenswrapper[4933]: I0122 07:22:49.986897 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b02e2932-3602-40b2-b58d-f4c0f3384ad2-operator-scripts\") pod \"b02e2932-3602-40b2-b58d-f4c0f3384ad2\" (UID: \"b02e2932-3602-40b2-b58d-f4c0f3384ad2\") " Jan 22 07:22:49 crc kubenswrapper[4933]: I0122 07:22:49.987939 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b02e2932-3602-40b2-b58d-f4c0f3384ad2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b02e2932-3602-40b2-b58d-f4c0f3384ad2" (UID: "b02e2932-3602-40b2-b58d-f4c0f3384ad2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.012439 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b02e2932-3602-40b2-b58d-f4c0f3384ad2-kube-api-access-qdccg" (OuterVolumeSpecName: "kube-api-access-qdccg") pod "b02e2932-3602-40b2-b58d-f4c0f3384ad2" (UID: "b02e2932-3602-40b2-b58d-f4c0f3384ad2"). InnerVolumeSpecName "kube-api-access-qdccg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.056536 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-a924-account-create-update-z6hhh" Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.063140 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-pmf24" Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.075279 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-pzr6g" Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.088123 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5mbzr\" (UniqueName: \"kubernetes.io/projected/b5263403-5544-4431-a713-f43156c25601-kube-api-access-5mbzr\") pod \"b5263403-5544-4431-a713-f43156c25601\" (UID: \"b5263403-5544-4431-a713-f43156c25601\") " Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.088174 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k2px5\" (UniqueName: \"kubernetes.io/projected/12a6bc5b-33ea-4358-ab8b-17ca45d4e6e5-kube-api-access-k2px5\") pod \"12a6bc5b-33ea-4358-ab8b-17ca45d4e6e5\" (UID: \"12a6bc5b-33ea-4358-ab8b-17ca45d4e6e5\") " Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.088202 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/12a6bc5b-33ea-4358-ab8b-17ca45d4e6e5-operator-scripts\") pod \"12a6bc5b-33ea-4358-ab8b-17ca45d4e6e5\" (UID: \"12a6bc5b-33ea-4358-ab8b-17ca45d4e6e5\") " Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.088245 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b5263403-5544-4431-a713-f43156c25601-operator-scripts\") pod \"b5263403-5544-4431-a713-f43156c25601\" (UID: \"b5263403-5544-4431-a713-f43156c25601\") " Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.089081 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qdccg\" (UniqueName: \"kubernetes.io/projected/b02e2932-3602-40b2-b58d-f4c0f3384ad2-kube-api-access-qdccg\") on node \"crc\" DevicePath \"\"" Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.089113 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b02e2932-3602-40b2-b58d-f4c0f3384ad2-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.089138 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/12a6bc5b-33ea-4358-ab8b-17ca45d4e6e5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "12a6bc5b-33ea-4358-ab8b-17ca45d4e6e5" (UID: "12a6bc5b-33ea-4358-ab8b-17ca45d4e6e5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.089539 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b5263403-5544-4431-a713-f43156c25601-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b5263403-5544-4431-a713-f43156c25601" (UID: "b5263403-5544-4431-a713-f43156c25601"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.090815 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12a6bc5b-33ea-4358-ab8b-17ca45d4e6e5-kube-api-access-k2px5" (OuterVolumeSpecName: "kube-api-access-k2px5") pod "12a6bc5b-33ea-4358-ab8b-17ca45d4e6e5" (UID: "12a6bc5b-33ea-4358-ab8b-17ca45d4e6e5"). InnerVolumeSpecName "kube-api-access-k2px5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.092976 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5263403-5544-4431-a713-f43156c25601-kube-api-access-5mbzr" (OuterVolumeSpecName: "kube-api-access-5mbzr") pod "b5263403-5544-4431-a713-f43156c25601" (UID: "b5263403-5544-4431-a713-f43156c25601"). InnerVolumeSpecName "kube-api-access-5mbzr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.094872 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-a894-account-create-update-kmtkp" Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.189990 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-br9f8\" (UniqueName: \"kubernetes.io/projected/0f5c88e1-9441-4f5b-a7d1-a0b90ad0b928-kube-api-access-br9f8\") pod \"0f5c88e1-9441-4f5b-a7d1-a0b90ad0b928\" (UID: \"0f5c88e1-9441-4f5b-a7d1-a0b90ad0b928\") " Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.190057 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/76c29d0a-c2e2-466b-a3e4-6e26ab04d57c-operator-scripts\") pod \"76c29d0a-c2e2-466b-a3e4-6e26ab04d57c\" (UID: \"76c29d0a-c2e2-466b-a3e4-6e26ab04d57c\") " Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.190121 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xkkpq\" (UniqueName: \"kubernetes.io/projected/76c29d0a-c2e2-466b-a3e4-6e26ab04d57c-kube-api-access-xkkpq\") pod \"76c29d0a-c2e2-466b-a3e4-6e26ab04d57c\" (UID: \"76c29d0a-c2e2-466b-a3e4-6e26ab04d57c\") " Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.190160 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0f5c88e1-9441-4f5b-a7d1-a0b90ad0b928-operator-scripts\") pod \"0f5c88e1-9441-4f5b-a7d1-a0b90ad0b928\" (UID: \"0f5c88e1-9441-4f5b-a7d1-a0b90ad0b928\") " Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.190650 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5mbzr\" (UniqueName: \"kubernetes.io/projected/b5263403-5544-4431-a713-f43156c25601-kube-api-access-5mbzr\") on node \"crc\" DevicePath \"\"" Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.190674 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k2px5\" (UniqueName: \"kubernetes.io/projected/12a6bc5b-33ea-4358-ab8b-17ca45d4e6e5-kube-api-access-k2px5\") on node \"crc\" DevicePath \"\"" Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.190687 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/12a6bc5b-33ea-4358-ab8b-17ca45d4e6e5-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.190700 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b5263403-5544-4431-a713-f43156c25601-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.190686 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/76c29d0a-c2e2-466b-a3e4-6e26ab04d57c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod 
"76c29d0a-c2e2-466b-a3e4-6e26ab04d57c" (UID: "76c29d0a-c2e2-466b-a3e4-6e26ab04d57c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.190932 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f5c88e1-9441-4f5b-a7d1-a0b90ad0b928-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0f5c88e1-9441-4f5b-a7d1-a0b90ad0b928" (UID: "0f5c88e1-9441-4f5b-a7d1-a0b90ad0b928"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.193688 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f5c88e1-9441-4f5b-a7d1-a0b90ad0b928-kube-api-access-br9f8" (OuterVolumeSpecName: "kube-api-access-br9f8") pod "0f5c88e1-9441-4f5b-a7d1-a0b90ad0b928" (UID: "0f5c88e1-9441-4f5b-a7d1-a0b90ad0b928"). InnerVolumeSpecName "kube-api-access-br9f8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.195414 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/76c29d0a-c2e2-466b-a3e4-6e26ab04d57c-kube-api-access-xkkpq" (OuterVolumeSpecName: "kube-api-access-xkkpq") pod "76c29d0a-c2e2-466b-a3e4-6e26ab04d57c" (UID: "76c29d0a-c2e2-466b-a3e4-6e26ab04d57c"). InnerVolumeSpecName "kube-api-access-xkkpq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.293362 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-br9f8\" (UniqueName: \"kubernetes.io/projected/0f5c88e1-9441-4f5b-a7d1-a0b90ad0b928-kube-api-access-br9f8\") on node \"crc\" DevicePath \"\"" Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.293411 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/76c29d0a-c2e2-466b-a3e4-6e26ab04d57c-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.293428 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xkkpq\" (UniqueName: \"kubernetes.io/projected/76c29d0a-c2e2-466b-a3e4-6e26ab04d57c-kube-api-access-xkkpq\") on node \"crc\" DevicePath \"\"" Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.293440 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0f5c88e1-9441-4f5b-a7d1-a0b90ad0b928-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.435076 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-pmf24" event={"ID":"12a6bc5b-33ea-4358-ab8b-17ca45d4e6e5","Type":"ContainerDied","Data":"5728d4053b18a413560b863672c89d8760f65f1261cd457d09330ed1a6235e29"} Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.435197 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5728d4053b18a413560b863672c89d8760f65f1261cd457d09330ed1a6235e29" Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.435167 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-pmf24" Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.441280 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-a894-account-create-update-kmtkp" event={"ID":"0f5c88e1-9441-4f5b-a7d1-a0b90ad0b928","Type":"ContainerDied","Data":"b0cbdb75ad6c7bd880341e4680b6948fd09598ea5676d59819d3d0e977c8444b"} Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.441620 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b0cbdb75ad6c7bd880341e4680b6948fd09598ea5676d59819d3d0e977c8444b" Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.441339 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-a894-account-create-update-kmtkp" Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.443566 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-pzr6g" Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.443564 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-pzr6g" event={"ID":"76c29d0a-c2e2-466b-a3e4-6e26ab04d57c","Type":"ContainerDied","Data":"953f54d47b086fdd859193303700f61ab6444100e88df8a8cd3d698af427b8c6"} Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.443706 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="953f54d47b086fdd859193303700f61ab6444100e88df8a8cd3d698af427b8c6" Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.445426 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-51cb-account-create-update-4r7vn" Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.445419 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-51cb-account-create-update-4r7vn" event={"ID":"b02e2932-3602-40b2-b58d-f4c0f3384ad2","Type":"ContainerDied","Data":"bef165884fcb346c72e908c33b95b96ea7057a44cf4acccfe14019903c69b978"} Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.445549 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bef165884fcb346c72e908c33b95b96ea7057a44cf4acccfe14019903c69b978" Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.447965 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-a924-account-create-update-z6hhh" event={"ID":"b5263403-5544-4431-a713-f43156c25601","Type":"ContainerDied","Data":"ce9509bd29192fb7a8269de5569bdb9c4c2351425e2ae5fbe9ac715d003229f3"} Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.447989 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ce9509bd29192fb7a8269de5569bdb9c4c2351425e2ae5fbe9ac715d003229f3" Jan 22 07:22:50 crc kubenswrapper[4933]: I0122 07:22:50.448055 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-a924-account-create-update-z6hhh" Jan 22 07:22:51 crc kubenswrapper[4933]: I0122 07:22:51.486916 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-7nzw9"] Jan 22 07:22:51 crc kubenswrapper[4933]: E0122 07:22:51.487622 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f5c88e1-9441-4f5b-a7d1-a0b90ad0b928" containerName="mariadb-account-create-update" Jan 22 07:22:51 crc kubenswrapper[4933]: I0122 07:22:51.487641 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f5c88e1-9441-4f5b-a7d1-a0b90ad0b928" containerName="mariadb-account-create-update" Jan 22 07:22:51 crc kubenswrapper[4933]: E0122 07:22:51.487659 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec4d3e32-6483-4056-aa0c-fe34326a6c07" containerName="mariadb-database-create" Jan 22 07:22:51 crc kubenswrapper[4933]: I0122 07:22:51.487668 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec4d3e32-6483-4056-aa0c-fe34326a6c07" containerName="mariadb-database-create" Jan 22 07:22:51 crc kubenswrapper[4933]: E0122 07:22:51.487682 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12a6bc5b-33ea-4358-ab8b-17ca45d4e6e5" containerName="mariadb-database-create" Jan 22 07:22:51 crc kubenswrapper[4933]: I0122 07:22:51.487689 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="12a6bc5b-33ea-4358-ab8b-17ca45d4e6e5" containerName="mariadb-database-create" Jan 22 07:22:51 crc kubenswrapper[4933]: E0122 07:22:51.487706 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b02e2932-3602-40b2-b58d-f4c0f3384ad2" containerName="mariadb-account-create-update" Jan 22 07:22:51 crc kubenswrapper[4933]: I0122 07:22:51.487714 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="b02e2932-3602-40b2-b58d-f4c0f3384ad2" containerName="mariadb-account-create-update" Jan 22 07:22:51 crc kubenswrapper[4933]: E0122 07:22:51.487726 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76c29d0a-c2e2-466b-a3e4-6e26ab04d57c" containerName="mariadb-database-create" Jan 22 07:22:51 crc kubenswrapper[4933]: I0122 07:22:51.487734 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="76c29d0a-c2e2-466b-a3e4-6e26ab04d57c" containerName="mariadb-database-create" Jan 22 07:22:51 crc kubenswrapper[4933]: E0122 07:22:51.487749 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5263403-5544-4431-a713-f43156c25601" containerName="mariadb-account-create-update" Jan 22 07:22:51 crc kubenswrapper[4933]: I0122 07:22:51.487757 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5263403-5544-4431-a713-f43156c25601" containerName="mariadb-account-create-update" Jan 22 07:22:51 crc kubenswrapper[4933]: I0122 07:22:51.487966 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec4d3e32-6483-4056-aa0c-fe34326a6c07" containerName="mariadb-database-create" Jan 22 07:22:51 crc kubenswrapper[4933]: I0122 07:22:51.487986 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="b02e2932-3602-40b2-b58d-f4c0f3384ad2" containerName="mariadb-account-create-update" Jan 22 07:22:51 crc kubenswrapper[4933]: I0122 07:22:51.487998 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="12a6bc5b-33ea-4358-ab8b-17ca45d4e6e5" containerName="mariadb-database-create" Jan 22 07:22:51 crc kubenswrapper[4933]: I0122 07:22:51.488012 4933 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="b5263403-5544-4431-a713-f43156c25601" containerName="mariadb-account-create-update" Jan 22 07:22:51 crc kubenswrapper[4933]: I0122 07:22:51.488021 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="76c29d0a-c2e2-466b-a3e4-6e26ab04d57c" containerName="mariadb-database-create" Jan 22 07:22:51 crc kubenswrapper[4933]: I0122 07:22:51.488037 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f5c88e1-9441-4f5b-a7d1-a0b90ad0b928" containerName="mariadb-account-create-update" Jan 22 07:22:51 crc kubenswrapper[4933]: I0122 07:22:51.488847 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-7nzw9" Jan 22 07:22:51 crc kubenswrapper[4933]: I0122 07:22:51.493045 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 22 07:22:51 crc kubenswrapper[4933]: I0122 07:22:51.493168 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Jan 22 07:22:51 crc kubenswrapper[4933]: I0122 07:22:51.499675 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-4wpz4" Jan 22 07:22:51 crc kubenswrapper[4933]: I0122 07:22:51.500856 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-7nzw9"] Jan 22 07:22:51 crc kubenswrapper[4933]: I0122 07:22:51.515804 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1c3d1484-a1be-4965-96f9-3b0b1f7f83f0-scripts\") pod \"nova-cell0-conductor-db-sync-7nzw9\" (UID: \"1c3d1484-a1be-4965-96f9-3b0b1f7f83f0\") " pod="openstack/nova-cell0-conductor-db-sync-7nzw9" Jan 22 07:22:51 crc kubenswrapper[4933]: I0122 07:22:51.515885 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2n7nw\" (UniqueName: \"kubernetes.io/projected/1c3d1484-a1be-4965-96f9-3b0b1f7f83f0-kube-api-access-2n7nw\") pod \"nova-cell0-conductor-db-sync-7nzw9\" (UID: \"1c3d1484-a1be-4965-96f9-3b0b1f7f83f0\") " pod="openstack/nova-cell0-conductor-db-sync-7nzw9" Jan 22 07:22:51 crc kubenswrapper[4933]: I0122 07:22:51.515929 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c3d1484-a1be-4965-96f9-3b0b1f7f83f0-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-7nzw9\" (UID: \"1c3d1484-a1be-4965-96f9-3b0b1f7f83f0\") " pod="openstack/nova-cell0-conductor-db-sync-7nzw9" Jan 22 07:22:51 crc kubenswrapper[4933]: I0122 07:22:51.516001 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c3d1484-a1be-4965-96f9-3b0b1f7f83f0-config-data\") pod \"nova-cell0-conductor-db-sync-7nzw9\" (UID: \"1c3d1484-a1be-4965-96f9-3b0b1f7f83f0\") " pod="openstack/nova-cell0-conductor-db-sync-7nzw9" Jan 22 07:22:51 crc kubenswrapper[4933]: I0122 07:22:51.616461 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1c3d1484-a1be-4965-96f9-3b0b1f7f83f0-scripts\") pod \"nova-cell0-conductor-db-sync-7nzw9\" (UID: \"1c3d1484-a1be-4965-96f9-3b0b1f7f83f0\") " pod="openstack/nova-cell0-conductor-db-sync-7nzw9" Jan 22 07:22:51 crc kubenswrapper[4933]: I0122 07:22:51.616519 4933 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-2n7nw\" (UniqueName: \"kubernetes.io/projected/1c3d1484-a1be-4965-96f9-3b0b1f7f83f0-kube-api-access-2n7nw\") pod \"nova-cell0-conductor-db-sync-7nzw9\" (UID: \"1c3d1484-a1be-4965-96f9-3b0b1f7f83f0\") " pod="openstack/nova-cell0-conductor-db-sync-7nzw9" Jan 22 07:22:51 crc kubenswrapper[4933]: I0122 07:22:51.616570 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c3d1484-a1be-4965-96f9-3b0b1f7f83f0-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-7nzw9\" (UID: \"1c3d1484-a1be-4965-96f9-3b0b1f7f83f0\") " pod="openstack/nova-cell0-conductor-db-sync-7nzw9" Jan 22 07:22:51 crc kubenswrapper[4933]: I0122 07:22:51.617370 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c3d1484-a1be-4965-96f9-3b0b1f7f83f0-config-data\") pod \"nova-cell0-conductor-db-sync-7nzw9\" (UID: \"1c3d1484-a1be-4965-96f9-3b0b1f7f83f0\") " pod="openstack/nova-cell0-conductor-db-sync-7nzw9" Jan 22 07:22:51 crc kubenswrapper[4933]: I0122 07:22:51.621804 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1c3d1484-a1be-4965-96f9-3b0b1f7f83f0-scripts\") pod \"nova-cell0-conductor-db-sync-7nzw9\" (UID: \"1c3d1484-a1be-4965-96f9-3b0b1f7f83f0\") " pod="openstack/nova-cell0-conductor-db-sync-7nzw9" Jan 22 07:22:51 crc kubenswrapper[4933]: I0122 07:22:51.622629 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c3d1484-a1be-4965-96f9-3b0b1f7f83f0-config-data\") pod \"nova-cell0-conductor-db-sync-7nzw9\" (UID: \"1c3d1484-a1be-4965-96f9-3b0b1f7f83f0\") " pod="openstack/nova-cell0-conductor-db-sync-7nzw9" Jan 22 07:22:51 crc kubenswrapper[4933]: I0122 07:22:51.622890 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c3d1484-a1be-4965-96f9-3b0b1f7f83f0-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-7nzw9\" (UID: \"1c3d1484-a1be-4965-96f9-3b0b1f7f83f0\") " pod="openstack/nova-cell0-conductor-db-sync-7nzw9" Jan 22 07:22:51 crc kubenswrapper[4933]: I0122 07:22:51.632183 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2n7nw\" (UniqueName: \"kubernetes.io/projected/1c3d1484-a1be-4965-96f9-3b0b1f7f83f0-kube-api-access-2n7nw\") pod \"nova-cell0-conductor-db-sync-7nzw9\" (UID: \"1c3d1484-a1be-4965-96f9-3b0b1f7f83f0\") " pod="openstack/nova-cell0-conductor-db-sync-7nzw9" Jan 22 07:22:51 crc kubenswrapper[4933]: I0122 07:22:51.814867 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-7nzw9" Jan 22 07:22:52 crc kubenswrapper[4933]: I0122 07:22:52.073856 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-7nzw9"] Jan 22 07:22:52 crc kubenswrapper[4933]: W0122 07:22:52.074875 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1c3d1484_a1be_4965_96f9_3b0b1f7f83f0.slice/crio-f6a5fbebc9c7175c58b74f50bd89aba5a7ff24a5eb500fd0fab6f151551a9a7e WatchSource:0}: Error finding container f6a5fbebc9c7175c58b74f50bd89aba5a7ff24a5eb500fd0fab6f151551a9a7e: Status 404 returned error can't find the container with id f6a5fbebc9c7175c58b74f50bd89aba5a7ff24a5eb500fd0fab6f151551a9a7e Jan 22 07:22:52 crc kubenswrapper[4933]: I0122 07:22:52.470610 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-7nzw9" event={"ID":"1c3d1484-a1be-4965-96f9-3b0b1f7f83f0","Type":"ContainerStarted","Data":"f6a5fbebc9c7175c58b74f50bd89aba5a7ff24a5eb500fd0fab6f151551a9a7e"} Jan 22 07:22:53 crc kubenswrapper[4933]: I0122 07:22:53.481988 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-7nzw9" event={"ID":"1c3d1484-a1be-4965-96f9-3b0b1f7f83f0","Type":"ContainerStarted","Data":"430cacf2b01c899fd6ac8fd6eb06a620fcdd91a01354deb4f8fad4294ad902b7"} Jan 22 07:22:53 crc kubenswrapper[4933]: I0122 07:22:53.504146 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-7nzw9" podStartSLOduration=2.504122385 podStartE2EDuration="2.504122385s" podCreationTimestamp="2026-01-22 07:22:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:22:53.502044244 +0000 UTC m=+5821.339169607" watchObservedRunningTime="2026-01-22 07:22:53.504122385 +0000 UTC m=+5821.341247778" Jan 22 07:22:59 crc kubenswrapper[4933]: I0122 07:22:59.532896 4933 generic.go:334] "Generic (PLEG): container finished" podID="1c3d1484-a1be-4965-96f9-3b0b1f7f83f0" containerID="430cacf2b01c899fd6ac8fd6eb06a620fcdd91a01354deb4f8fad4294ad902b7" exitCode=0 Jan 22 07:22:59 crc kubenswrapper[4933]: I0122 07:22:59.533000 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-7nzw9" event={"ID":"1c3d1484-a1be-4965-96f9-3b0b1f7f83f0","Type":"ContainerDied","Data":"430cacf2b01c899fd6ac8fd6eb06a620fcdd91a01354deb4f8fad4294ad902b7"} Jan 22 07:23:00 crc kubenswrapper[4933]: I0122 07:23:00.892887 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-7nzw9" Jan 22 07:23:00 crc kubenswrapper[4933]: I0122 07:23:00.998592 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1c3d1484-a1be-4965-96f9-3b0b1f7f83f0-scripts\") pod \"1c3d1484-a1be-4965-96f9-3b0b1f7f83f0\" (UID: \"1c3d1484-a1be-4965-96f9-3b0b1f7f83f0\") " Jan 22 07:23:00 crc kubenswrapper[4933]: I0122 07:23:00.998994 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c3d1484-a1be-4965-96f9-3b0b1f7f83f0-combined-ca-bundle\") pod \"1c3d1484-a1be-4965-96f9-3b0b1f7f83f0\" (UID: \"1c3d1484-a1be-4965-96f9-3b0b1f7f83f0\") " Jan 22 07:23:00 crc kubenswrapper[4933]: I0122 07:23:00.999146 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2n7nw\" (UniqueName: \"kubernetes.io/projected/1c3d1484-a1be-4965-96f9-3b0b1f7f83f0-kube-api-access-2n7nw\") pod \"1c3d1484-a1be-4965-96f9-3b0b1f7f83f0\" (UID: \"1c3d1484-a1be-4965-96f9-3b0b1f7f83f0\") " Jan 22 07:23:00 crc kubenswrapper[4933]: I0122 07:23:00.999196 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c3d1484-a1be-4965-96f9-3b0b1f7f83f0-config-data\") pod \"1c3d1484-a1be-4965-96f9-3b0b1f7f83f0\" (UID: \"1c3d1484-a1be-4965-96f9-3b0b1f7f83f0\") " Jan 22 07:23:01 crc kubenswrapper[4933]: I0122 07:23:01.004452 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c3d1484-a1be-4965-96f9-3b0b1f7f83f0-scripts" (OuterVolumeSpecName: "scripts") pod "1c3d1484-a1be-4965-96f9-3b0b1f7f83f0" (UID: "1c3d1484-a1be-4965-96f9-3b0b1f7f83f0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:23:01 crc kubenswrapper[4933]: I0122 07:23:01.009159 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c3d1484-a1be-4965-96f9-3b0b1f7f83f0-kube-api-access-2n7nw" (OuterVolumeSpecName: "kube-api-access-2n7nw") pod "1c3d1484-a1be-4965-96f9-3b0b1f7f83f0" (UID: "1c3d1484-a1be-4965-96f9-3b0b1f7f83f0"). InnerVolumeSpecName "kube-api-access-2n7nw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:23:01 crc kubenswrapper[4933]: I0122 07:23:01.027912 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c3d1484-a1be-4965-96f9-3b0b1f7f83f0-config-data" (OuterVolumeSpecName: "config-data") pod "1c3d1484-a1be-4965-96f9-3b0b1f7f83f0" (UID: "1c3d1484-a1be-4965-96f9-3b0b1f7f83f0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:23:01 crc kubenswrapper[4933]: I0122 07:23:01.028452 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c3d1484-a1be-4965-96f9-3b0b1f7f83f0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1c3d1484-a1be-4965-96f9-3b0b1f7f83f0" (UID: "1c3d1484-a1be-4965-96f9-3b0b1f7f83f0"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:23:01 crc kubenswrapper[4933]: I0122 07:23:01.101503 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c3d1484-a1be-4965-96f9-3b0b1f7f83f0-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 07:23:01 crc kubenswrapper[4933]: I0122 07:23:01.101533 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1c3d1484-a1be-4965-96f9-3b0b1f7f83f0-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:23:01 crc kubenswrapper[4933]: I0122 07:23:01.101543 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c3d1484-a1be-4965-96f9-3b0b1f7f83f0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:23:01 crc kubenswrapper[4933]: I0122 07:23:01.101553 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2n7nw\" (UniqueName: \"kubernetes.io/projected/1c3d1484-a1be-4965-96f9-3b0b1f7f83f0-kube-api-access-2n7nw\") on node \"crc\" DevicePath \"\"" Jan 22 07:23:01 crc kubenswrapper[4933]: I0122 07:23:01.556628 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-7nzw9" event={"ID":"1c3d1484-a1be-4965-96f9-3b0b1f7f83f0","Type":"ContainerDied","Data":"f6a5fbebc9c7175c58b74f50bd89aba5a7ff24a5eb500fd0fab6f151551a9a7e"} Jan 22 07:23:01 crc kubenswrapper[4933]: I0122 07:23:01.556670 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f6a5fbebc9c7175c58b74f50bd89aba5a7ff24a5eb500fd0fab6f151551a9a7e" Jan 22 07:23:01 crc kubenswrapper[4933]: I0122 07:23:01.556702 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-7nzw9" Jan 22 07:23:01 crc kubenswrapper[4933]: I0122 07:23:01.633007 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 22 07:23:01 crc kubenswrapper[4933]: E0122 07:23:01.633537 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c3d1484-a1be-4965-96f9-3b0b1f7f83f0" containerName="nova-cell0-conductor-db-sync" Jan 22 07:23:01 crc kubenswrapper[4933]: I0122 07:23:01.633598 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c3d1484-a1be-4965-96f9-3b0b1f7f83f0" containerName="nova-cell0-conductor-db-sync" Jan 22 07:23:01 crc kubenswrapper[4933]: I0122 07:23:01.633812 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c3d1484-a1be-4965-96f9-3b0b1f7f83f0" containerName="nova-cell0-conductor-db-sync" Jan 22 07:23:01 crc kubenswrapper[4933]: I0122 07:23:01.634440 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 22 07:23:01 crc kubenswrapper[4933]: I0122 07:23:01.637015 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-4wpz4" Jan 22 07:23:01 crc kubenswrapper[4933]: I0122 07:23:01.637928 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 22 07:23:01 crc kubenswrapper[4933]: I0122 07:23:01.647961 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 22 07:23:01 crc kubenswrapper[4933]: I0122 07:23:01.813843 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9gxml\" (UniqueName: \"kubernetes.io/projected/db56d8cd-22b3-464a-a259-4f7e6c01c565-kube-api-access-9gxml\") pod \"nova-cell0-conductor-0\" (UID: \"db56d8cd-22b3-464a-a259-4f7e6c01c565\") " pod="openstack/nova-cell0-conductor-0" Jan 22 07:23:01 crc kubenswrapper[4933]: I0122 07:23:01.813907 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db56d8cd-22b3-464a-a259-4f7e6c01c565-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"db56d8cd-22b3-464a-a259-4f7e6c01c565\") " pod="openstack/nova-cell0-conductor-0" Jan 22 07:23:01 crc kubenswrapper[4933]: I0122 07:23:01.814274 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db56d8cd-22b3-464a-a259-4f7e6c01c565-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"db56d8cd-22b3-464a-a259-4f7e6c01c565\") " pod="openstack/nova-cell0-conductor-0" Jan 22 07:23:01 crc kubenswrapper[4933]: I0122 07:23:01.916205 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db56d8cd-22b3-464a-a259-4f7e6c01c565-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"db56d8cd-22b3-464a-a259-4f7e6c01c565\") " pod="openstack/nova-cell0-conductor-0" Jan 22 07:23:01 crc kubenswrapper[4933]: I0122 07:23:01.916316 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9gxml\" (UniqueName: \"kubernetes.io/projected/db56d8cd-22b3-464a-a259-4f7e6c01c565-kube-api-access-9gxml\") pod \"nova-cell0-conductor-0\" (UID: \"db56d8cd-22b3-464a-a259-4f7e6c01c565\") " pod="openstack/nova-cell0-conductor-0" Jan 22 07:23:01 crc kubenswrapper[4933]: I0122 07:23:01.916361 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db56d8cd-22b3-464a-a259-4f7e6c01c565-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"db56d8cd-22b3-464a-a259-4f7e6c01c565\") " pod="openstack/nova-cell0-conductor-0" Jan 22 07:23:01 crc kubenswrapper[4933]: I0122 07:23:01.920306 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db56d8cd-22b3-464a-a259-4f7e6c01c565-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"db56d8cd-22b3-464a-a259-4f7e6c01c565\") " pod="openstack/nova-cell0-conductor-0" Jan 22 07:23:01 crc kubenswrapper[4933]: I0122 07:23:01.933112 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db56d8cd-22b3-464a-a259-4f7e6c01c565-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" 
(UID: \"db56d8cd-22b3-464a-a259-4f7e6c01c565\") " pod="openstack/nova-cell0-conductor-0" Jan 22 07:23:01 crc kubenswrapper[4933]: I0122 07:23:01.933529 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9gxml\" (UniqueName: \"kubernetes.io/projected/db56d8cd-22b3-464a-a259-4f7e6c01c565-kube-api-access-9gxml\") pod \"nova-cell0-conductor-0\" (UID: \"db56d8cd-22b3-464a-a259-4f7e6c01c565\") " pod="openstack/nova-cell0-conductor-0" Jan 22 07:23:01 crc kubenswrapper[4933]: I0122 07:23:01.950639 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 22 07:23:02 crc kubenswrapper[4933]: I0122 07:23:02.364545 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 22 07:23:02 crc kubenswrapper[4933]: I0122 07:23:02.566773 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"db56d8cd-22b3-464a-a259-4f7e6c01c565","Type":"ContainerStarted","Data":"9bf1b23f071462b17f2abe04a8a0bddab77f6d60fa7e798a879e48ac008229d3"} Jan 22 07:23:02 crc kubenswrapper[4933]: I0122 07:23:02.567213 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Jan 22 07:23:02 crc kubenswrapper[4933]: I0122 07:23:02.567231 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"db56d8cd-22b3-464a-a259-4f7e6c01c565","Type":"ContainerStarted","Data":"a8bdcf21fe15c1da1ee41f9877d45bc822a29ca58d09f20c53224bfaaede7e8c"} Jan 22 07:23:02 crc kubenswrapper[4933]: I0122 07:23:02.604822 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=1.604789173 podStartE2EDuration="1.604789173s" podCreationTimestamp="2026-01-22 07:23:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:23:02.58571629 +0000 UTC m=+5830.422841643" watchObservedRunningTime="2026-01-22 07:23:02.604789173 +0000 UTC m=+5830.441914526" Jan 22 07:23:04 crc kubenswrapper[4933]: I0122 07:23:04.874778 4933 scope.go:117] "RemoveContainer" containerID="8ae27aad85561ed4352a9f367620378b6df429f1a1fb0246c209f26794e78c15" Jan 22 07:23:04 crc kubenswrapper[4933]: I0122 07:23:04.898472 4933 scope.go:117] "RemoveContainer" containerID="32d75eb167ed591863169b95b4cebbb71b547d938f20085555d7a0d80d1d00b6" Jan 22 07:23:04 crc kubenswrapper[4933]: I0122 07:23:04.922885 4933 scope.go:117] "RemoveContainer" containerID="c052d1e4e59b6201f3685f885e1878013fbd4d3c4483a43be02d861567ba6523" Jan 22 07:23:04 crc kubenswrapper[4933]: I0122 07:23:04.945869 4933 scope.go:117] "RemoveContainer" containerID="5989abb962e233737d57849de36d6565e9813de433e42a5fe0adab89dd1ca89a" Jan 22 07:23:11 crc kubenswrapper[4933]: I0122 07:23:11.977264 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.466763 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-d5cfk"] Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.468150 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-d5cfk" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.470212 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.470326 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.482632 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-d5cfk"] Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.612418 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e955489f-8aff-49bd-848f-ac5bc3cd398d-scripts\") pod \"nova-cell0-cell-mapping-d5cfk\" (UID: \"e955489f-8aff-49bd-848f-ac5bc3cd398d\") " pod="openstack/nova-cell0-cell-mapping-d5cfk" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.612540 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hdbgb\" (UniqueName: \"kubernetes.io/projected/e955489f-8aff-49bd-848f-ac5bc3cd398d-kube-api-access-hdbgb\") pod \"nova-cell0-cell-mapping-d5cfk\" (UID: \"e955489f-8aff-49bd-848f-ac5bc3cd398d\") " pod="openstack/nova-cell0-cell-mapping-d5cfk" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.612570 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e955489f-8aff-49bd-848f-ac5bc3cd398d-config-data\") pod \"nova-cell0-cell-mapping-d5cfk\" (UID: \"e955489f-8aff-49bd-848f-ac5bc3cd398d\") " pod="openstack/nova-cell0-cell-mapping-d5cfk" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.612702 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e955489f-8aff-49bd-848f-ac5bc3cd398d-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-d5cfk\" (UID: \"e955489f-8aff-49bd-848f-ac5bc3cd398d\") " pod="openstack/nova-cell0-cell-mapping-d5cfk" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.649501 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.651292 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.652731 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.666789 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.672208 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.676437 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.677053 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.700940 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.710804 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.712618 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.714558 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e955489f-8aff-49bd-848f-ac5bc3cd398d-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-d5cfk\" (UID: \"e955489f-8aff-49bd-848f-ac5bc3cd398d\") " pod="openstack/nova-cell0-cell-mapping-d5cfk" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.714672 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e955489f-8aff-49bd-848f-ac5bc3cd398d-scripts\") pod \"nova-cell0-cell-mapping-d5cfk\" (UID: \"e955489f-8aff-49bd-848f-ac5bc3cd398d\") " pod="openstack/nova-cell0-cell-mapping-d5cfk" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.714742 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hdbgb\" (UniqueName: \"kubernetes.io/projected/e955489f-8aff-49bd-848f-ac5bc3cd398d-kube-api-access-hdbgb\") pod \"nova-cell0-cell-mapping-d5cfk\" (UID: \"e955489f-8aff-49bd-848f-ac5bc3cd398d\") " pod="openstack/nova-cell0-cell-mapping-d5cfk" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.714763 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e955489f-8aff-49bd-848f-ac5bc3cd398d-config-data\") pod \"nova-cell0-cell-mapping-d5cfk\" (UID: \"e955489f-8aff-49bd-848f-ac5bc3cd398d\") " pod="openstack/nova-cell0-cell-mapping-d5cfk" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.717359 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.727734 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e955489f-8aff-49bd-848f-ac5bc3cd398d-scripts\") pod \"nova-cell0-cell-mapping-d5cfk\" (UID: \"e955489f-8aff-49bd-848f-ac5bc3cd398d\") " pod="openstack/nova-cell0-cell-mapping-d5cfk" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.760196 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e955489f-8aff-49bd-848f-ac5bc3cd398d-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-d5cfk\" (UID: \"e955489f-8aff-49bd-848f-ac5bc3cd398d\") " pod="openstack/nova-cell0-cell-mapping-d5cfk" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.760674 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/e955489f-8aff-49bd-848f-ac5bc3cd398d-config-data\") pod \"nova-cell0-cell-mapping-d5cfk\" (UID: \"e955489f-8aff-49bd-848f-ac5bc3cd398d\") " pod="openstack/nova-cell0-cell-mapping-d5cfk" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.810309 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hdbgb\" (UniqueName: \"kubernetes.io/projected/e955489f-8aff-49bd-848f-ac5bc3cd398d-kube-api-access-hdbgb\") pod \"nova-cell0-cell-mapping-d5cfk\" (UID: \"e955489f-8aff-49bd-848f-ac5bc3cd398d\") " pod="openstack/nova-cell0-cell-mapping-d5cfk" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.816705 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32485906-e101-4c63-ba20-1c5385b46e47-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"32485906-e101-4c63-ba20-1c5385b46e47\") " pod="openstack/nova-metadata-0" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.816791 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/324012ea-092d-423e-a452-7e9bb177bd61-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"324012ea-092d-423e-a452-7e9bb177bd61\") " pod="openstack/nova-api-0" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.816884 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8dpmd\" (UniqueName: \"kubernetes.io/projected/32485906-e101-4c63-ba20-1c5385b46e47-kube-api-access-8dpmd\") pod \"nova-metadata-0\" (UID: \"32485906-e101-4c63-ba20-1c5385b46e47\") " pod="openstack/nova-metadata-0" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.816932 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/32485906-e101-4c63-ba20-1c5385b46e47-config-data\") pod \"nova-metadata-0\" (UID: \"32485906-e101-4c63-ba20-1c5385b46e47\") " pod="openstack/nova-metadata-0" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.816959 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/32485906-e101-4c63-ba20-1c5385b46e47-logs\") pod \"nova-metadata-0\" (UID: \"32485906-e101-4c63-ba20-1c5385b46e47\") " pod="openstack/nova-metadata-0" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.816983 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.817007 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.817041 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4jx8b\" (UniqueName: 
\"kubernetes.io/projected/758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60-kube-api-access-4jx8b\") pod \"nova-cell1-novncproxy-0\" (UID: \"758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.817104 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w8pxg\" (UniqueName: \"kubernetes.io/projected/324012ea-092d-423e-a452-7e9bb177bd61-kube-api-access-w8pxg\") pod \"nova-api-0\" (UID: \"324012ea-092d-423e-a452-7e9bb177bd61\") " pod="openstack/nova-api-0" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.817150 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/324012ea-092d-423e-a452-7e9bb177bd61-config-data\") pod \"nova-api-0\" (UID: \"324012ea-092d-423e-a452-7e9bb177bd61\") " pod="openstack/nova-api-0" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.817175 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/324012ea-092d-423e-a452-7e9bb177bd61-logs\") pod \"nova-api-0\" (UID: \"324012ea-092d-423e-a452-7e9bb177bd61\") " pod="openstack/nova-api-0" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.821647 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.903080 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.904592 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.907187 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.911881 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.919110 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32485906-e101-4c63-ba20-1c5385b46e47-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"32485906-e101-4c63-ba20-1c5385b46e47\") " pod="openstack/nova-metadata-0" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.919155 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/324012ea-092d-423e-a452-7e9bb177bd61-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"324012ea-092d-423e-a452-7e9bb177bd61\") " pod="openstack/nova-api-0" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.919237 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8dpmd\" (UniqueName: \"kubernetes.io/projected/32485906-e101-4c63-ba20-1c5385b46e47-kube-api-access-8dpmd\") pod \"nova-metadata-0\" (UID: \"32485906-e101-4c63-ba20-1c5385b46e47\") " pod="openstack/nova-metadata-0" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.919287 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/32485906-e101-4c63-ba20-1c5385b46e47-config-data\") pod \"nova-metadata-0\" (UID: \"32485906-e101-4c63-ba20-1c5385b46e47\") " 
pod="openstack/nova-metadata-0" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.919329 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/32485906-e101-4c63-ba20-1c5385b46e47-logs\") pod \"nova-metadata-0\" (UID: \"32485906-e101-4c63-ba20-1c5385b46e47\") " pod="openstack/nova-metadata-0" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.919357 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.919385 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.919422 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4jx8b\" (UniqueName: \"kubernetes.io/projected/758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60-kube-api-access-4jx8b\") pod \"nova-cell1-novncproxy-0\" (UID: \"758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.919475 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w8pxg\" (UniqueName: \"kubernetes.io/projected/324012ea-092d-423e-a452-7e9bb177bd61-kube-api-access-w8pxg\") pod \"nova-api-0\" (UID: \"324012ea-092d-423e-a452-7e9bb177bd61\") " pod="openstack/nova-api-0" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.919523 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/324012ea-092d-423e-a452-7e9bb177bd61-config-data\") pod \"nova-api-0\" (UID: \"324012ea-092d-423e-a452-7e9bb177bd61\") " pod="openstack/nova-api-0" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.919541 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/324012ea-092d-423e-a452-7e9bb177bd61-logs\") pod \"nova-api-0\" (UID: \"324012ea-092d-423e-a452-7e9bb177bd61\") " pod="openstack/nova-api-0" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.919991 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/324012ea-092d-423e-a452-7e9bb177bd61-logs\") pod \"nova-api-0\" (UID: \"324012ea-092d-423e-a452-7e9bb177bd61\") " pod="openstack/nova-api-0" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.928253 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-65df48cf75-dnvbm"] Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.929546 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-65df48cf75-dnvbm"] Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.929628 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-65df48cf75-dnvbm" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.930777 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/32485906-e101-4c63-ba20-1c5385b46e47-logs\") pod \"nova-metadata-0\" (UID: \"32485906-e101-4c63-ba20-1c5385b46e47\") " pod="openstack/nova-metadata-0" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.939894 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.946891 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.962491 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32485906-e101-4c63-ba20-1c5385b46e47-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"32485906-e101-4c63-ba20-1c5385b46e47\") " pod="openstack/nova-metadata-0" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.965761 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/32485906-e101-4c63-ba20-1c5385b46e47-config-data\") pod \"nova-metadata-0\" (UID: \"32485906-e101-4c63-ba20-1c5385b46e47\") " pod="openstack/nova-metadata-0" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.966270 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w8pxg\" (UniqueName: \"kubernetes.io/projected/324012ea-092d-423e-a452-7e9bb177bd61-kube-api-access-w8pxg\") pod \"nova-api-0\" (UID: \"324012ea-092d-423e-a452-7e9bb177bd61\") " pod="openstack/nova-api-0" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.966341 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/324012ea-092d-423e-a452-7e9bb177bd61-config-data\") pod \"nova-api-0\" (UID: \"324012ea-092d-423e-a452-7e9bb177bd61\") " pod="openstack/nova-api-0" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.966990 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4jx8b\" (UniqueName: \"kubernetes.io/projected/758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60-kube-api-access-4jx8b\") pod \"nova-cell1-novncproxy-0\" (UID: \"758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.969445 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/324012ea-092d-423e-a452-7e9bb177bd61-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"324012ea-092d-423e-a452-7e9bb177bd61\") " pod="openstack/nova-api-0" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.973664 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 22 07:23:12 crc kubenswrapper[4933]: I0122 07:23:12.978770 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8dpmd\" (UniqueName: \"kubernetes.io/projected/32485906-e101-4c63-ba20-1c5385b46e47-kube-api-access-8dpmd\") pod \"nova-metadata-0\" (UID: \"32485906-e101-4c63-ba20-1c5385b46e47\") " pod="openstack/nova-metadata-0" Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.001501 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.028251 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7d881258-6f0e-4c52-b036-3805dba06666-ovsdbserver-nb\") pod \"dnsmasq-dns-65df48cf75-dnvbm\" (UID: \"7d881258-6f0e-4c52-b036-3805dba06666\") " pod="openstack/dnsmasq-dns-65df48cf75-dnvbm" Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.028308 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e2edc4a-a87d-4d50-bbd1-5006af141eb7-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"3e2edc4a-a87d-4d50-bbd1-5006af141eb7\") " pod="openstack/nova-scheduler-0" Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.028352 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n2m58\" (UniqueName: \"kubernetes.io/projected/3e2edc4a-a87d-4d50-bbd1-5006af141eb7-kube-api-access-n2m58\") pod \"nova-scheduler-0\" (UID: \"3e2edc4a-a87d-4d50-bbd1-5006af141eb7\") " pod="openstack/nova-scheduler-0" Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.028422 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d881258-6f0e-4c52-b036-3805dba06666-config\") pod \"dnsmasq-dns-65df48cf75-dnvbm\" (UID: \"7d881258-6f0e-4c52-b036-3805dba06666\") " pod="openstack/dnsmasq-dns-65df48cf75-dnvbm" Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.028443 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7d881258-6f0e-4c52-b036-3805dba06666-ovsdbserver-sb\") pod \"dnsmasq-dns-65df48cf75-dnvbm\" (UID: \"7d881258-6f0e-4c52-b036-3805dba06666\") " pod="openstack/dnsmasq-dns-65df48cf75-dnvbm" Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.028471 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7d881258-6f0e-4c52-b036-3805dba06666-dns-svc\") pod \"dnsmasq-dns-65df48cf75-dnvbm\" (UID: \"7d881258-6f0e-4c52-b036-3805dba06666\") " pod="openstack/dnsmasq-dns-65df48cf75-dnvbm" Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.028528 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8p8pk\" (UniqueName: \"kubernetes.io/projected/7d881258-6f0e-4c52-b036-3805dba06666-kube-api-access-8p8pk\") pod \"dnsmasq-dns-65df48cf75-dnvbm\" (UID: \"7d881258-6f0e-4c52-b036-3805dba06666\") " pod="openstack/dnsmasq-dns-65df48cf75-dnvbm" Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.028562 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e2edc4a-a87d-4d50-bbd1-5006af141eb7-config-data\") pod \"nova-scheduler-0\" (UID: \"3e2edc4a-a87d-4d50-bbd1-5006af141eb7\") " pod="openstack/nova-scheduler-0" Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.087153 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-d5cfk" Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.137531 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8p8pk\" (UniqueName: \"kubernetes.io/projected/7d881258-6f0e-4c52-b036-3805dba06666-kube-api-access-8p8pk\") pod \"dnsmasq-dns-65df48cf75-dnvbm\" (UID: \"7d881258-6f0e-4c52-b036-3805dba06666\") " pod="openstack/dnsmasq-dns-65df48cf75-dnvbm" Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.137652 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e2edc4a-a87d-4d50-bbd1-5006af141eb7-config-data\") pod \"nova-scheduler-0\" (UID: \"3e2edc4a-a87d-4d50-bbd1-5006af141eb7\") " pod="openstack/nova-scheduler-0" Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.137710 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7d881258-6f0e-4c52-b036-3805dba06666-ovsdbserver-nb\") pod \"dnsmasq-dns-65df48cf75-dnvbm\" (UID: \"7d881258-6f0e-4c52-b036-3805dba06666\") " pod="openstack/dnsmasq-dns-65df48cf75-dnvbm" Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.137737 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e2edc4a-a87d-4d50-bbd1-5006af141eb7-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"3e2edc4a-a87d-4d50-bbd1-5006af141eb7\") " pod="openstack/nova-scheduler-0" Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.137823 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n2m58\" (UniqueName: \"kubernetes.io/projected/3e2edc4a-a87d-4d50-bbd1-5006af141eb7-kube-api-access-n2m58\") pod \"nova-scheduler-0\" (UID: \"3e2edc4a-a87d-4d50-bbd1-5006af141eb7\") " pod="openstack/nova-scheduler-0" Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.137997 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d881258-6f0e-4c52-b036-3805dba06666-config\") pod \"dnsmasq-dns-65df48cf75-dnvbm\" (UID: \"7d881258-6f0e-4c52-b036-3805dba06666\") " pod="openstack/dnsmasq-dns-65df48cf75-dnvbm" Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.138022 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7d881258-6f0e-4c52-b036-3805dba06666-ovsdbserver-sb\") pod \"dnsmasq-dns-65df48cf75-dnvbm\" (UID: \"7d881258-6f0e-4c52-b036-3805dba06666\") " pod="openstack/dnsmasq-dns-65df48cf75-dnvbm" Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.138106 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7d881258-6f0e-4c52-b036-3805dba06666-dns-svc\") pod \"dnsmasq-dns-65df48cf75-dnvbm\" (UID: \"7d881258-6f0e-4c52-b036-3805dba06666\") " pod="openstack/dnsmasq-dns-65df48cf75-dnvbm" Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.139146 4933 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7d881258-6f0e-4c52-b036-3805dba06666-ovsdbserver-nb\") pod \"dnsmasq-dns-65df48cf75-dnvbm\" (UID: \"7d881258-6f0e-4c52-b036-3805dba06666\") " pod="openstack/dnsmasq-dns-65df48cf75-dnvbm" Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.141157 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7d881258-6f0e-4c52-b036-3805dba06666-dns-svc\") pod \"dnsmasq-dns-65df48cf75-dnvbm\" (UID: \"7d881258-6f0e-4c52-b036-3805dba06666\") " pod="openstack/dnsmasq-dns-65df48cf75-dnvbm" Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.141238 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7d881258-6f0e-4c52-b036-3805dba06666-ovsdbserver-sb\") pod \"dnsmasq-dns-65df48cf75-dnvbm\" (UID: \"7d881258-6f0e-4c52-b036-3805dba06666\") " pod="openstack/dnsmasq-dns-65df48cf75-dnvbm" Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.143348 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e2edc4a-a87d-4d50-bbd1-5006af141eb7-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"3e2edc4a-a87d-4d50-bbd1-5006af141eb7\") " pod="openstack/nova-scheduler-0" Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.146697 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e2edc4a-a87d-4d50-bbd1-5006af141eb7-config-data\") pod \"nova-scheduler-0\" (UID: \"3e2edc4a-a87d-4d50-bbd1-5006af141eb7\") " pod="openstack/nova-scheduler-0" Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.159714 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d881258-6f0e-4c52-b036-3805dba06666-config\") pod \"dnsmasq-dns-65df48cf75-dnvbm\" (UID: \"7d881258-6f0e-4c52-b036-3805dba06666\") " pod="openstack/dnsmasq-dns-65df48cf75-dnvbm" Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.162365 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8p8pk\" (UniqueName: \"kubernetes.io/projected/7d881258-6f0e-4c52-b036-3805dba06666-kube-api-access-8p8pk\") pod \"dnsmasq-dns-65df48cf75-dnvbm\" (UID: \"7d881258-6f0e-4c52-b036-3805dba06666\") " pod="openstack/dnsmasq-dns-65df48cf75-dnvbm" Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.165473 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n2m58\" (UniqueName: \"kubernetes.io/projected/3e2edc4a-a87d-4d50-bbd1-5006af141eb7-kube-api-access-n2m58\") pod \"nova-scheduler-0\" (UID: \"3e2edc4a-a87d-4d50-bbd1-5006af141eb7\") " pod="openstack/nova-scheduler-0" Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.173425 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.430933 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.458558 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-65df48cf75-dnvbm" Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.546151 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.567943 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.729727 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.740440 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"324012ea-092d-423e-a452-7e9bb177bd61","Type":"ContainerStarted","Data":"38f404aead6b5dbeb0f1b0c98a91113c77814ca0e17261b1f38a30afe3508bb7"} Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.744908 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60","Type":"ContainerStarted","Data":"f1fc07f2f3958c8bb99542828f87c3ccb5c04c1256f6d413fa19923a4805b2e1"} Jan 22 07:23:13 crc kubenswrapper[4933]: W0122 07:23:13.777335 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode955489f_8aff_49bd_848f_ac5bc3cd398d.slice/crio-44938a0ee23a0be8cc20511ab64ef181fd72fa02df112d4fd41ea6cecb1eb2f1 WatchSource:0}: Error finding container 44938a0ee23a0be8cc20511ab64ef181fd72fa02df112d4fd41ea6cecb1eb2f1: Status 404 returned error can't find the container with id 44938a0ee23a0be8cc20511ab64ef181fd72fa02df112d4fd41ea6cecb1eb2f1 Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.802088 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-d5cfk"] Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.846335 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-4vth5"] Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.850280 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-4vth5" Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.853569 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.858443 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.880826 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-4vth5"] Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.980882 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c406803-95c9-46e7-953f-0c56e8daa84e-scripts\") pod \"nova-cell1-conductor-db-sync-4vth5\" (UID: \"6c406803-95c9-46e7-953f-0c56e8daa84e\") " pod="openstack/nova-cell1-conductor-db-sync-4vth5" Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.980939 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c406803-95c9-46e7-953f-0c56e8daa84e-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-4vth5\" (UID: \"6c406803-95c9-46e7-953f-0c56e8daa84e\") " pod="openstack/nova-cell1-conductor-db-sync-4vth5" Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.981016 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c406803-95c9-46e7-953f-0c56e8daa84e-config-data\") pod \"nova-cell1-conductor-db-sync-4vth5\" (UID: \"6c406803-95c9-46e7-953f-0c56e8daa84e\") " pod="openstack/nova-cell1-conductor-db-sync-4vth5" Jan 22 07:23:13 crc kubenswrapper[4933]: I0122 07:23:13.981149 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7mqhn\" (UniqueName: \"kubernetes.io/projected/6c406803-95c9-46e7-953f-0c56e8daa84e-kube-api-access-7mqhn\") pod \"nova-cell1-conductor-db-sync-4vth5\" (UID: \"6c406803-95c9-46e7-953f-0c56e8daa84e\") " pod="openstack/nova-cell1-conductor-db-sync-4vth5" Jan 22 07:23:14 crc kubenswrapper[4933]: I0122 07:23:14.082285 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7mqhn\" (UniqueName: \"kubernetes.io/projected/6c406803-95c9-46e7-953f-0c56e8daa84e-kube-api-access-7mqhn\") pod \"nova-cell1-conductor-db-sync-4vth5\" (UID: \"6c406803-95c9-46e7-953f-0c56e8daa84e\") " pod="openstack/nova-cell1-conductor-db-sync-4vth5" Jan 22 07:23:14 crc kubenswrapper[4933]: I0122 07:23:14.082696 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c406803-95c9-46e7-953f-0c56e8daa84e-scripts\") pod \"nova-cell1-conductor-db-sync-4vth5\" (UID: \"6c406803-95c9-46e7-953f-0c56e8daa84e\") " pod="openstack/nova-cell1-conductor-db-sync-4vth5" Jan 22 07:23:14 crc kubenswrapper[4933]: I0122 07:23:14.082738 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c406803-95c9-46e7-953f-0c56e8daa84e-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-4vth5\" (UID: \"6c406803-95c9-46e7-953f-0c56e8daa84e\") " pod="openstack/nova-cell1-conductor-db-sync-4vth5" Jan 22 07:23:14 crc kubenswrapper[4933]: I0122 07:23:14.082820 4933 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c406803-95c9-46e7-953f-0c56e8daa84e-config-data\") pod \"nova-cell1-conductor-db-sync-4vth5\" (UID: \"6c406803-95c9-46e7-953f-0c56e8daa84e\") " pod="openstack/nova-cell1-conductor-db-sync-4vth5" Jan 22 07:23:14 crc kubenswrapper[4933]: I0122 07:23:14.086199 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c406803-95c9-46e7-953f-0c56e8daa84e-scripts\") pod \"nova-cell1-conductor-db-sync-4vth5\" (UID: \"6c406803-95c9-46e7-953f-0c56e8daa84e\") " pod="openstack/nova-cell1-conductor-db-sync-4vth5" Jan 22 07:23:14 crc kubenswrapper[4933]: I0122 07:23:14.088536 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c406803-95c9-46e7-953f-0c56e8daa84e-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-4vth5\" (UID: \"6c406803-95c9-46e7-953f-0c56e8daa84e\") " pod="openstack/nova-cell1-conductor-db-sync-4vth5" Jan 22 07:23:14 crc kubenswrapper[4933]: I0122 07:23:14.088817 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c406803-95c9-46e7-953f-0c56e8daa84e-config-data\") pod \"nova-cell1-conductor-db-sync-4vth5\" (UID: \"6c406803-95c9-46e7-953f-0c56e8daa84e\") " pod="openstack/nova-cell1-conductor-db-sync-4vth5" Jan 22 07:23:14 crc kubenswrapper[4933]: I0122 07:23:14.120361 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7mqhn\" (UniqueName: \"kubernetes.io/projected/6c406803-95c9-46e7-953f-0c56e8daa84e-kube-api-access-7mqhn\") pod \"nova-cell1-conductor-db-sync-4vth5\" (UID: \"6c406803-95c9-46e7-953f-0c56e8daa84e\") " pod="openstack/nova-cell1-conductor-db-sync-4vth5" Jan 22 07:23:14 crc kubenswrapper[4933]: I0122 07:23:14.129299 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 07:23:14 crc kubenswrapper[4933]: W0122 07:23:14.147970 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3e2edc4a_a87d_4d50_bbd1_5006af141eb7.slice/crio-63092c6f0c6fea67cebe498a27d85bb74274369d898b9942bdeab305844d423f WatchSource:0}: Error finding container 63092c6f0c6fea67cebe498a27d85bb74274369d898b9942bdeab305844d423f: Status 404 returned error can't find the container with id 63092c6f0c6fea67cebe498a27d85bb74274369d898b9942bdeab305844d423f Jan 22 07:23:14 crc kubenswrapper[4933]: I0122 07:23:14.154057 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-4vth5" Jan 22 07:23:14 crc kubenswrapper[4933]: I0122 07:23:14.199940 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-65df48cf75-dnvbm"] Jan 22 07:23:14 crc kubenswrapper[4933]: W0122 07:23:14.232675 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7d881258_6f0e_4c52_b036_3805dba06666.slice/crio-15fd74bfa3dbcf91ecdbb6d28920684b42c85075a1726743a225359751a374ec WatchSource:0}: Error finding container 15fd74bfa3dbcf91ecdbb6d28920684b42c85075a1726743a225359751a374ec: Status 404 returned error can't find the container with id 15fd74bfa3dbcf91ecdbb6d28920684b42c85075a1726743a225359751a374ec Jan 22 07:23:14 crc kubenswrapper[4933]: I0122 07:23:14.706060 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-4vth5"] Jan 22 07:23:14 crc kubenswrapper[4933]: W0122 07:23:14.720070 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6c406803_95c9_46e7_953f_0c56e8daa84e.slice/crio-461d8bbbcfe036061ba4bc54f4116d2c4c8d981be0cc8f13cbff7eb8e04f49f7 WatchSource:0}: Error finding container 461d8bbbcfe036061ba4bc54f4116d2c4c8d981be0cc8f13cbff7eb8e04f49f7: Status 404 returned error can't find the container with id 461d8bbbcfe036061ba4bc54f4116d2c4c8d981be0cc8f13cbff7eb8e04f49f7 Jan 22 07:23:14 crc kubenswrapper[4933]: I0122 07:23:14.771481 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-d5cfk" event={"ID":"e955489f-8aff-49bd-848f-ac5bc3cd398d","Type":"ContainerStarted","Data":"14867d1e7cf0f45df7187c3d96dcfee03d2eedefd52f33223a1434fc1b64aff7"} Jan 22 07:23:14 crc kubenswrapper[4933]: I0122 07:23:14.771524 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-d5cfk" event={"ID":"e955489f-8aff-49bd-848f-ac5bc3cd398d","Type":"ContainerStarted","Data":"44938a0ee23a0be8cc20511ab64ef181fd72fa02df112d4fd41ea6cecb1eb2f1"} Jan 22 07:23:14 crc kubenswrapper[4933]: I0122 07:23:14.795446 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-d5cfk" podStartSLOduration=2.7954268410000003 podStartE2EDuration="2.795426841s" podCreationTimestamp="2026-01-22 07:23:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:23:14.786364791 +0000 UTC m=+5842.623490144" watchObservedRunningTime="2026-01-22 07:23:14.795426841 +0000 UTC m=+5842.632552194" Jan 22 07:23:14 crc kubenswrapper[4933]: I0122 07:23:14.799560 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"324012ea-092d-423e-a452-7e9bb177bd61","Type":"ContainerStarted","Data":"c7e246459f93cd58e28fd9b77ec63de80a78c5131a1435dd127d773fabde7386"} Jan 22 07:23:14 crc kubenswrapper[4933]: I0122 07:23:14.799638 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"324012ea-092d-423e-a452-7e9bb177bd61","Type":"ContainerStarted","Data":"890447e30aa41e1a3d0ded31cbc5e76214485e8c91e8e0262115ef51f056ed4d"} Jan 22 07:23:14 crc kubenswrapper[4933]: I0122 07:23:14.805029 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" 
event={"ID":"758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60","Type":"ContainerStarted","Data":"72f18ea98e3e0075c78cc2695167f39db79f90c667841470fb85799652e1da3a"} Jan 22 07:23:14 crc kubenswrapper[4933]: I0122 07:23:14.807394 4933 generic.go:334] "Generic (PLEG): container finished" podID="7d881258-6f0e-4c52-b036-3805dba06666" containerID="60353b0cd3e4cc985bd382318ae6d7c2bb2c2ebf5d61a2e4d0619d8b7875e7a9" exitCode=0 Jan 22 07:23:14 crc kubenswrapper[4933]: I0122 07:23:14.807438 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-65df48cf75-dnvbm" event={"ID":"7d881258-6f0e-4c52-b036-3805dba06666","Type":"ContainerDied","Data":"60353b0cd3e4cc985bd382318ae6d7c2bb2c2ebf5d61a2e4d0619d8b7875e7a9"} Jan 22 07:23:14 crc kubenswrapper[4933]: I0122 07:23:14.807480 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-65df48cf75-dnvbm" event={"ID":"7d881258-6f0e-4c52-b036-3805dba06666","Type":"ContainerStarted","Data":"15fd74bfa3dbcf91ecdbb6d28920684b42c85075a1726743a225359751a374ec"} Jan 22 07:23:14 crc kubenswrapper[4933]: I0122 07:23:14.808838 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-4vth5" event={"ID":"6c406803-95c9-46e7-953f-0c56e8daa84e","Type":"ContainerStarted","Data":"461d8bbbcfe036061ba4bc54f4116d2c4c8d981be0cc8f13cbff7eb8e04f49f7"} Jan 22 07:23:14 crc kubenswrapper[4933]: I0122 07:23:14.809961 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"32485906-e101-4c63-ba20-1c5385b46e47","Type":"ContainerStarted","Data":"8572360048bdcbf8c231c9ea49e3e8a7dd5cfa2732537b02ffb5ad40a644c476"} Jan 22 07:23:14 crc kubenswrapper[4933]: I0122 07:23:14.809983 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"32485906-e101-4c63-ba20-1c5385b46e47","Type":"ContainerStarted","Data":"b3479f6b959ea3004821621138813317870b1dff91bc9e35e470f5934ed38666"} Jan 22 07:23:14 crc kubenswrapper[4933]: I0122 07:23:14.809992 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"32485906-e101-4c63-ba20-1c5385b46e47","Type":"ContainerStarted","Data":"6ba5239179eee8d008df9c5de64e733f2a27a6fc80427bedd45b40ee43626a14"} Jan 22 07:23:14 crc kubenswrapper[4933]: I0122 07:23:14.819885 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3e2edc4a-a87d-4d50-bbd1-5006af141eb7","Type":"ContainerStarted","Data":"00be8d04b65720f0972bc7dd15fbb35735279426416862480ca65e8873323fb2"} Jan 22 07:23:14 crc kubenswrapper[4933]: I0122 07:23:14.819933 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3e2edc4a-a87d-4d50-bbd1-5006af141eb7","Type":"ContainerStarted","Data":"63092c6f0c6fea67cebe498a27d85bb74274369d898b9942bdeab305844d423f"} Jan 22 07:23:14 crc kubenswrapper[4933]: I0122 07:23:14.839259 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.839241697 podStartE2EDuration="2.839241697s" podCreationTimestamp="2026-01-22 07:23:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:23:14.827883511 +0000 UTC m=+5842.665008864" watchObservedRunningTime="2026-01-22 07:23:14.839241697 +0000 UTC m=+5842.676367050" Jan 22 07:23:14 crc kubenswrapper[4933]: I0122 07:23:14.857044 4933 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.85702816 podStartE2EDuration="2.85702816s" podCreationTimestamp="2026-01-22 07:23:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:23:14.84673384 +0000 UTC m=+5842.683859203" watchObservedRunningTime="2026-01-22 07:23:14.85702816 +0000 UTC m=+5842.694153513" Jan 22 07:23:14 crc kubenswrapper[4933]: I0122 07:23:14.874892 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.874850164 podStartE2EDuration="2.874850164s" podCreationTimestamp="2026-01-22 07:23:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:23:14.871167714 +0000 UTC m=+5842.708293087" watchObservedRunningTime="2026-01-22 07:23:14.874850164 +0000 UTC m=+5842.711975517" Jan 22 07:23:14 crc kubenswrapper[4933]: I0122 07:23:14.948137 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.948113186 podStartE2EDuration="2.948113186s" podCreationTimestamp="2026-01-22 07:23:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:23:14.946899166 +0000 UTC m=+5842.784024519" watchObservedRunningTime="2026-01-22 07:23:14.948113186 +0000 UTC m=+5842.785238549" Jan 22 07:23:15 crc kubenswrapper[4933]: I0122 07:23:15.831994 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-65df48cf75-dnvbm" event={"ID":"7d881258-6f0e-4c52-b036-3805dba06666","Type":"ContainerStarted","Data":"104fccaaa80183fbe07fc7a867e73d76b87c4cc126c630ee2b3f7bd0a76b862a"} Jan 22 07:23:15 crc kubenswrapper[4933]: I0122 07:23:15.832571 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-65df48cf75-dnvbm" Jan 22 07:23:15 crc kubenswrapper[4933]: I0122 07:23:15.835912 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-4vth5" event={"ID":"6c406803-95c9-46e7-953f-0c56e8daa84e","Type":"ContainerStarted","Data":"047c85e476622cfa484c6fa7dcff291d3f9e45a5a861763c8dd2d93f2612f665"} Jan 22 07:23:15 crc kubenswrapper[4933]: I0122 07:23:15.899202 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-65df48cf75-dnvbm" podStartSLOduration=3.899180705 podStartE2EDuration="3.899180705s" podCreationTimestamp="2026-01-22 07:23:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:23:15.85784924 +0000 UTC m=+5843.694974613" watchObservedRunningTime="2026-01-22 07:23:15.899180705 +0000 UTC m=+5843.736306058" Jan 22 07:23:15 crc kubenswrapper[4933]: I0122 07:23:15.915927 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-4vth5" podStartSLOduration=2.915910892 podStartE2EDuration="2.915910892s" podCreationTimestamp="2026-01-22 07:23:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:23:15.892316768 +0000 UTC m=+5843.729442141" watchObservedRunningTime="2026-01-22 07:23:15.915910892 +0000 UTC m=+5843.753036245" Jan 22 07:23:16 crc kubenswrapper[4933]: I0122 
07:23:16.886483 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 07:23:16 crc kubenswrapper[4933]: I0122 07:23:16.886728 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="32485906-e101-4c63-ba20-1c5385b46e47" containerName="nova-metadata-log" containerID="cri-o://b3479f6b959ea3004821621138813317870b1dff91bc9e35e470f5934ed38666" gracePeriod=30 Jan 22 07:23:16 crc kubenswrapper[4933]: I0122 07:23:16.886861 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="32485906-e101-4c63-ba20-1c5385b46e47" containerName="nova-metadata-metadata" containerID="cri-o://8572360048bdcbf8c231c9ea49e3e8a7dd5cfa2732537b02ffb5ad40a644c476" gracePeriod=30 Jan 22 07:23:16 crc kubenswrapper[4933]: I0122 07:23:16.895464 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 22 07:23:16 crc kubenswrapper[4933]: I0122 07:23:16.895690 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://72f18ea98e3e0075c78cc2695167f39db79f90c667841470fb85799652e1da3a" gracePeriod=30 Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.611753 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.771819 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8dpmd\" (UniqueName: \"kubernetes.io/projected/32485906-e101-4c63-ba20-1c5385b46e47-kube-api-access-8dpmd\") pod \"32485906-e101-4c63-ba20-1c5385b46e47\" (UID: \"32485906-e101-4c63-ba20-1c5385b46e47\") " Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.771875 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32485906-e101-4c63-ba20-1c5385b46e47-combined-ca-bundle\") pod \"32485906-e101-4c63-ba20-1c5385b46e47\" (UID: \"32485906-e101-4c63-ba20-1c5385b46e47\") " Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.771912 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/32485906-e101-4c63-ba20-1c5385b46e47-logs\") pod \"32485906-e101-4c63-ba20-1c5385b46e47\" (UID: \"32485906-e101-4c63-ba20-1c5385b46e47\") " Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.772053 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/32485906-e101-4c63-ba20-1c5385b46e47-config-data\") pod \"32485906-e101-4c63-ba20-1c5385b46e47\" (UID: \"32485906-e101-4c63-ba20-1c5385b46e47\") " Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.772385 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/32485906-e101-4c63-ba20-1c5385b46e47-logs" (OuterVolumeSpecName: "logs") pod "32485906-e101-4c63-ba20-1c5385b46e47" (UID: "32485906-e101-4c63-ba20-1c5385b46e47"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.772624 4933 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/32485906-e101-4c63-ba20-1c5385b46e47-logs\") on node \"crc\" DevicePath \"\"" Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.780034 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/32485906-e101-4c63-ba20-1c5385b46e47-kube-api-access-8dpmd" (OuterVolumeSpecName: "kube-api-access-8dpmd") pod "32485906-e101-4c63-ba20-1c5385b46e47" (UID: "32485906-e101-4c63-ba20-1c5385b46e47"). InnerVolumeSpecName "kube-api-access-8dpmd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.786355 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.799012 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/32485906-e101-4c63-ba20-1c5385b46e47-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "32485906-e101-4c63-ba20-1c5385b46e47" (UID: "32485906-e101-4c63-ba20-1c5385b46e47"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.799347 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/32485906-e101-4c63-ba20-1c5385b46e47-config-data" (OuterVolumeSpecName: "config-data") pod "32485906-e101-4c63-ba20-1c5385b46e47" (UID: "32485906-e101-4c63-ba20-1c5385b46e47"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.858558 4933 generic.go:334] "Generic (PLEG): container finished" podID="32485906-e101-4c63-ba20-1c5385b46e47" containerID="8572360048bdcbf8c231c9ea49e3e8a7dd5cfa2732537b02ffb5ad40a644c476" exitCode=0 Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.858595 4933 generic.go:334] "Generic (PLEG): container finished" podID="32485906-e101-4c63-ba20-1c5385b46e47" containerID="b3479f6b959ea3004821621138813317870b1dff91bc9e35e470f5934ed38666" exitCode=143 Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.858611 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"32485906-e101-4c63-ba20-1c5385b46e47","Type":"ContainerDied","Data":"8572360048bdcbf8c231c9ea49e3e8a7dd5cfa2732537b02ffb5ad40a644c476"} Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.858672 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"32485906-e101-4c63-ba20-1c5385b46e47","Type":"ContainerDied","Data":"b3479f6b959ea3004821621138813317870b1dff91bc9e35e470f5934ed38666"} Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.858687 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"32485906-e101-4c63-ba20-1c5385b46e47","Type":"ContainerDied","Data":"6ba5239179eee8d008df9c5de64e733f2a27a6fc80427bedd45b40ee43626a14"} Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.858625 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.858708 4933 scope.go:117] "RemoveContainer" containerID="8572360048bdcbf8c231c9ea49e3e8a7dd5cfa2732537b02ffb5ad40a644c476" Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.861756 4933 generic.go:334] "Generic (PLEG): container finished" podID="758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60" containerID="72f18ea98e3e0075c78cc2695167f39db79f90c667841470fb85799652e1da3a" exitCode=0 Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.861810 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60","Type":"ContainerDied","Data":"72f18ea98e3e0075c78cc2695167f39db79f90c667841470fb85799652e1da3a"} Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.861840 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60","Type":"ContainerDied","Data":"f1fc07f2f3958c8bb99542828f87c3ccb5c04c1256f6d413fa19923a4805b2e1"} Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.861880 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.874469 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8dpmd\" (UniqueName: \"kubernetes.io/projected/32485906-e101-4c63-ba20-1c5385b46e47-kube-api-access-8dpmd\") on node \"crc\" DevicePath \"\"" Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.874523 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32485906-e101-4c63-ba20-1c5385b46e47-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.874535 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/32485906-e101-4c63-ba20-1c5385b46e47-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.893718 4933 scope.go:117] "RemoveContainer" containerID="b3479f6b959ea3004821621138813317870b1dff91bc9e35e470f5934ed38666" Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.905310 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.925609 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.927306 4933 scope.go:117] "RemoveContainer" containerID="8572360048bdcbf8c231c9ea49e3e8a7dd5cfa2732537b02ffb5ad40a644c476" Jan 22 07:23:17 crc kubenswrapper[4933]: E0122 07:23:17.927786 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8572360048bdcbf8c231c9ea49e3e8a7dd5cfa2732537b02ffb5ad40a644c476\": container with ID starting with 8572360048bdcbf8c231c9ea49e3e8a7dd5cfa2732537b02ffb5ad40a644c476 not found: ID does not exist" containerID="8572360048bdcbf8c231c9ea49e3e8a7dd5cfa2732537b02ffb5ad40a644c476" Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.927828 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8572360048bdcbf8c231c9ea49e3e8a7dd5cfa2732537b02ffb5ad40a644c476"} err="failed to get container status 
\"8572360048bdcbf8c231c9ea49e3e8a7dd5cfa2732537b02ffb5ad40a644c476\": rpc error: code = NotFound desc = could not find container \"8572360048bdcbf8c231c9ea49e3e8a7dd5cfa2732537b02ffb5ad40a644c476\": container with ID starting with 8572360048bdcbf8c231c9ea49e3e8a7dd5cfa2732537b02ffb5ad40a644c476 not found: ID does not exist" Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.927859 4933 scope.go:117] "RemoveContainer" containerID="b3479f6b959ea3004821621138813317870b1dff91bc9e35e470f5934ed38666" Jan 22 07:23:17 crc kubenswrapper[4933]: E0122 07:23:17.928268 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b3479f6b959ea3004821621138813317870b1dff91bc9e35e470f5934ed38666\": container with ID starting with b3479f6b959ea3004821621138813317870b1dff91bc9e35e470f5934ed38666 not found: ID does not exist" containerID="b3479f6b959ea3004821621138813317870b1dff91bc9e35e470f5934ed38666" Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.928299 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b3479f6b959ea3004821621138813317870b1dff91bc9e35e470f5934ed38666"} err="failed to get container status \"b3479f6b959ea3004821621138813317870b1dff91bc9e35e470f5934ed38666\": rpc error: code = NotFound desc = could not find container \"b3479f6b959ea3004821621138813317870b1dff91bc9e35e470f5934ed38666\": container with ID starting with b3479f6b959ea3004821621138813317870b1dff91bc9e35e470f5934ed38666 not found: ID does not exist" Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.928323 4933 scope.go:117] "RemoveContainer" containerID="8572360048bdcbf8c231c9ea49e3e8a7dd5cfa2732537b02ffb5ad40a644c476" Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.928684 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8572360048bdcbf8c231c9ea49e3e8a7dd5cfa2732537b02ffb5ad40a644c476"} err="failed to get container status \"8572360048bdcbf8c231c9ea49e3e8a7dd5cfa2732537b02ffb5ad40a644c476\": rpc error: code = NotFound desc = could not find container \"8572360048bdcbf8c231c9ea49e3e8a7dd5cfa2732537b02ffb5ad40a644c476\": container with ID starting with 8572360048bdcbf8c231c9ea49e3e8a7dd5cfa2732537b02ffb5ad40a644c476 not found: ID does not exist" Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.928720 4933 scope.go:117] "RemoveContainer" containerID="b3479f6b959ea3004821621138813317870b1dff91bc9e35e470f5934ed38666" Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.930953 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b3479f6b959ea3004821621138813317870b1dff91bc9e35e470f5934ed38666"} err="failed to get container status \"b3479f6b959ea3004821621138813317870b1dff91bc9e35e470f5934ed38666\": rpc error: code = NotFound desc = could not find container \"b3479f6b959ea3004821621138813317870b1dff91bc9e35e470f5934ed38666\": container with ID starting with b3479f6b959ea3004821621138813317870b1dff91bc9e35e470f5934ed38666 not found: ID does not exist" Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.930991 4933 scope.go:117] "RemoveContainer" containerID="72f18ea98e3e0075c78cc2695167f39db79f90c667841470fb85799652e1da3a" Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.932829 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 22 07:23:17 crc kubenswrapper[4933]: E0122 07:23:17.933326 4933 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="32485906-e101-4c63-ba20-1c5385b46e47" containerName="nova-metadata-log" Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.933353 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="32485906-e101-4c63-ba20-1c5385b46e47" containerName="nova-metadata-log" Jan 22 07:23:17 crc kubenswrapper[4933]: E0122 07:23:17.933385 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32485906-e101-4c63-ba20-1c5385b46e47" containerName="nova-metadata-metadata" Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.933396 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="32485906-e101-4c63-ba20-1c5385b46e47" containerName="nova-metadata-metadata" Jan 22 07:23:17 crc kubenswrapper[4933]: E0122 07:23:17.933417 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60" containerName="nova-cell1-novncproxy-novncproxy" Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.933425 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60" containerName="nova-cell1-novncproxy-novncproxy" Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.933663 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="32485906-e101-4c63-ba20-1c5385b46e47" containerName="nova-metadata-log" Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.933691 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60" containerName="nova-cell1-novncproxy-novncproxy" Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.933718 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="32485906-e101-4c63-ba20-1c5385b46e47" containerName="nova-metadata-metadata" Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.938747 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.945531 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.946805 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.947021 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.954212 4933 scope.go:117] "RemoveContainer" containerID="72f18ea98e3e0075c78cc2695167f39db79f90c667841470fb85799652e1da3a" Jan 22 07:23:17 crc kubenswrapper[4933]: E0122 07:23:17.956720 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"72f18ea98e3e0075c78cc2695167f39db79f90c667841470fb85799652e1da3a\": container with ID starting with 72f18ea98e3e0075c78cc2695167f39db79f90c667841470fb85799652e1da3a not found: ID does not exist" containerID="72f18ea98e3e0075c78cc2695167f39db79f90c667841470fb85799652e1da3a" Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.956803 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"72f18ea98e3e0075c78cc2695167f39db79f90c667841470fb85799652e1da3a"} err="failed to get container status \"72f18ea98e3e0075c78cc2695167f39db79f90c667841470fb85799652e1da3a\": rpc error: code = NotFound desc = could not find container \"72f18ea98e3e0075c78cc2695167f39db79f90c667841470fb85799652e1da3a\": container with ID starting with 72f18ea98e3e0075c78cc2695167f39db79f90c667841470fb85799652e1da3a not found: ID does not exist" Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.976743 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60-combined-ca-bundle\") pod \"758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60\" (UID: \"758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60\") " Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.977219 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60-config-data\") pod \"758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60\" (UID: \"758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60\") " Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.977370 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4jx8b\" (UniqueName: \"kubernetes.io/projected/758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60-kube-api-access-4jx8b\") pod \"758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60\" (UID: \"758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60\") " Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.977882 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c20b00c-c33c-48a7-9f47-09275ce2249e-config-data\") pod \"nova-metadata-0\" (UID: \"9c20b00c-c33c-48a7-9f47-09275ce2249e\") " pod="openstack/nova-metadata-0" Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.978003 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9c20b00c-c33c-48a7-9f47-09275ce2249e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" 
(UID: \"9c20b00c-c33c-48a7-9f47-09275ce2249e\") " pod="openstack/nova-metadata-0" Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.978377 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c20b00c-c33c-48a7-9f47-09275ce2249e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"9c20b00c-c33c-48a7-9f47-09275ce2249e\") " pod="openstack/nova-metadata-0" Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.978540 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9c20b00c-c33c-48a7-9f47-09275ce2249e-logs\") pod \"nova-metadata-0\" (UID: \"9c20b00c-c33c-48a7-9f47-09275ce2249e\") " pod="openstack/nova-metadata-0" Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.978805 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cnhd4\" (UniqueName: \"kubernetes.io/projected/9c20b00c-c33c-48a7-9f47-09275ce2249e-kube-api-access-cnhd4\") pod \"nova-metadata-0\" (UID: \"9c20b00c-c33c-48a7-9f47-09275ce2249e\") " pod="openstack/nova-metadata-0" Jan 22 07:23:17 crc kubenswrapper[4933]: I0122 07:23:17.980501 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60-kube-api-access-4jx8b" (OuterVolumeSpecName: "kube-api-access-4jx8b") pod "758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60" (UID: "758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60"). InnerVolumeSpecName "kube-api-access-4jx8b". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.002508 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60-config-data" (OuterVolumeSpecName: "config-data") pod "758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60" (UID: "758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.009516 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60" (UID: "758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.080885 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c20b00c-c33c-48a7-9f47-09275ce2249e-config-data\") pod \"nova-metadata-0\" (UID: \"9c20b00c-c33c-48a7-9f47-09275ce2249e\") " pod="openstack/nova-metadata-0" Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.081478 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9c20b00c-c33c-48a7-9f47-09275ce2249e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"9c20b00c-c33c-48a7-9f47-09275ce2249e\") " pod="openstack/nova-metadata-0" Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.081618 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c20b00c-c33c-48a7-9f47-09275ce2249e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"9c20b00c-c33c-48a7-9f47-09275ce2249e\") " pod="openstack/nova-metadata-0" Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.081750 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9c20b00c-c33c-48a7-9f47-09275ce2249e-logs\") pod \"nova-metadata-0\" (UID: \"9c20b00c-c33c-48a7-9f47-09275ce2249e\") " pod="openstack/nova-metadata-0" Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.081966 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cnhd4\" (UniqueName: \"kubernetes.io/projected/9c20b00c-c33c-48a7-9f47-09275ce2249e-kube-api-access-cnhd4\") pod \"nova-metadata-0\" (UID: \"9c20b00c-c33c-48a7-9f47-09275ce2249e\") " pod="openstack/nova-metadata-0" Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.082157 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.082248 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4jx8b\" (UniqueName: \"kubernetes.io/projected/758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60-kube-api-access-4jx8b\") on node \"crc\" DevicePath \"\"" Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.082328 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.082354 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9c20b00c-c33c-48a7-9f47-09275ce2249e-logs\") pod \"nova-metadata-0\" (UID: \"9c20b00c-c33c-48a7-9f47-09275ce2249e\") " pod="openstack/nova-metadata-0" Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.084763 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9c20b00c-c33c-48a7-9f47-09275ce2249e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"9c20b00c-c33c-48a7-9f47-09275ce2249e\") " pod="openstack/nova-metadata-0" Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.085098 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/9c20b00c-c33c-48a7-9f47-09275ce2249e-config-data\") pod \"nova-metadata-0\" (UID: \"9c20b00c-c33c-48a7-9f47-09275ce2249e\") " pod="openstack/nova-metadata-0" Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.085960 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c20b00c-c33c-48a7-9f47-09275ce2249e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"9c20b00c-c33c-48a7-9f47-09275ce2249e\") " pod="openstack/nova-metadata-0" Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.101120 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cnhd4\" (UniqueName: \"kubernetes.io/projected/9c20b00c-c33c-48a7-9f47-09275ce2249e-kube-api-access-cnhd4\") pod \"nova-metadata-0\" (UID: \"9c20b00c-c33c-48a7-9f47-09275ce2249e\") " pod="openstack/nova-metadata-0" Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.227487 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.235311 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.255079 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.256305 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.267436 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.267644 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.267866 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.268081 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.284024 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.291710 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2411e726-603b-497a-966b-f0519bdef29a-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"2411e726-603b-497a-966b-f0519bdef29a\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.292963 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2411e726-603b-497a-966b-f0519bdef29a-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"2411e726-603b-497a-966b-f0519bdef29a\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.293008 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/2411e726-603b-497a-966b-f0519bdef29a-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" 
(UID: \"2411e726-603b-497a-966b-f0519bdef29a\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.293109 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-njxvc\" (UniqueName: \"kubernetes.io/projected/2411e726-603b-497a-966b-f0519bdef29a-kube-api-access-njxvc\") pod \"nova-cell1-novncproxy-0\" (UID: \"2411e726-603b-497a-966b-f0519bdef29a\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.293153 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/2411e726-603b-497a-966b-f0519bdef29a-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"2411e726-603b-497a-966b-f0519bdef29a\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.394616 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2411e726-603b-497a-966b-f0519bdef29a-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"2411e726-603b-497a-966b-f0519bdef29a\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.394679 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2411e726-603b-497a-966b-f0519bdef29a-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"2411e726-603b-497a-966b-f0519bdef29a\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.394728 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/2411e726-603b-497a-966b-f0519bdef29a-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"2411e726-603b-497a-966b-f0519bdef29a\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.394764 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-njxvc\" (UniqueName: \"kubernetes.io/projected/2411e726-603b-497a-966b-f0519bdef29a-kube-api-access-njxvc\") pod \"nova-cell1-novncproxy-0\" (UID: \"2411e726-603b-497a-966b-f0519bdef29a\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.394797 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/2411e726-603b-497a-966b-f0519bdef29a-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"2411e726-603b-497a-966b-f0519bdef29a\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.399675 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2411e726-603b-497a-966b-f0519bdef29a-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"2411e726-603b-497a-966b-f0519bdef29a\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.403327 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/2411e726-603b-497a-966b-f0519bdef29a-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: 
\"2411e726-603b-497a-966b-f0519bdef29a\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.405506 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/2411e726-603b-497a-966b-f0519bdef29a-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"2411e726-603b-497a-966b-f0519bdef29a\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.405892 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2411e726-603b-497a-966b-f0519bdef29a-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"2411e726-603b-497a-966b-f0519bdef29a\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.410404 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-njxvc\" (UniqueName: \"kubernetes.io/projected/2411e726-603b-497a-966b-f0519bdef29a-kube-api-access-njxvc\") pod \"nova-cell1-novncproxy-0\" (UID: \"2411e726-603b-497a-966b-f0519bdef29a\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.432894 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.501959 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="32485906-e101-4c63-ba20-1c5385b46e47" path="/var/lib/kubelet/pods/32485906-e101-4c63-ba20-1c5385b46e47/volumes" Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.502776 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60" path="/var/lib/kubelet/pods/758ffeda-7a9c-4da4-a8e2-a6f79ca8fb60/volumes" Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.666391 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.728782 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.891609 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9c20b00c-c33c-48a7-9f47-09275ce2249e","Type":"ContainerStarted","Data":"5584e7f0895c9b65b7761b41250bd9a3f2a9f05e8afa0b726fb1c077f446fd6b"} Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.899529 4933 generic.go:334] "Generic (PLEG): container finished" podID="6c406803-95c9-46e7-953f-0c56e8daa84e" containerID="047c85e476622cfa484c6fa7dcff291d3f9e45a5a861763c8dd2d93f2612f665" exitCode=0 Jan 22 07:23:18 crc kubenswrapper[4933]: I0122 07:23:18.899566 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-4vth5" event={"ID":"6c406803-95c9-46e7-953f-0c56e8daa84e","Type":"ContainerDied","Data":"047c85e476622cfa484c6fa7dcff291d3f9e45a5a861763c8dd2d93f2612f665"} Jan 22 07:23:19 crc kubenswrapper[4933]: W0122 07:23:19.114428 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2411e726_603b_497a_966b_f0519bdef29a.slice/crio-8d8941559f7744581fcf87dafb6aac4f2580e2e854230995a1783e5dd1d9019d WatchSource:0}: Error finding container 8d8941559f7744581fcf87dafb6aac4f2580e2e854230995a1783e5dd1d9019d: Status 404 returned error can't find the container with id 8d8941559f7744581fcf87dafb6aac4f2580e2e854230995a1783e5dd1d9019d Jan 22 07:23:19 crc kubenswrapper[4933]: I0122 07:23:19.118177 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 22 07:23:19 crc kubenswrapper[4933]: I0122 07:23:19.908723 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"2411e726-603b-497a-966b-f0519bdef29a","Type":"ContainerStarted","Data":"ca9d2c133da0e2f910999dc83ed4eef7c6105b502a72f93257820775be98f776"} Jan 22 07:23:19 crc kubenswrapper[4933]: I0122 07:23:19.909106 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"2411e726-603b-497a-966b-f0519bdef29a","Type":"ContainerStarted","Data":"8d8941559f7744581fcf87dafb6aac4f2580e2e854230995a1783e5dd1d9019d"} Jan 22 07:23:19 crc kubenswrapper[4933]: I0122 07:23:19.910367 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9c20b00c-c33c-48a7-9f47-09275ce2249e","Type":"ContainerStarted","Data":"0f45835b5a893ef8c17772daba766623540cb99f95529c5870495724f8c2eb83"} Jan 22 07:23:19 crc kubenswrapper[4933]: I0122 07:23:19.910408 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9c20b00c-c33c-48a7-9f47-09275ce2249e","Type":"ContainerStarted","Data":"d74a0b04fa8b718a204da9d72d832d20fedc5de4f8bcaa2a9ca0d7d0f624bc5a"} Jan 22 07:23:19 crc kubenswrapper[4933]: I0122 07:23:19.911844 4933 generic.go:334] "Generic (PLEG): container finished" podID="e955489f-8aff-49bd-848f-ac5bc3cd398d" containerID="14867d1e7cf0f45df7187c3d96dcfee03d2eedefd52f33223a1434fc1b64aff7" exitCode=0 Jan 22 07:23:19 crc kubenswrapper[4933]: I0122 07:23:19.911929 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-d5cfk" 
event={"ID":"e955489f-8aff-49bd-848f-ac5bc3cd398d","Type":"ContainerDied","Data":"14867d1e7cf0f45df7187c3d96dcfee03d2eedefd52f33223a1434fc1b64aff7"} Jan 22 07:23:19 crc kubenswrapper[4933]: I0122 07:23:19.941025 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=1.9410060420000002 podStartE2EDuration="1.941006042s" podCreationTimestamp="2026-01-22 07:23:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:23:19.928221391 +0000 UTC m=+5847.765346754" watchObservedRunningTime="2026-01-22 07:23:19.941006042 +0000 UTC m=+5847.778131395" Jan 22 07:23:19 crc kubenswrapper[4933]: I0122 07:23:19.950952 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.950931514 podStartE2EDuration="2.950931514s" podCreationTimestamp="2026-01-22 07:23:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:23:19.94913469 +0000 UTC m=+5847.786260063" watchObservedRunningTime="2026-01-22 07:23:19.950931514 +0000 UTC m=+5847.788056867" Jan 22 07:23:20 crc kubenswrapper[4933]: I0122 07:23:20.309734 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-4vth5" Jan 22 07:23:20 crc kubenswrapper[4933]: I0122 07:23:20.335851 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c406803-95c9-46e7-953f-0c56e8daa84e-scripts\") pod \"6c406803-95c9-46e7-953f-0c56e8daa84e\" (UID: \"6c406803-95c9-46e7-953f-0c56e8daa84e\") " Jan 22 07:23:20 crc kubenswrapper[4933]: I0122 07:23:20.336011 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c406803-95c9-46e7-953f-0c56e8daa84e-config-data\") pod \"6c406803-95c9-46e7-953f-0c56e8daa84e\" (UID: \"6c406803-95c9-46e7-953f-0c56e8daa84e\") " Jan 22 07:23:20 crc kubenswrapper[4933]: I0122 07:23:20.336043 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c406803-95c9-46e7-953f-0c56e8daa84e-combined-ca-bundle\") pod \"6c406803-95c9-46e7-953f-0c56e8daa84e\" (UID: \"6c406803-95c9-46e7-953f-0c56e8daa84e\") " Jan 22 07:23:20 crc kubenswrapper[4933]: I0122 07:23:20.336169 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7mqhn\" (UniqueName: \"kubernetes.io/projected/6c406803-95c9-46e7-953f-0c56e8daa84e-kube-api-access-7mqhn\") pod \"6c406803-95c9-46e7-953f-0c56e8daa84e\" (UID: \"6c406803-95c9-46e7-953f-0c56e8daa84e\") " Jan 22 07:23:20 crc kubenswrapper[4933]: I0122 07:23:20.343395 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c406803-95c9-46e7-953f-0c56e8daa84e-scripts" (OuterVolumeSpecName: "scripts") pod "6c406803-95c9-46e7-953f-0c56e8daa84e" (UID: "6c406803-95c9-46e7-953f-0c56e8daa84e"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:23:20 crc kubenswrapper[4933]: I0122 07:23:20.343429 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c406803-95c9-46e7-953f-0c56e8daa84e-kube-api-access-7mqhn" (OuterVolumeSpecName: "kube-api-access-7mqhn") pod "6c406803-95c9-46e7-953f-0c56e8daa84e" (UID: "6c406803-95c9-46e7-953f-0c56e8daa84e"). InnerVolumeSpecName "kube-api-access-7mqhn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:23:20 crc kubenswrapper[4933]: I0122 07:23:20.373302 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c406803-95c9-46e7-953f-0c56e8daa84e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6c406803-95c9-46e7-953f-0c56e8daa84e" (UID: "6c406803-95c9-46e7-953f-0c56e8daa84e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:23:20 crc kubenswrapper[4933]: I0122 07:23:20.388219 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c406803-95c9-46e7-953f-0c56e8daa84e-config-data" (OuterVolumeSpecName: "config-data") pod "6c406803-95c9-46e7-953f-0c56e8daa84e" (UID: "6c406803-95c9-46e7-953f-0c56e8daa84e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:23:20 crc kubenswrapper[4933]: I0122 07:23:20.439426 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c406803-95c9-46e7-953f-0c56e8daa84e-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:23:20 crc kubenswrapper[4933]: I0122 07:23:20.439477 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c406803-95c9-46e7-953f-0c56e8daa84e-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 07:23:20 crc kubenswrapper[4933]: I0122 07:23:20.439491 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c406803-95c9-46e7-953f-0c56e8daa84e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:23:20 crc kubenswrapper[4933]: I0122 07:23:20.439507 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7mqhn\" (UniqueName: \"kubernetes.io/projected/6c406803-95c9-46e7-953f-0c56e8daa84e-kube-api-access-7mqhn\") on node \"crc\" DevicePath \"\"" Jan 22 07:23:20 crc kubenswrapper[4933]: I0122 07:23:20.922428 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-4vth5" Jan 22 07:23:20 crc kubenswrapper[4933]: I0122 07:23:20.927610 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-4vth5" event={"ID":"6c406803-95c9-46e7-953f-0c56e8daa84e","Type":"ContainerDied","Data":"461d8bbbcfe036061ba4bc54f4116d2c4c8d981be0cc8f13cbff7eb8e04f49f7"} Jan 22 07:23:20 crc kubenswrapper[4933]: I0122 07:23:20.927703 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="461d8bbbcfe036061ba4bc54f4116d2c4c8d981be0cc8f13cbff7eb8e04f49f7" Jan 22 07:23:20 crc kubenswrapper[4933]: I0122 07:23:20.995442 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 22 07:23:20 crc kubenswrapper[4933]: E0122 07:23:20.995833 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c406803-95c9-46e7-953f-0c56e8daa84e" containerName="nova-cell1-conductor-db-sync" Jan 22 07:23:20 crc kubenswrapper[4933]: I0122 07:23:20.995851 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c406803-95c9-46e7-953f-0c56e8daa84e" containerName="nova-cell1-conductor-db-sync" Jan 22 07:23:20 crc kubenswrapper[4933]: I0122 07:23:20.996015 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c406803-95c9-46e7-953f-0c56e8daa84e" containerName="nova-cell1-conductor-db-sync" Jan 22 07:23:20 crc kubenswrapper[4933]: I0122 07:23:20.996978 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 22 07:23:20 crc kubenswrapper[4933]: I0122 07:23:20.999194 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 22 07:23:21 crc kubenswrapper[4933]: I0122 07:23:21.010679 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 22 07:23:21 crc kubenswrapper[4933]: I0122 07:23:21.051008 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee69d559-ca87-4c15-9311-94390d23e206-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"ee69d559-ca87-4c15-9311-94390d23e206\") " pod="openstack/nova-cell1-conductor-0" Jan 22 07:23:21 crc kubenswrapper[4933]: I0122 07:23:21.051064 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cmrq7\" (UniqueName: \"kubernetes.io/projected/ee69d559-ca87-4c15-9311-94390d23e206-kube-api-access-cmrq7\") pod \"nova-cell1-conductor-0\" (UID: \"ee69d559-ca87-4c15-9311-94390d23e206\") " pod="openstack/nova-cell1-conductor-0" Jan 22 07:23:21 crc kubenswrapper[4933]: I0122 07:23:21.051149 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee69d559-ca87-4c15-9311-94390d23e206-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"ee69d559-ca87-4c15-9311-94390d23e206\") " pod="openstack/nova-cell1-conductor-0" Jan 22 07:23:21 crc kubenswrapper[4933]: I0122 07:23:21.153041 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee69d559-ca87-4c15-9311-94390d23e206-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"ee69d559-ca87-4c15-9311-94390d23e206\") " pod="openstack/nova-cell1-conductor-0" Jan 22 07:23:21 crc kubenswrapper[4933]: I0122 07:23:21.153356 4933 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cmrq7\" (UniqueName: \"kubernetes.io/projected/ee69d559-ca87-4c15-9311-94390d23e206-kube-api-access-cmrq7\") pod \"nova-cell1-conductor-0\" (UID: \"ee69d559-ca87-4c15-9311-94390d23e206\") " pod="openstack/nova-cell1-conductor-0" Jan 22 07:23:21 crc kubenswrapper[4933]: I0122 07:23:21.153410 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee69d559-ca87-4c15-9311-94390d23e206-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"ee69d559-ca87-4c15-9311-94390d23e206\") " pod="openstack/nova-cell1-conductor-0" Jan 22 07:23:21 crc kubenswrapper[4933]: I0122 07:23:21.158060 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee69d559-ca87-4c15-9311-94390d23e206-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"ee69d559-ca87-4c15-9311-94390d23e206\") " pod="openstack/nova-cell1-conductor-0" Jan 22 07:23:21 crc kubenswrapper[4933]: I0122 07:23:21.167652 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee69d559-ca87-4c15-9311-94390d23e206-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"ee69d559-ca87-4c15-9311-94390d23e206\") " pod="openstack/nova-cell1-conductor-0" Jan 22 07:23:21 crc kubenswrapper[4933]: I0122 07:23:21.168474 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cmrq7\" (UniqueName: \"kubernetes.io/projected/ee69d559-ca87-4c15-9311-94390d23e206-kube-api-access-cmrq7\") pod \"nova-cell1-conductor-0\" (UID: \"ee69d559-ca87-4c15-9311-94390d23e206\") " pod="openstack/nova-cell1-conductor-0" Jan 22 07:23:21 crc kubenswrapper[4933]: I0122 07:23:21.283874 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-d5cfk" Jan 22 07:23:21 crc kubenswrapper[4933]: I0122 07:23:21.316142 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 22 07:23:21 crc kubenswrapper[4933]: I0122 07:23:21.356394 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e955489f-8aff-49bd-848f-ac5bc3cd398d-combined-ca-bundle\") pod \"e955489f-8aff-49bd-848f-ac5bc3cd398d\" (UID: \"e955489f-8aff-49bd-848f-ac5bc3cd398d\") " Jan 22 07:23:21 crc kubenswrapper[4933]: I0122 07:23:21.356526 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e955489f-8aff-49bd-848f-ac5bc3cd398d-scripts\") pod \"e955489f-8aff-49bd-848f-ac5bc3cd398d\" (UID: \"e955489f-8aff-49bd-848f-ac5bc3cd398d\") " Jan 22 07:23:21 crc kubenswrapper[4933]: I0122 07:23:21.356680 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hdbgb\" (UniqueName: \"kubernetes.io/projected/e955489f-8aff-49bd-848f-ac5bc3cd398d-kube-api-access-hdbgb\") pod \"e955489f-8aff-49bd-848f-ac5bc3cd398d\" (UID: \"e955489f-8aff-49bd-848f-ac5bc3cd398d\") " Jan 22 07:23:21 crc kubenswrapper[4933]: I0122 07:23:21.356720 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e955489f-8aff-49bd-848f-ac5bc3cd398d-config-data\") pod \"e955489f-8aff-49bd-848f-ac5bc3cd398d\" (UID: \"e955489f-8aff-49bd-848f-ac5bc3cd398d\") " Jan 22 07:23:21 crc kubenswrapper[4933]: I0122 07:23:21.360431 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e955489f-8aff-49bd-848f-ac5bc3cd398d-scripts" (OuterVolumeSpecName: "scripts") pod "e955489f-8aff-49bd-848f-ac5bc3cd398d" (UID: "e955489f-8aff-49bd-848f-ac5bc3cd398d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:23:21 crc kubenswrapper[4933]: I0122 07:23:21.361276 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e955489f-8aff-49bd-848f-ac5bc3cd398d-kube-api-access-hdbgb" (OuterVolumeSpecName: "kube-api-access-hdbgb") pod "e955489f-8aff-49bd-848f-ac5bc3cd398d" (UID: "e955489f-8aff-49bd-848f-ac5bc3cd398d"). InnerVolumeSpecName "kube-api-access-hdbgb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:23:21 crc kubenswrapper[4933]: I0122 07:23:21.390356 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e955489f-8aff-49bd-848f-ac5bc3cd398d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e955489f-8aff-49bd-848f-ac5bc3cd398d" (UID: "e955489f-8aff-49bd-848f-ac5bc3cd398d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:23:21 crc kubenswrapper[4933]: I0122 07:23:21.392697 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e955489f-8aff-49bd-848f-ac5bc3cd398d-config-data" (OuterVolumeSpecName: "config-data") pod "e955489f-8aff-49bd-848f-ac5bc3cd398d" (UID: "e955489f-8aff-49bd-848f-ac5bc3cd398d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:23:21 crc kubenswrapper[4933]: I0122 07:23:21.459440 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hdbgb\" (UniqueName: \"kubernetes.io/projected/e955489f-8aff-49bd-848f-ac5bc3cd398d-kube-api-access-hdbgb\") on node \"crc\" DevicePath \"\"" Jan 22 07:23:21 crc kubenswrapper[4933]: I0122 07:23:21.459473 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e955489f-8aff-49bd-848f-ac5bc3cd398d-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 07:23:21 crc kubenswrapper[4933]: I0122 07:23:21.459483 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e955489f-8aff-49bd-848f-ac5bc3cd398d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:23:21 crc kubenswrapper[4933]: I0122 07:23:21.459490 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e955489f-8aff-49bd-848f-ac5bc3cd398d-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:23:21 crc kubenswrapper[4933]: I0122 07:23:21.799537 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 22 07:23:21 crc kubenswrapper[4933]: I0122 07:23:21.933942 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-d5cfk" Jan 22 07:23:21 crc kubenswrapper[4933]: I0122 07:23:21.935441 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-d5cfk" event={"ID":"e955489f-8aff-49bd-848f-ac5bc3cd398d","Type":"ContainerDied","Data":"44938a0ee23a0be8cc20511ab64ef181fd72fa02df112d4fd41ea6cecb1eb2f1"} Jan 22 07:23:21 crc kubenswrapper[4933]: I0122 07:23:21.935627 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="44938a0ee23a0be8cc20511ab64ef181fd72fa02df112d4fd41ea6cecb1eb2f1" Jan 22 07:23:21 crc kubenswrapper[4933]: I0122 07:23:21.936719 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"ee69d559-ca87-4c15-9311-94390d23e206","Type":"ContainerStarted","Data":"6d79d7d3172bbccaf650fd8a75a80e2f219756f19b9427ba451f2bfc9f5a9453"} Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.146251 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.146584 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="324012ea-092d-423e-a452-7e9bb177bd61" containerName="nova-api-log" containerID="cri-o://890447e30aa41e1a3d0ded31cbc5e76214485e8c91e8e0262115ef51f056ed4d" gracePeriod=30 Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.146751 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="324012ea-092d-423e-a452-7e9bb177bd61" containerName="nova-api-api" containerID="cri-o://c7e246459f93cd58e28fd9b77ec63de80a78c5131a1435dd127d773fabde7386" gracePeriod=30 Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.156424 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.162352 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="3e2edc4a-a87d-4d50-bbd1-5006af141eb7" containerName="nova-scheduler-scheduler" 
containerID="cri-o://00be8d04b65720f0972bc7dd15fbb35735279426416862480ca65e8873323fb2" gracePeriod=30 Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.170870 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.171107 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="9c20b00c-c33c-48a7-9f47-09275ce2249e" containerName="nova-metadata-log" containerID="cri-o://d74a0b04fa8b718a204da9d72d832d20fedc5de4f8bcaa2a9ca0d7d0f624bc5a" gracePeriod=30 Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.171622 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="9c20b00c-c33c-48a7-9f47-09275ce2249e" containerName="nova-metadata-metadata" containerID="cri-o://0f45835b5a893ef8c17772daba766623540cb99f95529c5870495724f8c2eb83" gracePeriod=30 Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.655186 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.682066 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/324012ea-092d-423e-a452-7e9bb177bd61-logs\") pod \"324012ea-092d-423e-a452-7e9bb177bd61\" (UID: \"324012ea-092d-423e-a452-7e9bb177bd61\") " Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.682142 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/324012ea-092d-423e-a452-7e9bb177bd61-combined-ca-bundle\") pod \"324012ea-092d-423e-a452-7e9bb177bd61\" (UID: \"324012ea-092d-423e-a452-7e9bb177bd61\") " Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.682230 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w8pxg\" (UniqueName: \"kubernetes.io/projected/324012ea-092d-423e-a452-7e9bb177bd61-kube-api-access-w8pxg\") pod \"324012ea-092d-423e-a452-7e9bb177bd61\" (UID: \"324012ea-092d-423e-a452-7e9bb177bd61\") " Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.682359 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/324012ea-092d-423e-a452-7e9bb177bd61-config-data\") pod \"324012ea-092d-423e-a452-7e9bb177bd61\" (UID: \"324012ea-092d-423e-a452-7e9bb177bd61\") " Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.682435 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/324012ea-092d-423e-a452-7e9bb177bd61-logs" (OuterVolumeSpecName: "logs") pod "324012ea-092d-423e-a452-7e9bb177bd61" (UID: "324012ea-092d-423e-a452-7e9bb177bd61"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.682683 4933 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/324012ea-092d-423e-a452-7e9bb177bd61-logs\") on node \"crc\" DevicePath \"\"" Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.688223 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/324012ea-092d-423e-a452-7e9bb177bd61-kube-api-access-w8pxg" (OuterVolumeSpecName: "kube-api-access-w8pxg") pod "324012ea-092d-423e-a452-7e9bb177bd61" (UID: "324012ea-092d-423e-a452-7e9bb177bd61"). InnerVolumeSpecName "kube-api-access-w8pxg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.711215 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/324012ea-092d-423e-a452-7e9bb177bd61-config-data" (OuterVolumeSpecName: "config-data") pod "324012ea-092d-423e-a452-7e9bb177bd61" (UID: "324012ea-092d-423e-a452-7e9bb177bd61"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.713516 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/324012ea-092d-423e-a452-7e9bb177bd61-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "324012ea-092d-423e-a452-7e9bb177bd61" (UID: "324012ea-092d-423e-a452-7e9bb177bd61"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.760140 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.784383 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/324012ea-092d-423e-a452-7e9bb177bd61-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.784421 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w8pxg\" (UniqueName: \"kubernetes.io/projected/324012ea-092d-423e-a452-7e9bb177bd61-kube-api-access-w8pxg\") on node \"crc\" DevicePath \"\"" Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.784435 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/324012ea-092d-423e-a452-7e9bb177bd61-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.885366 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9c20b00c-c33c-48a7-9f47-09275ce2249e-logs\") pod \"9c20b00c-c33c-48a7-9f47-09275ce2249e\" (UID: \"9c20b00c-c33c-48a7-9f47-09275ce2249e\") " Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.885478 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c20b00c-c33c-48a7-9f47-09275ce2249e-config-data\") pod \"9c20b00c-c33c-48a7-9f47-09275ce2249e\" (UID: \"9c20b00c-c33c-48a7-9f47-09275ce2249e\") " Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.885523 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/9c20b00c-c33c-48a7-9f47-09275ce2249e-nova-metadata-tls-certs\") pod \"9c20b00c-c33c-48a7-9f47-09275ce2249e\" (UID: \"9c20b00c-c33c-48a7-9f47-09275ce2249e\") " Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.885563 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cnhd4\" (UniqueName: \"kubernetes.io/projected/9c20b00c-c33c-48a7-9f47-09275ce2249e-kube-api-access-cnhd4\") pod \"9c20b00c-c33c-48a7-9f47-09275ce2249e\" (UID: \"9c20b00c-c33c-48a7-9f47-09275ce2249e\") " Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.885592 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c20b00c-c33c-48a7-9f47-09275ce2249e-combined-ca-bundle\") pod \"9c20b00c-c33c-48a7-9f47-09275ce2249e\" (UID: \"9c20b00c-c33c-48a7-9f47-09275ce2249e\") " Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.885727 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9c20b00c-c33c-48a7-9f47-09275ce2249e-logs" (OuterVolumeSpecName: "logs") pod "9c20b00c-c33c-48a7-9f47-09275ce2249e" (UID: "9c20b00c-c33c-48a7-9f47-09275ce2249e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.886234 4933 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9c20b00c-c33c-48a7-9f47-09275ce2249e-logs\") on node \"crc\" DevicePath \"\"" Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.890087 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c20b00c-c33c-48a7-9f47-09275ce2249e-kube-api-access-cnhd4" (OuterVolumeSpecName: "kube-api-access-cnhd4") pod "9c20b00c-c33c-48a7-9f47-09275ce2249e" (UID: "9c20b00c-c33c-48a7-9f47-09275ce2249e"). InnerVolumeSpecName "kube-api-access-cnhd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.911938 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9c20b00c-c33c-48a7-9f47-09275ce2249e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9c20b00c-c33c-48a7-9f47-09275ce2249e" (UID: "9c20b00c-c33c-48a7-9f47-09275ce2249e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.916671 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9c20b00c-c33c-48a7-9f47-09275ce2249e-config-data" (OuterVolumeSpecName: "config-data") pod "9c20b00c-c33c-48a7-9f47-09275ce2249e" (UID: "9c20b00c-c33c-48a7-9f47-09275ce2249e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.948148 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"ee69d559-ca87-4c15-9311-94390d23e206","Type":"ContainerStarted","Data":"d2d10fcc9df6fcab52121f0c88e0ee873602fd0543c963029078e19204ff7331"} Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.948735 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.952088 4933 generic.go:334] "Generic (PLEG): container finished" podID="9c20b00c-c33c-48a7-9f47-09275ce2249e" containerID="0f45835b5a893ef8c17772daba766623540cb99f95529c5870495724f8c2eb83" exitCode=0 Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.952127 4933 generic.go:334] "Generic (PLEG): container finished" podID="9c20b00c-c33c-48a7-9f47-09275ce2249e" containerID="d74a0b04fa8b718a204da9d72d832d20fedc5de4f8bcaa2a9ca0d7d0f624bc5a" exitCode=143 Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.952180 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9c20b00c-c33c-48a7-9f47-09275ce2249e","Type":"ContainerDied","Data":"0f45835b5a893ef8c17772daba766623540cb99f95529c5870495724f8c2eb83"} Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.952213 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9c20b00c-c33c-48a7-9f47-09275ce2249e","Type":"ContainerDied","Data":"d74a0b04fa8b718a204da9d72d832d20fedc5de4f8bcaa2a9ca0d7d0f624bc5a"} Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.952228 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9c20b00c-c33c-48a7-9f47-09275ce2249e","Type":"ContainerDied","Data":"5584e7f0895c9b65b7761b41250bd9a3f2a9f05e8afa0b726fb1c077f446fd6b"} Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.952247 4933 scope.go:117] "RemoveContainer" containerID="0f45835b5a893ef8c17772daba766623540cb99f95529c5870495724f8c2eb83" Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.952382 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0"
Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.955161 4933 generic.go:334] "Generic (PLEG): container finished" podID="324012ea-092d-423e-a452-7e9bb177bd61" containerID="c7e246459f93cd58e28fd9b77ec63de80a78c5131a1435dd127d773fabde7386" exitCode=0
Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.955193 4933 generic.go:334] "Generic (PLEG): container finished" podID="324012ea-092d-423e-a452-7e9bb177bd61" containerID="890447e30aa41e1a3d0ded31cbc5e76214485e8c91e8e0262115ef51f056ed4d" exitCode=143
Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.955217 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"324012ea-092d-423e-a452-7e9bb177bd61","Type":"ContainerDied","Data":"c7e246459f93cd58e28fd9b77ec63de80a78c5131a1435dd127d773fabde7386"}
Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.955241 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"324012ea-092d-423e-a452-7e9bb177bd61","Type":"ContainerDied","Data":"890447e30aa41e1a3d0ded31cbc5e76214485e8c91e8e0262115ef51f056ed4d"}
Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.955253 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"324012ea-092d-423e-a452-7e9bb177bd61","Type":"ContainerDied","Data":"38f404aead6b5dbeb0f1b0c98a91113c77814ca0e17261b1f38a30afe3508bb7"}
Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.955309 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.958099 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9c20b00c-c33c-48a7-9f47-09275ce2249e-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "9c20b00c-c33c-48a7-9f47-09275ce2249e" (UID: "9c20b00c-c33c-48a7-9f47-09275ce2249e"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.968450 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.968431629 podStartE2EDuration="2.968431629s" podCreationTimestamp="2026-01-22 07:23:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:23:22.965507838 +0000 UTC m=+5850.802633211" watchObservedRunningTime="2026-01-22 07:23:22.968431629 +0000 UTC m=+5850.805556982"
Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.980657 4933 scope.go:117] "RemoveContainer" containerID="d74a0b04fa8b718a204da9d72d832d20fedc5de4f8bcaa2a9ca0d7d0f624bc5a"
Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.998163 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c20b00c-c33c-48a7-9f47-09275ce2249e-config-data\") on node \"crc\" DevicePath \"\""
Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.998197 4933 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9c20b00c-c33c-48a7-9f47-09275ce2249e-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.998212 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cnhd4\" (UniqueName: \"kubernetes.io/projected/9c20b00c-c33c-48a7-9f47-09275ce2249e-kube-api-access-cnhd4\") on node \"crc\" DevicePath \"\""
Jan 22 07:23:22 crc kubenswrapper[4933]: I0122 07:23:22.998222 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c20b00c-c33c-48a7-9f47-09275ce2249e-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.011273 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.023528 4933 scope.go:117] "RemoveContainer" containerID="0f45835b5a893ef8c17772daba766623540cb99f95529c5870495724f8c2eb83"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.023653 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Jan 22 07:23:23 crc kubenswrapper[4933]: E0122 07:23:23.023872 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f45835b5a893ef8c17772daba766623540cb99f95529c5870495724f8c2eb83\": container with ID starting with 0f45835b5a893ef8c17772daba766623540cb99f95529c5870495724f8c2eb83 not found: ID does not exist" containerID="0f45835b5a893ef8c17772daba766623540cb99f95529c5870495724f8c2eb83"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.023903 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f45835b5a893ef8c17772daba766623540cb99f95529c5870495724f8c2eb83"} err="failed to get container status \"0f45835b5a893ef8c17772daba766623540cb99f95529c5870495724f8c2eb83\": rpc error: code = NotFound desc = could not find container \"0f45835b5a893ef8c17772daba766623540cb99f95529c5870495724f8c2eb83\": container with ID starting with 0f45835b5a893ef8c17772daba766623540cb99f95529c5870495724f8c2eb83 not found: ID does not exist"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.023926 4933 scope.go:117] "RemoveContainer" containerID="d74a0b04fa8b718a204da9d72d832d20fedc5de4f8bcaa2a9ca0d7d0f624bc5a"
Jan 22 07:23:23 crc kubenswrapper[4933]: E0122 07:23:23.024172 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d74a0b04fa8b718a204da9d72d832d20fedc5de4f8bcaa2a9ca0d7d0f624bc5a\": container with ID starting with d74a0b04fa8b718a204da9d72d832d20fedc5de4f8bcaa2a9ca0d7d0f624bc5a not found: ID does not exist" containerID="d74a0b04fa8b718a204da9d72d832d20fedc5de4f8bcaa2a9ca0d7d0f624bc5a"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.024194 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d74a0b04fa8b718a204da9d72d832d20fedc5de4f8bcaa2a9ca0d7d0f624bc5a"} err="failed to get container status \"d74a0b04fa8b718a204da9d72d832d20fedc5de4f8bcaa2a9ca0d7d0f624bc5a\": rpc error: code = NotFound desc = could not find container \"d74a0b04fa8b718a204da9d72d832d20fedc5de4f8bcaa2a9ca0d7d0f624bc5a\": container with ID starting with d74a0b04fa8b718a204da9d72d832d20fedc5de4f8bcaa2a9ca0d7d0f624bc5a not found: ID does not exist"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.024207 4933 scope.go:117] "RemoveContainer" containerID="0f45835b5a893ef8c17772daba766623540cb99f95529c5870495724f8c2eb83"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.024424 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f45835b5a893ef8c17772daba766623540cb99f95529c5870495724f8c2eb83"} err="failed to get container status \"0f45835b5a893ef8c17772daba766623540cb99f95529c5870495724f8c2eb83\": rpc error: code = NotFound desc = could not find container \"0f45835b5a893ef8c17772daba766623540cb99f95529c5870495724f8c2eb83\": container with ID starting with 0f45835b5a893ef8c17772daba766623540cb99f95529c5870495724f8c2eb83 not found: ID does not exist"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.024445 4933 scope.go:117] "RemoveContainer" containerID="d74a0b04fa8b718a204da9d72d832d20fedc5de4f8bcaa2a9ca0d7d0f624bc5a"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.024613 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d74a0b04fa8b718a204da9d72d832d20fedc5de4f8bcaa2a9ca0d7d0f624bc5a"} err="failed to get container status \"d74a0b04fa8b718a204da9d72d832d20fedc5de4f8bcaa2a9ca0d7d0f624bc5a\": rpc error: code = NotFound desc = could not find container \"d74a0b04fa8b718a204da9d72d832d20fedc5de4f8bcaa2a9ca0d7d0f624bc5a\": container with ID starting with d74a0b04fa8b718a204da9d72d832d20fedc5de4f8bcaa2a9ca0d7d0f624bc5a not found: ID does not exist"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.024631 4933 scope.go:117] "RemoveContainer" containerID="c7e246459f93cd58e28fd9b77ec63de80a78c5131a1435dd127d773fabde7386"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.039277 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Jan 22 07:23:23 crc kubenswrapper[4933]: E0122 07:23:23.040093 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c20b00c-c33c-48a7-9f47-09275ce2249e" containerName="nova-metadata-log"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.040214 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c20b00c-c33c-48a7-9f47-09275ce2249e" containerName="nova-metadata-log"
Jan 22 07:23:23 crc kubenswrapper[4933]: E0122 07:23:23.040302 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e955489f-8aff-49bd-848f-ac5bc3cd398d" containerName="nova-manage"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.040384 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="e955489f-8aff-49bd-848f-ac5bc3cd398d" containerName="nova-manage"
Jan 22 07:23:23 crc kubenswrapper[4933]: E0122 07:23:23.040471 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="324012ea-092d-423e-a452-7e9bb177bd61" containerName="nova-api-log"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.040549 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="324012ea-092d-423e-a452-7e9bb177bd61" containerName="nova-api-log"
Jan 22 07:23:23 crc kubenswrapper[4933]: E0122 07:23:23.040671 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="324012ea-092d-423e-a452-7e9bb177bd61" containerName="nova-api-api"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.040750 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="324012ea-092d-423e-a452-7e9bb177bd61" containerName="nova-api-api"
Jan 22 07:23:23 crc kubenswrapper[4933]: E0122 07:23:23.040836 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c20b00c-c33c-48a7-9f47-09275ce2249e" containerName="nova-metadata-metadata"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.040917 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c20b00c-c33c-48a7-9f47-09275ce2249e" containerName="nova-metadata-metadata"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.041294 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="324012ea-092d-423e-a452-7e9bb177bd61" containerName="nova-api-api"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.041385 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="324012ea-092d-423e-a452-7e9bb177bd61" containerName="nova-api-log"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.041504 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="9c20b00c-c33c-48a7-9f47-09275ce2249e" containerName="nova-metadata-log"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.041592 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="e955489f-8aff-49bd-848f-ac5bc3cd398d" containerName="nova-manage"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.041675 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="9c20b00c-c33c-48a7-9f47-09275ce2249e" containerName="nova-metadata-metadata"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.042921 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.043110 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.050969 4933 scope.go:117] "RemoveContainer" containerID="890447e30aa41e1a3d0ded31cbc5e76214485e8c91e8e0262115ef51f056ed4d"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.052481 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.093775 4933 scope.go:117] "RemoveContainer" containerID="c7e246459f93cd58e28fd9b77ec63de80a78c5131a1435dd127d773fabde7386"
Jan 22 07:23:23 crc kubenswrapper[4933]: E0122 07:23:23.094225 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c7e246459f93cd58e28fd9b77ec63de80a78c5131a1435dd127d773fabde7386\": container with ID starting with c7e246459f93cd58e28fd9b77ec63de80a78c5131a1435dd127d773fabde7386 not found: ID does not exist" containerID="c7e246459f93cd58e28fd9b77ec63de80a78c5131a1435dd127d773fabde7386"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.094286 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c7e246459f93cd58e28fd9b77ec63de80a78c5131a1435dd127d773fabde7386"} err="failed to get container status \"c7e246459f93cd58e28fd9b77ec63de80a78c5131a1435dd127d773fabde7386\": rpc error: code = NotFound desc = could not find container \"c7e246459f93cd58e28fd9b77ec63de80a78c5131a1435dd127d773fabde7386\": container with ID starting with c7e246459f93cd58e28fd9b77ec63de80a78c5131a1435dd127d773fabde7386 not found: ID does not exist"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.094314 4933 scope.go:117] "RemoveContainer" containerID="890447e30aa41e1a3d0ded31cbc5e76214485e8c91e8e0262115ef51f056ed4d"
Jan 22 07:23:23 crc kubenswrapper[4933]: E0122 07:23:23.094671 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"890447e30aa41e1a3d0ded31cbc5e76214485e8c91e8e0262115ef51f056ed4d\": container with ID starting with 890447e30aa41e1a3d0ded31cbc5e76214485e8c91e8e0262115ef51f056ed4d not found: ID does not exist" containerID="890447e30aa41e1a3d0ded31cbc5e76214485e8c91e8e0262115ef51f056ed4d"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.094702 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"890447e30aa41e1a3d0ded31cbc5e76214485e8c91e8e0262115ef51f056ed4d"} err="failed to get container status \"890447e30aa41e1a3d0ded31cbc5e76214485e8c91e8e0262115ef51f056ed4d\": rpc error: code = NotFound desc = could not find container \"890447e30aa41e1a3d0ded31cbc5e76214485e8c91e8e0262115ef51f056ed4d\": container with ID starting with 890447e30aa41e1a3d0ded31cbc5e76214485e8c91e8e0262115ef51f056ed4d not found: ID does not exist"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.094723 4933 scope.go:117] "RemoveContainer" containerID="c7e246459f93cd58e28fd9b77ec63de80a78c5131a1435dd127d773fabde7386"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.094993 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c7e246459f93cd58e28fd9b77ec63de80a78c5131a1435dd127d773fabde7386"} err="failed to get container status \"c7e246459f93cd58e28fd9b77ec63de80a78c5131a1435dd127d773fabde7386\": rpc error: code = NotFound desc = could not find container \"c7e246459f93cd58e28fd9b77ec63de80a78c5131a1435dd127d773fabde7386\": container with ID starting with c7e246459f93cd58e28fd9b77ec63de80a78c5131a1435dd127d773fabde7386 not found: ID does not exist"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.095031 4933 scope.go:117] "RemoveContainer" containerID="890447e30aa41e1a3d0ded31cbc5e76214485e8c91e8e0262115ef51f056ed4d"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.095355 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"890447e30aa41e1a3d0ded31cbc5e76214485e8c91e8e0262115ef51f056ed4d"} err="failed to get container status \"890447e30aa41e1a3d0ded31cbc5e76214485e8c91e8e0262115ef51f056ed4d\": rpc error: code = NotFound desc = could not find container \"890447e30aa41e1a3d0ded31cbc5e76214485e8c91e8e0262115ef51f056ed4d\": container with ID starting with 890447e30aa41e1a3d0ded31cbc5e76214485e8c91e8e0262115ef51f056ed4d not found: ID does not exist"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.201359 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a34fe3ba-7acd-4722-a155-807c3c39b343-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"a34fe3ba-7acd-4722-a155-807c3c39b343\") " pod="openstack/nova-api-0"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.201476 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a34fe3ba-7acd-4722-a155-807c3c39b343-config-data\") pod \"nova-api-0\" (UID: \"a34fe3ba-7acd-4722-a155-807c3c39b343\") " pod="openstack/nova-api-0"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.202089 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mqjqs\" (UniqueName: \"kubernetes.io/projected/a34fe3ba-7acd-4722-a155-807c3c39b343-kube-api-access-mqjqs\") pod \"nova-api-0\" (UID: \"a34fe3ba-7acd-4722-a155-807c3c39b343\") " pod="openstack/nova-api-0"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.202237 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a34fe3ba-7acd-4722-a155-807c3c39b343-logs\") pod \"nova-api-0\" (UID: \"a34fe3ba-7acd-4722-a155-807c3c39b343\") " pod="openstack/nova-api-0"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.304128 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a34fe3ba-7acd-4722-a155-807c3c39b343-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"a34fe3ba-7acd-4722-a155-807c3c39b343\") " pod="openstack/nova-api-0"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.304203 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a34fe3ba-7acd-4722-a155-807c3c39b343-config-data\") pod \"nova-api-0\" (UID: \"a34fe3ba-7acd-4722-a155-807c3c39b343\") " pod="openstack/nova-api-0"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.304293 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mqjqs\" (UniqueName: \"kubernetes.io/projected/a34fe3ba-7acd-4722-a155-807c3c39b343-kube-api-access-mqjqs\") pod \"nova-api-0\" (UID: \"a34fe3ba-7acd-4722-a155-807c3c39b343\") " pod="openstack/nova-api-0"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.304395 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a34fe3ba-7acd-4722-a155-807c3c39b343-logs\") pod \"nova-api-0\" (UID: \"a34fe3ba-7acd-4722-a155-807c3c39b343\") " pod="openstack/nova-api-0"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.304915 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a34fe3ba-7acd-4722-a155-807c3c39b343-logs\") pod \"nova-api-0\" (UID: \"a34fe3ba-7acd-4722-a155-807c3c39b343\") " pod="openstack/nova-api-0"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.316970 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a34fe3ba-7acd-4722-a155-807c3c39b343-config-data\") pod \"nova-api-0\" (UID: \"a34fe3ba-7acd-4722-a155-807c3c39b343\") " pod="openstack/nova-api-0"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.317051 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.320675 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a34fe3ba-7acd-4722-a155-807c3c39b343-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"a34fe3ba-7acd-4722-a155-807c3c39b343\") " pod="openstack/nova-api-0"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.326854 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"]
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.344027 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mqjqs\" (UniqueName: \"kubernetes.io/projected/a34fe3ba-7acd-4722-a155-807c3c39b343-kube-api-access-mqjqs\") pod \"nova-api-0\" (UID: \"a34fe3ba-7acd-4722-a155-807c3c39b343\") " pod="openstack/nova-api-0"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.362873 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.366378 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.368784 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.368994 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.379621 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.379899 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.460262 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-65df48cf75-dnvbm"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.508963 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de6dd3f0-8aef-4a9b-bdd0-90111619f8e3-config-data\") pod \"nova-metadata-0\" (UID: \"de6dd3f0-8aef-4a9b-bdd0-90111619f8e3\") " pod="openstack/nova-metadata-0"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.509000 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/de6dd3f0-8aef-4a9b-bdd0-90111619f8e3-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"de6dd3f0-8aef-4a9b-bdd0-90111619f8e3\") " pod="openstack/nova-metadata-0"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.509098 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/de6dd3f0-8aef-4a9b-bdd0-90111619f8e3-logs\") pod \"nova-metadata-0\" (UID: \"de6dd3f0-8aef-4a9b-bdd0-90111619f8e3\") " pod="openstack/nova-metadata-0"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.509116 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xkxvr\" (UniqueName: \"kubernetes.io/projected/de6dd3f0-8aef-4a9b-bdd0-90111619f8e3-kube-api-access-xkxvr\") pod \"nova-metadata-0\" (UID: \"de6dd3f0-8aef-4a9b-bdd0-90111619f8e3\") " pod="openstack/nova-metadata-0"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.509143 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de6dd3f0-8aef-4a9b-bdd0-90111619f8e3-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"de6dd3f0-8aef-4a9b-bdd0-90111619f8e3\") " pod="openstack/nova-metadata-0"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.529816 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-58c6cddd7-ks4fh"]
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.530229 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-58c6cddd7-ks4fh" podUID="f3f3b160-8f16-4d47-9658-893cc951620f" containerName="dnsmasq-dns" containerID="cri-o://9bf50eb554c05342fca0f27b2aea099b3dd6af03e05946538ce521661f68de3b" gracePeriod=10
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.611517 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/de6dd3f0-8aef-4a9b-bdd0-90111619f8e3-logs\") pod \"nova-metadata-0\" (UID: \"de6dd3f0-8aef-4a9b-bdd0-90111619f8e3\") " pod="openstack/nova-metadata-0"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.611584 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xkxvr\" (UniqueName: \"kubernetes.io/projected/de6dd3f0-8aef-4a9b-bdd0-90111619f8e3-kube-api-access-xkxvr\") pod \"nova-metadata-0\" (UID: \"de6dd3f0-8aef-4a9b-bdd0-90111619f8e3\") " pod="openstack/nova-metadata-0"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.611629 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de6dd3f0-8aef-4a9b-bdd0-90111619f8e3-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"de6dd3f0-8aef-4a9b-bdd0-90111619f8e3\") " pod="openstack/nova-metadata-0"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.611764 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de6dd3f0-8aef-4a9b-bdd0-90111619f8e3-config-data\") pod \"nova-metadata-0\" (UID: \"de6dd3f0-8aef-4a9b-bdd0-90111619f8e3\") " pod="openstack/nova-metadata-0"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.611780 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/de6dd3f0-8aef-4a9b-bdd0-90111619f8e3-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"de6dd3f0-8aef-4a9b-bdd0-90111619f8e3\") " pod="openstack/nova-metadata-0"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.612015 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/de6dd3f0-8aef-4a9b-bdd0-90111619f8e3-logs\") pod \"nova-metadata-0\" (UID: \"de6dd3f0-8aef-4a9b-bdd0-90111619f8e3\") " pod="openstack/nova-metadata-0"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.619188 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-58c6cddd7-ks4fh" podUID="f3f3b160-8f16-4d47-9658-893cc951620f" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.1.69:5353: connect: connection refused"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.619881 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de6dd3f0-8aef-4a9b-bdd0-90111619f8e3-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"de6dd3f0-8aef-4a9b-bdd0-90111619f8e3\") " pod="openstack/nova-metadata-0"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.620503 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/de6dd3f0-8aef-4a9b-bdd0-90111619f8e3-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"de6dd3f0-8aef-4a9b-bdd0-90111619f8e3\") " pod="openstack/nova-metadata-0"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.637224 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xkxvr\" (UniqueName: \"kubernetes.io/projected/de6dd3f0-8aef-4a9b-bdd0-90111619f8e3-kube-api-access-xkxvr\") pod \"nova-metadata-0\" (UID: \"de6dd3f0-8aef-4a9b-bdd0-90111619f8e3\") " pod="openstack/nova-metadata-0"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.637582 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de6dd3f0-8aef-4a9b-bdd0-90111619f8e3-config-data\") pod \"nova-metadata-0\" (UID: \"de6dd3f0-8aef-4a9b-bdd0-90111619f8e3\") " pod="openstack/nova-metadata-0"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.667649 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.809282 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.928566 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Jan 22 07:23:23 crc kubenswrapper[4933]: W0122 07:23:23.932901 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda34fe3ba_7acd_4722_a155_807c3c39b343.slice/crio-3752e1cceb67c2c76580951cc126fa03a1b15b858fbf58b0407c924a5e727702 WatchSource:0}: Error finding container 3752e1cceb67c2c76580951cc126fa03a1b15b858fbf58b0407c924a5e727702: Status 404 returned error can't find the container with id 3752e1cceb67c2c76580951cc126fa03a1b15b858fbf58b0407c924a5e727702
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.978512 4933 generic.go:334] "Generic (PLEG): container finished" podID="f3f3b160-8f16-4d47-9658-893cc951620f" containerID="9bf50eb554c05342fca0f27b2aea099b3dd6af03e05946538ce521661f68de3b" exitCode=0
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.978606 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58c6cddd7-ks4fh" event={"ID":"f3f3b160-8f16-4d47-9658-893cc951620f","Type":"ContainerDied","Data":"9bf50eb554c05342fca0f27b2aea099b3dd6af03e05946538ce521661f68de3b"}
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.980784 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a34fe3ba-7acd-4722-a155-807c3c39b343","Type":"ContainerStarted","Data":"3752e1cceb67c2c76580951cc126fa03a1b15b858fbf58b0407c924a5e727702"}
Jan 22 07:23:23 crc kubenswrapper[4933]: I0122 07:23:23.984001 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-58c6cddd7-ks4fh"
Jan 22 07:23:24 crc kubenswrapper[4933]: I0122 07:23:24.125123 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f3f3b160-8f16-4d47-9658-893cc951620f-dns-svc\") pod \"f3f3b160-8f16-4d47-9658-893cc951620f\" (UID: \"f3f3b160-8f16-4d47-9658-893cc951620f\") "
Jan 22 07:23:24 crc kubenswrapper[4933]: I0122 07:23:24.125457 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dt6ws\" (UniqueName: \"kubernetes.io/projected/f3f3b160-8f16-4d47-9658-893cc951620f-kube-api-access-dt6ws\") pod \"f3f3b160-8f16-4d47-9658-893cc951620f\" (UID: \"f3f3b160-8f16-4d47-9658-893cc951620f\") "
Jan 22 07:23:24 crc kubenswrapper[4933]: I0122 07:23:24.125481 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f3f3b160-8f16-4d47-9658-893cc951620f-config\") pod \"f3f3b160-8f16-4d47-9658-893cc951620f\" (UID: \"f3f3b160-8f16-4d47-9658-893cc951620f\") "
Jan 22 07:23:24 crc kubenswrapper[4933]: I0122 07:23:24.125517 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f3f3b160-8f16-4d47-9658-893cc951620f-ovsdbserver-sb\") pod \"f3f3b160-8f16-4d47-9658-893cc951620f\" (UID: \"f3f3b160-8f16-4d47-9658-893cc951620f\") "
Jan 22 07:23:24 crc kubenswrapper[4933]: I0122 07:23:24.125618 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f3f3b160-8f16-4d47-9658-893cc951620f-ovsdbserver-nb\") pod \"f3f3b160-8f16-4d47-9658-893cc951620f\" (UID: \"f3f3b160-8f16-4d47-9658-893cc951620f\") "
Jan 22 07:23:24 crc kubenswrapper[4933]: I0122 07:23:24.143879 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f3f3b160-8f16-4d47-9658-893cc951620f-kube-api-access-dt6ws" (OuterVolumeSpecName: "kube-api-access-dt6ws") pod "f3f3b160-8f16-4d47-9658-893cc951620f" (UID: "f3f3b160-8f16-4d47-9658-893cc951620f"). InnerVolumeSpecName "kube-api-access-dt6ws". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 07:23:24 crc kubenswrapper[4933]: I0122 07:23:24.189610 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f3f3b160-8f16-4d47-9658-893cc951620f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f3f3b160-8f16-4d47-9658-893cc951620f" (UID: "f3f3b160-8f16-4d47-9658-893cc951620f"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 07:23:24 crc kubenswrapper[4933]: I0122 07:23:24.199657 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f3f3b160-8f16-4d47-9658-893cc951620f-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f3f3b160-8f16-4d47-9658-893cc951620f" (UID: "f3f3b160-8f16-4d47-9658-893cc951620f"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 07:23:24 crc kubenswrapper[4933]: I0122 07:23:24.202984 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f3f3b160-8f16-4d47-9658-893cc951620f-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "f3f3b160-8f16-4d47-9658-893cc951620f" (UID: "f3f3b160-8f16-4d47-9658-893cc951620f"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 07:23:24 crc kubenswrapper[4933]: I0122 07:23:24.216410 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f3f3b160-8f16-4d47-9658-893cc951620f-config" (OuterVolumeSpecName: "config") pod "f3f3b160-8f16-4d47-9658-893cc951620f" (UID: "f3f3b160-8f16-4d47-9658-893cc951620f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 07:23:24 crc kubenswrapper[4933]: I0122 07:23:24.229345 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f3f3b160-8f16-4d47-9658-893cc951620f-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Jan 22 07:23:24 crc kubenswrapper[4933]: I0122 07:23:24.229415 4933 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f3f3b160-8f16-4d47-9658-893cc951620f-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 22 07:23:24 crc kubenswrapper[4933]: I0122 07:23:24.229432 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dt6ws\" (UniqueName: \"kubernetes.io/projected/f3f3b160-8f16-4d47-9658-893cc951620f-kube-api-access-dt6ws\") on node \"crc\" DevicePath \"\""
Jan 22 07:23:24 crc kubenswrapper[4933]: I0122 07:23:24.229474 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f3f3b160-8f16-4d47-9658-893cc951620f-config\") on node \"crc\" DevicePath \"\""
Jan 22 07:23:24 crc kubenswrapper[4933]: I0122 07:23:24.229489 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f3f3b160-8f16-4d47-9658-893cc951620f-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Jan 22 07:23:24 crc kubenswrapper[4933]: I0122 07:23:24.347153 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Jan 22 07:23:24 crc kubenswrapper[4933]: W0122 07:23:24.354259 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podde6dd3f0_8aef_4a9b_bdd0_90111619f8e3.slice/crio-3796831db7e0c21e48224a616f2db3e305631b8e573abac3e036c393513c3d05 WatchSource:0}: Error finding container 3796831db7e0c21e48224a616f2db3e305631b8e573abac3e036c393513c3d05: Status 404 returned error can't find the container with id 3796831db7e0c21e48224a616f2db3e305631b8e573abac3e036c393513c3d05
Jan 22 07:23:24 crc kubenswrapper[4933]: I0122 07:23:24.502492 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="324012ea-092d-423e-a452-7e9bb177bd61" path="/var/lib/kubelet/pods/324012ea-092d-423e-a452-7e9bb177bd61/volumes"
Jan 22 07:23:24 crc kubenswrapper[4933]: I0122 07:23:24.503826 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9c20b00c-c33c-48a7-9f47-09275ce2249e" path="/var/lib/kubelet/pods/9c20b00c-c33c-48a7-9f47-09275ce2249e/volumes"
Jan 22 07:23:24 crc kubenswrapper[4933]: I0122 07:23:24.990682 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a34fe3ba-7acd-4722-a155-807c3c39b343","Type":"ContainerStarted","Data":"295de2c3e07a1e6b164e007596aa2ecd889911e74998d3a2a38bf3203aae99e1"}
Jan 22 07:23:24 crc kubenswrapper[4933]: I0122 07:23:24.990723 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a34fe3ba-7acd-4722-a155-807c3c39b343","Type":"ContainerStarted","Data":"ec5d6dd9b89176199492224320b2021a26fd9a70569d0db9a56fd068dab85b34"}
Jan 22 07:23:24 crc kubenswrapper[4933]: I0122 07:23:24.992349 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"de6dd3f0-8aef-4a9b-bdd0-90111619f8e3","Type":"ContainerStarted","Data":"74944f2f8c4a7049e1d377bcbfa00fe176131efd6215b01d78a10f3221b1dfd5"}
Jan 22 07:23:24 crc kubenswrapper[4933]: I0122 07:23:24.992408 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"de6dd3f0-8aef-4a9b-bdd0-90111619f8e3","Type":"ContainerStarted","Data":"3796831db7e0c21e48224a616f2db3e305631b8e573abac3e036c393513c3d05"}
Jan 22 07:23:24 crc kubenswrapper[4933]: I0122 07:23:24.994833 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58c6cddd7-ks4fh" event={"ID":"f3f3b160-8f16-4d47-9658-893cc951620f","Type":"ContainerDied","Data":"3237ce6bba4795f5c7ff53809cc7b7c4efa69f36e2d7b9f7ca71e8838c83f62c"}
Jan 22 07:23:24 crc kubenswrapper[4933]: I0122 07:23:24.994877 4933 scope.go:117] "RemoveContainer" containerID="9bf50eb554c05342fca0f27b2aea099b3dd6af03e05946538ce521661f68de3b"
Jan 22 07:23:24 crc kubenswrapper[4933]: I0122 07:23:24.994899 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-58c6cddd7-ks4fh"
Jan 22 07:23:25 crc kubenswrapper[4933]: I0122 07:23:25.018295 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.018275281 podStartE2EDuration="3.018275281s" podCreationTimestamp="2026-01-22 07:23:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:23:25.015501554 +0000 UTC m=+5852.852626937" watchObservedRunningTime="2026-01-22 07:23:25.018275281 +0000 UTC m=+5852.855400634"
Jan 22 07:23:25 crc kubenswrapper[4933]: I0122 07:23:25.033246 4933 scope.go:117] "RemoveContainer" containerID="e6aa2a7025bc620a28af97bc40861c665624319305ac963a438363c949aaad6a"
Jan 22 07:23:25 crc kubenswrapper[4933]: I0122 07:23:25.044435 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-58c6cddd7-ks4fh"]
Jan 22 07:23:25 crc kubenswrapper[4933]: I0122 07:23:25.054551 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-58c6cddd7-ks4fh"]
Jan 22 07:23:26 crc kubenswrapper[4933]: I0122 07:23:26.502663 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f3f3b160-8f16-4d47-9658-893cc951620f" path="/var/lib/kubelet/pods/f3f3b160-8f16-4d47-9658-893cc951620f/volumes"
Jan 22 07:23:27 crc kubenswrapper[4933]: I0122 07:23:27.019605 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"de6dd3f0-8aef-4a9b-bdd0-90111619f8e3","Type":"ContainerStarted","Data":"84ba846608649e106c75ff05662414b6d2bdeba99bc4bacae9896c9010183c81"}
Jan 22 07:23:27 crc kubenswrapper[4933]: I0122 07:23:27.047231 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=4.047212615 podStartE2EDuration="4.047212615s" podCreationTimestamp="2026-01-22 07:23:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:23:27.044807717 +0000 UTC m=+5854.881933130" watchObservedRunningTime="2026-01-22 07:23:27.047212615 +0000 UTC m=+5854.884337968"
Jan 22 07:23:28 crc kubenswrapper[4933]: I0122 07:23:28.666939 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0"
Jan 22 07:23:28 crc kubenswrapper[4933]: I0122 07:23:28.687278 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0"
Jan 22 07:23:28 crc kubenswrapper[4933]: I0122 07:23:28.809555 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Jan 22 07:23:28 crc kubenswrapper[4933]: I0122 07:23:28.809634 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Jan 22 07:23:29 crc kubenswrapper[4933]: I0122 07:23:29.051665 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0"
Jan 22 07:23:31 crc kubenswrapper[4933]: I0122 07:23:31.342242 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0"
Jan 22 07:23:31 crc kubenswrapper[4933]: I0122 07:23:31.848197 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-m8wzk"]
Jan 22 07:23:31 crc kubenswrapper[4933]: E0122 07:23:31.848985 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3f3b160-8f16-4d47-9658-893cc951620f" containerName="dnsmasq-dns"
Jan 22 07:23:31 crc kubenswrapper[4933]: I0122 07:23:31.849014 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3f3b160-8f16-4d47-9658-893cc951620f" containerName="dnsmasq-dns"
Jan 22 07:23:31 crc kubenswrapper[4933]: E0122 07:23:31.849044 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3f3b160-8f16-4d47-9658-893cc951620f" containerName="init"
Jan 22 07:23:31 crc kubenswrapper[4933]: I0122 07:23:31.849058 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3f3b160-8f16-4d47-9658-893cc951620f" containerName="init"
Jan 22 07:23:31 crc kubenswrapper[4933]: I0122 07:23:31.849578 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="f3f3b160-8f16-4d47-9658-893cc951620f" containerName="dnsmasq-dns"
Jan 22 07:23:31 crc kubenswrapper[4933]: I0122 07:23:31.850563 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-m8wzk"
Jan 22 07:23:31 crc kubenswrapper[4933]: I0122 07:23:31.853980 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts"
Jan 22 07:23:31 crc kubenswrapper[4933]: I0122 07:23:31.854299 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data"
Jan 22 07:23:31 crc kubenswrapper[4933]: I0122 07:23:31.861440 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-m8wzk"]
Jan 22 07:23:31 crc kubenswrapper[4933]: I0122 07:23:31.998124 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6759c870-c931-4a37-9062-fde2dfada3f1-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-m8wzk\" (UID: \"6759c870-c931-4a37-9062-fde2dfada3f1\") " pod="openstack/nova-cell1-cell-mapping-m8wzk"
Jan 22 07:23:31 crc kubenswrapper[4933]: I0122 07:23:31.998259 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6759c870-c931-4a37-9062-fde2dfada3f1-scripts\") pod \"nova-cell1-cell-mapping-m8wzk\" (UID: \"6759c870-c931-4a37-9062-fde2dfada3f1\") " pod="openstack/nova-cell1-cell-mapping-m8wzk"
Jan 22 07:23:31 crc kubenswrapper[4933]: I0122 07:23:31.998323 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4hgr5\" (UniqueName: \"kubernetes.io/projected/6759c870-c931-4a37-9062-fde2dfada3f1-kube-api-access-4hgr5\") pod \"nova-cell1-cell-mapping-m8wzk\" (UID: \"6759c870-c931-4a37-9062-fde2dfada3f1\") " pod="openstack/nova-cell1-cell-mapping-m8wzk"
Jan 22 07:23:31 crc kubenswrapper[4933]: I0122 07:23:31.998513 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6759c870-c931-4a37-9062-fde2dfada3f1-config-data\") pod \"nova-cell1-cell-mapping-m8wzk\" (UID: \"6759c870-c931-4a37-9062-fde2dfada3f1\") " pod="openstack/nova-cell1-cell-mapping-m8wzk"
Jan 22 07:23:32 crc kubenswrapper[4933]: I0122 07:23:32.100051 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6759c870-c931-4a37-9062-fde2dfada3f1-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-m8wzk\" (UID: \"6759c870-c931-4a37-9062-fde2dfada3f1\") " pod="openstack/nova-cell1-cell-mapping-m8wzk"
Jan 22 07:23:32 crc kubenswrapper[4933]: I0122 07:23:32.100688 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6759c870-c931-4a37-9062-fde2dfada3f1-scripts\") pod \"nova-cell1-cell-mapping-m8wzk\" (UID: \"6759c870-c931-4a37-9062-fde2dfada3f1\") " pod="openstack/nova-cell1-cell-mapping-m8wzk"
Jan 22 07:23:32 crc kubenswrapper[4933]: I0122 07:23:32.100771 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4hgr5\" (UniqueName: \"kubernetes.io/projected/6759c870-c931-4a37-9062-fde2dfada3f1-kube-api-access-4hgr5\") pod \"nova-cell1-cell-mapping-m8wzk\" (UID: \"6759c870-c931-4a37-9062-fde2dfada3f1\") " pod="openstack/nova-cell1-cell-mapping-m8wzk"
Jan 22 07:23:32 crc kubenswrapper[4933]: I0122 07:23:32.100856 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6759c870-c931-4a37-9062-fde2dfada3f1-config-data\") pod \"nova-cell1-cell-mapping-m8wzk\" (UID: \"6759c870-c931-4a37-9062-fde2dfada3f1\") " pod="openstack/nova-cell1-cell-mapping-m8wzk"
Jan 22 07:23:32 crc kubenswrapper[4933]: I0122 07:23:32.107408 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6759c870-c931-4a37-9062-fde2dfada3f1-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-m8wzk\" (UID: \"6759c870-c931-4a37-9062-fde2dfada3f1\") " pod="openstack/nova-cell1-cell-mapping-m8wzk"
Jan 22 07:23:32 crc kubenswrapper[4933]: I0122 07:23:32.108605 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6759c870-c931-4a37-9062-fde2dfada3f1-config-data\") pod \"nova-cell1-cell-mapping-m8wzk\" (UID: \"6759c870-c931-4a37-9062-fde2dfada3f1\") " pod="openstack/nova-cell1-cell-mapping-m8wzk"
Jan 22 07:23:32 crc kubenswrapper[4933]: I0122 07:23:32.117786 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4hgr5\" (UniqueName: \"kubernetes.io/projected/6759c870-c931-4a37-9062-fde2dfada3f1-kube-api-access-4hgr5\") pod \"nova-cell1-cell-mapping-m8wzk\" (UID: \"6759c870-c931-4a37-9062-fde2dfada3f1\") " pod="openstack/nova-cell1-cell-mapping-m8wzk"
Jan 22 07:23:32 crc kubenswrapper[4933]: I0122 07:23:32.121643 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6759c870-c931-4a37-9062-fde2dfada3f1-scripts\") pod \"nova-cell1-cell-mapping-m8wzk\" (UID: \"6759c870-c931-4a37-9062-fde2dfada3f1\") " pod="openstack/nova-cell1-cell-mapping-m8wzk"
Jan 22 07:23:32 crc kubenswrapper[4933]: I0122 07:23:32.175296 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-m8wzk"
Jan 22 07:23:32 crc kubenswrapper[4933]: W0122 07:23:32.685970 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6759c870_c931_4a37_9062_fde2dfada3f1.slice/crio-2ef9bed9776e20836c64d4b770ee08fef6cafa3a7057a4cf3b00f4182837ea42 WatchSource:0}: Error finding container 2ef9bed9776e20836c64d4b770ee08fef6cafa3a7057a4cf3b00f4182837ea42: Status 404 returned error can't find the container with id 2ef9bed9776e20836c64d4b770ee08fef6cafa3a7057a4cf3b00f4182837ea42
Jan 22 07:23:32 crc kubenswrapper[4933]: I0122 07:23:32.688329 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-m8wzk"]
Jan 22 07:23:33 crc kubenswrapper[4933]: I0122 07:23:33.074021 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-m8wzk" event={"ID":"6759c870-c931-4a37-9062-fde2dfada3f1","Type":"ContainerStarted","Data":"3c0938bfe5d57f0b82fe52c68f493cdafaf0b09fd545d01e19e95ea0c36345ed"}
Jan 22 07:23:33 crc kubenswrapper[4933]: I0122 07:23:33.074067 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-m8wzk" event={"ID":"6759c870-c931-4a37-9062-fde2dfada3f1","Type":"ContainerStarted","Data":"2ef9bed9776e20836c64d4b770ee08fef6cafa3a7057a4cf3b00f4182837ea42"}
Jan 22 07:23:33 crc kubenswrapper[4933]: I0122 07:23:33.092962 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-m8wzk" podStartSLOduration=2.092942057 podStartE2EDuration="2.092942057s" podCreationTimestamp="2026-01-22 07:23:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:23:33.086696645 +0000 UTC m=+5860.923822018" watchObservedRunningTime="2026-01-22 07:23:33.092942057 +0000 UTC m=+5860.930067410"
Jan 22 07:23:33 crc kubenswrapper[4933]: I0122 07:23:33.380606 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Jan 22 07:23:33 crc kubenswrapper[4933]: I0122 07:23:33.380991 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Jan 22 07:23:33 crc kubenswrapper[4933]: I0122 07:23:33.810292 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Jan 22 07:23:33 crc kubenswrapper[4933]: I0122 07:23:33.810379 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Jan 22 07:23:34 crc kubenswrapper[4933]: I0122 07:23:34.463246 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="a34fe3ba-7acd-4722-a155-807c3c39b343" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.1.90:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 22 07:23:34 crc kubenswrapper[4933]: I0122 07:23:34.463302 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="a34fe3ba-7acd-4722-a155-807c3c39b343" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.1.90:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 22 07:23:34 crc kubenswrapper[4933]: I0122 07:23:34.825376 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="de6dd3f0-8aef-4a9b-bdd0-90111619f8e3" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.1.91:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 22 07:23:34 crc kubenswrapper[4933]: I0122 07:23:34.825462 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="de6dd3f0-8aef-4a9b-bdd0-90111619f8e3" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.1.91:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 22 07:23:38 crc kubenswrapper[4933]: I0122 07:23:38.124902 4933 generic.go:334] "Generic (PLEG): container finished" podID="6759c870-c931-4a37-9062-fde2dfada3f1" containerID="3c0938bfe5d57f0b82fe52c68f493cdafaf0b09fd545d01e19e95ea0c36345ed" exitCode=0
Jan 22 07:23:38 crc kubenswrapper[4933]: I0122 07:23:38.125000 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-m8wzk" event={"ID":"6759c870-c931-4a37-9062-fde2dfada3f1","Type":"ContainerDied","Data":"3c0938bfe5d57f0b82fe52c68f493cdafaf0b09fd545d01e19e95ea0c36345ed"}
Jan 22 07:23:39 crc kubenswrapper[4933]: I0122 07:23:39.544569 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-m8wzk"
Jan 22 07:23:39 crc kubenswrapper[4933]: I0122 07:23:39.675321 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6759c870-c931-4a37-9062-fde2dfada3f1-combined-ca-bundle\") pod \"6759c870-c931-4a37-9062-fde2dfada3f1\" (UID: \"6759c870-c931-4a37-9062-fde2dfada3f1\") "
Jan 22 07:23:39 crc kubenswrapper[4933]: I0122 07:23:39.675395 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6759c870-c931-4a37-9062-fde2dfada3f1-config-data\") pod \"6759c870-c931-4a37-9062-fde2dfada3f1\" (UID: \"6759c870-c931-4a37-9062-fde2dfada3f1\") "
Jan 22 07:23:39 crc kubenswrapper[4933]: I0122 07:23:39.675499 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4hgr5\" (UniqueName: \"kubernetes.io/projected/6759c870-c931-4a37-9062-fde2dfada3f1-kube-api-access-4hgr5\") pod \"6759c870-c931-4a37-9062-fde2dfada3f1\" (UID: \"6759c870-c931-4a37-9062-fde2dfada3f1\") "
Jan 22 07:23:39 crc kubenswrapper[4933]: I0122 07:23:39.675607 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6759c870-c931-4a37-9062-fde2dfada3f1-scripts\") pod \"6759c870-c931-4a37-9062-fde2dfada3f1\" (UID: \"6759c870-c931-4a37-9062-fde2dfada3f1\") "
Jan 22 07:23:39 crc kubenswrapper[4933]: I0122 07:23:39.681801 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6759c870-c931-4a37-9062-fde2dfada3f1-kube-api-access-4hgr5" (OuterVolumeSpecName: "kube-api-access-4hgr5") pod "6759c870-c931-4a37-9062-fde2dfada3f1" (UID: "6759c870-c931-4a37-9062-fde2dfada3f1"). InnerVolumeSpecName "kube-api-access-4hgr5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 07:23:39 crc kubenswrapper[4933]: I0122 07:23:39.681908 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6759c870-c931-4a37-9062-fde2dfada3f1-scripts" (OuterVolumeSpecName: "scripts") pod "6759c870-c931-4a37-9062-fde2dfada3f1" (UID: "6759c870-c931-4a37-9062-fde2dfada3f1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 07:23:39 crc kubenswrapper[4933]: I0122 07:23:39.709778 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6759c870-c931-4a37-9062-fde2dfada3f1-config-data" (OuterVolumeSpecName: "config-data") pod "6759c870-c931-4a37-9062-fde2dfada3f1" (UID: "6759c870-c931-4a37-9062-fde2dfada3f1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 07:23:39 crc kubenswrapper[4933]: I0122 07:23:39.710439 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6759c870-c931-4a37-9062-fde2dfada3f1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6759c870-c931-4a37-9062-fde2dfada3f1" (UID: "6759c870-c931-4a37-9062-fde2dfada3f1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 07:23:39 crc kubenswrapper[4933]: I0122 07:23:39.777738 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4hgr5\" (UniqueName: \"kubernetes.io/projected/6759c870-c931-4a37-9062-fde2dfada3f1-kube-api-access-4hgr5\") on node \"crc\" DevicePath \"\""
Jan 22 07:23:39 crc kubenswrapper[4933]: I0122 07:23:39.777781 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6759c870-c931-4a37-9062-fde2dfada3f1-scripts\") on node \"crc\" DevicePath \"\""
Jan 22 07:23:39 crc kubenswrapper[4933]: I0122 07:23:39.777797 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6759c870-c931-4a37-9062-fde2dfada3f1-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 22 07:23:39 crc kubenswrapper[4933]: I0122 07:23:39.777806 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6759c870-c931-4a37-9062-fde2dfada3f1-config-data\") on node \"crc\" DevicePath \"\""
Jan 22 07:23:40 crc kubenswrapper[4933]: I0122 07:23:40.160600 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-m8wzk" event={"ID":"6759c870-c931-4a37-9062-fde2dfada3f1","Type":"ContainerDied","Data":"2ef9bed9776e20836c64d4b770ee08fef6cafa3a7057a4cf3b00f4182837ea42"}
Jan 22 07:23:40 crc kubenswrapper[4933]: I0122 07:23:40.160730 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2ef9bed9776e20836c64d4b770ee08fef6cafa3a7057a4cf3b00f4182837ea42"
Jan 22 07:23:40 crc kubenswrapper[4933]: I0122 07:23:40.161109 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-m8wzk"
Jan 22 07:23:40 crc kubenswrapper[4933]: I0122 07:23:40.340439 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Jan 22 07:23:40 crc kubenswrapper[4933]: I0122 07:23:40.340746 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="a34fe3ba-7acd-4722-a155-807c3c39b343" containerName="nova-api-log" containerID="cri-o://ec5d6dd9b89176199492224320b2021a26fd9a70569d0db9a56fd068dab85b34" gracePeriod=30
Jan 22 07:23:40 crc kubenswrapper[4933]: I0122 07:23:40.340914 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="a34fe3ba-7acd-4722-a155-807c3c39b343" containerName="nova-api-api" containerID="cri-o://295de2c3e07a1e6b164e007596aa2ecd889911e74998d3a2a38bf3203aae99e1" gracePeriod=30
Jan 22 07:23:40 crc kubenswrapper[4933]: I0122 07:23:40.365781 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Jan 22 07:23:40 crc kubenswrapper[4933]: I0122 07:23:40.366302 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="de6dd3f0-8aef-4a9b-bdd0-90111619f8e3" containerName="nova-metadata-metadata" containerID="cri-o://84ba846608649e106c75ff05662414b6d2bdeba99bc4bacae9896c9010183c81" gracePeriod=30
Jan 22 07:23:40 crc kubenswrapper[4933]: I0122 07:23:40.366147 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="de6dd3f0-8aef-4a9b-bdd0-90111619f8e3" containerName="nova-metadata-log" containerID="cri-o://74944f2f8c4a7049e1d377bcbfa00fe176131efd6215b01d78a10f3221b1dfd5" gracePeriod=30
Jan 22 07:23:41 crc kubenswrapper[4933]: I0122 07:23:41.170994 4933 generic.go:334] "Generic (PLEG): container finished" podID="a34fe3ba-7acd-4722-a155-807c3c39b343" containerID="ec5d6dd9b89176199492224320b2021a26fd9a70569d0db9a56fd068dab85b34" exitCode=143
Jan 22 07:23:41 crc kubenswrapper[4933]: I0122 07:23:41.171112 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a34fe3ba-7acd-4722-a155-807c3c39b343","Type":"ContainerDied","Data":"ec5d6dd9b89176199492224320b2021a26fd9a70569d0db9a56fd068dab85b34"}
Jan 22 07:23:41 crc kubenswrapper[4933]: I0122 07:23:41.174069 4933 generic.go:334] "Generic (PLEG): container finished" podID="de6dd3f0-8aef-4a9b-bdd0-90111619f8e3" containerID="74944f2f8c4a7049e1d377bcbfa00fe176131efd6215b01d78a10f3221b1dfd5" exitCode=143
Jan 22 07:23:41 crc kubenswrapper[4933]: I0122 07:23:41.174095 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"de6dd3f0-8aef-4a9b-bdd0-90111619f8e3","Type":"ContainerDied","Data":"74944f2f8c4a7049e1d377bcbfa00fe176131efd6215b01d78a10f3221b1dfd5"}
Jan 22 07:23:53 crc kubenswrapper[4933]: I0122 07:23:53.271553 4933 generic.go:334] "Generic (PLEG): container finished" podID="3e2edc4a-a87d-4d50-bbd1-5006af141eb7" containerID="00be8d04b65720f0972bc7dd15fbb35735279426416862480ca65e8873323fb2" exitCode=137
Jan 22 07:23:53 crc kubenswrapper[4933]: I0122 07:23:53.271698 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3e2edc4a-a87d-4d50-bbd1-5006af141eb7","Type":"ContainerDied","Data":"00be8d04b65720f0972bc7dd15fbb35735279426416862480ca65e8873323fb2"}
Jan 22 07:23:53 crc kubenswrapper[4933]: I0122 07:23:53.380655 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Jan 22 07:23:53 crc kubenswrapper[4933]: I0122 07:23:53.380728 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Jan 22 07:23:53 crc kubenswrapper[4933]: I0122 07:23:53.784147 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Jan 22 07:23:53 crc kubenswrapper[4933]: I0122 07:23:53.842975 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e2edc4a-a87d-4d50-bbd1-5006af141eb7-combined-ca-bundle\") pod \"3e2edc4a-a87d-4d50-bbd1-5006af141eb7\" (UID: \"3e2edc4a-a87d-4d50-bbd1-5006af141eb7\") "
Jan 22 07:23:53 crc kubenswrapper[4933]: I0122 07:23:53.843222 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n2m58\" (UniqueName: \"kubernetes.io/projected/3e2edc4a-a87d-4d50-bbd1-5006af141eb7-kube-api-access-n2m58\") pod \"3e2edc4a-a87d-4d50-bbd1-5006af141eb7\" (UID: \"3e2edc4a-a87d-4d50-bbd1-5006af141eb7\") "
Jan 22 07:23:53 crc kubenswrapper[4933]: I0122 07:23:53.843285 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e2edc4a-a87d-4d50-bbd1-5006af141eb7-config-data\") pod \"3e2edc4a-a87d-4d50-bbd1-5006af141eb7\" (UID: \"3e2edc4a-a87d-4d50-bbd1-5006af141eb7\") "
Jan 22 07:23:53 crc kubenswrapper[4933]: I0122 07:23:53.849132 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3e2edc4a-a87d-4d50-bbd1-5006af141eb7-kube-api-access-n2m58" (OuterVolumeSpecName: "kube-api-access-n2m58") pod "3e2edc4a-a87d-4d50-bbd1-5006af141eb7" (UID: "3e2edc4a-a87d-4d50-bbd1-5006af141eb7"). InnerVolumeSpecName "kube-api-access-n2m58". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 07:23:53 crc kubenswrapper[4933]: I0122 07:23:53.874577 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e2edc4a-a87d-4d50-bbd1-5006af141eb7-config-data" (OuterVolumeSpecName: "config-data") pod "3e2edc4a-a87d-4d50-bbd1-5006af141eb7" (UID: "3e2edc4a-a87d-4d50-bbd1-5006af141eb7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 07:23:53 crc kubenswrapper[4933]: I0122 07:23:53.879215 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e2edc4a-a87d-4d50-bbd1-5006af141eb7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3e2edc4a-a87d-4d50-bbd1-5006af141eb7" (UID: "3e2edc4a-a87d-4d50-bbd1-5006af141eb7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 07:23:53 crc kubenswrapper[4933]: I0122 07:23:53.945382 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n2m58\" (UniqueName: \"kubernetes.io/projected/3e2edc4a-a87d-4d50-bbd1-5006af141eb7-kube-api-access-n2m58\") on node \"crc\" DevicePath \"\""
Jan 22 07:23:53 crc kubenswrapper[4933]: I0122 07:23:53.945420 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e2edc4a-a87d-4d50-bbd1-5006af141eb7-config-data\") on node \"crc\" DevicePath \"\""
Jan 22 07:23:53 crc kubenswrapper[4933]: I0122 07:23:53.945433 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e2edc4a-a87d-4d50-bbd1-5006af141eb7-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.210598 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.219632 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.273765 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a34fe3ba-7acd-4722-a155-807c3c39b343-logs\") pod \"a34fe3ba-7acd-4722-a155-807c3c39b343\" (UID: \"a34fe3ba-7acd-4722-a155-807c3c39b343\") "
Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.274712 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a34fe3ba-7acd-4722-a155-807c3c39b343-logs" (OuterVolumeSpecName: "logs") pod "a34fe3ba-7acd-4722-a155-807c3c39b343" (UID: "a34fe3ba-7acd-4722-a155-807c3c39b343"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.275285 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xkxvr\" (UniqueName: \"kubernetes.io/projected/de6dd3f0-8aef-4a9b-bdd0-90111619f8e3-kube-api-access-xkxvr\") pod \"de6dd3f0-8aef-4a9b-bdd0-90111619f8e3\" (UID: \"de6dd3f0-8aef-4a9b-bdd0-90111619f8e3\") "
Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.275397 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a34fe3ba-7acd-4722-a155-807c3c39b343-config-data\") pod \"a34fe3ba-7acd-4722-a155-807c3c39b343\" (UID: \"a34fe3ba-7acd-4722-a155-807c3c39b343\") "
Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.275440 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/de6dd3f0-8aef-4a9b-bdd0-90111619f8e3-logs\") pod \"de6dd3f0-8aef-4a9b-bdd0-90111619f8e3\" (UID: \"de6dd3f0-8aef-4a9b-bdd0-90111619f8e3\") "
Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.275885 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a34fe3ba-7acd-4722-a155-807c3c39b343-combined-ca-bundle\") pod \"a34fe3ba-7acd-4722-a155-807c3c39b343\" (UID: \"a34fe3ba-7acd-4722-a155-807c3c39b343\") "
Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.276020 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de6dd3f0-8aef-4a9b-bdd0-90111619f8e3-config-data\") pod \"de6dd3f0-8aef-4a9b-bdd0-90111619f8e3\" (UID: \"de6dd3f0-8aef-4a9b-bdd0-90111619f8e3\") "
Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.276057 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de6dd3f0-8aef-4a9b-bdd0-90111619f8e3-combined-ca-bundle\") pod \"de6dd3f0-8aef-4a9b-bdd0-90111619f8e3\" (UID: \"de6dd3f0-8aef-4a9b-bdd0-90111619f8e3\") "
Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.276098 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mqjqs\" (UniqueName: \"kubernetes.io/projected/a34fe3ba-7acd-4722-a155-807c3c39b343-kube-api-access-mqjqs\") pod \"a34fe3ba-7acd-4722-a155-807c3c39b343\" (UID: \"a34fe3ba-7acd-4722-a155-807c3c39b343\") "
Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.276116 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/de6dd3f0-8aef-4a9b-bdd0-90111619f8e3-nova-metadata-tls-certs\") pod \"de6dd3f0-8aef-4a9b-bdd0-90111619f8e3\" (UID: \"de6dd3f0-8aef-4a9b-bdd0-90111619f8e3\") "
Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.276641 4933 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a34fe3ba-7acd-4722-a155-807c3c39b343-logs\") on node \"crc\" DevicePath \"\""
Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.277022 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de6dd3f0-8aef-4a9b-bdd0-90111619f8e3-logs" (OuterVolumeSpecName: "logs") pod "de6dd3f0-8aef-4a9b-bdd0-90111619f8e3" (UID: "de6dd3f0-8aef-4a9b-bdd0-90111619f8e3"). InnerVolumeSpecName "logs".
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.283665 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de6dd3f0-8aef-4a9b-bdd0-90111619f8e3-kube-api-access-xkxvr" (OuterVolumeSpecName: "kube-api-access-xkxvr") pod "de6dd3f0-8aef-4a9b-bdd0-90111619f8e3" (UID: "de6dd3f0-8aef-4a9b-bdd0-90111619f8e3"). InnerVolumeSpecName "kube-api-access-xkxvr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.297955 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a34fe3ba-7acd-4722-a155-807c3c39b343-kube-api-access-mqjqs" (OuterVolumeSpecName: "kube-api-access-mqjqs") pod "a34fe3ba-7acd-4722-a155-807c3c39b343" (UID: "a34fe3ba-7acd-4722-a155-807c3c39b343"). InnerVolumeSpecName "kube-api-access-mqjqs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.301045 4933 generic.go:334] "Generic (PLEG): container finished" podID="a34fe3ba-7acd-4722-a155-807c3c39b343" containerID="295de2c3e07a1e6b164e007596aa2ecd889911e74998d3a2a38bf3203aae99e1" exitCode=0 Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.301254 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a34fe3ba-7acd-4722-a155-807c3c39b343","Type":"ContainerDied","Data":"295de2c3e07a1e6b164e007596aa2ecd889911e74998d3a2a38bf3203aae99e1"} Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.301355 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a34fe3ba-7acd-4722-a155-807c3c39b343","Type":"ContainerDied","Data":"3752e1cceb67c2c76580951cc126fa03a1b15b858fbf58b0407c924a5e727702"} Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.301422 4933 scope.go:117] "RemoveContainer" containerID="295de2c3e07a1e6b164e007596aa2ecd889911e74998d3a2a38bf3203aae99e1" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.301593 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.305921 4933 generic.go:334] "Generic (PLEG): container finished" podID="de6dd3f0-8aef-4a9b-bdd0-90111619f8e3" containerID="84ba846608649e106c75ff05662414b6d2bdeba99bc4bacae9896c9010183c81" exitCode=0 Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.306035 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"de6dd3f0-8aef-4a9b-bdd0-90111619f8e3","Type":"ContainerDied","Data":"84ba846608649e106c75ff05662414b6d2bdeba99bc4bacae9896c9010183c81"} Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.306139 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"de6dd3f0-8aef-4a9b-bdd0-90111619f8e3","Type":"ContainerDied","Data":"3796831db7e0c21e48224a616f2db3e305631b8e573abac3e036c393513c3d05"} Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.306328 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.306337 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a34fe3ba-7acd-4722-a155-807c3c39b343-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a34fe3ba-7acd-4722-a155-807c3c39b343" (UID: "a34fe3ba-7acd-4722-a155-807c3c39b343"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.309100 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3e2edc4a-a87d-4d50-bbd1-5006af141eb7","Type":"ContainerDied","Data":"63092c6f0c6fea67cebe498a27d85bb74274369d898b9942bdeab305844d423f"} Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.309196 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.313614 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a34fe3ba-7acd-4722-a155-807c3c39b343-config-data" (OuterVolumeSpecName: "config-data") pod "a34fe3ba-7acd-4722-a155-807c3c39b343" (UID: "a34fe3ba-7acd-4722-a155-807c3c39b343"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.319362 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de6dd3f0-8aef-4a9b-bdd0-90111619f8e3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "de6dd3f0-8aef-4a9b-bdd0-90111619f8e3" (UID: "de6dd3f0-8aef-4a9b-bdd0-90111619f8e3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.326099 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de6dd3f0-8aef-4a9b-bdd0-90111619f8e3-config-data" (OuterVolumeSpecName: "config-data") pod "de6dd3f0-8aef-4a9b-bdd0-90111619f8e3" (UID: "de6dd3f0-8aef-4a9b-bdd0-90111619f8e3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.345322 4933 scope.go:117] "RemoveContainer" containerID="ec5d6dd9b89176199492224320b2021a26fd9a70569d0db9a56fd068dab85b34" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.365142 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.366648 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de6dd3f0-8aef-4a9b-bdd0-90111619f8e3-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "de6dd3f0-8aef-4a9b-bdd0-90111619f8e3" (UID: "de6dd3f0-8aef-4a9b-bdd0-90111619f8e3"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.386325 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a34fe3ba-7acd-4722-a155-807c3c39b343-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.386354 4933 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/de6dd3f0-8aef-4a9b-bdd0-90111619f8e3-logs\") on node \"crc\" DevicePath \"\"" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.386366 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a34fe3ba-7acd-4722-a155-807c3c39b343-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.386375 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de6dd3f0-8aef-4a9b-bdd0-90111619f8e3-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.386383 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de6dd3f0-8aef-4a9b-bdd0-90111619f8e3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.386391 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mqjqs\" (UniqueName: \"kubernetes.io/projected/a34fe3ba-7acd-4722-a155-807c3c39b343-kube-api-access-mqjqs\") on node \"crc\" DevicePath \"\"" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.386399 4933 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/de6dd3f0-8aef-4a9b-bdd0-90111619f8e3-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.386407 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xkxvr\" (UniqueName: \"kubernetes.io/projected/de6dd3f0-8aef-4a9b-bdd0-90111619f8e3-kube-api-access-xkxvr\") on node \"crc\" DevicePath \"\"" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.388491 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.390706 4933 scope.go:117] "RemoveContainer" containerID="295de2c3e07a1e6b164e007596aa2ecd889911e74998d3a2a38bf3203aae99e1" Jan 22 07:23:54 crc kubenswrapper[4933]: E0122 07:23:54.394688 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"295de2c3e07a1e6b164e007596aa2ecd889911e74998d3a2a38bf3203aae99e1\": container with ID starting with 295de2c3e07a1e6b164e007596aa2ecd889911e74998d3a2a38bf3203aae99e1 not found: ID does not exist" containerID="295de2c3e07a1e6b164e007596aa2ecd889911e74998d3a2a38bf3203aae99e1" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.394744 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"295de2c3e07a1e6b164e007596aa2ecd889911e74998d3a2a38bf3203aae99e1"} err="failed to get container status \"295de2c3e07a1e6b164e007596aa2ecd889911e74998d3a2a38bf3203aae99e1\": rpc error: code = NotFound desc = could not find container \"295de2c3e07a1e6b164e007596aa2ecd889911e74998d3a2a38bf3203aae99e1\": container with ID starting with 
295de2c3e07a1e6b164e007596aa2ecd889911e74998d3a2a38bf3203aae99e1 not found: ID does not exist" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.394772 4933 scope.go:117] "RemoveContainer" containerID="ec5d6dd9b89176199492224320b2021a26fd9a70569d0db9a56fd068dab85b34" Jan 22 07:23:54 crc kubenswrapper[4933]: E0122 07:23:54.395381 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ec5d6dd9b89176199492224320b2021a26fd9a70569d0db9a56fd068dab85b34\": container with ID starting with ec5d6dd9b89176199492224320b2021a26fd9a70569d0db9a56fd068dab85b34 not found: ID does not exist" containerID="ec5d6dd9b89176199492224320b2021a26fd9a70569d0db9a56fd068dab85b34" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.395414 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ec5d6dd9b89176199492224320b2021a26fd9a70569d0db9a56fd068dab85b34"} err="failed to get container status \"ec5d6dd9b89176199492224320b2021a26fd9a70569d0db9a56fd068dab85b34\": rpc error: code = NotFound desc = could not find container \"ec5d6dd9b89176199492224320b2021a26fd9a70569d0db9a56fd068dab85b34\": container with ID starting with ec5d6dd9b89176199492224320b2021a26fd9a70569d0db9a56fd068dab85b34 not found: ID does not exist" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.395429 4933 scope.go:117] "RemoveContainer" containerID="84ba846608649e106c75ff05662414b6d2bdeba99bc4bacae9896c9010183c81" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.404572 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 07:23:54 crc kubenswrapper[4933]: E0122 07:23:54.405004 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de6dd3f0-8aef-4a9b-bdd0-90111619f8e3" containerName="nova-metadata-metadata" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.405021 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="de6dd3f0-8aef-4a9b-bdd0-90111619f8e3" containerName="nova-metadata-metadata" Jan 22 07:23:54 crc kubenswrapper[4933]: E0122 07:23:54.405035 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a34fe3ba-7acd-4722-a155-807c3c39b343" containerName="nova-api-api" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.405041 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="a34fe3ba-7acd-4722-a155-807c3c39b343" containerName="nova-api-api" Jan 22 07:23:54 crc kubenswrapper[4933]: E0122 07:23:54.405053 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6759c870-c931-4a37-9062-fde2dfada3f1" containerName="nova-manage" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.405060 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="6759c870-c931-4a37-9062-fde2dfada3f1" containerName="nova-manage" Jan 22 07:23:54 crc kubenswrapper[4933]: E0122 07:23:54.405072 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a34fe3ba-7acd-4722-a155-807c3c39b343" containerName="nova-api-log" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.405093 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="a34fe3ba-7acd-4722-a155-807c3c39b343" containerName="nova-api-log" Jan 22 07:23:54 crc kubenswrapper[4933]: E0122 07:23:54.405104 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e2edc4a-a87d-4d50-bbd1-5006af141eb7" containerName="nova-scheduler-scheduler" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.405109 4933 state_mem.go:107] "Deleted 
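
The E-level "ContainerStatus from runtime service failed ... NotFound" entries here are a benign race rather than a real failure: by the time the RemoveContainer cleanup re-queried CRI-O, the container had already been removed, so the runtime answered gRPC NotFound. Cleanup paths are normally written to treat that as success. A sketch of the idempotent-delete pattern (the remove callback is a placeholder; only the error handling mirrors the `rpc error: code = NotFound` shape in the log):

    import (
        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    // removeContainer treats NotFound as success: the container is already
    // gone, which is exactly the state the caller wanted.
    func removeContainer(id string, remove func(string) error) error {
        if err := remove(id); err != nil && status.Code(err) != codes.NotFound {
            return err
        }
        return nil
    }
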
CPUSet assignment" podUID="3e2edc4a-a87d-4d50-bbd1-5006af141eb7" containerName="nova-scheduler-scheduler" Jan 22 07:23:54 crc kubenswrapper[4933]: E0122 07:23:54.405128 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de6dd3f0-8aef-4a9b-bdd0-90111619f8e3" containerName="nova-metadata-log" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.405137 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="de6dd3f0-8aef-4a9b-bdd0-90111619f8e3" containerName="nova-metadata-log" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.405300 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="de6dd3f0-8aef-4a9b-bdd0-90111619f8e3" containerName="nova-metadata-metadata" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.405315 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e2edc4a-a87d-4d50-bbd1-5006af141eb7" containerName="nova-scheduler-scheduler" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.405328 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="de6dd3f0-8aef-4a9b-bdd0-90111619f8e3" containerName="nova-metadata-log" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.405336 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="a34fe3ba-7acd-4722-a155-807c3c39b343" containerName="nova-api-api" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.405350 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="6759c870-c931-4a37-9062-fde2dfada3f1" containerName="nova-manage" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.405357 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="a34fe3ba-7acd-4722-a155-807c3c39b343" containerName="nova-api-log" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.405959 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.408154 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.415041 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.422036 4933 scope.go:117] "RemoveContainer" containerID="74944f2f8c4a7049e1d377bcbfa00fe176131efd6215b01d78a10f3221b1dfd5" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.441461 4933 scope.go:117] "RemoveContainer" containerID="84ba846608649e106c75ff05662414b6d2bdeba99bc4bacae9896c9010183c81" Jan 22 07:23:54 crc kubenswrapper[4933]: E0122 07:23:54.442166 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"84ba846608649e106c75ff05662414b6d2bdeba99bc4bacae9896c9010183c81\": container with ID starting with 84ba846608649e106c75ff05662414b6d2bdeba99bc4bacae9896c9010183c81 not found: ID does not exist" containerID="84ba846608649e106c75ff05662414b6d2bdeba99bc4bacae9896c9010183c81" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.442204 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"84ba846608649e106c75ff05662414b6d2bdeba99bc4bacae9896c9010183c81"} err="failed to get container status \"84ba846608649e106c75ff05662414b6d2bdeba99bc4bacae9896c9010183c81\": rpc error: code = NotFound desc = could not find container \"84ba846608649e106c75ff05662414b6d2bdeba99bc4bacae9896c9010183c81\": container with ID starting with 84ba846608649e106c75ff05662414b6d2bdeba99bc4bacae9896c9010183c81 not found: ID does not exist" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.442227 4933 scope.go:117] "RemoveContainer" containerID="74944f2f8c4a7049e1d377bcbfa00fe176131efd6215b01d78a10f3221b1dfd5" Jan 22 07:23:54 crc kubenswrapper[4933]: E0122 07:23:54.442563 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"74944f2f8c4a7049e1d377bcbfa00fe176131efd6215b01d78a10f3221b1dfd5\": container with ID starting with 74944f2f8c4a7049e1d377bcbfa00fe176131efd6215b01d78a10f3221b1dfd5 not found: ID does not exist" containerID="74944f2f8c4a7049e1d377bcbfa00fe176131efd6215b01d78a10f3221b1dfd5" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.442641 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74944f2f8c4a7049e1d377bcbfa00fe176131efd6215b01d78a10f3221b1dfd5"} err="failed to get container status \"74944f2f8c4a7049e1d377bcbfa00fe176131efd6215b01d78a10f3221b1dfd5\": rpc error: code = NotFound desc = could not find container \"74944f2f8c4a7049e1d377bcbfa00fe176131efd6215b01d78a10f3221b1dfd5\": container with ID starting with 74944f2f8c4a7049e1d377bcbfa00fe176131efd6215b01d78a10f3221b1dfd5 not found: ID does not exist" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.442667 4933 scope.go:117] "RemoveContainer" containerID="00be8d04b65720f0972bc7dd15fbb35735279426416862480ca65e8873323fb2" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.488027 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4lpb2\" (UniqueName: \"kubernetes.io/projected/524eab2d-c8d7-40dd-b27c-64b020aa9118-kube-api-access-4lpb2\") pod \"nova-scheduler-0\" (UID: 
\"524eab2d-c8d7-40dd-b27c-64b020aa9118\") " pod="openstack/nova-scheduler-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.488124 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/524eab2d-c8d7-40dd-b27c-64b020aa9118-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"524eab2d-c8d7-40dd-b27c-64b020aa9118\") " pod="openstack/nova-scheduler-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.488315 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/524eab2d-c8d7-40dd-b27c-64b020aa9118-config-data\") pod \"nova-scheduler-0\" (UID: \"524eab2d-c8d7-40dd-b27c-64b020aa9118\") " pod="openstack/nova-scheduler-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.503249 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3e2edc4a-a87d-4d50-bbd1-5006af141eb7" path="/var/lib/kubelet/pods/3e2edc4a-a87d-4d50-bbd1-5006af141eb7/volumes" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.590009 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4lpb2\" (UniqueName: \"kubernetes.io/projected/524eab2d-c8d7-40dd-b27c-64b020aa9118-kube-api-access-4lpb2\") pod \"nova-scheduler-0\" (UID: \"524eab2d-c8d7-40dd-b27c-64b020aa9118\") " pod="openstack/nova-scheduler-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.590105 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/524eab2d-c8d7-40dd-b27c-64b020aa9118-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"524eab2d-c8d7-40dd-b27c-64b020aa9118\") " pod="openstack/nova-scheduler-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.590158 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/524eab2d-c8d7-40dd-b27c-64b020aa9118-config-data\") pod \"nova-scheduler-0\" (UID: \"524eab2d-c8d7-40dd-b27c-64b020aa9118\") " pod="openstack/nova-scheduler-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.594183 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/524eab2d-c8d7-40dd-b27c-64b020aa9118-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"524eab2d-c8d7-40dd-b27c-64b020aa9118\") " pod="openstack/nova-scheduler-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.594247 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/524eab2d-c8d7-40dd-b27c-64b020aa9118-config-data\") pod \"nova-scheduler-0\" (UID: \"524eab2d-c8d7-40dd-b27c-64b020aa9118\") " pod="openstack/nova-scheduler-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.606726 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4lpb2\" (UniqueName: \"kubernetes.io/projected/524eab2d-c8d7-40dd-b27c-64b020aa9118-kube-api-access-4lpb2\") pod \"nova-scheduler-0\" (UID: \"524eab2d-c8d7-40dd-b27c-64b020aa9118\") " pod="openstack/nova-scheduler-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.649845 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.660510 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/nova-api-0"] Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.672458 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.679723 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.690446 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.694639 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.696968 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.697314 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.704334 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.705985 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.712671 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.714543 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.716453 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.727875 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.794267 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jszrf\" (UniqueName: \"kubernetes.io/projected/49c83965-c822-4b11-8c4d-b2a19ec0fc03-kube-api-access-jszrf\") pod \"nova-metadata-0\" (UID: \"49c83965-c822-4b11-8c4d-b2a19ec0fc03\") " pod="openstack/nova-metadata-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.794319 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49c83965-c822-4b11-8c4d-b2a19ec0fc03-config-data\") pod \"nova-metadata-0\" (UID: \"49c83965-c822-4b11-8c4d-b2a19ec0fc03\") " pod="openstack/nova-metadata-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.794344 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49c83965-c822-4b11-8c4d-b2a19ec0fc03-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"49c83965-c822-4b11-8c4d-b2a19ec0fc03\") " pod="openstack/nova-metadata-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.794381 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16dcf692-c29b-4015-b014-150746c0d254-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"16dcf692-c29b-4015-b014-150746c0d254\") " pod="openstack/nova-api-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.794405 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16dcf692-c29b-4015-b014-150746c0d254-config-data\") pod \"nova-api-0\" (UID: \"16dcf692-c29b-4015-b014-150746c0d254\") " pod="openstack/nova-api-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.794484 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49c83965-c822-4b11-8c4d-b2a19ec0fc03-logs\") pod \"nova-metadata-0\" (UID: \"49c83965-c822-4b11-8c4d-b2a19ec0fc03\") " pod="openstack/nova-metadata-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.794501 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/49c83965-c822-4b11-8c4d-b2a19ec0fc03-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"49c83965-c822-4b11-8c4d-b2a19ec0fc03\") " pod="openstack/nova-metadata-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.794547 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d4krv\" (UniqueName: \"kubernetes.io/projected/16dcf692-c29b-4015-b014-150746c0d254-kube-api-access-d4krv\") pod \"nova-api-0\" (UID: \"16dcf692-c29b-4015-b014-150746c0d254\") " pod="openstack/nova-api-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.794563 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/16dcf692-c29b-4015-b014-150746c0d254-logs\") pod \"nova-api-0\" (UID: \"16dcf692-c29b-4015-b014-150746c0d254\") " pod="openstack/nova-api-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.896418 4933 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49c83965-c822-4b11-8c4d-b2a19ec0fc03-logs\") pod \"nova-metadata-0\" (UID: \"49c83965-c822-4b11-8c4d-b2a19ec0fc03\") " pod="openstack/nova-metadata-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.896465 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/49c83965-c822-4b11-8c4d-b2a19ec0fc03-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"49c83965-c822-4b11-8c4d-b2a19ec0fc03\") " pod="openstack/nova-metadata-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.896526 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d4krv\" (UniqueName: \"kubernetes.io/projected/16dcf692-c29b-4015-b014-150746c0d254-kube-api-access-d4krv\") pod \"nova-api-0\" (UID: \"16dcf692-c29b-4015-b014-150746c0d254\") " pod="openstack/nova-api-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.896543 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/16dcf692-c29b-4015-b014-150746c0d254-logs\") pod \"nova-api-0\" (UID: \"16dcf692-c29b-4015-b014-150746c0d254\") " pod="openstack/nova-api-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.896615 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jszrf\" (UniqueName: \"kubernetes.io/projected/49c83965-c822-4b11-8c4d-b2a19ec0fc03-kube-api-access-jszrf\") pod \"nova-metadata-0\" (UID: \"49c83965-c822-4b11-8c4d-b2a19ec0fc03\") " pod="openstack/nova-metadata-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.896655 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49c83965-c822-4b11-8c4d-b2a19ec0fc03-config-data\") pod \"nova-metadata-0\" (UID: \"49c83965-c822-4b11-8c4d-b2a19ec0fc03\") " pod="openstack/nova-metadata-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.896671 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49c83965-c822-4b11-8c4d-b2a19ec0fc03-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"49c83965-c822-4b11-8c4d-b2a19ec0fc03\") " pod="openstack/nova-metadata-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.896699 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16dcf692-c29b-4015-b014-150746c0d254-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"16dcf692-c29b-4015-b014-150746c0d254\") " pod="openstack/nova-api-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.896715 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16dcf692-c29b-4015-b014-150746c0d254-config-data\") pod \"nova-api-0\" (UID: \"16dcf692-c29b-4015-b014-150746c0d254\") " pod="openstack/nova-api-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.896826 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49c83965-c822-4b11-8c4d-b2a19ec0fc03-logs\") pod \"nova-metadata-0\" (UID: \"49c83965-c822-4b11-8c4d-b2a19ec0fc03\") " pod="openstack/nova-metadata-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.898073 4933 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/16dcf692-c29b-4015-b014-150746c0d254-logs\") pod \"nova-api-0\" (UID: \"16dcf692-c29b-4015-b014-150746c0d254\") " pod="openstack/nova-api-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.901536 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/49c83965-c822-4b11-8c4d-b2a19ec0fc03-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"49c83965-c822-4b11-8c4d-b2a19ec0fc03\") " pod="openstack/nova-metadata-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.902104 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49c83965-c822-4b11-8c4d-b2a19ec0fc03-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"49c83965-c822-4b11-8c4d-b2a19ec0fc03\") " pod="openstack/nova-metadata-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.903197 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16dcf692-c29b-4015-b014-150746c0d254-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"16dcf692-c29b-4015-b014-150746c0d254\") " pod="openstack/nova-api-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.904831 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49c83965-c822-4b11-8c4d-b2a19ec0fc03-config-data\") pod \"nova-metadata-0\" (UID: \"49c83965-c822-4b11-8c4d-b2a19ec0fc03\") " pod="openstack/nova-metadata-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.905379 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16dcf692-c29b-4015-b014-150746c0d254-config-data\") pod \"nova-api-0\" (UID: \"16dcf692-c29b-4015-b014-150746c0d254\") " pod="openstack/nova-api-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.921325 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jszrf\" (UniqueName: \"kubernetes.io/projected/49c83965-c822-4b11-8c4d-b2a19ec0fc03-kube-api-access-jszrf\") pod \"nova-metadata-0\" (UID: \"49c83965-c822-4b11-8c4d-b2a19ec0fc03\") " pod="openstack/nova-metadata-0" Jan 22 07:23:54 crc kubenswrapper[4933]: I0122 07:23:54.924634 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d4krv\" (UniqueName: \"kubernetes.io/projected/16dcf692-c29b-4015-b014-150746c0d254-kube-api-access-d4krv\") pod \"nova-api-0\" (UID: \"16dcf692-c29b-4015-b014-150746c0d254\") " pod="openstack/nova-api-0" Jan 22 07:23:55 crc kubenswrapper[4933]: I0122 07:23:55.122619 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 22 07:23:55 crc kubenswrapper[4933]: I0122 07:23:55.132059 4933 util.go:30] "No sandbox for pod can be found. 
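
The replacement pods mount the same volume shapes the old ones tore down: Secret volumes (config-data, combined-ca-bundle and, for nova-metadata-0, nova-metadata-tls-certs), a kubelet-injected projected ServiceAccount token (the kube-api-access-* names), and an emptyDir for logs. A hedged corev1 sketch of nova-metadata-0's declared volumes; the config-data and TLS Secret names come from the reflector cache lines above, while the combined-ca-bundle Secret name is an assumption:

    import corev1 "k8s.io/api/core/v1"

    var volumes = []corev1.Volume{
        {Name: "config-data", VolumeSource: corev1.VolumeSource{
            Secret: &corev1.SecretVolumeSource{SecretName: "nova-metadata-config-data"},
        }},
        {Name: "nova-metadata-tls-certs", VolumeSource: corev1.VolumeSource{
            Secret: &corev1.SecretVolumeSource{SecretName: "cert-nova-metadata-internal-svc"},
        }},
        {Name: "combined-ca-bundle", VolumeSource: corev1.VolumeSource{
            Secret: &corev1.SecretVolumeSource{SecretName: "combined-ca-bundle"}, // assumed name
        }},
        {Name: "logs", VolumeSource: corev1.VolumeSource{
            EmptyDir: &corev1.EmptyDirVolumeSource{},
        }},
        // kube-api-access-jszrf is the auto-generated projected token volume;
        // kubelet injects it, so it is not declared in the pod spec.
    }
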
Need to start a new one" pod="openstack/nova-metadata-0" Jan 22 07:23:55 crc kubenswrapper[4933]: I0122 07:23:55.152384 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 07:23:55 crc kubenswrapper[4933]: I0122 07:23:55.360222 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"524eab2d-c8d7-40dd-b27c-64b020aa9118","Type":"ContainerStarted","Data":"000ec48be40ff606398768236479c1002534a53594807dd14b80ea2d246a84f6"} Jan 22 07:23:55 crc kubenswrapper[4933]: I0122 07:23:55.666152 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 22 07:23:55 crc kubenswrapper[4933]: W0122 07:23:55.666489 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod16dcf692_c29b_4015_b014_150746c0d254.slice/crio-e0e3b9e38920bf6de358f623533299c6d036356cf318c0459515bb1a9f2aaa4f WatchSource:0}: Error finding container e0e3b9e38920bf6de358f623533299c6d036356cf318c0459515bb1a9f2aaa4f: Status 404 returned error can't find the container with id e0e3b9e38920bf6de358f623533299c6d036356cf318c0459515bb1a9f2aaa4f Jan 22 07:23:55 crc kubenswrapper[4933]: I0122 07:23:55.830811 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 07:23:56 crc kubenswrapper[4933]: I0122 07:23:56.374332 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"16dcf692-c29b-4015-b014-150746c0d254","Type":"ContainerStarted","Data":"4e685b008c45ec85377b7c025b00df2b073e94abbceefc9103d328114ef169db"} Jan 22 07:23:56 crc kubenswrapper[4933]: I0122 07:23:56.374696 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"16dcf692-c29b-4015-b014-150746c0d254","Type":"ContainerStarted","Data":"9d12bea616f13c80ed12c426db9a49226bb420ed73c9bbfb95dce75188d4e87d"} Jan 22 07:23:56 crc kubenswrapper[4933]: I0122 07:23:56.374713 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"16dcf692-c29b-4015-b014-150746c0d254","Type":"ContainerStarted","Data":"e0e3b9e38920bf6de358f623533299c6d036356cf318c0459515bb1a9f2aaa4f"} Jan 22 07:23:56 crc kubenswrapper[4933]: I0122 07:23:56.376765 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"524eab2d-c8d7-40dd-b27c-64b020aa9118","Type":"ContainerStarted","Data":"b68957572564f70c7870b809e925e24471307f89a5d8da85fbb24ca097f188bd"} Jan 22 07:23:56 crc kubenswrapper[4933]: I0122 07:23:56.378834 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"49c83965-c822-4b11-8c4d-b2a19ec0fc03","Type":"ContainerStarted","Data":"d7efc4e45a5bfd04cd49478fcbeba40a991ef0523d3b410082b0d53ae0a5d793"} Jan 22 07:23:56 crc kubenswrapper[4933]: I0122 07:23:56.378981 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"49c83965-c822-4b11-8c4d-b2a19ec0fc03","Type":"ContainerStarted","Data":"0a2fb9e31405dfbc2abb5013b5556bc469be64a8650c0b6cd2998111119638b8"} Jan 22 07:23:56 crc kubenswrapper[4933]: I0122 07:23:56.379042 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"49c83965-c822-4b11-8c4d-b2a19ec0fc03","Type":"ContainerStarted","Data":"c25b96b10b0e8640c03ca4bb1792c3b18af40721df1741fcec67e75587374f1c"} Jan 22 07:23:56 crc kubenswrapper[4933]: I0122 07:23:56.398510 4933 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.398486669 podStartE2EDuration="2.398486669s" podCreationTimestamp="2026-01-22 07:23:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:23:56.393269182 +0000 UTC m=+5884.230394535" watchObservedRunningTime="2026-01-22 07:23:56.398486669 +0000 UTC m=+5884.235612022" Jan 22 07:23:56 crc kubenswrapper[4933]: I0122 07:23:56.411609 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.411587807 podStartE2EDuration="2.411587807s" podCreationTimestamp="2026-01-22 07:23:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:23:56.405926639 +0000 UTC m=+5884.243052012" watchObservedRunningTime="2026-01-22 07:23:56.411587807 +0000 UTC m=+5884.248713170" Jan 22 07:23:56 crc kubenswrapper[4933]: I0122 07:23:56.433352 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.433330746 podStartE2EDuration="2.433330746s" podCreationTimestamp="2026-01-22 07:23:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:23:56.42324282 +0000 UTC m=+5884.260368173" watchObservedRunningTime="2026-01-22 07:23:56.433330746 +0000 UTC m=+5884.270456099" Jan 22 07:23:56 crc kubenswrapper[4933]: I0122 07:23:56.503218 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a34fe3ba-7acd-4722-a155-807c3c39b343" path="/var/lib/kubelet/pods/a34fe3ba-7acd-4722-a155-807c3c39b343/volumes" Jan 22 07:23:56 crc kubenswrapper[4933]: I0122 07:23:56.503857 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de6dd3f0-8aef-4a9b-bdd0-90111619f8e3" path="/var/lib/kubelet/pods/de6dd3f0-8aef-4a9b-bdd0-90111619f8e3/volumes" Jan 22 07:23:59 crc kubenswrapper[4933]: I0122 07:23:59.728292 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 22 07:24:00 crc kubenswrapper[4933]: I0122 07:24:00.132802 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 22 07:24:00 crc kubenswrapper[4933]: I0122 07:24:00.132865 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 22 07:24:02 crc kubenswrapper[4933]: I0122 07:24:02.046626 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-q5kxm"] Jan 22 07:24:02 crc kubenswrapper[4933]: I0122 07:24:02.060928 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-f535-account-create-update-j5wk2"] Jan 22 07:24:02 crc kubenswrapper[4933]: I0122 07:24:02.071351 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-f535-account-create-update-j5wk2"] Jan 22 07:24:02 crc kubenswrapper[4933]: I0122 07:24:02.079734 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-q5kxm"] Jan 22 07:24:02 crc kubenswrapper[4933]: I0122 07:24:02.507035 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c0a694f6-8abf-4b41-84e9-73d4d8a061ff" path="/var/lib/kubelet/pods/c0a694f6-8abf-4b41-84e9-73d4d8a061ff/volumes" Jan 22 07:24:02 crc kubenswrapper[4933]: I0122 07:24:02.507648 4933 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb028b66-afaa-4d7f-a2c4-d72b6a5afe43" path="/var/lib/kubelet/pods/cb028b66-afaa-4d7f-a2c4-d72b6a5afe43/volumes" Jan 22 07:24:04 crc kubenswrapper[4933]: I0122 07:24:04.728362 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 22 07:24:04 crc kubenswrapper[4933]: I0122 07:24:04.777219 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 22 07:24:05 crc kubenswrapper[4933]: I0122 07:24:05.017882 4933 scope.go:117] "RemoveContainer" containerID="b737bba133f9eb65d5ad3f2072b14b93d2bac0b82b38c9b6e536f78e4c11d5a1" Jan 22 07:24:05 crc kubenswrapper[4933]: I0122 07:24:05.047764 4933 scope.go:117] "RemoveContainer" containerID="3a208f7491446a44a0e279507c230499e6148f372264d1f816ea078bc13f68b5" Jan 22 07:24:05 crc kubenswrapper[4933]: I0122 07:24:05.122946 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 22 07:24:05 crc kubenswrapper[4933]: I0122 07:24:05.123022 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 22 07:24:05 crc kubenswrapper[4933]: I0122 07:24:05.133539 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 22 07:24:05 crc kubenswrapper[4933]: I0122 07:24:05.133613 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 22 07:24:05 crc kubenswrapper[4933]: I0122 07:24:05.520353 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 22 07:24:06 crc kubenswrapper[4933]: I0122 07:24:06.225322 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="16dcf692-c29b-4015-b014-150746c0d254" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.1.94:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 22 07:24:06 crc kubenswrapper[4933]: I0122 07:24:06.225382 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="49c83965-c822-4b11-8c4d-b2a19ec0fc03" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.1.95:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 22 07:24:06 crc kubenswrapper[4933]: I0122 07:24:06.225436 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="16dcf692-c29b-4015-b014-150746c0d254" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.1.94:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 22 07:24:06 crc kubenswrapper[4933]: I0122 07:24:06.225469 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="49c83965-c822-4b11-8c4d-b2a19ec0fc03" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.1.95:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 22 07:24:09 crc kubenswrapper[4933]: I0122 07:24:09.032871 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-tct52"] Jan 22 07:24:09 crc kubenswrapper[4933]: I0122 07:24:09.044594 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-tct52"] Jan 22 07:24:10 crc kubenswrapper[4933]: 
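
The startup-probe failures are expected warm-up noise after the restarts: the kubelet's HTTP checks against nova-api (http://10.217.1.94:8774/) and nova-metadata (https://10.217.1.95:8775/) time out ("context deadline exceeded") until the services begin answering, after which the probes flip to "started" at 07:24:15 below. A corev1 sketch consistent with the nova-api probe URL; the path and port come from the log, and all timing fields are assumptions:

    import (
        corev1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/util/intstr"
    )

    var startup = &corev1.Probe{
        ProbeHandler: corev1.ProbeHandler{ // named Handler in older k8s.io/api releases
            HTTPGet: &corev1.HTTPGetAction{
                Path: "/",
                Port: intstr.FromInt(8774),
                // nova-metadata would add Scheme: corev1.URISchemeHTTPS for its
                // TLS endpoint on 8775.
            },
        },
        TimeoutSeconds:   5,  // assumed; "context deadline exceeded" means this elapsed
        PeriodSeconds:    10, // assumed
        FailureThreshold: 12, // assumed; gives the API time to come up
    }
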
I0122 07:24:10.501626 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bce8ad4c-791a-4e23-9e52-c361e03f8674" path="/var/lib/kubelet/pods/bce8ad4c-791a-4e23-9e52-c361e03f8674/volumes" Jan 22 07:24:15 crc kubenswrapper[4933]: I0122 07:24:15.129007 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 22 07:24:15 crc kubenswrapper[4933]: I0122 07:24:15.129631 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 22 07:24:15 crc kubenswrapper[4933]: I0122 07:24:15.129982 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 22 07:24:15 crc kubenswrapper[4933]: I0122 07:24:15.129998 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 22 07:24:15 crc kubenswrapper[4933]: I0122 07:24:15.136880 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 22 07:24:15 crc kubenswrapper[4933]: I0122 07:24:15.138359 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 22 07:24:15 crc kubenswrapper[4933]: I0122 07:24:15.144349 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 22 07:24:15 crc kubenswrapper[4933]: I0122 07:24:15.144646 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 22 07:24:15 crc kubenswrapper[4933]: I0122 07:24:15.153243 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 22 07:24:15 crc kubenswrapper[4933]: I0122 07:24:15.157396 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 22 07:24:15 crc kubenswrapper[4933]: I0122 07:24:15.382486 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675d97df87-x6hlh"] Jan 22 07:24:15 crc kubenswrapper[4933]: I0122 07:24:15.384098 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675d97df87-x6hlh" Jan 22 07:24:15 crc kubenswrapper[4933]: I0122 07:24:15.408616 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675d97df87-x6hlh"] Jan 22 07:24:15 crc kubenswrapper[4933]: I0122 07:24:15.442295 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/59d07063-6db8-4225-9c77-f916c2747adf-ovsdbserver-sb\") pod \"dnsmasq-dns-675d97df87-x6hlh\" (UID: \"59d07063-6db8-4225-9c77-f916c2747adf\") " pod="openstack/dnsmasq-dns-675d97df87-x6hlh" Jan 22 07:24:15 crc kubenswrapper[4933]: I0122 07:24:15.442371 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rth74\" (UniqueName: \"kubernetes.io/projected/59d07063-6db8-4225-9c77-f916c2747adf-kube-api-access-rth74\") pod \"dnsmasq-dns-675d97df87-x6hlh\" (UID: \"59d07063-6db8-4225-9c77-f916c2747adf\") " pod="openstack/dnsmasq-dns-675d97df87-x6hlh" Jan 22 07:24:15 crc kubenswrapper[4933]: I0122 07:24:15.442440 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59d07063-6db8-4225-9c77-f916c2747adf-config\") pod \"dnsmasq-dns-675d97df87-x6hlh\" (UID: \"59d07063-6db8-4225-9c77-f916c2747adf\") " pod="openstack/dnsmasq-dns-675d97df87-x6hlh" Jan 22 07:24:15 crc kubenswrapper[4933]: I0122 07:24:15.442461 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/59d07063-6db8-4225-9c77-f916c2747adf-ovsdbserver-nb\") pod \"dnsmasq-dns-675d97df87-x6hlh\" (UID: \"59d07063-6db8-4225-9c77-f916c2747adf\") " pod="openstack/dnsmasq-dns-675d97df87-x6hlh" Jan 22 07:24:15 crc kubenswrapper[4933]: I0122 07:24:15.442508 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/59d07063-6db8-4225-9c77-f916c2747adf-dns-svc\") pod \"dnsmasq-dns-675d97df87-x6hlh\" (UID: \"59d07063-6db8-4225-9c77-f916c2747adf\") " pod="openstack/dnsmasq-dns-675d97df87-x6hlh" Jan 22 07:24:15 crc kubenswrapper[4933]: I0122 07:24:15.543849 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59d07063-6db8-4225-9c77-f916c2747adf-config\") pod \"dnsmasq-dns-675d97df87-x6hlh\" (UID: \"59d07063-6db8-4225-9c77-f916c2747adf\") " pod="openstack/dnsmasq-dns-675d97df87-x6hlh" Jan 22 07:24:15 crc kubenswrapper[4933]: I0122 07:24:15.543910 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/59d07063-6db8-4225-9c77-f916c2747adf-ovsdbserver-nb\") pod \"dnsmasq-dns-675d97df87-x6hlh\" (UID: \"59d07063-6db8-4225-9c77-f916c2747adf\") " pod="openstack/dnsmasq-dns-675d97df87-x6hlh" Jan 22 07:24:15 crc kubenswrapper[4933]: I0122 07:24:15.543978 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/59d07063-6db8-4225-9c77-f916c2747adf-dns-svc\") pod \"dnsmasq-dns-675d97df87-x6hlh\" (UID: \"59d07063-6db8-4225-9c77-f916c2747adf\") " pod="openstack/dnsmasq-dns-675d97df87-x6hlh" Jan 22 07:24:15 crc kubenswrapper[4933]: I0122 07:24:15.544055 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/59d07063-6db8-4225-9c77-f916c2747adf-ovsdbserver-sb\") pod \"dnsmasq-dns-675d97df87-x6hlh\" (UID: \"59d07063-6db8-4225-9c77-f916c2747adf\") " pod="openstack/dnsmasq-dns-675d97df87-x6hlh" Jan 22 07:24:15 crc kubenswrapper[4933]: I0122 07:24:15.544117 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rth74\" (UniqueName: \"kubernetes.io/projected/59d07063-6db8-4225-9c77-f916c2747adf-kube-api-access-rth74\") pod \"dnsmasq-dns-675d97df87-x6hlh\" (UID: \"59d07063-6db8-4225-9c77-f916c2747adf\") " pod="openstack/dnsmasq-dns-675d97df87-x6hlh" Jan 22 07:24:15 crc kubenswrapper[4933]: I0122 07:24:15.544926 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59d07063-6db8-4225-9c77-f916c2747adf-config\") pod \"dnsmasq-dns-675d97df87-x6hlh\" (UID: \"59d07063-6db8-4225-9c77-f916c2747adf\") " pod="openstack/dnsmasq-dns-675d97df87-x6hlh" Jan 22 07:24:15 crc kubenswrapper[4933]: I0122 07:24:15.545165 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/59d07063-6db8-4225-9c77-f916c2747adf-ovsdbserver-nb\") pod \"dnsmasq-dns-675d97df87-x6hlh\" (UID: \"59d07063-6db8-4225-9c77-f916c2747adf\") " pod="openstack/dnsmasq-dns-675d97df87-x6hlh" Jan 22 07:24:15 crc kubenswrapper[4933]: I0122 07:24:15.545472 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/59d07063-6db8-4225-9c77-f916c2747adf-ovsdbserver-sb\") pod \"dnsmasq-dns-675d97df87-x6hlh\" (UID: \"59d07063-6db8-4225-9c77-f916c2747adf\") " pod="openstack/dnsmasq-dns-675d97df87-x6hlh" Jan 22 07:24:15 crc kubenswrapper[4933]: I0122 07:24:15.546059 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/59d07063-6db8-4225-9c77-f916c2747adf-dns-svc\") pod \"dnsmasq-dns-675d97df87-x6hlh\" (UID: \"59d07063-6db8-4225-9c77-f916c2747adf\") " pod="openstack/dnsmasq-dns-675d97df87-x6hlh" Jan 22 07:24:15 crc kubenswrapper[4933]: I0122 07:24:15.566106 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rth74\" (UniqueName: \"kubernetes.io/projected/59d07063-6db8-4225-9c77-f916c2747adf-kube-api-access-rth74\") pod \"dnsmasq-dns-675d97df87-x6hlh\" (UID: \"59d07063-6db8-4225-9c77-f916c2747adf\") " pod="openstack/dnsmasq-dns-675d97df87-x6hlh" Jan 22 07:24:15 crc kubenswrapper[4933]: I0122 07:24:15.711962 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675d97df87-x6hlh" Jan 22 07:24:16 crc kubenswrapper[4933]: I0122 07:24:16.202411 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675d97df87-x6hlh"] Jan 22 07:24:16 crc kubenswrapper[4933]: I0122 07:24:16.605273 4933 generic.go:334] "Generic (PLEG): container finished" podID="59d07063-6db8-4225-9c77-f916c2747adf" containerID="0fde45fd0dc6d082b5a8f3593101c80382325a03801bdb649cc19a1f0f0b1ae8" exitCode=0 Jan 22 07:24:16 crc kubenswrapper[4933]: I0122 07:24:16.605362 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675d97df87-x6hlh" event={"ID":"59d07063-6db8-4225-9c77-f916c2747adf","Type":"ContainerDied","Data":"0fde45fd0dc6d082b5a8f3593101c80382325a03801bdb649cc19a1f0f0b1ae8"} Jan 22 07:24:16 crc kubenswrapper[4933]: I0122 07:24:16.605722 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675d97df87-x6hlh" event={"ID":"59d07063-6db8-4225-9c77-f916c2747adf","Type":"ContainerStarted","Data":"db8cd78add3ea741607023cd99314fe139db28c64861f2b82b621dc3e692dfb3"} Jan 22 07:24:17 crc kubenswrapper[4933]: I0122 07:24:17.620531 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675d97df87-x6hlh" event={"ID":"59d07063-6db8-4225-9c77-f916c2747adf","Type":"ContainerStarted","Data":"13bc2861ae1bca76ca67d1612ed13b3d344d97ecd683d5b1268bf93ffba78592"} Jan 22 07:24:17 crc kubenswrapper[4933]: I0122 07:24:17.620717 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-675d97df87-x6hlh" Jan 22 07:24:17 crc kubenswrapper[4933]: I0122 07:24:17.661840 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-675d97df87-x6hlh" podStartSLOduration=2.661818592 podStartE2EDuration="2.661818592s" podCreationTimestamp="2026-01-22 07:24:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:24:17.660042019 +0000 UTC m=+5905.497167372" watchObservedRunningTime="2026-01-22 07:24:17.661818592 +0000 UTC m=+5905.498943955" Jan 22 07:24:17 crc kubenswrapper[4933]: I0122 07:24:17.870104 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 22 07:24:17 crc kubenswrapper[4933]: I0122 07:24:17.870363 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="16dcf692-c29b-4015-b014-150746c0d254" containerName="nova-api-log" containerID="cri-o://9d12bea616f13c80ed12c426db9a49226bb420ed73c9bbfb95dce75188d4e87d" gracePeriod=30 Jan 22 07:24:17 crc kubenswrapper[4933]: I0122 07:24:17.870501 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="16dcf692-c29b-4015-b014-150746c0d254" containerName="nova-api-api" containerID="cri-o://4e685b008c45ec85377b7c025b00df2b073e94abbceefc9103d328114ef169db" gracePeriod=30 Jan 22 07:24:18 crc kubenswrapper[4933]: I0122 07:24:18.631952 4933 generic.go:334] "Generic (PLEG): container finished" podID="16dcf692-c29b-4015-b014-150746c0d254" containerID="9d12bea616f13c80ed12c426db9a49226bb420ed73c9bbfb95dce75188d4e87d" exitCode=143 Jan 22 07:24:18 crc kubenswrapper[4933]: I0122 07:24:18.632111 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" 
event={"ID":"16dcf692-c29b-4015-b014-150746c0d254","Type":"ContainerDied","Data":"9d12bea616f13c80ed12c426db9a49226bb420ed73c9bbfb95dce75188d4e87d"} Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.470981 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.576232 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16dcf692-c29b-4015-b014-150746c0d254-config-data\") pod \"16dcf692-c29b-4015-b014-150746c0d254\" (UID: \"16dcf692-c29b-4015-b014-150746c0d254\") " Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.576347 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16dcf692-c29b-4015-b014-150746c0d254-combined-ca-bundle\") pod \"16dcf692-c29b-4015-b014-150746c0d254\" (UID: \"16dcf692-c29b-4015-b014-150746c0d254\") " Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.576471 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4krv\" (UniqueName: \"kubernetes.io/projected/16dcf692-c29b-4015-b014-150746c0d254-kube-api-access-d4krv\") pod \"16dcf692-c29b-4015-b014-150746c0d254\" (UID: \"16dcf692-c29b-4015-b014-150746c0d254\") " Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.576534 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/16dcf692-c29b-4015-b014-150746c0d254-logs\") pod \"16dcf692-c29b-4015-b014-150746c0d254\" (UID: \"16dcf692-c29b-4015-b014-150746c0d254\") " Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.578045 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/16dcf692-c29b-4015-b014-150746c0d254-logs" (OuterVolumeSpecName: "logs") pod "16dcf692-c29b-4015-b014-150746c0d254" (UID: "16dcf692-c29b-4015-b014-150746c0d254"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.582120 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/16dcf692-c29b-4015-b014-150746c0d254-kube-api-access-d4krv" (OuterVolumeSpecName: "kube-api-access-d4krv") pod "16dcf692-c29b-4015-b014-150746c0d254" (UID: "16dcf692-c29b-4015-b014-150746c0d254"). InnerVolumeSpecName "kube-api-access-d4krv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.614931 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16dcf692-c29b-4015-b014-150746c0d254-config-data" (OuterVolumeSpecName: "config-data") pod "16dcf692-c29b-4015-b014-150746c0d254" (UID: "16dcf692-c29b-4015-b014-150746c0d254"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.616975 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16dcf692-c29b-4015-b014-150746c0d254-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "16dcf692-c29b-4015-b014-150746c0d254" (UID: "16dcf692-c29b-4015-b014-150746c0d254"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.655989 4933 generic.go:334] "Generic (PLEG): container finished" podID="16dcf692-c29b-4015-b014-150746c0d254" containerID="4e685b008c45ec85377b7c025b00df2b073e94abbceefc9103d328114ef169db" exitCode=0 Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.656038 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"16dcf692-c29b-4015-b014-150746c0d254","Type":"ContainerDied","Data":"4e685b008c45ec85377b7c025b00df2b073e94abbceefc9103d328114ef169db"} Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.656061 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.656092 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"16dcf692-c29b-4015-b014-150746c0d254","Type":"ContainerDied","Data":"e0e3b9e38920bf6de358f623533299c6d036356cf318c0459515bb1a9f2aaa4f"} Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.656111 4933 scope.go:117] "RemoveContainer" containerID="4e685b008c45ec85377b7c025b00df2b073e94abbceefc9103d328114ef169db" Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.679983 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4krv\" (UniqueName: \"kubernetes.io/projected/16dcf692-c29b-4015-b014-150746c0d254-kube-api-access-d4krv\") on node \"crc\" DevicePath \"\"" Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.680009 4933 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/16dcf692-c29b-4015-b014-150746c0d254-logs\") on node \"crc\" DevicePath \"\"" Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.680020 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16dcf692-c29b-4015-b014-150746c0d254-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.680030 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16dcf692-c29b-4015-b014-150746c0d254-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.696110 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.703732 4933 scope.go:117] "RemoveContainer" containerID="9d12bea616f13c80ed12c426db9a49226bb420ed73c9bbfb95dce75188d4e87d" Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.720498 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.747070 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 22 07:24:21 crc kubenswrapper[4933]: E0122 07:24:21.747527 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16dcf692-c29b-4015-b014-150746c0d254" containerName="nova-api-api" Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.747542 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="16dcf692-c29b-4015-b014-150746c0d254" containerName="nova-api-api" Jan 22 07:24:21 crc kubenswrapper[4933]: E0122 07:24:21.747577 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16dcf692-c29b-4015-b014-150746c0d254" containerName="nova-api-log" Jan 22 07:24:21 crc 
kubenswrapper[4933]: I0122 07:24:21.747585 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="16dcf692-c29b-4015-b014-150746c0d254" containerName="nova-api-log" Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.747813 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="16dcf692-c29b-4015-b014-150746c0d254" containerName="nova-api-log" Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.747830 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="16dcf692-c29b-4015-b014-150746c0d254" containerName="nova-api-api" Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.749065 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.749935 4933 scope.go:117] "RemoveContainer" containerID="4e685b008c45ec85377b7c025b00df2b073e94abbceefc9103d328114ef169db" Jan 22 07:24:21 crc kubenswrapper[4933]: E0122 07:24:21.752045 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4e685b008c45ec85377b7c025b00df2b073e94abbceefc9103d328114ef169db\": container with ID starting with 4e685b008c45ec85377b7c025b00df2b073e94abbceefc9103d328114ef169db not found: ID does not exist" containerID="4e685b008c45ec85377b7c025b00df2b073e94abbceefc9103d328114ef169db" Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.752127 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e685b008c45ec85377b7c025b00df2b073e94abbceefc9103d328114ef169db"} err="failed to get container status \"4e685b008c45ec85377b7c025b00df2b073e94abbceefc9103d328114ef169db\": rpc error: code = NotFound desc = could not find container \"4e685b008c45ec85377b7c025b00df2b073e94abbceefc9103d328114ef169db\": container with ID starting with 4e685b008c45ec85377b7c025b00df2b073e94abbceefc9103d328114ef169db not found: ID does not exist" Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.752156 4933 scope.go:117] "RemoveContainer" containerID="9d12bea616f13c80ed12c426db9a49226bb420ed73c9bbfb95dce75188d4e87d" Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.752290 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.752314 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.752451 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Jan 22 07:24:21 crc kubenswrapper[4933]: E0122 07:24:21.752637 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d12bea616f13c80ed12c426db9a49226bb420ed73c9bbfb95dce75188d4e87d\": container with ID starting with 9d12bea616f13c80ed12c426db9a49226bb420ed73c9bbfb95dce75188d4e87d not found: ID does not exist" containerID="9d12bea616f13c80ed12c426db9a49226bb420ed73c9bbfb95dce75188d4e87d" Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.752687 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d12bea616f13c80ed12c426db9a49226bb420ed73c9bbfb95dce75188d4e87d"} err="failed to get container status \"9d12bea616f13c80ed12c426db9a49226bb420ed73c9bbfb95dce75188d4e87d\": rpc error: code = NotFound desc = could not find container 
\"9d12bea616f13c80ed12c426db9a49226bb420ed73c9bbfb95dce75188d4e87d\": container with ID starting with 9d12bea616f13c80ed12c426db9a49226bb420ed73c9bbfb95dce75188d4e87d not found: ID does not exist" Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.756873 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.883647 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e856da6e-d00f-4b01-af9f-257c680f9882-internal-tls-certs\") pod \"nova-api-0\" (UID: \"e856da6e-d00f-4b01-af9f-257c680f9882\") " pod="openstack/nova-api-0" Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.883845 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e856da6e-d00f-4b01-af9f-257c680f9882-logs\") pod \"nova-api-0\" (UID: \"e856da6e-d00f-4b01-af9f-257c680f9882\") " pod="openstack/nova-api-0" Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.883930 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e856da6e-d00f-4b01-af9f-257c680f9882-config-data\") pod \"nova-api-0\" (UID: \"e856da6e-d00f-4b01-af9f-257c680f9882\") " pod="openstack/nova-api-0" Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.884039 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-26gv8\" (UniqueName: \"kubernetes.io/projected/e856da6e-d00f-4b01-af9f-257c680f9882-kube-api-access-26gv8\") pod \"nova-api-0\" (UID: \"e856da6e-d00f-4b01-af9f-257c680f9882\") " pod="openstack/nova-api-0" Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.884237 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e856da6e-d00f-4b01-af9f-257c680f9882-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e856da6e-d00f-4b01-af9f-257c680f9882\") " pod="openstack/nova-api-0" Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.884341 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e856da6e-d00f-4b01-af9f-257c680f9882-public-tls-certs\") pod \"nova-api-0\" (UID: \"e856da6e-d00f-4b01-af9f-257c680f9882\") " pod="openstack/nova-api-0" Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.985729 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e856da6e-d00f-4b01-af9f-257c680f9882-internal-tls-certs\") pod \"nova-api-0\" (UID: \"e856da6e-d00f-4b01-af9f-257c680f9882\") " pod="openstack/nova-api-0" Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.985814 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e856da6e-d00f-4b01-af9f-257c680f9882-logs\") pod \"nova-api-0\" (UID: \"e856da6e-d00f-4b01-af9f-257c680f9882\") " pod="openstack/nova-api-0" Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.985839 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e856da6e-d00f-4b01-af9f-257c680f9882-config-data\") pod \"nova-api-0\" (UID: 
\"e856da6e-d00f-4b01-af9f-257c680f9882\") " pod="openstack/nova-api-0" Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.985883 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-26gv8\" (UniqueName: \"kubernetes.io/projected/e856da6e-d00f-4b01-af9f-257c680f9882-kube-api-access-26gv8\") pod \"nova-api-0\" (UID: \"e856da6e-d00f-4b01-af9f-257c680f9882\") " pod="openstack/nova-api-0" Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.985905 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e856da6e-d00f-4b01-af9f-257c680f9882-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e856da6e-d00f-4b01-af9f-257c680f9882\") " pod="openstack/nova-api-0" Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.985927 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e856da6e-d00f-4b01-af9f-257c680f9882-public-tls-certs\") pod \"nova-api-0\" (UID: \"e856da6e-d00f-4b01-af9f-257c680f9882\") " pod="openstack/nova-api-0" Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.986166 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e856da6e-d00f-4b01-af9f-257c680f9882-logs\") pod \"nova-api-0\" (UID: \"e856da6e-d00f-4b01-af9f-257c680f9882\") " pod="openstack/nova-api-0" Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.990203 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e856da6e-d00f-4b01-af9f-257c680f9882-internal-tls-certs\") pod \"nova-api-0\" (UID: \"e856da6e-d00f-4b01-af9f-257c680f9882\") " pod="openstack/nova-api-0" Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.990769 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e856da6e-d00f-4b01-af9f-257c680f9882-public-tls-certs\") pod \"nova-api-0\" (UID: \"e856da6e-d00f-4b01-af9f-257c680f9882\") " pod="openstack/nova-api-0" Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.992330 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e856da6e-d00f-4b01-af9f-257c680f9882-config-data\") pod \"nova-api-0\" (UID: \"e856da6e-d00f-4b01-af9f-257c680f9882\") " pod="openstack/nova-api-0" Jan 22 07:24:21 crc kubenswrapper[4933]: I0122 07:24:21.993326 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e856da6e-d00f-4b01-af9f-257c680f9882-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e856da6e-d00f-4b01-af9f-257c680f9882\") " pod="openstack/nova-api-0" Jan 22 07:24:22 crc kubenswrapper[4933]: I0122 07:24:22.007799 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-26gv8\" (UniqueName: \"kubernetes.io/projected/e856da6e-d00f-4b01-af9f-257c680f9882-kube-api-access-26gv8\") pod \"nova-api-0\" (UID: \"e856da6e-d00f-4b01-af9f-257c680f9882\") " pod="openstack/nova-api-0" Jan 22 07:24:22 crc kubenswrapper[4933]: I0122 07:24:22.070348 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 22 07:24:22 crc kubenswrapper[4933]: I0122 07:24:22.509017 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="16dcf692-c29b-4015-b014-150746c0d254" path="/var/lib/kubelet/pods/16dcf692-c29b-4015-b014-150746c0d254/volumes" Jan 22 07:24:22 crc kubenswrapper[4933]: I0122 07:24:22.554377 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 22 07:24:22 crc kubenswrapper[4933]: I0122 07:24:22.668178 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e856da6e-d00f-4b01-af9f-257c680f9882","Type":"ContainerStarted","Data":"1b5e4e2280888d373901aa2cbce057d90d2ad6be424ce06fe986365d4e4216c5"} Jan 22 07:24:23 crc kubenswrapper[4933]: I0122 07:24:23.030131 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-8x2zf"] Jan 22 07:24:23 crc kubenswrapper[4933]: I0122 07:24:23.039918 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-8x2zf"] Jan 22 07:24:23 crc kubenswrapper[4933]: I0122 07:24:23.678663 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e856da6e-d00f-4b01-af9f-257c680f9882","Type":"ContainerStarted","Data":"d1d0e7f4c23649bb1d477598760012584242b47fdf310e31e64fea564b039d79"} Jan 22 07:24:23 crc kubenswrapper[4933]: I0122 07:24:23.678711 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e856da6e-d00f-4b01-af9f-257c680f9882","Type":"ContainerStarted","Data":"947c472502e7cc8c84d08e84491dc39c3ae45b327992232b220b14984ae037f0"} Jan 22 07:24:23 crc kubenswrapper[4933]: I0122 07:24:23.698216 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.698193437 podStartE2EDuration="2.698193437s" podCreationTimestamp="2026-01-22 07:24:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:24:23.694458456 +0000 UTC m=+5911.531583839" watchObservedRunningTime="2026-01-22 07:24:23.698193437 +0000 UTC m=+5911.535318800" Jan 22 07:24:24 crc kubenswrapper[4933]: I0122 07:24:24.501042 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="95b8a659-2af3-4f21-b848-d395f968a0fe" path="/var/lib/kubelet/pods/95b8a659-2af3-4f21-b848-d395f968a0fe/volumes" Jan 22 07:24:25 crc kubenswrapper[4933]: I0122 07:24:25.713302 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-675d97df87-x6hlh" Jan 22 07:24:25 crc kubenswrapper[4933]: I0122 07:24:25.782208 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-65df48cf75-dnvbm"] Jan 22 07:24:25 crc kubenswrapper[4933]: I0122 07:24:25.782723 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-65df48cf75-dnvbm" podUID="7d881258-6f0e-4c52-b036-3805dba06666" containerName="dnsmasq-dns" containerID="cri-o://104fccaaa80183fbe07fc7a867e73d76b87c4cc126c630ee2b3f7bd0a76b862a" gracePeriod=10 Jan 22 07:24:26 crc kubenswrapper[4933]: I0122 07:24:26.267358 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-65df48cf75-dnvbm" Jan 22 07:24:26 crc kubenswrapper[4933]: I0122 07:24:26.375555 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7d881258-6f0e-4c52-b036-3805dba06666-dns-svc\") pod \"7d881258-6f0e-4c52-b036-3805dba06666\" (UID: \"7d881258-6f0e-4c52-b036-3805dba06666\") " Jan 22 07:24:26 crc kubenswrapper[4933]: I0122 07:24:26.375754 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8p8pk\" (UniqueName: \"kubernetes.io/projected/7d881258-6f0e-4c52-b036-3805dba06666-kube-api-access-8p8pk\") pod \"7d881258-6f0e-4c52-b036-3805dba06666\" (UID: \"7d881258-6f0e-4c52-b036-3805dba06666\") " Jan 22 07:24:26 crc kubenswrapper[4933]: I0122 07:24:26.376375 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d881258-6f0e-4c52-b036-3805dba06666-config\") pod \"7d881258-6f0e-4c52-b036-3805dba06666\" (UID: \"7d881258-6f0e-4c52-b036-3805dba06666\") " Jan 22 07:24:26 crc kubenswrapper[4933]: I0122 07:24:26.376455 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7d881258-6f0e-4c52-b036-3805dba06666-ovsdbserver-nb\") pod \"7d881258-6f0e-4c52-b036-3805dba06666\" (UID: \"7d881258-6f0e-4c52-b036-3805dba06666\") " Jan 22 07:24:26 crc kubenswrapper[4933]: I0122 07:24:26.376495 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7d881258-6f0e-4c52-b036-3805dba06666-ovsdbserver-sb\") pod \"7d881258-6f0e-4c52-b036-3805dba06666\" (UID: \"7d881258-6f0e-4c52-b036-3805dba06666\") " Jan 22 07:24:26 crc kubenswrapper[4933]: I0122 07:24:26.380545 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7d881258-6f0e-4c52-b036-3805dba06666-kube-api-access-8p8pk" (OuterVolumeSpecName: "kube-api-access-8p8pk") pod "7d881258-6f0e-4c52-b036-3805dba06666" (UID: "7d881258-6f0e-4c52-b036-3805dba06666"). InnerVolumeSpecName "kube-api-access-8p8pk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:24:26 crc kubenswrapper[4933]: I0122 07:24:26.428674 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7d881258-6f0e-4c52-b036-3805dba06666-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "7d881258-6f0e-4c52-b036-3805dba06666" (UID: "7d881258-6f0e-4c52-b036-3805dba06666"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:24:26 crc kubenswrapper[4933]: I0122 07:24:26.431572 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7d881258-6f0e-4c52-b036-3805dba06666-config" (OuterVolumeSpecName: "config") pod "7d881258-6f0e-4c52-b036-3805dba06666" (UID: "7d881258-6f0e-4c52-b036-3805dba06666"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:24:26 crc kubenswrapper[4933]: I0122 07:24:26.433821 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7d881258-6f0e-4c52-b036-3805dba06666-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "7d881258-6f0e-4c52-b036-3805dba06666" (UID: "7d881258-6f0e-4c52-b036-3805dba06666"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:24:26 crc kubenswrapper[4933]: I0122 07:24:26.438151 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7d881258-6f0e-4c52-b036-3805dba06666-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7d881258-6f0e-4c52-b036-3805dba06666" (UID: "7d881258-6f0e-4c52-b036-3805dba06666"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:24:26 crc kubenswrapper[4933]: I0122 07:24:26.480063 4933 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7d881258-6f0e-4c52-b036-3805dba06666-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 07:24:26 crc kubenswrapper[4933]: I0122 07:24:26.480161 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8p8pk\" (UniqueName: \"kubernetes.io/projected/7d881258-6f0e-4c52-b036-3805dba06666-kube-api-access-8p8pk\") on node \"crc\" DevicePath \"\"" Jan 22 07:24:26 crc kubenswrapper[4933]: I0122 07:24:26.480178 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d881258-6f0e-4c52-b036-3805dba06666-config\") on node \"crc\" DevicePath \"\"" Jan 22 07:24:26 crc kubenswrapper[4933]: I0122 07:24:26.480189 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7d881258-6f0e-4c52-b036-3805dba06666-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 07:24:26 crc kubenswrapper[4933]: I0122 07:24:26.480199 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7d881258-6f0e-4c52-b036-3805dba06666-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 07:24:26 crc kubenswrapper[4933]: I0122 07:24:26.708970 4933 generic.go:334] "Generic (PLEG): container finished" podID="7d881258-6f0e-4c52-b036-3805dba06666" containerID="104fccaaa80183fbe07fc7a867e73d76b87c4cc126c630ee2b3f7bd0a76b862a" exitCode=0 Jan 22 07:24:26 crc kubenswrapper[4933]: I0122 07:24:26.709048 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-65df48cf75-dnvbm" event={"ID":"7d881258-6f0e-4c52-b036-3805dba06666","Type":"ContainerDied","Data":"104fccaaa80183fbe07fc7a867e73d76b87c4cc126c630ee2b3f7bd0a76b862a"} Jan 22 07:24:26 crc kubenswrapper[4933]: I0122 07:24:26.709286 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-65df48cf75-dnvbm" event={"ID":"7d881258-6f0e-4c52-b036-3805dba06666","Type":"ContainerDied","Data":"15fd74bfa3dbcf91ecdbb6d28920684b42c85075a1726743a225359751a374ec"} Jan 22 07:24:26 crc kubenswrapper[4933]: I0122 07:24:26.709059 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-65df48cf75-dnvbm" Jan 22 07:24:26 crc kubenswrapper[4933]: I0122 07:24:26.709312 4933 scope.go:117] "RemoveContainer" containerID="104fccaaa80183fbe07fc7a867e73d76b87c4cc126c630ee2b3f7bd0a76b862a" Jan 22 07:24:26 crc kubenswrapper[4933]: I0122 07:24:26.737293 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-65df48cf75-dnvbm"] Jan 22 07:24:26 crc kubenswrapper[4933]: I0122 07:24:26.745776 4933 scope.go:117] "RemoveContainer" containerID="60353b0cd3e4cc985bd382318ae6d7c2bb2c2ebf5d61a2e4d0619d8b7875e7a9" Jan 22 07:24:26 crc kubenswrapper[4933]: I0122 07:24:26.747204 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-65df48cf75-dnvbm"] Jan 22 07:24:26 crc kubenswrapper[4933]: I0122 07:24:26.772649 4933 scope.go:117] "RemoveContainer" containerID="104fccaaa80183fbe07fc7a867e73d76b87c4cc126c630ee2b3f7bd0a76b862a" Jan 22 07:24:26 crc kubenswrapper[4933]: E0122 07:24:26.773051 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"104fccaaa80183fbe07fc7a867e73d76b87c4cc126c630ee2b3f7bd0a76b862a\": container with ID starting with 104fccaaa80183fbe07fc7a867e73d76b87c4cc126c630ee2b3f7bd0a76b862a not found: ID does not exist" containerID="104fccaaa80183fbe07fc7a867e73d76b87c4cc126c630ee2b3f7bd0a76b862a" Jan 22 07:24:26 crc kubenswrapper[4933]: I0122 07:24:26.773105 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"104fccaaa80183fbe07fc7a867e73d76b87c4cc126c630ee2b3f7bd0a76b862a"} err="failed to get container status \"104fccaaa80183fbe07fc7a867e73d76b87c4cc126c630ee2b3f7bd0a76b862a\": rpc error: code = NotFound desc = could not find container \"104fccaaa80183fbe07fc7a867e73d76b87c4cc126c630ee2b3f7bd0a76b862a\": container with ID starting with 104fccaaa80183fbe07fc7a867e73d76b87c4cc126c630ee2b3f7bd0a76b862a not found: ID does not exist" Jan 22 07:24:26 crc kubenswrapper[4933]: I0122 07:24:26.773136 4933 scope.go:117] "RemoveContainer" containerID="60353b0cd3e4cc985bd382318ae6d7c2bb2c2ebf5d61a2e4d0619d8b7875e7a9" Jan 22 07:24:26 crc kubenswrapper[4933]: E0122 07:24:26.773517 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"60353b0cd3e4cc985bd382318ae6d7c2bb2c2ebf5d61a2e4d0619d8b7875e7a9\": container with ID starting with 60353b0cd3e4cc985bd382318ae6d7c2bb2c2ebf5d61a2e4d0619d8b7875e7a9 not found: ID does not exist" containerID="60353b0cd3e4cc985bd382318ae6d7c2bb2c2ebf5d61a2e4d0619d8b7875e7a9" Jan 22 07:24:26 crc kubenswrapper[4933]: I0122 07:24:26.773543 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"60353b0cd3e4cc985bd382318ae6d7c2bb2c2ebf5d61a2e4d0619d8b7875e7a9"} err="failed to get container status \"60353b0cd3e4cc985bd382318ae6d7c2bb2c2ebf5d61a2e4d0619d8b7875e7a9\": rpc error: code = NotFound desc = could not find container \"60353b0cd3e4cc985bd382318ae6d7c2bb2c2ebf5d61a2e4d0619d8b7875e7a9\": container with ID starting with 60353b0cd3e4cc985bd382318ae6d7c2bb2c2ebf5d61a2e4d0619d8b7875e7a9 not found: ID does not exist" Jan 22 07:24:28 crc kubenswrapper[4933]: I0122 07:24:28.502553 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7d881258-6f0e-4c52-b036-3805dba06666" path="/var/lib/kubelet/pods/7d881258-6f0e-4c52-b036-3805dba06666/volumes" Jan 22 07:24:32 crc kubenswrapper[4933]: I0122 07:24:32.071535 4933 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 22 07:24:32 crc kubenswrapper[4933]: I0122 07:24:32.071816 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 22 07:24:33 crc kubenswrapper[4933]: I0122 07:24:33.087278 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="e856da6e-d00f-4b01-af9f-257c680f9882" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.1.97:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 22 07:24:33 crc kubenswrapper[4933]: I0122 07:24:33.087300 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="e856da6e-d00f-4b01-af9f-257c680f9882" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.1.97:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 22 07:24:42 crc kubenswrapper[4933]: I0122 07:24:42.078688 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 22 07:24:42 crc kubenswrapper[4933]: I0122 07:24:42.080525 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 22 07:24:42 crc kubenswrapper[4933]: I0122 07:24:42.080567 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 22 07:24:42 crc kubenswrapper[4933]: I0122 07:24:42.087219 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 22 07:24:42 crc kubenswrapper[4933]: I0122 07:24:42.850981 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 22 07:24:42 crc kubenswrapper[4933]: I0122 07:24:42.856342 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 22 07:25:05 crc kubenswrapper[4933]: I0122 07:25:05.267805 4933 scope.go:117] "RemoveContainer" containerID="791ee139cdd01ee7264d040651dc10539788592ddc696d4bee66c94a8c05158c" Jan 22 07:25:05 crc kubenswrapper[4933]: I0122 07:25:05.300477 4933 scope.go:117] "RemoveContainer" containerID="e06943f28023d4bf16b8f3df6fd47a3c5569c55b4239064adbd7f0fb4beb0ab2" Jan 22 07:25:07 crc kubenswrapper[4933]: I0122 07:25:07.975679 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-qxtvn"] Jan 22 07:25:07 crc kubenswrapper[4933]: E0122 07:25:07.976332 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d881258-6f0e-4c52-b036-3805dba06666" containerName="dnsmasq-dns" Jan 22 07:25:07 crc kubenswrapper[4933]: I0122 07:25:07.976599 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d881258-6f0e-4c52-b036-3805dba06666" containerName="dnsmasq-dns" Jan 22 07:25:07 crc kubenswrapper[4933]: E0122 07:25:07.976633 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d881258-6f0e-4c52-b036-3805dba06666" containerName="init" Jan 22 07:25:07 crc kubenswrapper[4933]: I0122 07:25:07.976642 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d881258-6f0e-4c52-b036-3805dba06666" containerName="init" Jan 22 07:25:07 crc kubenswrapper[4933]: I0122 07:25:07.976807 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d881258-6f0e-4c52-b036-3805dba06666" containerName="dnsmasq-dns" Jan 22 07:25:07 crc kubenswrapper[4933]: I0122 07:25:07.977413 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-qxtvn" Jan 22 07:25:07 crc kubenswrapper[4933]: I0122 07:25:07.979006 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Jan 22 07:25:07 crc kubenswrapper[4933]: I0122 07:25:07.981541 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Jan 22 07:25:07 crc kubenswrapper[4933]: I0122 07:25:07.989479 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-mskzq" Jan 22 07:25:07 crc kubenswrapper[4933]: I0122 07:25:07.990874 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-qxtvn"] Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.013793 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/308f49ae-ef97-4833-b9b5-e3cef66d305f-ovn-controller-tls-certs\") pod \"ovn-controller-qxtvn\" (UID: \"308f49ae-ef97-4833-b9b5-e3cef66d305f\") " pod="openstack/ovn-controller-qxtvn" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.013846 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/308f49ae-ef97-4833-b9b5-e3cef66d305f-combined-ca-bundle\") pod \"ovn-controller-qxtvn\" (UID: \"308f49ae-ef97-4833-b9b5-e3cef66d305f\") " pod="openstack/ovn-controller-qxtvn" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.014240 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/308f49ae-ef97-4833-b9b5-e3cef66d305f-scripts\") pod \"ovn-controller-qxtvn\" (UID: \"308f49ae-ef97-4833-b9b5-e3cef66d305f\") " pod="openstack/ovn-controller-qxtvn" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.014345 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/308f49ae-ef97-4833-b9b5-e3cef66d305f-var-run-ovn\") pod \"ovn-controller-qxtvn\" (UID: \"308f49ae-ef97-4833-b9b5-e3cef66d305f\") " pod="openstack/ovn-controller-qxtvn" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.014444 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-znxnh\" (UniqueName: \"kubernetes.io/projected/308f49ae-ef97-4833-b9b5-e3cef66d305f-kube-api-access-znxnh\") pod \"ovn-controller-qxtvn\" (UID: \"308f49ae-ef97-4833-b9b5-e3cef66d305f\") " pod="openstack/ovn-controller-qxtvn" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.014465 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/308f49ae-ef97-4833-b9b5-e3cef66d305f-var-run\") pod \"ovn-controller-qxtvn\" (UID: \"308f49ae-ef97-4833-b9b5-e3cef66d305f\") " pod="openstack/ovn-controller-qxtvn" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.014493 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/308f49ae-ef97-4833-b9b5-e3cef66d305f-var-log-ovn\") pod \"ovn-controller-qxtvn\" (UID: \"308f49ae-ef97-4833-b9b5-e3cef66d305f\") " pod="openstack/ovn-controller-qxtvn" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.031112 4933 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-ctqtw"] Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.033095 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-ctqtw" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.043650 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-ctqtw"] Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.116557 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/71d2c844-a72f-49fa-97d9-1d9f236823c4-var-run\") pod \"ovn-controller-ovs-ctqtw\" (UID: \"71d2c844-a72f-49fa-97d9-1d9f236823c4\") " pod="openstack/ovn-controller-ovs-ctqtw" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.116618 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/308f49ae-ef97-4833-b9b5-e3cef66d305f-var-run-ovn\") pod \"ovn-controller-qxtvn\" (UID: \"308f49ae-ef97-4833-b9b5-e3cef66d305f\") " pod="openstack/ovn-controller-qxtvn" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.116690 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-znxnh\" (UniqueName: \"kubernetes.io/projected/308f49ae-ef97-4833-b9b5-e3cef66d305f-kube-api-access-znxnh\") pod \"ovn-controller-qxtvn\" (UID: \"308f49ae-ef97-4833-b9b5-e3cef66d305f\") " pod="openstack/ovn-controller-qxtvn" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.116712 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/308f49ae-ef97-4833-b9b5-e3cef66d305f-var-run\") pod \"ovn-controller-qxtvn\" (UID: \"308f49ae-ef97-4833-b9b5-e3cef66d305f\") " pod="openstack/ovn-controller-qxtvn" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.116734 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/308f49ae-ef97-4833-b9b5-e3cef66d305f-var-log-ovn\") pod \"ovn-controller-qxtvn\" (UID: \"308f49ae-ef97-4833-b9b5-e3cef66d305f\") " pod="openstack/ovn-controller-qxtvn" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.116792 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/308f49ae-ef97-4833-b9b5-e3cef66d305f-ovn-controller-tls-certs\") pod \"ovn-controller-qxtvn\" (UID: \"308f49ae-ef97-4833-b9b5-e3cef66d305f\") " pod="openstack/ovn-controller-qxtvn" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.116845 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/308f49ae-ef97-4833-b9b5-e3cef66d305f-combined-ca-bundle\") pod \"ovn-controller-qxtvn\" (UID: \"308f49ae-ef97-4833-b9b5-e3cef66d305f\") " pod="openstack/ovn-controller-qxtvn" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.116952 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/308f49ae-ef97-4833-b9b5-e3cef66d305f-var-run\") pod \"ovn-controller-qxtvn\" (UID: \"308f49ae-ef97-4833-b9b5-e3cef66d305f\") " pod="openstack/ovn-controller-qxtvn" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.116967 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/308f49ae-ef97-4833-b9b5-e3cef66d305f-var-run-ovn\") pod \"ovn-controller-qxtvn\" (UID: \"308f49ae-ef97-4833-b9b5-e3cef66d305f\") " pod="openstack/ovn-controller-qxtvn" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.117145 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/308f49ae-ef97-4833-b9b5-e3cef66d305f-var-log-ovn\") pod \"ovn-controller-qxtvn\" (UID: \"308f49ae-ef97-4833-b9b5-e3cef66d305f\") " pod="openstack/ovn-controller-qxtvn" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.117566 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/71d2c844-a72f-49fa-97d9-1d9f236823c4-scripts\") pod \"ovn-controller-ovs-ctqtw\" (UID: \"71d2c844-a72f-49fa-97d9-1d9f236823c4\") " pod="openstack/ovn-controller-ovs-ctqtw" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.117605 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f5npr\" (UniqueName: \"kubernetes.io/projected/71d2c844-a72f-49fa-97d9-1d9f236823c4-kube-api-access-f5npr\") pod \"ovn-controller-ovs-ctqtw\" (UID: \"71d2c844-a72f-49fa-97d9-1d9f236823c4\") " pod="openstack/ovn-controller-ovs-ctqtw" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.117644 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/71d2c844-a72f-49fa-97d9-1d9f236823c4-var-log\") pod \"ovn-controller-ovs-ctqtw\" (UID: \"71d2c844-a72f-49fa-97d9-1d9f236823c4\") " pod="openstack/ovn-controller-ovs-ctqtw" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.117663 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/71d2c844-a72f-49fa-97d9-1d9f236823c4-etc-ovs\") pod \"ovn-controller-ovs-ctqtw\" (UID: \"71d2c844-a72f-49fa-97d9-1d9f236823c4\") " pod="openstack/ovn-controller-ovs-ctqtw" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.117693 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/71d2c844-a72f-49fa-97d9-1d9f236823c4-var-lib\") pod \"ovn-controller-ovs-ctqtw\" (UID: \"71d2c844-a72f-49fa-97d9-1d9f236823c4\") " pod="openstack/ovn-controller-ovs-ctqtw" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.117776 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/308f49ae-ef97-4833-b9b5-e3cef66d305f-scripts\") pod \"ovn-controller-qxtvn\" (UID: \"308f49ae-ef97-4833-b9b5-e3cef66d305f\") " pod="openstack/ovn-controller-qxtvn" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.119954 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/308f49ae-ef97-4833-b9b5-e3cef66d305f-scripts\") pod \"ovn-controller-qxtvn\" (UID: \"308f49ae-ef97-4833-b9b5-e3cef66d305f\") " pod="openstack/ovn-controller-qxtvn" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.123707 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/308f49ae-ef97-4833-b9b5-e3cef66d305f-ovn-controller-tls-certs\") pod \"ovn-controller-qxtvn\" (UID: 
\"308f49ae-ef97-4833-b9b5-e3cef66d305f\") " pod="openstack/ovn-controller-qxtvn" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.132901 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/308f49ae-ef97-4833-b9b5-e3cef66d305f-combined-ca-bundle\") pod \"ovn-controller-qxtvn\" (UID: \"308f49ae-ef97-4833-b9b5-e3cef66d305f\") " pod="openstack/ovn-controller-qxtvn" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.137328 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-znxnh\" (UniqueName: \"kubernetes.io/projected/308f49ae-ef97-4833-b9b5-e3cef66d305f-kube-api-access-znxnh\") pod \"ovn-controller-qxtvn\" (UID: \"308f49ae-ef97-4833-b9b5-e3cef66d305f\") " pod="openstack/ovn-controller-qxtvn" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.219726 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/71d2c844-a72f-49fa-97d9-1d9f236823c4-var-run\") pod \"ovn-controller-ovs-ctqtw\" (UID: \"71d2c844-a72f-49fa-97d9-1d9f236823c4\") " pod="openstack/ovn-controller-ovs-ctqtw" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.219865 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/71d2c844-a72f-49fa-97d9-1d9f236823c4-scripts\") pod \"ovn-controller-ovs-ctqtw\" (UID: \"71d2c844-a72f-49fa-97d9-1d9f236823c4\") " pod="openstack/ovn-controller-ovs-ctqtw" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.219899 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f5npr\" (UniqueName: \"kubernetes.io/projected/71d2c844-a72f-49fa-97d9-1d9f236823c4-kube-api-access-f5npr\") pod \"ovn-controller-ovs-ctqtw\" (UID: \"71d2c844-a72f-49fa-97d9-1d9f236823c4\") " pod="openstack/ovn-controller-ovs-ctqtw" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.219943 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/71d2c844-a72f-49fa-97d9-1d9f236823c4-var-log\") pod \"ovn-controller-ovs-ctqtw\" (UID: \"71d2c844-a72f-49fa-97d9-1d9f236823c4\") " pod="openstack/ovn-controller-ovs-ctqtw" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.219971 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/71d2c844-a72f-49fa-97d9-1d9f236823c4-etc-ovs\") pod \"ovn-controller-ovs-ctqtw\" (UID: \"71d2c844-a72f-49fa-97d9-1d9f236823c4\") " pod="openstack/ovn-controller-ovs-ctqtw" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.220011 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/71d2c844-a72f-49fa-97d9-1d9f236823c4-var-lib\") pod \"ovn-controller-ovs-ctqtw\" (UID: \"71d2c844-a72f-49fa-97d9-1d9f236823c4\") " pod="openstack/ovn-controller-ovs-ctqtw" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.220207 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/71d2c844-a72f-49fa-97d9-1d9f236823c4-var-lib\") pod \"ovn-controller-ovs-ctqtw\" (UID: \"71d2c844-a72f-49fa-97d9-1d9f236823c4\") " pod="openstack/ovn-controller-ovs-ctqtw" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.220277 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"var-run\" (UniqueName: \"kubernetes.io/host-path/71d2c844-a72f-49fa-97d9-1d9f236823c4-var-run\") pod \"ovn-controller-ovs-ctqtw\" (UID: \"71d2c844-a72f-49fa-97d9-1d9f236823c4\") " pod="openstack/ovn-controller-ovs-ctqtw" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.222474 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/71d2c844-a72f-49fa-97d9-1d9f236823c4-scripts\") pod \"ovn-controller-ovs-ctqtw\" (UID: \"71d2c844-a72f-49fa-97d9-1d9f236823c4\") " pod="openstack/ovn-controller-ovs-ctqtw" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.222883 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/71d2c844-a72f-49fa-97d9-1d9f236823c4-var-log\") pod \"ovn-controller-ovs-ctqtw\" (UID: \"71d2c844-a72f-49fa-97d9-1d9f236823c4\") " pod="openstack/ovn-controller-ovs-ctqtw" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.222954 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/71d2c844-a72f-49fa-97d9-1d9f236823c4-etc-ovs\") pod \"ovn-controller-ovs-ctqtw\" (UID: \"71d2c844-a72f-49fa-97d9-1d9f236823c4\") " pod="openstack/ovn-controller-ovs-ctqtw" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.247116 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f5npr\" (UniqueName: \"kubernetes.io/projected/71d2c844-a72f-49fa-97d9-1d9f236823c4-kube-api-access-f5npr\") pod \"ovn-controller-ovs-ctqtw\" (UID: \"71d2c844-a72f-49fa-97d9-1d9f236823c4\") " pod="openstack/ovn-controller-ovs-ctqtw" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.302103 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-qxtvn" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.349505 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-ctqtw" Jan 22 07:25:08 crc kubenswrapper[4933]: I0122 07:25:08.799663 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-qxtvn"] Jan 22 07:25:09 crc kubenswrapper[4933]: I0122 07:25:09.097193 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-qxtvn" event={"ID":"308f49ae-ef97-4833-b9b5-e3cef66d305f","Type":"ContainerStarted","Data":"2a9faf866a2aaf1b1aa37539a66064528fb1290f16eb4e055ec67779863e9661"} Jan 22 07:25:09 crc kubenswrapper[4933]: I0122 07:25:09.221478 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-ctqtw"] Jan 22 07:25:09 crc kubenswrapper[4933]: W0122 07:25:09.224038 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod71d2c844_a72f_49fa_97d9_1d9f236823c4.slice/crio-02485885cff226e371855cd7fa33c4af019d5f1d9b34ebb421af26b841dd45ee WatchSource:0}: Error finding container 02485885cff226e371855cd7fa33c4af019d5f1d9b34ebb421af26b841dd45ee: Status 404 returned error can't find the container with id 02485885cff226e371855cd7fa33c4af019d5f1d9b34ebb421af26b841dd45ee Jan 22 07:25:09 crc kubenswrapper[4933]: I0122 07:25:09.422423 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-5brn2"] Jan 22 07:25:09 crc kubenswrapper[4933]: I0122 07:25:09.438754 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-5brn2" Jan 22 07:25:09 crc kubenswrapper[4933]: I0122 07:25:09.441368 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Jan 22 07:25:09 crc kubenswrapper[4933]: I0122 07:25:09.460247 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-5brn2"] Jan 22 07:25:09 crc kubenswrapper[4933]: I0122 07:25:09.548665 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/03290ff4-6e07-4c4d-8e4d-33a5e1977c4f-ovs-rundir\") pod \"ovn-controller-metrics-5brn2\" (UID: \"03290ff4-6e07-4c4d-8e4d-33a5e1977c4f\") " pod="openstack/ovn-controller-metrics-5brn2" Jan 22 07:25:09 crc kubenswrapper[4933]: I0122 07:25:09.548766 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/03290ff4-6e07-4c4d-8e4d-33a5e1977c4f-config\") pod \"ovn-controller-metrics-5brn2\" (UID: \"03290ff4-6e07-4c4d-8e4d-33a5e1977c4f\") " pod="openstack/ovn-controller-metrics-5brn2" Jan 22 07:25:09 crc kubenswrapper[4933]: I0122 07:25:09.551477 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03290ff4-6e07-4c4d-8e4d-33a5e1977c4f-combined-ca-bundle\") pod \"ovn-controller-metrics-5brn2\" (UID: \"03290ff4-6e07-4c4d-8e4d-33a5e1977c4f\") " pod="openstack/ovn-controller-metrics-5brn2" Jan 22 07:25:09 crc kubenswrapper[4933]: I0122 07:25:09.551682 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7c5hj\" (UniqueName: \"kubernetes.io/projected/03290ff4-6e07-4c4d-8e4d-33a5e1977c4f-kube-api-access-7c5hj\") pod \"ovn-controller-metrics-5brn2\" (UID: \"03290ff4-6e07-4c4d-8e4d-33a5e1977c4f\") " pod="openstack/ovn-controller-metrics-5brn2" Jan 22 07:25:09 crc kubenswrapper[4933]: I0122 07:25:09.551826 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/03290ff4-6e07-4c4d-8e4d-33a5e1977c4f-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-5brn2\" (UID: \"03290ff4-6e07-4c4d-8e4d-33a5e1977c4f\") " pod="openstack/ovn-controller-metrics-5brn2" Jan 22 07:25:09 crc kubenswrapper[4933]: I0122 07:25:09.551868 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/03290ff4-6e07-4c4d-8e4d-33a5e1977c4f-ovn-rundir\") pod \"ovn-controller-metrics-5brn2\" (UID: \"03290ff4-6e07-4c4d-8e4d-33a5e1977c4f\") " pod="openstack/ovn-controller-metrics-5brn2" Jan 22 07:25:09 crc kubenswrapper[4933]: I0122 07:25:09.653415 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03290ff4-6e07-4c4d-8e4d-33a5e1977c4f-combined-ca-bundle\") pod \"ovn-controller-metrics-5brn2\" (UID: \"03290ff4-6e07-4c4d-8e4d-33a5e1977c4f\") " pod="openstack/ovn-controller-metrics-5brn2" Jan 22 07:25:09 crc kubenswrapper[4933]: I0122 07:25:09.654735 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7c5hj\" (UniqueName: \"kubernetes.io/projected/03290ff4-6e07-4c4d-8e4d-33a5e1977c4f-kube-api-access-7c5hj\") pod 
\"ovn-controller-metrics-5brn2\" (UID: \"03290ff4-6e07-4c4d-8e4d-33a5e1977c4f\") " pod="openstack/ovn-controller-metrics-5brn2" Jan 22 07:25:09 crc kubenswrapper[4933]: I0122 07:25:09.655044 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/03290ff4-6e07-4c4d-8e4d-33a5e1977c4f-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-5brn2\" (UID: \"03290ff4-6e07-4c4d-8e4d-33a5e1977c4f\") " pod="openstack/ovn-controller-metrics-5brn2" Jan 22 07:25:09 crc kubenswrapper[4933]: I0122 07:25:09.655415 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/03290ff4-6e07-4c4d-8e4d-33a5e1977c4f-ovn-rundir\") pod \"ovn-controller-metrics-5brn2\" (UID: \"03290ff4-6e07-4c4d-8e4d-33a5e1977c4f\") " pod="openstack/ovn-controller-metrics-5brn2" Jan 22 07:25:09 crc kubenswrapper[4933]: I0122 07:25:09.655604 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/03290ff4-6e07-4c4d-8e4d-33a5e1977c4f-ovs-rundir\") pod \"ovn-controller-metrics-5brn2\" (UID: \"03290ff4-6e07-4c4d-8e4d-33a5e1977c4f\") " pod="openstack/ovn-controller-metrics-5brn2" Jan 22 07:25:09 crc kubenswrapper[4933]: I0122 07:25:09.655704 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/03290ff4-6e07-4c4d-8e4d-33a5e1977c4f-config\") pod \"ovn-controller-metrics-5brn2\" (UID: \"03290ff4-6e07-4c4d-8e4d-33a5e1977c4f\") " pod="openstack/ovn-controller-metrics-5brn2" Jan 22 07:25:09 crc kubenswrapper[4933]: I0122 07:25:09.655715 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/03290ff4-6e07-4c4d-8e4d-33a5e1977c4f-ovs-rundir\") pod \"ovn-controller-metrics-5brn2\" (UID: \"03290ff4-6e07-4c4d-8e4d-33a5e1977c4f\") " pod="openstack/ovn-controller-metrics-5brn2" Jan 22 07:25:09 crc kubenswrapper[4933]: I0122 07:25:09.655721 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/03290ff4-6e07-4c4d-8e4d-33a5e1977c4f-ovn-rundir\") pod \"ovn-controller-metrics-5brn2\" (UID: \"03290ff4-6e07-4c4d-8e4d-33a5e1977c4f\") " pod="openstack/ovn-controller-metrics-5brn2" Jan 22 07:25:09 crc kubenswrapper[4933]: I0122 07:25:09.656381 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/03290ff4-6e07-4c4d-8e4d-33a5e1977c4f-config\") pod \"ovn-controller-metrics-5brn2\" (UID: \"03290ff4-6e07-4c4d-8e4d-33a5e1977c4f\") " pod="openstack/ovn-controller-metrics-5brn2" Jan 22 07:25:09 crc kubenswrapper[4933]: I0122 07:25:09.659672 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03290ff4-6e07-4c4d-8e4d-33a5e1977c4f-combined-ca-bundle\") pod \"ovn-controller-metrics-5brn2\" (UID: \"03290ff4-6e07-4c4d-8e4d-33a5e1977c4f\") " pod="openstack/ovn-controller-metrics-5brn2" Jan 22 07:25:09 crc kubenswrapper[4933]: I0122 07:25:09.660236 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/03290ff4-6e07-4c4d-8e4d-33a5e1977c4f-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-5brn2\" (UID: \"03290ff4-6e07-4c4d-8e4d-33a5e1977c4f\") " 
pod="openstack/ovn-controller-metrics-5brn2" Jan 22 07:25:09 crc kubenswrapper[4933]: I0122 07:25:09.670462 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7c5hj\" (UniqueName: \"kubernetes.io/projected/03290ff4-6e07-4c4d-8e4d-33a5e1977c4f-kube-api-access-7c5hj\") pod \"ovn-controller-metrics-5brn2\" (UID: \"03290ff4-6e07-4c4d-8e4d-33a5e1977c4f\") " pod="openstack/ovn-controller-metrics-5brn2" Jan 22 07:25:09 crc kubenswrapper[4933]: I0122 07:25:09.761882 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-5brn2" Jan 22 07:25:09 crc kubenswrapper[4933]: I0122 07:25:09.767545 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-db-create-97zkc"] Jan 22 07:25:09 crc kubenswrapper[4933]: I0122 07:25:09.768929 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-create-97zkc" Jan 22 07:25:09 crc kubenswrapper[4933]: I0122 07:25:09.780056 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-db-create-97zkc"] Jan 22 07:25:09 crc kubenswrapper[4933]: I0122 07:25:09.858863 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d4mk6\" (UniqueName: \"kubernetes.io/projected/6eb108c3-9c44-480e-ac8f-b0c0c74db54f-kube-api-access-d4mk6\") pod \"octavia-db-create-97zkc\" (UID: \"6eb108c3-9c44-480e-ac8f-b0c0c74db54f\") " pod="openstack/octavia-db-create-97zkc" Jan 22 07:25:09 crc kubenswrapper[4933]: I0122 07:25:09.858921 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6eb108c3-9c44-480e-ac8f-b0c0c74db54f-operator-scripts\") pod \"octavia-db-create-97zkc\" (UID: \"6eb108c3-9c44-480e-ac8f-b0c0c74db54f\") " pod="openstack/octavia-db-create-97zkc" Jan 22 07:25:09 crc kubenswrapper[4933]: I0122 07:25:09.960947 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d4mk6\" (UniqueName: \"kubernetes.io/projected/6eb108c3-9c44-480e-ac8f-b0c0c74db54f-kube-api-access-d4mk6\") pod \"octavia-db-create-97zkc\" (UID: \"6eb108c3-9c44-480e-ac8f-b0c0c74db54f\") " pod="openstack/octavia-db-create-97zkc" Jan 22 07:25:09 crc kubenswrapper[4933]: I0122 07:25:09.961344 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6eb108c3-9c44-480e-ac8f-b0c0c74db54f-operator-scripts\") pod \"octavia-db-create-97zkc\" (UID: \"6eb108c3-9c44-480e-ac8f-b0c0c74db54f\") " pod="openstack/octavia-db-create-97zkc" Jan 22 07:25:09 crc kubenswrapper[4933]: I0122 07:25:09.962386 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6eb108c3-9c44-480e-ac8f-b0c0c74db54f-operator-scripts\") pod \"octavia-db-create-97zkc\" (UID: \"6eb108c3-9c44-480e-ac8f-b0c0c74db54f\") " pod="openstack/octavia-db-create-97zkc" Jan 22 07:25:09 crc kubenswrapper[4933]: I0122 07:25:09.977090 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d4mk6\" (UniqueName: \"kubernetes.io/projected/6eb108c3-9c44-480e-ac8f-b0c0c74db54f-kube-api-access-d4mk6\") pod \"octavia-db-create-97zkc\" (UID: \"6eb108c3-9c44-480e-ac8f-b0c0c74db54f\") " pod="openstack/octavia-db-create-97zkc" Jan 22 07:25:10 crc kubenswrapper[4933]: I0122 07:25:10.105929 4933 util.go:30] "No 
Jan 22 07:25:10 crc kubenswrapper[4933]: I0122 07:25:10.105929 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-create-97zkc"
Jan 22 07:25:10 crc kubenswrapper[4933]: I0122 07:25:10.111561 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-qxtvn" event={"ID":"308f49ae-ef97-4833-b9b5-e3cef66d305f","Type":"ContainerStarted","Data":"3e16f49704294832cdf1029100224e5c150722fb727ec0174399817025f4474e"}
Jan 22 07:25:10 crc kubenswrapper[4933]: I0122 07:25:10.112957 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-qxtvn"
Jan 22 07:25:10 crc kubenswrapper[4933]: I0122 07:25:10.116177 4933 generic.go:334] "Generic (PLEG): container finished" podID="71d2c844-a72f-49fa-97d9-1d9f236823c4" containerID="f09b58f8dcbab57ac75267d437e0995c250d3ba3343a28cf8c605eaab3c85194" exitCode=0
Jan 22 07:25:10 crc kubenswrapper[4933]: I0122 07:25:10.116214 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-ctqtw" event={"ID":"71d2c844-a72f-49fa-97d9-1d9f236823c4","Type":"ContainerDied","Data":"f09b58f8dcbab57ac75267d437e0995c250d3ba3343a28cf8c605eaab3c85194"}
Jan 22 07:25:10 crc kubenswrapper[4933]: I0122 07:25:10.116238 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-ctqtw" event={"ID":"71d2c844-a72f-49fa-97d9-1d9f236823c4","Type":"ContainerStarted","Data":"02485885cff226e371855cd7fa33c4af019d5f1d9b34ebb421af26b841dd45ee"}
Jan 22 07:25:10 crc kubenswrapper[4933]: I0122 07:25:10.129422 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-qxtvn" podStartSLOduration=3.129397195 podStartE2EDuration="3.129397195s" podCreationTimestamp="2026-01-22 07:25:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:25:10.128361239 +0000 UTC m=+5957.965486602" watchObservedRunningTime="2026-01-22 07:25:10.129397195 +0000 UTC m=+5957.966522548"
Jan 22 07:25:10 crc kubenswrapper[4933]: I0122 07:25:10.269358 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-5brn2"]
Jan 22 07:25:10 crc kubenswrapper[4933]: I0122 07:25:10.624445 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-db-create-97zkc"]
Jan 22 07:25:10 crc kubenswrapper[4933]: I0122 07:25:10.942959 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 07:25:10 crc kubenswrapper[4933]: I0122 07:25:10.943405 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 07:25:11 crc kubenswrapper[4933]: I0122 07:25:11.132202 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-40d7-account-create-update-t4rsr"]
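The paired patch_prober.go:28 / prober.go:107 lines are a single liveness attempt against machine-config-daemon: the GET to 127.0.0.1:8798/health is refused at the TCP level, so there is no response body at all (hence the empty start-of-body=), and the same failure repeats at 07:25:40 below. A minimal sketch of the equivalent check, using plain net/http rather than the kubelet's prober package, with the URL taken from the log and the 1 s timeout assumed (the kubelet default):

    package main

    import (
        "fmt"
        "io"
        "net/http"
        "time"
    )

    // probeOnce performs one HTTP liveness check as the log describes it:
    // GET the health endpoint, treat connection errors and non-2xx/3xx codes as failure.
    func probeOnce(url string) error {
        client := &http.Client{Timeout: 1 * time.Second} // assumed timeoutSeconds: 1
        resp, err := client.Get(url)
        if err != nil {
            return fmt.Errorf("probe failed: %w", err) // e.g. "connect: connection refused"
        }
        defer resp.Body.Close()
        body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024)) // prober logs only the start of the body
        if resp.StatusCode < 200 || resp.StatusCode >= 400 {
            return fmt.Errorf("probe failed: status %d, start-of-body=%q", resp.StatusCode, body)
        }
        return nil
    }

    func main() {
        if err := probeOnce("http://127.0.0.1:8798/health"); err != nil {
            fmt.Println(err)
        } else {
            fmt.Println("probe succeeded")
        }
    }

Kubelet HTTP probes count any status in [200, 400) as success, which the sketch mirrors.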
Jan 22 07:25:11 crc kubenswrapper[4933]: I0122 07:25:11.133449 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-40d7-account-create-update-t4rsr"
Jan 22 07:25:11 crc kubenswrapper[4933]: I0122 07:25:11.134414 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-ctqtw" event={"ID":"71d2c844-a72f-49fa-97d9-1d9f236823c4","Type":"ContainerStarted","Data":"f791fb41c34c16a0c775220e90fea9c1918a3013466f2d5ef5d74b67ba39a0d5"}
Jan 22 07:25:11 crc kubenswrapper[4933]: I0122 07:25:11.134440 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-ctqtw" event={"ID":"71d2c844-a72f-49fa-97d9-1d9f236823c4","Type":"ContainerStarted","Data":"acf854537c9e53aa83c2d424aa33095061d7496e20bbb0e22e4777e487acedc5"}
Jan 22 07:25:11 crc kubenswrapper[4933]: I0122 07:25:11.134471 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-ctqtw"
Jan 22 07:25:11 crc kubenswrapper[4933]: I0122 07:25:11.134568 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-ctqtw"
Jan 22 07:25:11 crc kubenswrapper[4933]: I0122 07:25:11.135432 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-db-secret"
Jan 22 07:25:11 crc kubenswrapper[4933]: I0122 07:25:11.137153 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-5brn2" event={"ID":"03290ff4-6e07-4c4d-8e4d-33a5e1977c4f","Type":"ContainerStarted","Data":"105d8dd0d3659fb921153bd9fb8090bb845059ccbfef2f1162be7979c9717d8b"}
Jan 22 07:25:11 crc kubenswrapper[4933]: I0122 07:25:11.137188 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-5brn2" event={"ID":"03290ff4-6e07-4c4d-8e4d-33a5e1977c4f","Type":"ContainerStarted","Data":"3baa39c0afbde3661cd895c0ac868c1e9bb46bbdd585fb596437c8a237346313"}
Jan 22 07:25:11 crc kubenswrapper[4933]: I0122 07:25:11.139484 4933 generic.go:334] "Generic (PLEG): container finished" podID="6eb108c3-9c44-480e-ac8f-b0c0c74db54f" containerID="a2454353ef91e4c783dbe25c6ad27150573e5b6da3e01fca7e60831c997ea4f2" exitCode=0
Jan 22 07:25:11 crc kubenswrapper[4933]: I0122 07:25:11.139598 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-create-97zkc" event={"ID":"6eb108c3-9c44-480e-ac8f-b0c0c74db54f","Type":"ContainerDied","Data":"a2454353ef91e4c783dbe25c6ad27150573e5b6da3e01fca7e60831c997ea4f2"}
Jan 22 07:25:11 crc kubenswrapper[4933]: I0122 07:25:11.139669 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-create-97zkc" event={"ID":"6eb108c3-9c44-480e-ac8f-b0c0c74db54f","Type":"ContainerStarted","Data":"c834cf43e7a7e1d6483dbfc5d16fbfb270623ad3b19865343e10c4166769d6c7"}
Jan 22 07:25:11 crc kubenswrapper[4933]: I0122 07:25:11.148440 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-40d7-account-create-update-t4rsr"]
Jan 22 07:25:11 crc kubenswrapper[4933]: I0122 07:25:11.182642 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-ctqtw" podStartSLOduration=3.182620299 podStartE2EDuration="3.182620299s" podCreationTimestamp="2026-01-22 07:25:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:25:11.178539261 +0000 UTC m=+5959.015664644" watchObservedRunningTime="2026-01-22 07:25:11.182620299 +0000 UTC m=+5959.019745652"
Jan 22 07:25:11 crc kubenswrapper[4933]: I0122 07:25:11.194051 4933 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/58379571-6fee-4de8-8919-293ea15e8c07-operator-scripts\") pod \"octavia-40d7-account-create-update-t4rsr\" (UID: \"58379571-6fee-4de8-8919-293ea15e8c07\") " pod="openstack/octavia-40d7-account-create-update-t4rsr" Jan 22 07:25:11 crc kubenswrapper[4933]: I0122 07:25:11.197245 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-skhfb\" (UniqueName: \"kubernetes.io/projected/58379571-6fee-4de8-8919-293ea15e8c07-kube-api-access-skhfb\") pod \"octavia-40d7-account-create-update-t4rsr\" (UID: \"58379571-6fee-4de8-8919-293ea15e8c07\") " pod="openstack/octavia-40d7-account-create-update-t4rsr" Jan 22 07:25:11 crc kubenswrapper[4933]: I0122 07:25:11.206230 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-5brn2" podStartSLOduration=2.206214753 podStartE2EDuration="2.206214753s" podCreationTimestamp="2026-01-22 07:25:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:25:11.192568252 +0000 UTC m=+5959.029693605" watchObservedRunningTime="2026-01-22 07:25:11.206214753 +0000 UTC m=+5959.043340106" Jan 22 07:25:11 crc kubenswrapper[4933]: I0122 07:25:11.299609 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-skhfb\" (UniqueName: \"kubernetes.io/projected/58379571-6fee-4de8-8919-293ea15e8c07-kube-api-access-skhfb\") pod \"octavia-40d7-account-create-update-t4rsr\" (UID: \"58379571-6fee-4de8-8919-293ea15e8c07\") " pod="openstack/octavia-40d7-account-create-update-t4rsr" Jan 22 07:25:11 crc kubenswrapper[4933]: I0122 07:25:11.299754 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/58379571-6fee-4de8-8919-293ea15e8c07-operator-scripts\") pod \"octavia-40d7-account-create-update-t4rsr\" (UID: \"58379571-6fee-4de8-8919-293ea15e8c07\") " pod="openstack/octavia-40d7-account-create-update-t4rsr" Jan 22 07:25:11 crc kubenswrapper[4933]: I0122 07:25:11.300617 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/58379571-6fee-4de8-8919-293ea15e8c07-operator-scripts\") pod \"octavia-40d7-account-create-update-t4rsr\" (UID: \"58379571-6fee-4de8-8919-293ea15e8c07\") " pod="openstack/octavia-40d7-account-create-update-t4rsr" Jan 22 07:25:11 crc kubenswrapper[4933]: I0122 07:25:11.333737 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-skhfb\" (UniqueName: \"kubernetes.io/projected/58379571-6fee-4de8-8919-293ea15e8c07-kube-api-access-skhfb\") pod \"octavia-40d7-account-create-update-t4rsr\" (UID: \"58379571-6fee-4de8-8919-293ea15e8c07\") " pod="openstack/octavia-40d7-account-create-update-t4rsr" Jan 22 07:25:11 crc kubenswrapper[4933]: I0122 07:25:11.501285 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-40d7-account-create-update-t4rsr" Jan 22 07:25:11 crc kubenswrapper[4933]: I0122 07:25:11.962456 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-40d7-account-create-update-t4rsr"] Jan 22 07:25:12 crc kubenswrapper[4933]: I0122 07:25:12.153339 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-40d7-account-create-update-t4rsr" event={"ID":"58379571-6fee-4de8-8919-293ea15e8c07","Type":"ContainerStarted","Data":"c7afe3ef93cd30c177f607ca171a697672305c1ef9790c6d497ae3758424e87c"} Jan 22 07:25:12 crc kubenswrapper[4933]: I0122 07:25:12.153665 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-40d7-account-create-update-t4rsr" event={"ID":"58379571-6fee-4de8-8919-293ea15e8c07","Type":"ContainerStarted","Data":"0f2eec27c99e0060d542f3f23c09a41508e4189e4a358e3e51b66e545943f744"} Jan 22 07:25:12 crc kubenswrapper[4933]: I0122 07:25:12.180973 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-40d7-account-create-update-t4rsr" podStartSLOduration=1.180946188 podStartE2EDuration="1.180946188s" podCreationTimestamp="2026-01-22 07:25:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:25:12.166381234 +0000 UTC m=+5960.003506597" watchObservedRunningTime="2026-01-22 07:25:12.180946188 +0000 UTC m=+5960.018071561" Jan 22 07:25:12 crc kubenswrapper[4933]: I0122 07:25:12.450050 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-create-97zkc" Jan 22 07:25:12 crc kubenswrapper[4933]: I0122 07:25:12.627286 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4mk6\" (UniqueName: \"kubernetes.io/projected/6eb108c3-9c44-480e-ac8f-b0c0c74db54f-kube-api-access-d4mk6\") pod \"6eb108c3-9c44-480e-ac8f-b0c0c74db54f\" (UID: \"6eb108c3-9c44-480e-ac8f-b0c0c74db54f\") " Jan 22 07:25:12 crc kubenswrapper[4933]: I0122 07:25:12.628104 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6eb108c3-9c44-480e-ac8f-b0c0c74db54f-operator-scripts\") pod \"6eb108c3-9c44-480e-ac8f-b0c0c74db54f\" (UID: \"6eb108c3-9c44-480e-ac8f-b0c0c74db54f\") " Jan 22 07:25:12 crc kubenswrapper[4933]: I0122 07:25:12.628242 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6eb108c3-9c44-480e-ac8f-b0c0c74db54f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6eb108c3-9c44-480e-ac8f-b0c0c74db54f" (UID: "6eb108c3-9c44-480e-ac8f-b0c0c74db54f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:25:12 crc kubenswrapper[4933]: I0122 07:25:12.650680 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6eb108c3-9c44-480e-ac8f-b0c0c74db54f-kube-api-access-d4mk6" (OuterVolumeSpecName: "kube-api-access-d4mk6") pod "6eb108c3-9c44-480e-ac8f-b0c0c74db54f" (UID: "6eb108c3-9c44-480e-ac8f-b0c0c74db54f"). InnerVolumeSpecName "kube-api-access-d4mk6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:25:12 crc kubenswrapper[4933]: I0122 07:25:12.731451 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4mk6\" (UniqueName: \"kubernetes.io/projected/6eb108c3-9c44-480e-ac8f-b0c0c74db54f-kube-api-access-d4mk6\") on node \"crc\" DevicePath \"\"" Jan 22 07:25:12 crc kubenswrapper[4933]: I0122 07:25:12.731493 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6eb108c3-9c44-480e-ac8f-b0c0c74db54f-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:25:13 crc kubenswrapper[4933]: I0122 07:25:13.166505 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-create-97zkc" event={"ID":"6eb108c3-9c44-480e-ac8f-b0c0c74db54f","Type":"ContainerDied","Data":"c834cf43e7a7e1d6483dbfc5d16fbfb270623ad3b19865343e10c4166769d6c7"} Jan 22 07:25:13 crc kubenswrapper[4933]: I0122 07:25:13.166558 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c834cf43e7a7e1d6483dbfc5d16fbfb270623ad3b19865343e10c4166769d6c7" Jan 22 07:25:13 crc kubenswrapper[4933]: I0122 07:25:13.166518 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-create-97zkc" Jan 22 07:25:13 crc kubenswrapper[4933]: I0122 07:25:13.170049 4933 generic.go:334] "Generic (PLEG): container finished" podID="58379571-6fee-4de8-8919-293ea15e8c07" containerID="c7afe3ef93cd30c177f607ca171a697672305c1ef9790c6d497ae3758424e87c" exitCode=0 Jan 22 07:25:13 crc kubenswrapper[4933]: I0122 07:25:13.170159 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-40d7-account-create-update-t4rsr" event={"ID":"58379571-6fee-4de8-8919-293ea15e8c07","Type":"ContainerDied","Data":"c7afe3ef93cd30c177f607ca171a697672305c1ef9790c6d497ae3758424e87c"} Jan 22 07:25:14 crc kubenswrapper[4933]: I0122 07:25:14.555176 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-40d7-account-create-update-t4rsr" Jan 22 07:25:14 crc kubenswrapper[4933]: I0122 07:25:14.669830 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-skhfb\" (UniqueName: \"kubernetes.io/projected/58379571-6fee-4de8-8919-293ea15e8c07-kube-api-access-skhfb\") pod \"58379571-6fee-4de8-8919-293ea15e8c07\" (UID: \"58379571-6fee-4de8-8919-293ea15e8c07\") " Jan 22 07:25:14 crc kubenswrapper[4933]: I0122 07:25:14.669912 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/58379571-6fee-4de8-8919-293ea15e8c07-operator-scripts\") pod \"58379571-6fee-4de8-8919-293ea15e8c07\" (UID: \"58379571-6fee-4de8-8919-293ea15e8c07\") " Jan 22 07:25:14 crc kubenswrapper[4933]: I0122 07:25:14.670571 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/58379571-6fee-4de8-8919-293ea15e8c07-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "58379571-6fee-4de8-8919-293ea15e8c07" (UID: "58379571-6fee-4de8-8919-293ea15e8c07"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:25:14 crc kubenswrapper[4933]: I0122 07:25:14.671916 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/58379571-6fee-4de8-8919-293ea15e8c07-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:25:14 crc kubenswrapper[4933]: I0122 07:25:14.676584 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/58379571-6fee-4de8-8919-293ea15e8c07-kube-api-access-skhfb" (OuterVolumeSpecName: "kube-api-access-skhfb") pod "58379571-6fee-4de8-8919-293ea15e8c07" (UID: "58379571-6fee-4de8-8919-293ea15e8c07"). InnerVolumeSpecName "kube-api-access-skhfb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:25:14 crc kubenswrapper[4933]: I0122 07:25:14.773693 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-skhfb\" (UniqueName: \"kubernetes.io/projected/58379571-6fee-4de8-8919-293ea15e8c07-kube-api-access-skhfb\") on node \"crc\" DevicePath \"\"" Jan 22 07:25:15 crc kubenswrapper[4933]: I0122 07:25:15.197770 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-40d7-account-create-update-t4rsr" event={"ID":"58379571-6fee-4de8-8919-293ea15e8c07","Type":"ContainerDied","Data":"0f2eec27c99e0060d542f3f23c09a41508e4189e4a358e3e51b66e545943f744"} Jan 22 07:25:15 crc kubenswrapper[4933]: I0122 07:25:15.197818 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0f2eec27c99e0060d542f3f23c09a41508e4189e4a358e3e51b66e545943f744" Jan 22 07:25:15 crc kubenswrapper[4933]: I0122 07:25:15.197892 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-40d7-account-create-update-t4rsr" Jan 22 07:25:16 crc kubenswrapper[4933]: I0122 07:25:16.349906 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-persistence-db-create-tcfnc"] Jan 22 07:25:16 crc kubenswrapper[4933]: E0122 07:25:16.350634 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6eb108c3-9c44-480e-ac8f-b0c0c74db54f" containerName="mariadb-database-create" Jan 22 07:25:16 crc kubenswrapper[4933]: I0122 07:25:16.350650 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="6eb108c3-9c44-480e-ac8f-b0c0c74db54f" containerName="mariadb-database-create" Jan 22 07:25:16 crc kubenswrapper[4933]: E0122 07:25:16.350686 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58379571-6fee-4de8-8919-293ea15e8c07" containerName="mariadb-account-create-update" Jan 22 07:25:16 crc kubenswrapper[4933]: I0122 07:25:16.350694 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="58379571-6fee-4de8-8919-293ea15e8c07" containerName="mariadb-account-create-update" Jan 22 07:25:16 crc kubenswrapper[4933]: I0122 07:25:16.350926 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="58379571-6fee-4de8-8919-293ea15e8c07" containerName="mariadb-account-create-update" Jan 22 07:25:16 crc kubenswrapper[4933]: I0122 07:25:16.350944 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="6eb108c3-9c44-480e-ac8f-b0c0c74db54f" containerName="mariadb-database-create" Jan 22 07:25:16 crc kubenswrapper[4933]: I0122 07:25:16.351804 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-persistence-db-create-tcfnc" Jan 22 07:25:16 crc kubenswrapper[4933]: I0122 07:25:16.358667 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-persistence-db-create-tcfnc"] Jan 22 07:25:16 crc kubenswrapper[4933]: I0122 07:25:16.401931 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/04576677-4607-48ab-b786-a763b254ad3f-operator-scripts\") pod \"octavia-persistence-db-create-tcfnc\" (UID: \"04576677-4607-48ab-b786-a763b254ad3f\") " pod="openstack/octavia-persistence-db-create-tcfnc" Jan 22 07:25:16 crc kubenswrapper[4933]: I0122 07:25:16.402018 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8ssz9\" (UniqueName: \"kubernetes.io/projected/04576677-4607-48ab-b786-a763b254ad3f-kube-api-access-8ssz9\") pod \"octavia-persistence-db-create-tcfnc\" (UID: \"04576677-4607-48ab-b786-a763b254ad3f\") " pod="openstack/octavia-persistence-db-create-tcfnc" Jan 22 07:25:16 crc kubenswrapper[4933]: I0122 07:25:16.503832 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/04576677-4607-48ab-b786-a763b254ad3f-operator-scripts\") pod \"octavia-persistence-db-create-tcfnc\" (UID: \"04576677-4607-48ab-b786-a763b254ad3f\") " pod="openstack/octavia-persistence-db-create-tcfnc" Jan 22 07:25:16 crc kubenswrapper[4933]: I0122 07:25:16.503909 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8ssz9\" (UniqueName: \"kubernetes.io/projected/04576677-4607-48ab-b786-a763b254ad3f-kube-api-access-8ssz9\") pod \"octavia-persistence-db-create-tcfnc\" (UID: \"04576677-4607-48ab-b786-a763b254ad3f\") " pod="openstack/octavia-persistence-db-create-tcfnc" Jan 22 07:25:16 crc kubenswrapper[4933]: I0122 07:25:16.504729 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/04576677-4607-48ab-b786-a763b254ad3f-operator-scripts\") pod \"octavia-persistence-db-create-tcfnc\" (UID: \"04576677-4607-48ab-b786-a763b254ad3f\") " pod="openstack/octavia-persistence-db-create-tcfnc" Jan 22 07:25:16 crc kubenswrapper[4933]: I0122 07:25:16.521740 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8ssz9\" (UniqueName: \"kubernetes.io/projected/04576677-4607-48ab-b786-a763b254ad3f-kube-api-access-8ssz9\") pod \"octavia-persistence-db-create-tcfnc\" (UID: \"04576677-4607-48ab-b786-a763b254ad3f\") " pod="openstack/octavia-persistence-db-create-tcfnc" Jan 22 07:25:16 crc kubenswrapper[4933]: I0122 07:25:16.668246 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-persistence-db-create-tcfnc" Jan 22 07:25:17 crc kubenswrapper[4933]: I0122 07:25:17.107189 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-persistence-db-create-tcfnc"] Jan 22 07:25:17 crc kubenswrapper[4933]: I0122 07:25:17.213609 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-persistence-db-create-tcfnc" event={"ID":"04576677-4607-48ab-b786-a763b254ad3f","Type":"ContainerStarted","Data":"5e06cddaf0dce044cc8cad30393a6aeebbedad0bfc3dc91e8934cb4108531bce"} Jan 22 07:25:17 crc kubenswrapper[4933]: I0122 07:25:17.590508 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-c826-account-create-update-2pjwm"] Jan 22 07:25:17 crc kubenswrapper[4933]: I0122 07:25:17.591661 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-c826-account-create-update-2pjwm" Jan 22 07:25:17 crc kubenswrapper[4933]: I0122 07:25:17.594114 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-persistence-db-secret" Jan 22 07:25:17 crc kubenswrapper[4933]: I0122 07:25:17.611725 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-c826-account-create-update-2pjwm"] Jan 22 07:25:17 crc kubenswrapper[4933]: I0122 07:25:17.727253 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a8c8cb03-9525-41d1-b307-badc66ef535f-operator-scripts\") pod \"octavia-c826-account-create-update-2pjwm\" (UID: \"a8c8cb03-9525-41d1-b307-badc66ef535f\") " pod="openstack/octavia-c826-account-create-update-2pjwm" Jan 22 07:25:17 crc kubenswrapper[4933]: I0122 07:25:17.727407 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b857z\" (UniqueName: \"kubernetes.io/projected/a8c8cb03-9525-41d1-b307-badc66ef535f-kube-api-access-b857z\") pod \"octavia-c826-account-create-update-2pjwm\" (UID: \"a8c8cb03-9525-41d1-b307-badc66ef535f\") " pod="openstack/octavia-c826-account-create-update-2pjwm" Jan 22 07:25:17 crc kubenswrapper[4933]: I0122 07:25:17.828751 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a8c8cb03-9525-41d1-b307-badc66ef535f-operator-scripts\") pod \"octavia-c826-account-create-update-2pjwm\" (UID: \"a8c8cb03-9525-41d1-b307-badc66ef535f\") " pod="openstack/octavia-c826-account-create-update-2pjwm" Jan 22 07:25:17 crc kubenswrapper[4933]: I0122 07:25:17.828850 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b857z\" (UniqueName: \"kubernetes.io/projected/a8c8cb03-9525-41d1-b307-badc66ef535f-kube-api-access-b857z\") pod \"octavia-c826-account-create-update-2pjwm\" (UID: \"a8c8cb03-9525-41d1-b307-badc66ef535f\") " pod="openstack/octavia-c826-account-create-update-2pjwm" Jan 22 07:25:17 crc kubenswrapper[4933]: I0122 07:25:17.829603 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a8c8cb03-9525-41d1-b307-badc66ef535f-operator-scripts\") pod \"octavia-c826-account-create-update-2pjwm\" (UID: \"a8c8cb03-9525-41d1-b307-badc66ef535f\") " pod="openstack/octavia-c826-account-create-update-2pjwm" Jan 22 07:25:17 crc kubenswrapper[4933]: I0122 07:25:17.848375 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"kube-api-access-b857z\" (UniqueName: \"kubernetes.io/projected/a8c8cb03-9525-41d1-b307-badc66ef535f-kube-api-access-b857z\") pod \"octavia-c826-account-create-update-2pjwm\" (UID: \"a8c8cb03-9525-41d1-b307-badc66ef535f\") " pod="openstack/octavia-c826-account-create-update-2pjwm" Jan 22 07:25:17 crc kubenswrapper[4933]: I0122 07:25:17.905634 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-c826-account-create-update-2pjwm" Jan 22 07:25:18 crc kubenswrapper[4933]: I0122 07:25:18.231647 4933 generic.go:334] "Generic (PLEG): container finished" podID="04576677-4607-48ab-b786-a763b254ad3f" containerID="3dc4adca8bbf2e3374f739695c2a6d83deddf74227851407e568e2f124b0ee35" exitCode=0 Jan 22 07:25:18 crc kubenswrapper[4933]: I0122 07:25:18.231745 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-persistence-db-create-tcfnc" event={"ID":"04576677-4607-48ab-b786-a763b254ad3f","Type":"ContainerDied","Data":"3dc4adca8bbf2e3374f739695c2a6d83deddf74227851407e568e2f124b0ee35"} Jan 22 07:25:18 crc kubenswrapper[4933]: I0122 07:25:18.525656 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-c826-account-create-update-2pjwm"] Jan 22 07:25:18 crc kubenswrapper[4933]: W0122 07:25:18.525929 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda8c8cb03_9525_41d1_b307_badc66ef535f.slice/crio-2e1e20addad052dc46985ce684c9c47dd235bc8385a43aa8e29d8a2e867be70e WatchSource:0}: Error finding container 2e1e20addad052dc46985ce684c9c47dd235bc8385a43aa8e29d8a2e867be70e: Status 404 returned error can't find the container with id 2e1e20addad052dc46985ce684c9c47dd235bc8385a43aa8e29d8a2e867be70e Jan 22 07:25:19 crc kubenswrapper[4933]: I0122 07:25:19.242233 4933 generic.go:334] "Generic (PLEG): container finished" podID="a8c8cb03-9525-41d1-b307-badc66ef535f" containerID="2136f47d770777e539728e6e53388ee952d2762f387317a7e77f3d0c5f424f60" exitCode=0 Jan 22 07:25:19 crc kubenswrapper[4933]: I0122 07:25:19.242300 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-c826-account-create-update-2pjwm" event={"ID":"a8c8cb03-9525-41d1-b307-badc66ef535f","Type":"ContainerDied","Data":"2136f47d770777e539728e6e53388ee952d2762f387317a7e77f3d0c5f424f60"} Jan 22 07:25:19 crc kubenswrapper[4933]: I0122 07:25:19.242578 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-c826-account-create-update-2pjwm" event={"ID":"a8c8cb03-9525-41d1-b307-badc66ef535f","Type":"ContainerStarted","Data":"2e1e20addad052dc46985ce684c9c47dd235bc8385a43aa8e29d8a2e867be70e"} Jan 22 07:25:19 crc kubenswrapper[4933]: I0122 07:25:19.605173 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-persistence-db-create-tcfnc" Jan 22 07:25:19 crc kubenswrapper[4933]: I0122 07:25:19.761457 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/04576677-4607-48ab-b786-a763b254ad3f-operator-scripts\") pod \"04576677-4607-48ab-b786-a763b254ad3f\" (UID: \"04576677-4607-48ab-b786-a763b254ad3f\") " Jan 22 07:25:19 crc kubenswrapper[4933]: I0122 07:25:19.761620 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8ssz9\" (UniqueName: \"kubernetes.io/projected/04576677-4607-48ab-b786-a763b254ad3f-kube-api-access-8ssz9\") pod \"04576677-4607-48ab-b786-a763b254ad3f\" (UID: \"04576677-4607-48ab-b786-a763b254ad3f\") " Jan 22 07:25:19 crc kubenswrapper[4933]: I0122 07:25:19.762173 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/04576677-4607-48ab-b786-a763b254ad3f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "04576677-4607-48ab-b786-a763b254ad3f" (UID: "04576677-4607-48ab-b786-a763b254ad3f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:25:19 crc kubenswrapper[4933]: I0122 07:25:19.766559 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/04576677-4607-48ab-b786-a763b254ad3f-kube-api-access-8ssz9" (OuterVolumeSpecName: "kube-api-access-8ssz9") pod "04576677-4607-48ab-b786-a763b254ad3f" (UID: "04576677-4607-48ab-b786-a763b254ad3f"). InnerVolumeSpecName "kube-api-access-8ssz9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:25:19 crc kubenswrapper[4933]: I0122 07:25:19.863966 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8ssz9\" (UniqueName: \"kubernetes.io/projected/04576677-4607-48ab-b786-a763b254ad3f-kube-api-access-8ssz9\") on node \"crc\" DevicePath \"\"" Jan 22 07:25:19 crc kubenswrapper[4933]: I0122 07:25:19.864286 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/04576677-4607-48ab-b786-a763b254ad3f-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:25:20 crc kubenswrapper[4933]: I0122 07:25:20.263259 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-persistence-db-create-tcfnc" Jan 22 07:25:20 crc kubenswrapper[4933]: I0122 07:25:20.263306 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-persistence-db-create-tcfnc" event={"ID":"04576677-4607-48ab-b786-a763b254ad3f","Type":"ContainerDied","Data":"5e06cddaf0dce044cc8cad30393a6aeebbedad0bfc3dc91e8934cb4108531bce"} Jan 22 07:25:20 crc kubenswrapper[4933]: I0122 07:25:20.263333 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5e06cddaf0dce044cc8cad30393a6aeebbedad0bfc3dc91e8934cb4108531bce" Jan 22 07:25:20 crc kubenswrapper[4933]: I0122 07:25:20.604391 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-c826-account-create-update-2pjwm" Jan 22 07:25:20 crc kubenswrapper[4933]: I0122 07:25:20.782238 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a8c8cb03-9525-41d1-b307-badc66ef535f-operator-scripts\") pod \"a8c8cb03-9525-41d1-b307-badc66ef535f\" (UID: \"a8c8cb03-9525-41d1-b307-badc66ef535f\") " Jan 22 07:25:20 crc kubenswrapper[4933]: I0122 07:25:20.782300 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b857z\" (UniqueName: \"kubernetes.io/projected/a8c8cb03-9525-41d1-b307-badc66ef535f-kube-api-access-b857z\") pod \"a8c8cb03-9525-41d1-b307-badc66ef535f\" (UID: \"a8c8cb03-9525-41d1-b307-badc66ef535f\") " Jan 22 07:25:20 crc kubenswrapper[4933]: I0122 07:25:20.782672 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a8c8cb03-9525-41d1-b307-badc66ef535f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a8c8cb03-9525-41d1-b307-badc66ef535f" (UID: "a8c8cb03-9525-41d1-b307-badc66ef535f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:25:20 crc kubenswrapper[4933]: I0122 07:25:20.792399 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a8c8cb03-9525-41d1-b307-badc66ef535f-kube-api-access-b857z" (OuterVolumeSpecName: "kube-api-access-b857z") pod "a8c8cb03-9525-41d1-b307-badc66ef535f" (UID: "a8c8cb03-9525-41d1-b307-badc66ef535f"). InnerVolumeSpecName "kube-api-access-b857z". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:25:20 crc kubenswrapper[4933]: I0122 07:25:20.884089 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a8c8cb03-9525-41d1-b307-badc66ef535f-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:25:20 crc kubenswrapper[4933]: I0122 07:25:20.884128 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b857z\" (UniqueName: \"kubernetes.io/projected/a8c8cb03-9525-41d1-b307-badc66ef535f-kube-api-access-b857z\") on node \"crc\" DevicePath \"\"" Jan 22 07:25:21 crc kubenswrapper[4933]: I0122 07:25:21.276662 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-c826-account-create-update-2pjwm" event={"ID":"a8c8cb03-9525-41d1-b307-badc66ef535f","Type":"ContainerDied","Data":"2e1e20addad052dc46985ce684c9c47dd235bc8385a43aa8e29d8a2e867be70e"} Jan 22 07:25:21 crc kubenswrapper[4933]: I0122 07:25:21.276710 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2e1e20addad052dc46985ce684c9c47dd235bc8385a43aa8e29d8a2e867be70e" Jan 22 07:25:21 crc kubenswrapper[4933]: I0122 07:25:21.276727 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-c826-account-create-update-2pjwm"
Jan 22 07:25:23 crc kubenswrapper[4933]: I0122 07:25:23.868923 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-api-d6c5fd9f-kftw2"]
Jan 22 07:25:23 crc kubenswrapper[4933]: E0122 07:25:23.869747 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04576677-4607-48ab-b786-a763b254ad3f" containerName="mariadb-database-create"
Jan 22 07:25:23 crc kubenswrapper[4933]: I0122 07:25:23.869767 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="04576677-4607-48ab-b786-a763b254ad3f" containerName="mariadb-database-create"
Jan 22 07:25:23 crc kubenswrapper[4933]: E0122 07:25:23.869789 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8c8cb03-9525-41d1-b307-badc66ef535f" containerName="mariadb-account-create-update"
Jan 22 07:25:23 crc kubenswrapper[4933]: I0122 07:25:23.869797 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8c8cb03-9525-41d1-b307-badc66ef535f" containerName="mariadb-account-create-update"
Jan 22 07:25:23 crc kubenswrapper[4933]: I0122 07:25:23.870055 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="04576677-4607-48ab-b786-a763b254ad3f" containerName="mariadb-database-create"
Jan 22 07:25:23 crc kubenswrapper[4933]: I0122 07:25:23.870120 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8c8cb03-9525-41d1-b307-badc66ef535f" containerName="mariadb-account-create-update"
Jan 22 07:25:23 crc kubenswrapper[4933]: I0122 07:25:23.871846 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-api-d6c5fd9f-kftw2"
Jan 22 07:25:23 crc kubenswrapper[4933]: I0122 07:25:23.875915 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-octavia-ovndbs"
Jan 22 07:25:23 crc kubenswrapper[4933]: I0122 07:25:23.876306 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-api-scripts"
Jan 22 07:25:23 crc kubenswrapper[4933]: I0122 07:25:23.877971 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-api-config-data"
Jan 22 07:25:23 crc kubenswrapper[4933]: I0122 07:25:23.878171 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-octavia-dockercfg-tvngh"
Jan 22 07:25:23 crc kubenswrapper[4933]: I0122 07:25:23.895205 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-api-d6c5fd9f-kftw2"]
Jan 22 07:25:24 crc kubenswrapper[4933]: I0122 07:25:24.053909 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/f10a3846-d1b2-4d43-bd53-b005c791e9c1-config-data-merged\") pod \"octavia-api-d6c5fd9f-kftw2\" (UID: \"f10a3846-d1b2-4d43-bd53-b005c791e9c1\") " pod="openstack/octavia-api-d6c5fd9f-kftw2"
Jan 22 07:25:24 crc kubenswrapper[4933]: I0122 07:25:24.054048 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/f10a3846-d1b2-4d43-bd53-b005c791e9c1-octavia-run\") pod \"octavia-api-d6c5fd9f-kftw2\" (UID: \"f10a3846-d1b2-4d43-bd53-b005c791e9c1\") " pod="openstack/octavia-api-d6c5fd9f-kftw2"
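The cpu_manager.go:410 / memory_manager.go:354 RemoveStaleState entries fire while octavia-api-d6c5fd9f-kftw2 is being admitted: before computing assignments for the new pod, the resource managers purge checkpointed state belonging to containers of pods that no longer exist, here the two finished mariadb create jobs (UIDs 04576677-... and a8c8cb03-...). A toy illustration of that bookkeeping, with invented types rather than the kubelet's actual state structures:

    package main

    import "fmt"

    // staleStateCleaner mimics the bookkeeping visible in the log: a checkpoint
    // of per-container assignments, purged of pods the kubelet no longer knows about.
    type staleStateCleaner struct {
        // podUID -> containerName -> opaque assignment (CPU set, memory blocks, ...)
        assignments map[string]map[string]string
    }

    // RemoveStaleState drops every entry whose pod is not in activePods, logging
    // each removal the way cpu_manager.go:410 / memory_manager.go:354 do.
    func (c *staleStateCleaner) RemoveStaleState(activePods map[string]bool) {
        for podUID, containers := range c.assignments {
            if activePods[podUID] {
                continue
            }
            for containerName := range containers {
                fmt.Printf("RemoveStaleState: removing container podUID=%q containerName=%q\n",
                    podUID, containerName)
            }
            delete(c.assignments, podUID)
        }
    }

    func main() {
        c := &staleStateCleaner{assignments: map[string]map[string]string{
            // UIDs of the two completed jobs seen in the log.
            "04576677-4607-48ab-b786-a763b254ad3f": {"mariadb-database-create": "cpuset 0-3"},
            "a8c8cb03-9525-41d1-b307-badc66ef535f": {"mariadb-account-create-update": "cpuset 0-3"},
        }}
        // Neither job pod is active anymore when octavia-api is admitted.
        c.RemoveStaleState(map[string]bool{"f10a3846-d1b2-4d43-bd53-b005c791e9c1": true})
    }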
\"kubernetes.io/secret/f10a3846-d1b2-4d43-bd53-b005c791e9c1-combined-ca-bundle\") pod \"octavia-api-d6c5fd9f-kftw2\" (UID: \"f10a3846-d1b2-4d43-bd53-b005c791e9c1\") " pod="openstack/octavia-api-d6c5fd9f-kftw2" Jan 22 07:25:24 crc kubenswrapper[4933]: I0122 07:25:24.054277 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f10a3846-d1b2-4d43-bd53-b005c791e9c1-ovndb-tls-certs\") pod \"octavia-api-d6c5fd9f-kftw2\" (UID: \"f10a3846-d1b2-4d43-bd53-b005c791e9c1\") " pod="openstack/octavia-api-d6c5fd9f-kftw2" Jan 22 07:25:24 crc kubenswrapper[4933]: I0122 07:25:24.054341 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f10a3846-d1b2-4d43-bd53-b005c791e9c1-config-data\") pod \"octavia-api-d6c5fd9f-kftw2\" (UID: \"f10a3846-d1b2-4d43-bd53-b005c791e9c1\") " pod="openstack/octavia-api-d6c5fd9f-kftw2" Jan 22 07:25:24 crc kubenswrapper[4933]: I0122 07:25:24.054404 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f10a3846-d1b2-4d43-bd53-b005c791e9c1-scripts\") pod \"octavia-api-d6c5fd9f-kftw2\" (UID: \"f10a3846-d1b2-4d43-bd53-b005c791e9c1\") " pod="openstack/octavia-api-d6c5fd9f-kftw2" Jan 22 07:25:24 crc kubenswrapper[4933]: I0122 07:25:24.156570 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/f10a3846-d1b2-4d43-bd53-b005c791e9c1-config-data-merged\") pod \"octavia-api-d6c5fd9f-kftw2\" (UID: \"f10a3846-d1b2-4d43-bd53-b005c791e9c1\") " pod="openstack/octavia-api-d6c5fd9f-kftw2" Jan 22 07:25:24 crc kubenswrapper[4933]: I0122 07:25:24.156650 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/f10a3846-d1b2-4d43-bd53-b005c791e9c1-octavia-run\") pod \"octavia-api-d6c5fd9f-kftw2\" (UID: \"f10a3846-d1b2-4d43-bd53-b005c791e9c1\") " pod="openstack/octavia-api-d6c5fd9f-kftw2" Jan 22 07:25:24 crc kubenswrapper[4933]: I0122 07:25:24.156751 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f10a3846-d1b2-4d43-bd53-b005c791e9c1-combined-ca-bundle\") pod \"octavia-api-d6c5fd9f-kftw2\" (UID: \"f10a3846-d1b2-4d43-bd53-b005c791e9c1\") " pod="openstack/octavia-api-d6c5fd9f-kftw2" Jan 22 07:25:24 crc kubenswrapper[4933]: I0122 07:25:24.156780 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f10a3846-d1b2-4d43-bd53-b005c791e9c1-ovndb-tls-certs\") pod \"octavia-api-d6c5fd9f-kftw2\" (UID: \"f10a3846-d1b2-4d43-bd53-b005c791e9c1\") " pod="openstack/octavia-api-d6c5fd9f-kftw2" Jan 22 07:25:24 crc kubenswrapper[4933]: I0122 07:25:24.156818 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f10a3846-d1b2-4d43-bd53-b005c791e9c1-config-data\") pod \"octavia-api-d6c5fd9f-kftw2\" (UID: \"f10a3846-d1b2-4d43-bd53-b005c791e9c1\") " pod="openstack/octavia-api-d6c5fd9f-kftw2" Jan 22 07:25:24 crc kubenswrapper[4933]: I0122 07:25:24.156841 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/f10a3846-d1b2-4d43-bd53-b005c791e9c1-scripts\") pod \"octavia-api-d6c5fd9f-kftw2\" (UID: \"f10a3846-d1b2-4d43-bd53-b005c791e9c1\") " pod="openstack/octavia-api-d6c5fd9f-kftw2" Jan 22 07:25:24 crc kubenswrapper[4933]: I0122 07:25:24.159863 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/f10a3846-d1b2-4d43-bd53-b005c791e9c1-config-data-merged\") pod \"octavia-api-d6c5fd9f-kftw2\" (UID: \"f10a3846-d1b2-4d43-bd53-b005c791e9c1\") " pod="openstack/octavia-api-d6c5fd9f-kftw2" Jan 22 07:25:24 crc kubenswrapper[4933]: I0122 07:25:24.160180 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/f10a3846-d1b2-4d43-bd53-b005c791e9c1-octavia-run\") pod \"octavia-api-d6c5fd9f-kftw2\" (UID: \"f10a3846-d1b2-4d43-bd53-b005c791e9c1\") " pod="openstack/octavia-api-d6c5fd9f-kftw2" Jan 22 07:25:24 crc kubenswrapper[4933]: I0122 07:25:24.163528 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f10a3846-d1b2-4d43-bd53-b005c791e9c1-scripts\") pod \"octavia-api-d6c5fd9f-kftw2\" (UID: \"f10a3846-d1b2-4d43-bd53-b005c791e9c1\") " pod="openstack/octavia-api-d6c5fd9f-kftw2" Jan 22 07:25:24 crc kubenswrapper[4933]: I0122 07:25:24.164373 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f10a3846-d1b2-4d43-bd53-b005c791e9c1-combined-ca-bundle\") pod \"octavia-api-d6c5fd9f-kftw2\" (UID: \"f10a3846-d1b2-4d43-bd53-b005c791e9c1\") " pod="openstack/octavia-api-d6c5fd9f-kftw2" Jan 22 07:25:24 crc kubenswrapper[4933]: I0122 07:25:24.165138 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f10a3846-d1b2-4d43-bd53-b005c791e9c1-ovndb-tls-certs\") pod \"octavia-api-d6c5fd9f-kftw2\" (UID: \"f10a3846-d1b2-4d43-bd53-b005c791e9c1\") " pod="openstack/octavia-api-d6c5fd9f-kftw2" Jan 22 07:25:24 crc kubenswrapper[4933]: I0122 07:25:24.178352 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f10a3846-d1b2-4d43-bd53-b005c791e9c1-config-data\") pod \"octavia-api-d6c5fd9f-kftw2\" (UID: \"f10a3846-d1b2-4d43-bd53-b005c791e9c1\") " pod="openstack/octavia-api-d6c5fd9f-kftw2" Jan 22 07:25:24 crc kubenswrapper[4933]: I0122 07:25:24.202893 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-api-d6c5fd9f-kftw2"
Jan 22 07:25:24 crc kubenswrapper[4933]: I0122 07:25:24.708951 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-api-d6c5fd9f-kftw2"]
Jan 22 07:25:24 crc kubenswrapper[4933]: I0122 07:25:24.719092 4933 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 22 07:25:25 crc kubenswrapper[4933]: I0122 07:25:25.347069 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-d6c5fd9f-kftw2" event={"ID":"f10a3846-d1b2-4d43-bd53-b005c791e9c1","Type":"ContainerStarted","Data":"e03f579a788db902bd3d81f05243aded0ba5e9a2068deaf4680701533f820780"}
Jan 22 07:25:34 crc kubenswrapper[4933]: I0122 07:25:34.440217 4933 generic.go:334] "Generic (PLEG): container finished" podID="f10a3846-d1b2-4d43-bd53-b005c791e9c1" containerID="a61a7fbe224fd798ac818dde3fcf7034cff453fb9e3830e32647ea5ea480bcbb" exitCode=0
Jan 22 07:25:34 crc kubenswrapper[4933]: I0122 07:25:34.440264 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-d6c5fd9f-kftw2" event={"ID":"f10a3846-d1b2-4d43-bd53-b005c791e9c1","Type":"ContainerDied","Data":"a61a7fbe224fd798ac818dde3fcf7034cff453fb9e3830e32647ea5ea480bcbb"}
Jan 22 07:25:35 crc kubenswrapper[4933]: I0122 07:25:35.452409 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-d6c5fd9f-kftw2" event={"ID":"f10a3846-d1b2-4d43-bd53-b005c791e9c1","Type":"ContainerStarted","Data":"5b4cf48304839e1ebf882c53ca8142875f24359ea8380ddefd58cd54995ae44b"}
Jan 22 07:25:35 crc kubenswrapper[4933]: I0122 07:25:35.452808 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-api-d6c5fd9f-kftw2"
Jan 22 07:25:35 crc kubenswrapper[4933]: I0122 07:25:35.452829 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-api-d6c5fd9f-kftw2"
Jan 22 07:25:35 crc kubenswrapper[4933]: I0122 07:25:35.452841 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-d6c5fd9f-kftw2" event={"ID":"f10a3846-d1b2-4d43-bd53-b005c791e9c1","Type":"ContainerStarted","Data":"c6725f03766c525d9a1a757c71b3ffcda7688f696f77c8cc794e879f820b7d13"}
Jan 22 07:25:35 crc kubenswrapper[4933]: I0122 07:25:35.477567 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-api-d6c5fd9f-kftw2" podStartSLOduration=3.934202173 podStartE2EDuration="12.477541791s" podCreationTimestamp="2026-01-22 07:25:23 +0000 UTC" firstStartedPulling="2026-01-22 07:25:24.718787523 +0000 UTC m=+5972.555912876" lastFinishedPulling="2026-01-22 07:25:33.262127141 +0000 UTC m=+5981.099252494" observedRunningTime="2026-01-22 07:25:35.469218819 +0000 UTC m=+5983.306344182" watchObservedRunningTime="2026-01-22 07:25:35.477541791 +0000 UTC m=+5983.314667144"
Jan 22 07:25:40 crc kubenswrapper[4933]: I0122 07:25:40.943388 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
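Unlike the earlier tracker lines, where firstStartedPulling/lastFinishedPulling sit at the zero time and podStartSLOduration equals podStartE2EDuration, octavia-api really pulled an image, and the numbers are self-consistent: the SLO figure is the end-to-end startup minus the pull window, 12.477541791 - (33.262127141 - 24.718787523) = 3.934202173 s, measured from podCreationTimestamp to watchObservedRunningTime. A few lines of Go reproducing the arithmetic from the timestamps in that line:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
        parse := func(s string) time.Time {
            t, err := time.Parse(layout, s)
            if err != nil {
                panic(err)
            }
            return t
        }
        // Values copied from the pod_startup_latency_tracker line above.
        created := parse("2026-01-22 07:25:23 +0000 UTC")
        firstPull := parse("2026-01-22 07:25:24.718787523 +0000 UTC")
        lastPull := parse("2026-01-22 07:25:33.262127141 +0000 UTC")
        running := parse("2026-01-22 07:25:35.477541791 +0000 UTC") // watchObservedRunningTime

        e2e := running.Sub(created)          // podStartE2EDuration (12.477541791s)
        slo := e2e - lastPull.Sub(firstPull) // pull window excluded -> 3.934202173s
        fmt.Println("e2e:", e2e, "slo:", slo)
    }

The earlier entries for ovn-controller-qxtvn and ovn-controller-ovs-ctqtw reduce to podStartSLOduration = podStartE2EDuration because no image pull happened there.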
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 07:25:43 crc kubenswrapper[4933]: I0122 07:25:43.347200 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-qxtvn" Jan 22 07:25:43 crc kubenswrapper[4933]: I0122 07:25:43.396325 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-ctqtw" Jan 22 07:25:43 crc kubenswrapper[4933]: I0122 07:25:43.422016 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-ctqtw" Jan 22 07:25:43 crc kubenswrapper[4933]: I0122 07:25:43.600639 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-qxtvn-config-rt2s9"] Jan 22 07:25:43 crc kubenswrapper[4933]: I0122 07:25:43.602013 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-qxtvn-config-rt2s9" Jan 22 07:25:43 crc kubenswrapper[4933]: I0122 07:25:43.610542 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-qxtvn-config-rt2s9"] Jan 22 07:25:43 crc kubenswrapper[4933]: I0122 07:25:43.617281 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Jan 22 07:25:43 crc kubenswrapper[4933]: I0122 07:25:43.773495 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/f16938a6-627b-46b5-82ce-2ac0f500ec93-var-run-ovn\") pod \"ovn-controller-qxtvn-config-rt2s9\" (UID: \"f16938a6-627b-46b5-82ce-2ac0f500ec93\") " pod="openstack/ovn-controller-qxtvn-config-rt2s9" Jan 22 07:25:43 crc kubenswrapper[4933]: I0122 07:25:43.773584 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/f16938a6-627b-46b5-82ce-2ac0f500ec93-var-run\") pod \"ovn-controller-qxtvn-config-rt2s9\" (UID: \"f16938a6-627b-46b5-82ce-2ac0f500ec93\") " pod="openstack/ovn-controller-qxtvn-config-rt2s9" Jan 22 07:25:43 crc kubenswrapper[4933]: I0122 07:25:43.773674 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/f16938a6-627b-46b5-82ce-2ac0f500ec93-var-log-ovn\") pod \"ovn-controller-qxtvn-config-rt2s9\" (UID: \"f16938a6-627b-46b5-82ce-2ac0f500ec93\") " pod="openstack/ovn-controller-qxtvn-config-rt2s9" Jan 22 07:25:43 crc kubenswrapper[4933]: I0122 07:25:43.773702 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zhctn\" (UniqueName: \"kubernetes.io/projected/f16938a6-627b-46b5-82ce-2ac0f500ec93-kube-api-access-zhctn\") pod \"ovn-controller-qxtvn-config-rt2s9\" (UID: \"f16938a6-627b-46b5-82ce-2ac0f500ec93\") " pod="openstack/ovn-controller-qxtvn-config-rt2s9" Jan 22 07:25:43 crc kubenswrapper[4933]: I0122 07:25:43.773730 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f16938a6-627b-46b5-82ce-2ac0f500ec93-scripts\") pod \"ovn-controller-qxtvn-config-rt2s9\" (UID: \"f16938a6-627b-46b5-82ce-2ac0f500ec93\") " pod="openstack/ovn-controller-qxtvn-config-rt2s9" Jan 22 07:25:43 crc kubenswrapper[4933]: I0122 07:25:43.773765 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/f16938a6-627b-46b5-82ce-2ac0f500ec93-additional-scripts\") pod \"ovn-controller-qxtvn-config-rt2s9\" (UID: \"f16938a6-627b-46b5-82ce-2ac0f500ec93\") " pod="openstack/ovn-controller-qxtvn-config-rt2s9" Jan 22 07:25:43 crc kubenswrapper[4933]: I0122 07:25:43.875868 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/f16938a6-627b-46b5-82ce-2ac0f500ec93-var-run-ovn\") pod \"ovn-controller-qxtvn-config-rt2s9\" (UID: \"f16938a6-627b-46b5-82ce-2ac0f500ec93\") " pod="openstack/ovn-controller-qxtvn-config-rt2s9" Jan 22 07:25:43 crc kubenswrapper[4933]: I0122 07:25:43.876001 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/f16938a6-627b-46b5-82ce-2ac0f500ec93-var-run\") pod \"ovn-controller-qxtvn-config-rt2s9\" (UID: \"f16938a6-627b-46b5-82ce-2ac0f500ec93\") " pod="openstack/ovn-controller-qxtvn-config-rt2s9" Jan 22 07:25:43 crc kubenswrapper[4933]: I0122 07:25:43.876118 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/f16938a6-627b-46b5-82ce-2ac0f500ec93-var-log-ovn\") pod \"ovn-controller-qxtvn-config-rt2s9\" (UID: \"f16938a6-627b-46b5-82ce-2ac0f500ec93\") " pod="openstack/ovn-controller-qxtvn-config-rt2s9" Jan 22 07:25:43 crc kubenswrapper[4933]: I0122 07:25:43.876166 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zhctn\" (UniqueName: \"kubernetes.io/projected/f16938a6-627b-46b5-82ce-2ac0f500ec93-kube-api-access-zhctn\") pod \"ovn-controller-qxtvn-config-rt2s9\" (UID: \"f16938a6-627b-46b5-82ce-2ac0f500ec93\") " pod="openstack/ovn-controller-qxtvn-config-rt2s9" Jan 22 07:25:43 crc kubenswrapper[4933]: I0122 07:25:43.876206 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f16938a6-627b-46b5-82ce-2ac0f500ec93-scripts\") pod \"ovn-controller-qxtvn-config-rt2s9\" (UID: \"f16938a6-627b-46b5-82ce-2ac0f500ec93\") " pod="openstack/ovn-controller-qxtvn-config-rt2s9" Jan 22 07:25:43 crc kubenswrapper[4933]: I0122 07:25:43.876262 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/f16938a6-627b-46b5-82ce-2ac0f500ec93-additional-scripts\") pod \"ovn-controller-qxtvn-config-rt2s9\" (UID: \"f16938a6-627b-46b5-82ce-2ac0f500ec93\") " pod="openstack/ovn-controller-qxtvn-config-rt2s9" Jan 22 07:25:43 crc kubenswrapper[4933]: I0122 07:25:43.876320 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/f16938a6-627b-46b5-82ce-2ac0f500ec93-var-run-ovn\") pod \"ovn-controller-qxtvn-config-rt2s9\" (UID: \"f16938a6-627b-46b5-82ce-2ac0f500ec93\") " pod="openstack/ovn-controller-qxtvn-config-rt2s9" Jan 22 07:25:43 crc kubenswrapper[4933]: I0122 07:25:43.876340 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/f16938a6-627b-46b5-82ce-2ac0f500ec93-var-run\") pod \"ovn-controller-qxtvn-config-rt2s9\" (UID: \"f16938a6-627b-46b5-82ce-2ac0f500ec93\") " pod="openstack/ovn-controller-qxtvn-config-rt2s9" Jan 22 07:25:43 crc kubenswrapper[4933]: I0122 07:25:43.876407 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" 
(UniqueName: \"kubernetes.io/host-path/f16938a6-627b-46b5-82ce-2ac0f500ec93-var-log-ovn\") pod \"ovn-controller-qxtvn-config-rt2s9\" (UID: \"f16938a6-627b-46b5-82ce-2ac0f500ec93\") " pod="openstack/ovn-controller-qxtvn-config-rt2s9" Jan 22 07:25:43 crc kubenswrapper[4933]: I0122 07:25:43.877125 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/f16938a6-627b-46b5-82ce-2ac0f500ec93-additional-scripts\") pod \"ovn-controller-qxtvn-config-rt2s9\" (UID: \"f16938a6-627b-46b5-82ce-2ac0f500ec93\") " pod="openstack/ovn-controller-qxtvn-config-rt2s9" Jan 22 07:25:43 crc kubenswrapper[4933]: I0122 07:25:43.878160 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f16938a6-627b-46b5-82ce-2ac0f500ec93-scripts\") pod \"ovn-controller-qxtvn-config-rt2s9\" (UID: \"f16938a6-627b-46b5-82ce-2ac0f500ec93\") " pod="openstack/ovn-controller-qxtvn-config-rt2s9" Jan 22 07:25:43 crc kubenswrapper[4933]: I0122 07:25:43.902680 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zhctn\" (UniqueName: \"kubernetes.io/projected/f16938a6-627b-46b5-82ce-2ac0f500ec93-kube-api-access-zhctn\") pod \"ovn-controller-qxtvn-config-rt2s9\" (UID: \"f16938a6-627b-46b5-82ce-2ac0f500ec93\") " pod="openstack/ovn-controller-qxtvn-config-rt2s9" Jan 22 07:25:44 crc kubenswrapper[4933]: I0122 07:25:44.017941 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-qxtvn-config-rt2s9" Jan 22 07:25:44 crc kubenswrapper[4933]: I0122 07:25:44.140887 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-api-d6c5fd9f-kftw2" Jan 22 07:25:44 crc kubenswrapper[4933]: I0122 07:25:44.326361 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-api-d6c5fd9f-kftw2" Jan 22 07:25:44 crc kubenswrapper[4933]: I0122 07:25:44.536695 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-qxtvn-config-rt2s9"] Jan 22 07:25:44 crc kubenswrapper[4933]: I0122 07:25:44.576529 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-qxtvn-config-rt2s9" event={"ID":"f16938a6-627b-46b5-82ce-2ac0f500ec93","Type":"ContainerStarted","Data":"d13be18bba1cf7f627e16d2a78d8d1b444c3928972dc1a24616a680cf6aa9bf3"} Jan 22 07:25:45 crc kubenswrapper[4933]: I0122 07:25:45.586777 4933 generic.go:334] "Generic (PLEG): container finished" podID="f16938a6-627b-46b5-82ce-2ac0f500ec93" containerID="f9c77645df4c0d171284c077c8d127a11e7c9a134e382b9c2a54b517e00e863b" exitCode=0 Jan 22 07:25:45 crc kubenswrapper[4933]: I0122 07:25:45.588023 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-qxtvn-config-rt2s9" event={"ID":"f16938a6-627b-46b5-82ce-2ac0f500ec93","Type":"ContainerDied","Data":"f9c77645df4c0d171284c077c8d127a11e7c9a134e382b9c2a54b517e00e863b"} Jan 22 07:25:47 crc kubenswrapper[4933]: I0122 07:25:47.020975 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-qxtvn-config-rt2s9" Jan 22 07:25:47 crc kubenswrapper[4933]: I0122 07:25:47.144900 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/f16938a6-627b-46b5-82ce-2ac0f500ec93-var-log-ovn\") pod \"f16938a6-627b-46b5-82ce-2ac0f500ec93\" (UID: \"f16938a6-627b-46b5-82ce-2ac0f500ec93\") " Jan 22 07:25:47 crc kubenswrapper[4933]: I0122 07:25:47.144971 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zhctn\" (UniqueName: \"kubernetes.io/projected/f16938a6-627b-46b5-82ce-2ac0f500ec93-kube-api-access-zhctn\") pod \"f16938a6-627b-46b5-82ce-2ac0f500ec93\" (UID: \"f16938a6-627b-46b5-82ce-2ac0f500ec93\") " Jan 22 07:25:47 crc kubenswrapper[4933]: I0122 07:25:47.145132 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/f16938a6-627b-46b5-82ce-2ac0f500ec93-var-run\") pod \"f16938a6-627b-46b5-82ce-2ac0f500ec93\" (UID: \"f16938a6-627b-46b5-82ce-2ac0f500ec93\") " Jan 22 07:25:47 crc kubenswrapper[4933]: I0122 07:25:47.145147 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f16938a6-627b-46b5-82ce-2ac0f500ec93-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "f16938a6-627b-46b5-82ce-2ac0f500ec93" (UID: "f16938a6-627b-46b5-82ce-2ac0f500ec93"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 07:25:47 crc kubenswrapper[4933]: I0122 07:25:47.145228 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/f16938a6-627b-46b5-82ce-2ac0f500ec93-additional-scripts\") pod \"f16938a6-627b-46b5-82ce-2ac0f500ec93\" (UID: \"f16938a6-627b-46b5-82ce-2ac0f500ec93\") " Jan 22 07:25:47 crc kubenswrapper[4933]: I0122 07:25:47.145274 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f16938a6-627b-46b5-82ce-2ac0f500ec93-var-run" (OuterVolumeSpecName: "var-run") pod "f16938a6-627b-46b5-82ce-2ac0f500ec93" (UID: "f16938a6-627b-46b5-82ce-2ac0f500ec93"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 07:25:47 crc kubenswrapper[4933]: I0122 07:25:47.145302 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/f16938a6-627b-46b5-82ce-2ac0f500ec93-var-run-ovn\") pod \"f16938a6-627b-46b5-82ce-2ac0f500ec93\" (UID: \"f16938a6-627b-46b5-82ce-2ac0f500ec93\") " Jan 22 07:25:47 crc kubenswrapper[4933]: I0122 07:25:47.145335 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f16938a6-627b-46b5-82ce-2ac0f500ec93-scripts\") pod \"f16938a6-627b-46b5-82ce-2ac0f500ec93\" (UID: \"f16938a6-627b-46b5-82ce-2ac0f500ec93\") " Jan 22 07:25:47 crc kubenswrapper[4933]: I0122 07:25:47.145381 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f16938a6-627b-46b5-82ce-2ac0f500ec93-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "f16938a6-627b-46b5-82ce-2ac0f500ec93" (UID: "f16938a6-627b-46b5-82ce-2ac0f500ec93"). InnerVolumeSpecName "var-run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 07:25:47 crc kubenswrapper[4933]: I0122 07:25:47.146295 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f16938a6-627b-46b5-82ce-2ac0f500ec93-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "f16938a6-627b-46b5-82ce-2ac0f500ec93" (UID: "f16938a6-627b-46b5-82ce-2ac0f500ec93"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:25:47 crc kubenswrapper[4933]: I0122 07:25:47.146469 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f16938a6-627b-46b5-82ce-2ac0f500ec93-scripts" (OuterVolumeSpecName: "scripts") pod "f16938a6-627b-46b5-82ce-2ac0f500ec93" (UID: "f16938a6-627b-46b5-82ce-2ac0f500ec93"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:25:47 crc kubenswrapper[4933]: I0122 07:25:47.147000 4933 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/f16938a6-627b-46b5-82ce-2ac0f500ec93-additional-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:25:47 crc kubenswrapper[4933]: I0122 07:25:47.147021 4933 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/f16938a6-627b-46b5-82ce-2ac0f500ec93-var-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 22 07:25:47 crc kubenswrapper[4933]: I0122 07:25:47.147030 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f16938a6-627b-46b5-82ce-2ac0f500ec93-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:25:47 crc kubenswrapper[4933]: I0122 07:25:47.147040 4933 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/f16938a6-627b-46b5-82ce-2ac0f500ec93-var-log-ovn\") on node \"crc\" DevicePath \"\"" Jan 22 07:25:47 crc kubenswrapper[4933]: I0122 07:25:47.147051 4933 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/f16938a6-627b-46b5-82ce-2ac0f500ec93-var-run\") on node \"crc\" DevicePath \"\"" Jan 22 07:25:47 crc kubenswrapper[4933]: I0122 07:25:47.163962 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f16938a6-627b-46b5-82ce-2ac0f500ec93-kube-api-access-zhctn" (OuterVolumeSpecName: "kube-api-access-zhctn") pod "f16938a6-627b-46b5-82ce-2ac0f500ec93" (UID: "f16938a6-627b-46b5-82ce-2ac0f500ec93"). InnerVolumeSpecName "kube-api-access-zhctn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:25:47 crc kubenswrapper[4933]: I0122 07:25:47.250277 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zhctn\" (UniqueName: \"kubernetes.io/projected/f16938a6-627b-46b5-82ce-2ac0f500ec93-kube-api-access-zhctn\") on node \"crc\" DevicePath \"\"" Jan 22 07:25:47 crc kubenswrapper[4933]: I0122 07:25:47.605172 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-qxtvn-config-rt2s9" event={"ID":"f16938a6-627b-46b5-82ce-2ac0f500ec93","Type":"ContainerDied","Data":"d13be18bba1cf7f627e16d2a78d8d1b444c3928972dc1a24616a680cf6aa9bf3"} Jan 22 07:25:47 crc kubenswrapper[4933]: I0122 07:25:47.605518 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d13be18bba1cf7f627e16d2a78d8d1b444c3928972dc1a24616a680cf6aa9bf3" Jan 22 07:25:47 crc kubenswrapper[4933]: I0122 07:25:47.605392 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-qxtvn-config-rt2s9" Jan 22 07:25:48 crc kubenswrapper[4933]: I0122 07:25:48.127273 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-qxtvn-config-rt2s9"] Jan 22 07:25:48 crc kubenswrapper[4933]: I0122 07:25:48.140960 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-qxtvn-config-rt2s9"] Jan 22 07:25:48 crc kubenswrapper[4933]: I0122 07:25:48.233507 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-qxtvn-config-27chd"] Jan 22 07:25:48 crc kubenswrapper[4933]: E0122 07:25:48.233927 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f16938a6-627b-46b5-82ce-2ac0f500ec93" containerName="ovn-config" Jan 22 07:25:48 crc kubenswrapper[4933]: I0122 07:25:48.233948 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="f16938a6-627b-46b5-82ce-2ac0f500ec93" containerName="ovn-config" Jan 22 07:25:48 crc kubenswrapper[4933]: I0122 07:25:48.234180 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="f16938a6-627b-46b5-82ce-2ac0f500ec93" containerName="ovn-config" Jan 22 07:25:48 crc kubenswrapper[4933]: I0122 07:25:48.234961 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-qxtvn-config-27chd" Jan 22 07:25:48 crc kubenswrapper[4933]: I0122 07:25:48.237758 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Jan 22 07:25:48 crc kubenswrapper[4933]: I0122 07:25:48.246803 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-qxtvn-config-27chd"] Jan 22 07:25:48 crc kubenswrapper[4933]: I0122 07:25:48.372928 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9a6d18c6-e74e-43f2-9159-f4c6cee92ff1-var-run\") pod \"ovn-controller-qxtvn-config-27chd\" (UID: \"9a6d18c6-e74e-43f2-9159-f4c6cee92ff1\") " pod="openstack/ovn-controller-qxtvn-config-27chd" Jan 22 07:25:48 crc kubenswrapper[4933]: I0122 07:25:48.373046 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/9a6d18c6-e74e-43f2-9159-f4c6cee92ff1-var-log-ovn\") pod \"ovn-controller-qxtvn-config-27chd\" (UID: \"9a6d18c6-e74e-43f2-9159-f4c6cee92ff1\") " pod="openstack/ovn-controller-qxtvn-config-27chd" Jan 22 07:25:48 crc kubenswrapper[4933]: I0122 07:25:48.373107 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/9a6d18c6-e74e-43f2-9159-f4c6cee92ff1-var-run-ovn\") pod \"ovn-controller-qxtvn-config-27chd\" (UID: \"9a6d18c6-e74e-43f2-9159-f4c6cee92ff1\") " pod="openstack/ovn-controller-qxtvn-config-27chd" Jan 22 07:25:48 crc kubenswrapper[4933]: I0122 07:25:48.373173 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9a6d18c6-e74e-43f2-9159-f4c6cee92ff1-scripts\") pod \"ovn-controller-qxtvn-config-27chd\" (UID: \"9a6d18c6-e74e-43f2-9159-f4c6cee92ff1\") " pod="openstack/ovn-controller-qxtvn-config-27chd" Jan 22 07:25:48 crc kubenswrapper[4933]: I0122 07:25:48.373197 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/9a6d18c6-e74e-43f2-9159-f4c6cee92ff1-additional-scripts\") pod \"ovn-controller-qxtvn-config-27chd\" (UID: \"9a6d18c6-e74e-43f2-9159-f4c6cee92ff1\") " pod="openstack/ovn-controller-qxtvn-config-27chd" Jan 22 07:25:48 crc kubenswrapper[4933]: I0122 07:25:48.373232 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b9wqm\" (UniqueName: \"kubernetes.io/projected/9a6d18c6-e74e-43f2-9159-f4c6cee92ff1-kube-api-access-b9wqm\") pod \"ovn-controller-qxtvn-config-27chd\" (UID: \"9a6d18c6-e74e-43f2-9159-f4c6cee92ff1\") " pod="openstack/ovn-controller-qxtvn-config-27chd" Jan 22 07:25:48 crc kubenswrapper[4933]: I0122 07:25:48.474397 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9a6d18c6-e74e-43f2-9159-f4c6cee92ff1-scripts\") pod \"ovn-controller-qxtvn-config-27chd\" (UID: \"9a6d18c6-e74e-43f2-9159-f4c6cee92ff1\") " pod="openstack/ovn-controller-qxtvn-config-27chd" Jan 22 07:25:48 crc kubenswrapper[4933]: I0122 07:25:48.474448 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: 
\"kubernetes.io/configmap/9a6d18c6-e74e-43f2-9159-f4c6cee92ff1-additional-scripts\") pod \"ovn-controller-qxtvn-config-27chd\" (UID: \"9a6d18c6-e74e-43f2-9159-f4c6cee92ff1\") " pod="openstack/ovn-controller-qxtvn-config-27chd" Jan 22 07:25:48 crc kubenswrapper[4933]: I0122 07:25:48.474492 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b9wqm\" (UniqueName: \"kubernetes.io/projected/9a6d18c6-e74e-43f2-9159-f4c6cee92ff1-kube-api-access-b9wqm\") pod \"ovn-controller-qxtvn-config-27chd\" (UID: \"9a6d18c6-e74e-43f2-9159-f4c6cee92ff1\") " pod="openstack/ovn-controller-qxtvn-config-27chd" Jan 22 07:25:48 crc kubenswrapper[4933]: I0122 07:25:48.474564 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9a6d18c6-e74e-43f2-9159-f4c6cee92ff1-var-run\") pod \"ovn-controller-qxtvn-config-27chd\" (UID: \"9a6d18c6-e74e-43f2-9159-f4c6cee92ff1\") " pod="openstack/ovn-controller-qxtvn-config-27chd" Jan 22 07:25:48 crc kubenswrapper[4933]: I0122 07:25:48.474640 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/9a6d18c6-e74e-43f2-9159-f4c6cee92ff1-var-log-ovn\") pod \"ovn-controller-qxtvn-config-27chd\" (UID: \"9a6d18c6-e74e-43f2-9159-f4c6cee92ff1\") " pod="openstack/ovn-controller-qxtvn-config-27chd" Jan 22 07:25:48 crc kubenswrapper[4933]: I0122 07:25:48.474672 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/9a6d18c6-e74e-43f2-9159-f4c6cee92ff1-var-run-ovn\") pod \"ovn-controller-qxtvn-config-27chd\" (UID: \"9a6d18c6-e74e-43f2-9159-f4c6cee92ff1\") " pod="openstack/ovn-controller-qxtvn-config-27chd" Jan 22 07:25:48 crc kubenswrapper[4933]: I0122 07:25:48.474956 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/9a6d18c6-e74e-43f2-9159-f4c6cee92ff1-var-run-ovn\") pod \"ovn-controller-qxtvn-config-27chd\" (UID: \"9a6d18c6-e74e-43f2-9159-f4c6cee92ff1\") " pod="openstack/ovn-controller-qxtvn-config-27chd" Jan 22 07:25:48 crc kubenswrapper[4933]: I0122 07:25:48.475024 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/9a6d18c6-e74e-43f2-9159-f4c6cee92ff1-var-log-ovn\") pod \"ovn-controller-qxtvn-config-27chd\" (UID: \"9a6d18c6-e74e-43f2-9159-f4c6cee92ff1\") " pod="openstack/ovn-controller-qxtvn-config-27chd" Jan 22 07:25:48 crc kubenswrapper[4933]: I0122 07:25:48.475030 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9a6d18c6-e74e-43f2-9159-f4c6cee92ff1-var-run\") pod \"ovn-controller-qxtvn-config-27chd\" (UID: \"9a6d18c6-e74e-43f2-9159-f4c6cee92ff1\") " pod="openstack/ovn-controller-qxtvn-config-27chd" Jan 22 07:25:48 crc kubenswrapper[4933]: I0122 07:25:48.476118 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/9a6d18c6-e74e-43f2-9159-f4c6cee92ff1-additional-scripts\") pod \"ovn-controller-qxtvn-config-27chd\" (UID: \"9a6d18c6-e74e-43f2-9159-f4c6cee92ff1\") " pod="openstack/ovn-controller-qxtvn-config-27chd" Jan 22 07:25:48 crc kubenswrapper[4933]: I0122 07:25:48.477981 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/9a6d18c6-e74e-43f2-9159-f4c6cee92ff1-scripts\") pod \"ovn-controller-qxtvn-config-27chd\" (UID: \"9a6d18c6-e74e-43f2-9159-f4c6cee92ff1\") " pod="openstack/ovn-controller-qxtvn-config-27chd" Jan 22 07:25:48 crc kubenswrapper[4933]: I0122 07:25:48.491865 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b9wqm\" (UniqueName: \"kubernetes.io/projected/9a6d18c6-e74e-43f2-9159-f4c6cee92ff1-kube-api-access-b9wqm\") pod \"ovn-controller-qxtvn-config-27chd\" (UID: \"9a6d18c6-e74e-43f2-9159-f4c6cee92ff1\") " pod="openstack/ovn-controller-qxtvn-config-27chd" Jan 22 07:25:48 crc kubenswrapper[4933]: I0122 07:25:48.511507 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f16938a6-627b-46b5-82ce-2ac0f500ec93" path="/var/lib/kubelet/pods/f16938a6-627b-46b5-82ce-2ac0f500ec93/volumes" Jan 22 07:25:48 crc kubenswrapper[4933]: I0122 07:25:48.553535 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-qxtvn-config-27chd" Jan 22 07:25:49 crc kubenswrapper[4933]: I0122 07:25:49.103188 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-qxtvn-config-27chd"] Jan 22 07:25:49 crc kubenswrapper[4933]: I0122 07:25:49.626358 4933 generic.go:334] "Generic (PLEG): container finished" podID="9a6d18c6-e74e-43f2-9159-f4c6cee92ff1" containerID="d3317c261af8a1169715f5844843b1cbb6549d86a511a7e28aab6eda8c5adbfb" exitCode=0 Jan 22 07:25:49 crc kubenswrapper[4933]: I0122 07:25:49.626414 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-qxtvn-config-27chd" event={"ID":"9a6d18c6-e74e-43f2-9159-f4c6cee92ff1","Type":"ContainerDied","Data":"d3317c261af8a1169715f5844843b1cbb6549d86a511a7e28aab6eda8c5adbfb"} Jan 22 07:25:49 crc kubenswrapper[4933]: I0122 07:25:49.626686 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-qxtvn-config-27chd" event={"ID":"9a6d18c6-e74e-43f2-9159-f4c6cee92ff1","Type":"ContainerStarted","Data":"364fca55b76f0478b55a552f8f50f169d76d6eadd4e98fe67bc56ac2725273b4"} Jan 22 07:25:51 crc kubenswrapper[4933]: I0122 07:25:51.006125 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-qxtvn-config-27chd" Jan 22 07:25:51 crc kubenswrapper[4933]: I0122 07:25:51.126024 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9a6d18c6-e74e-43f2-9159-f4c6cee92ff1-scripts\") pod \"9a6d18c6-e74e-43f2-9159-f4c6cee92ff1\" (UID: \"9a6d18c6-e74e-43f2-9159-f4c6cee92ff1\") " Jan 22 07:25:51 crc kubenswrapper[4933]: I0122 07:25:51.126504 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9a6d18c6-e74e-43f2-9159-f4c6cee92ff1-var-run\") pod \"9a6d18c6-e74e-43f2-9159-f4c6cee92ff1\" (UID: \"9a6d18c6-e74e-43f2-9159-f4c6cee92ff1\") " Jan 22 07:25:51 crc kubenswrapper[4933]: I0122 07:25:51.126576 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/9a6d18c6-e74e-43f2-9159-f4c6cee92ff1-var-run-ovn\") pod \"9a6d18c6-e74e-43f2-9159-f4c6cee92ff1\" (UID: \"9a6d18c6-e74e-43f2-9159-f4c6cee92ff1\") " Jan 22 07:25:51 crc kubenswrapper[4933]: I0122 07:25:51.126635 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9a6d18c6-e74e-43f2-9159-f4c6cee92ff1-var-run" (OuterVolumeSpecName: "var-run") pod "9a6d18c6-e74e-43f2-9159-f4c6cee92ff1" (UID: "9a6d18c6-e74e-43f2-9159-f4c6cee92ff1"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 07:25:51 crc kubenswrapper[4933]: I0122 07:25:51.126658 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/9a6d18c6-e74e-43f2-9159-f4c6cee92ff1-additional-scripts\") pod \"9a6d18c6-e74e-43f2-9159-f4c6cee92ff1\" (UID: \"9a6d18c6-e74e-43f2-9159-f4c6cee92ff1\") " Jan 22 07:25:51 crc kubenswrapper[4933]: I0122 07:25:51.126694 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9a6d18c6-e74e-43f2-9159-f4c6cee92ff1-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "9a6d18c6-e74e-43f2-9159-f4c6cee92ff1" (UID: "9a6d18c6-e74e-43f2-9159-f4c6cee92ff1"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 07:25:51 crc kubenswrapper[4933]: I0122 07:25:51.126741 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b9wqm\" (UniqueName: \"kubernetes.io/projected/9a6d18c6-e74e-43f2-9159-f4c6cee92ff1-kube-api-access-b9wqm\") pod \"9a6d18c6-e74e-43f2-9159-f4c6cee92ff1\" (UID: \"9a6d18c6-e74e-43f2-9159-f4c6cee92ff1\") " Jan 22 07:25:51 crc kubenswrapper[4933]: I0122 07:25:51.126780 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/9a6d18c6-e74e-43f2-9159-f4c6cee92ff1-var-log-ovn\") pod \"9a6d18c6-e74e-43f2-9159-f4c6cee92ff1\" (UID: \"9a6d18c6-e74e-43f2-9159-f4c6cee92ff1\") " Jan 22 07:25:51 crc kubenswrapper[4933]: I0122 07:25:51.126963 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9a6d18c6-e74e-43f2-9159-f4c6cee92ff1-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "9a6d18c6-e74e-43f2-9159-f4c6cee92ff1" (UID: "9a6d18c6-e74e-43f2-9159-f4c6cee92ff1"). InnerVolumeSpecName "var-log-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 07:25:51 crc kubenswrapper[4933]: I0122 07:25:51.127155 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9a6d18c6-e74e-43f2-9159-f4c6cee92ff1-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "9a6d18c6-e74e-43f2-9159-f4c6cee92ff1" (UID: "9a6d18c6-e74e-43f2-9159-f4c6cee92ff1"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:25:51 crc kubenswrapper[4933]: I0122 07:25:51.127443 4933 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/9a6d18c6-e74e-43f2-9159-f4c6cee92ff1-additional-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:25:51 crc kubenswrapper[4933]: I0122 07:25:51.127460 4933 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/9a6d18c6-e74e-43f2-9159-f4c6cee92ff1-var-log-ovn\") on node \"crc\" DevicePath \"\"" Jan 22 07:25:51 crc kubenswrapper[4933]: I0122 07:25:51.127469 4933 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9a6d18c6-e74e-43f2-9159-f4c6cee92ff1-var-run\") on node \"crc\" DevicePath \"\"" Jan 22 07:25:51 crc kubenswrapper[4933]: I0122 07:25:51.127479 4933 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/9a6d18c6-e74e-43f2-9159-f4c6cee92ff1-var-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 22 07:25:51 crc kubenswrapper[4933]: I0122 07:25:51.127616 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9a6d18c6-e74e-43f2-9159-f4c6cee92ff1-scripts" (OuterVolumeSpecName: "scripts") pod "9a6d18c6-e74e-43f2-9159-f4c6cee92ff1" (UID: "9a6d18c6-e74e-43f2-9159-f4c6cee92ff1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:25:51 crc kubenswrapper[4933]: I0122 07:25:51.132183 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9a6d18c6-e74e-43f2-9159-f4c6cee92ff1-kube-api-access-b9wqm" (OuterVolumeSpecName: "kube-api-access-b9wqm") pod "9a6d18c6-e74e-43f2-9159-f4c6cee92ff1" (UID: "9a6d18c6-e74e-43f2-9159-f4c6cee92ff1"). InnerVolumeSpecName "kube-api-access-b9wqm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:25:51 crc kubenswrapper[4933]: I0122 07:25:51.229726 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b9wqm\" (UniqueName: \"kubernetes.io/projected/9a6d18c6-e74e-43f2-9159-f4c6cee92ff1-kube-api-access-b9wqm\") on node \"crc\" DevicePath \"\"" Jan 22 07:25:51 crc kubenswrapper[4933]: I0122 07:25:51.229774 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9a6d18c6-e74e-43f2-9159-f4c6cee92ff1-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:25:51 crc kubenswrapper[4933]: I0122 07:25:51.642796 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-qxtvn-config-27chd" event={"ID":"9a6d18c6-e74e-43f2-9159-f4c6cee92ff1","Type":"ContainerDied","Data":"364fca55b76f0478b55a552f8f50f169d76d6eadd4e98fe67bc56ac2725273b4"} Jan 22 07:25:51 crc kubenswrapper[4933]: I0122 07:25:51.642834 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="364fca55b76f0478b55a552f8f50f169d76d6eadd4e98fe67bc56ac2725273b4" Jan 22 07:25:51 crc kubenswrapper[4933]: I0122 07:25:51.642833 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-qxtvn-config-27chd" Jan 22 07:25:52 crc kubenswrapper[4933]: I0122 07:25:52.095230 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-qxtvn-config-27chd"] Jan 22 07:25:52 crc kubenswrapper[4933]: I0122 07:25:52.103780 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-qxtvn-config-27chd"] Jan 22 07:25:52 crc kubenswrapper[4933]: I0122 07:25:52.500738 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9a6d18c6-e74e-43f2-9159-f4c6cee92ff1" path="/var/lib/kubelet/pods/9a6d18c6-e74e-43f2-9159-f4c6cee92ff1/volumes" Jan 22 07:25:56 crc kubenswrapper[4933]: I0122 07:25:56.737099 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-rsyslog-bpbrn"] Jan 22 07:25:56 crc kubenswrapper[4933]: E0122 07:25:56.740878 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a6d18c6-e74e-43f2-9159-f4c6cee92ff1" containerName="ovn-config" Jan 22 07:25:56 crc kubenswrapper[4933]: I0122 07:25:56.740912 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a6d18c6-e74e-43f2-9159-f4c6cee92ff1" containerName="ovn-config" Jan 22 07:25:56 crc kubenswrapper[4933]: I0122 07:25:56.741138 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a6d18c6-e74e-43f2-9159-f4c6cee92ff1" containerName="ovn-config" Jan 22 07:25:56 crc kubenswrapper[4933]: I0122 07:25:56.742292 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-rsyslog-bpbrn" Jan 22 07:25:56 crc kubenswrapper[4933]: I0122 07:25:56.745363 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"octavia-hmport-map" Jan 22 07:25:56 crc kubenswrapper[4933]: I0122 07:25:56.745591 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-rsyslog-config-data" Jan 22 07:25:56 crc kubenswrapper[4933]: I0122 07:25:56.745759 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-rsyslog-scripts" Jan 22 07:25:56 crc kubenswrapper[4933]: I0122 07:25:56.749808 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-rsyslog-bpbrn"] Jan 22 07:25:56 crc kubenswrapper[4933]: I0122 07:25:56.933521 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/73086f28-1dbc-4a12-afa9-8440c178dae1-config-data-merged\") pod \"octavia-rsyslog-bpbrn\" (UID: \"73086f28-1dbc-4a12-afa9-8440c178dae1\") " pod="openstack/octavia-rsyslog-bpbrn" Jan 22 07:25:56 crc kubenswrapper[4933]: I0122 07:25:56.933638 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/73086f28-1dbc-4a12-afa9-8440c178dae1-scripts\") pod \"octavia-rsyslog-bpbrn\" (UID: \"73086f28-1dbc-4a12-afa9-8440c178dae1\") " pod="openstack/octavia-rsyslog-bpbrn" Jan 22 07:25:56 crc kubenswrapper[4933]: I0122 07:25:56.933685 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/73086f28-1dbc-4a12-afa9-8440c178dae1-hm-ports\") pod \"octavia-rsyslog-bpbrn\" (UID: \"73086f28-1dbc-4a12-afa9-8440c178dae1\") " pod="openstack/octavia-rsyslog-bpbrn" Jan 22 07:25:56 crc kubenswrapper[4933]: I0122 07:25:56.933741 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73086f28-1dbc-4a12-afa9-8440c178dae1-config-data\") pod \"octavia-rsyslog-bpbrn\" (UID: \"73086f28-1dbc-4a12-afa9-8440c178dae1\") " pod="openstack/octavia-rsyslog-bpbrn" Jan 22 07:25:57 crc kubenswrapper[4933]: I0122 07:25:57.035994 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/73086f28-1dbc-4a12-afa9-8440c178dae1-config-data-merged\") pod \"octavia-rsyslog-bpbrn\" (UID: \"73086f28-1dbc-4a12-afa9-8440c178dae1\") " pod="openstack/octavia-rsyslog-bpbrn" Jan 22 07:25:57 crc kubenswrapper[4933]: I0122 07:25:57.036057 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/73086f28-1dbc-4a12-afa9-8440c178dae1-scripts\") pod \"octavia-rsyslog-bpbrn\" (UID: \"73086f28-1dbc-4a12-afa9-8440c178dae1\") " pod="openstack/octavia-rsyslog-bpbrn" Jan 22 07:25:57 crc kubenswrapper[4933]: I0122 07:25:57.036113 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/73086f28-1dbc-4a12-afa9-8440c178dae1-hm-ports\") pod \"octavia-rsyslog-bpbrn\" (UID: \"73086f28-1dbc-4a12-afa9-8440c178dae1\") " pod="openstack/octavia-rsyslog-bpbrn" Jan 22 07:25:57 crc kubenswrapper[4933]: I0122 07:25:57.036167 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/73086f28-1dbc-4a12-afa9-8440c178dae1-config-data\") pod \"octavia-rsyslog-bpbrn\" (UID: \"73086f28-1dbc-4a12-afa9-8440c178dae1\") " pod="openstack/octavia-rsyslog-bpbrn" Jan 22 07:25:57 crc kubenswrapper[4933]: I0122 07:25:57.037031 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/73086f28-1dbc-4a12-afa9-8440c178dae1-config-data-merged\") pod \"octavia-rsyslog-bpbrn\" (UID: \"73086f28-1dbc-4a12-afa9-8440c178dae1\") " pod="openstack/octavia-rsyslog-bpbrn" Jan 22 07:25:57 crc kubenswrapper[4933]: I0122 07:25:57.037538 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/73086f28-1dbc-4a12-afa9-8440c178dae1-hm-ports\") pod \"octavia-rsyslog-bpbrn\" (UID: \"73086f28-1dbc-4a12-afa9-8440c178dae1\") " pod="openstack/octavia-rsyslog-bpbrn" Jan 22 07:25:57 crc kubenswrapper[4933]: I0122 07:25:57.042602 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/73086f28-1dbc-4a12-afa9-8440c178dae1-scripts\") pod \"octavia-rsyslog-bpbrn\" (UID: \"73086f28-1dbc-4a12-afa9-8440c178dae1\") " pod="openstack/octavia-rsyslog-bpbrn" Jan 22 07:25:57 crc kubenswrapper[4933]: I0122 07:25:57.045971 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73086f28-1dbc-4a12-afa9-8440c178dae1-config-data\") pod \"octavia-rsyslog-bpbrn\" (UID: \"73086f28-1dbc-4a12-afa9-8440c178dae1\") " pod="openstack/octavia-rsyslog-bpbrn" Jan 22 07:25:57 crc kubenswrapper[4933]: I0122 07:25:57.066806 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-rsyslog-bpbrn" Jan 22 07:25:57 crc kubenswrapper[4933]: I0122 07:25:57.286459 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-image-upload-7b97d6bc64-js7m6"] Jan 22 07:25:57 crc kubenswrapper[4933]: I0122 07:25:57.288696 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-image-upload-7b97d6bc64-js7m6" Jan 22 07:25:57 crc kubenswrapper[4933]: I0122 07:25:57.291374 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-config-data" Jan 22 07:25:57 crc kubenswrapper[4933]: I0122 07:25:57.298623 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-image-upload-7b97d6bc64-js7m6"] Jan 22 07:25:57 crc kubenswrapper[4933]: I0122 07:25:57.442860 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/9147baef-7574-4104-83be-e54825e1a277-httpd-config\") pod \"octavia-image-upload-7b97d6bc64-js7m6\" (UID: \"9147baef-7574-4104-83be-e54825e1a277\") " pod="openstack/octavia-image-upload-7b97d6bc64-js7m6" Jan 22 07:25:57 crc kubenswrapper[4933]: I0122 07:25:57.443179 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/9147baef-7574-4104-83be-e54825e1a277-amphora-image\") pod \"octavia-image-upload-7b97d6bc64-js7m6\" (UID: \"9147baef-7574-4104-83be-e54825e1a277\") " pod="openstack/octavia-image-upload-7b97d6bc64-js7m6" Jan 22 07:25:57 crc kubenswrapper[4933]: I0122 07:25:57.544573 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/9147baef-7574-4104-83be-e54825e1a277-httpd-config\") pod \"octavia-image-upload-7b97d6bc64-js7m6\" (UID: \"9147baef-7574-4104-83be-e54825e1a277\") " pod="openstack/octavia-image-upload-7b97d6bc64-js7m6" Jan 22 07:25:57 crc kubenswrapper[4933]: I0122 07:25:57.544631 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/9147baef-7574-4104-83be-e54825e1a277-amphora-image\") pod \"octavia-image-upload-7b97d6bc64-js7m6\" (UID: \"9147baef-7574-4104-83be-e54825e1a277\") " pod="openstack/octavia-image-upload-7b97d6bc64-js7m6" Jan 22 07:25:57 crc kubenswrapper[4933]: I0122 07:25:57.545170 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/9147baef-7574-4104-83be-e54825e1a277-amphora-image\") pod \"octavia-image-upload-7b97d6bc64-js7m6\" (UID: \"9147baef-7574-4104-83be-e54825e1a277\") " pod="openstack/octavia-image-upload-7b97d6bc64-js7m6" Jan 22 07:25:57 crc kubenswrapper[4933]: I0122 07:25:57.550256 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/9147baef-7574-4104-83be-e54825e1a277-httpd-config\") pod \"octavia-image-upload-7b97d6bc64-js7m6\" (UID: \"9147baef-7574-4104-83be-e54825e1a277\") " pod="openstack/octavia-image-upload-7b97d6bc64-js7m6" Jan 22 07:25:57 crc kubenswrapper[4933]: I0122 07:25:57.603307 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-rsyslog-bpbrn"] Jan 22 07:25:57 crc kubenswrapper[4933]: I0122 07:25:57.611256 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-image-upload-7b97d6bc64-js7m6" Jan 22 07:25:57 crc kubenswrapper[4933]: I0122 07:25:57.722203 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-rsyslog-bpbrn" event={"ID":"73086f28-1dbc-4a12-afa9-8440c178dae1","Type":"ContainerStarted","Data":"5b46dd6fb998aa9235e7ccb2086a4542f0e111f78cc32e798039ffc43250789e"} Jan 22 07:25:57 crc kubenswrapper[4933]: I0122 07:25:57.812470 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-rsyslog-bpbrn"] Jan 22 07:25:58 crc kubenswrapper[4933]: I0122 07:25:58.100385 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-image-upload-7b97d6bc64-js7m6"] Jan 22 07:25:58 crc kubenswrapper[4933]: W0122 07:25:58.103433 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9147baef_7574_4104_83be_e54825e1a277.slice/crio-f9d2720a51b2c0f4ad7badb55d9d02248de785fb12ced532266b1132d4c3ccce WatchSource:0}: Error finding container f9d2720a51b2c0f4ad7badb55d9d02248de785fb12ced532266b1132d4c3ccce: Status 404 returned error can't find the container with id f9d2720a51b2c0f4ad7badb55d9d02248de785fb12ced532266b1132d4c3ccce Jan 22 07:25:58 crc kubenswrapper[4933]: I0122 07:25:58.744300 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-7b97d6bc64-js7m6" event={"ID":"9147baef-7574-4104-83be-e54825e1a277","Type":"ContainerStarted","Data":"f9d2720a51b2c0f4ad7badb55d9d02248de785fb12ced532266b1132d4c3ccce"} Jan 22 07:25:58 crc kubenswrapper[4933]: I0122 07:25:58.773650 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-db-sync-kws6f"] Jan 22 07:25:58 crc kubenswrapper[4933]: I0122 07:25:58.776524 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-db-sync-kws6f" Jan 22 07:25:58 crc kubenswrapper[4933]: I0122 07:25:58.778597 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-scripts" Jan 22 07:25:58 crc kubenswrapper[4933]: I0122 07:25:58.790802 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-db-sync-kws6f"] Jan 22 07:25:58 crc kubenswrapper[4933]: I0122 07:25:58.875667 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6a01cbd8-9d77-42b9-9848-bc2329258052-scripts\") pod \"octavia-db-sync-kws6f\" (UID: \"6a01cbd8-9d77-42b9-9848-bc2329258052\") " pod="openstack/octavia-db-sync-kws6f" Jan 22 07:25:58 crc kubenswrapper[4933]: I0122 07:25:58.875743 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a01cbd8-9d77-42b9-9848-bc2329258052-config-data\") pod \"octavia-db-sync-kws6f\" (UID: \"6a01cbd8-9d77-42b9-9848-bc2329258052\") " pod="openstack/octavia-db-sync-kws6f" Jan 22 07:25:58 crc kubenswrapper[4933]: I0122 07:25:58.876151 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a01cbd8-9d77-42b9-9848-bc2329258052-combined-ca-bundle\") pod \"octavia-db-sync-kws6f\" (UID: \"6a01cbd8-9d77-42b9-9848-bc2329258052\") " pod="openstack/octavia-db-sync-kws6f" Jan 22 07:25:58 crc kubenswrapper[4933]: I0122 07:25:58.876197 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/6a01cbd8-9d77-42b9-9848-bc2329258052-config-data-merged\") pod \"octavia-db-sync-kws6f\" (UID: \"6a01cbd8-9d77-42b9-9848-bc2329258052\") " pod="openstack/octavia-db-sync-kws6f" Jan 22 07:25:58 crc kubenswrapper[4933]: I0122 07:25:58.977658 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a01cbd8-9d77-42b9-9848-bc2329258052-combined-ca-bundle\") pod \"octavia-db-sync-kws6f\" (UID: \"6a01cbd8-9d77-42b9-9848-bc2329258052\") " pod="openstack/octavia-db-sync-kws6f" Jan 22 07:25:58 crc kubenswrapper[4933]: I0122 07:25:58.978013 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/6a01cbd8-9d77-42b9-9848-bc2329258052-config-data-merged\") pod \"octavia-db-sync-kws6f\" (UID: \"6a01cbd8-9d77-42b9-9848-bc2329258052\") " pod="openstack/octavia-db-sync-kws6f" Jan 22 07:25:58 crc kubenswrapper[4933]: I0122 07:25:58.978181 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6a01cbd8-9d77-42b9-9848-bc2329258052-scripts\") pod \"octavia-db-sync-kws6f\" (UID: \"6a01cbd8-9d77-42b9-9848-bc2329258052\") " pod="openstack/octavia-db-sync-kws6f" Jan 22 07:25:58 crc kubenswrapper[4933]: I0122 07:25:58.978239 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a01cbd8-9d77-42b9-9848-bc2329258052-config-data\") pod \"octavia-db-sync-kws6f\" (UID: \"6a01cbd8-9d77-42b9-9848-bc2329258052\") " pod="openstack/octavia-db-sync-kws6f" Jan 22 07:25:58 crc kubenswrapper[4933]: I0122 07:25:58.981784 4933 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/6a01cbd8-9d77-42b9-9848-bc2329258052-config-data-merged\") pod \"octavia-db-sync-kws6f\" (UID: \"6a01cbd8-9d77-42b9-9848-bc2329258052\") " pod="openstack/octavia-db-sync-kws6f" Jan 22 07:25:58 crc kubenswrapper[4933]: I0122 07:25:58.984625 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6a01cbd8-9d77-42b9-9848-bc2329258052-scripts\") pod \"octavia-db-sync-kws6f\" (UID: \"6a01cbd8-9d77-42b9-9848-bc2329258052\") " pod="openstack/octavia-db-sync-kws6f" Jan 22 07:25:58 crc kubenswrapper[4933]: I0122 07:25:58.984644 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a01cbd8-9d77-42b9-9848-bc2329258052-config-data\") pod \"octavia-db-sync-kws6f\" (UID: \"6a01cbd8-9d77-42b9-9848-bc2329258052\") " pod="openstack/octavia-db-sync-kws6f" Jan 22 07:25:58 crc kubenswrapper[4933]: I0122 07:25:58.985648 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a01cbd8-9d77-42b9-9848-bc2329258052-combined-ca-bundle\") pod \"octavia-db-sync-kws6f\" (UID: \"6a01cbd8-9d77-42b9-9848-bc2329258052\") " pod="openstack/octavia-db-sync-kws6f" Jan 22 07:25:59 crc kubenswrapper[4933]: I0122 07:25:59.112011 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-sync-kws6f" Jan 22 07:25:59 crc kubenswrapper[4933]: I0122 07:25:59.610902 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-db-sync-kws6f"] Jan 22 07:25:59 crc kubenswrapper[4933]: W0122 07:25:59.713757 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6a01cbd8_9d77_42b9_9848_bc2329258052.slice/crio-8ce16a716e81b3aa65339811f07e454ac0758cc6b49c4a1b4581cbbe41148361 WatchSource:0}: Error finding container 8ce16a716e81b3aa65339811f07e454ac0758cc6b49c4a1b4581cbbe41148361: Status 404 returned error can't find the container with id 8ce16a716e81b3aa65339811f07e454ac0758cc6b49c4a1b4581cbbe41148361 Jan 22 07:25:59 crc kubenswrapper[4933]: I0122 07:25:59.756791 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-rsyslog-bpbrn" event={"ID":"73086f28-1dbc-4a12-afa9-8440c178dae1","Type":"ContainerStarted","Data":"b304ca2c86baaad535e899bf8bf8498aee118b73bc4a2cf875005a607ea43499"} Jan 22 07:25:59 crc kubenswrapper[4933]: I0122 07:25:59.761734 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-kws6f" event={"ID":"6a01cbd8-9d77-42b9-9848-bc2329258052","Type":"ContainerStarted","Data":"8ce16a716e81b3aa65339811f07e454ac0758cc6b49c4a1b4581cbbe41148361"} Jan 22 07:26:00 crc kubenswrapper[4933]: I0122 07:26:00.776753 4933 generic.go:334] "Generic (PLEG): container finished" podID="6a01cbd8-9d77-42b9-9848-bc2329258052" containerID="65ae41d1f038e952a5889a5077ee28582fa20712ea60f085952cdfebc0d4ae9d" exitCode=0 Jan 22 07:26:00 crc kubenswrapper[4933]: I0122 07:26:00.776931 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-kws6f" event={"ID":"6a01cbd8-9d77-42b9-9848-bc2329258052","Type":"ContainerDied","Data":"65ae41d1f038e952a5889a5077ee28582fa20712ea60f085952cdfebc0d4ae9d"} Jan 22 07:26:01 crc kubenswrapper[4933]: I0122 07:26:01.787652 4933 generic.go:334] "Generic (PLEG): container finished" podID="73086f28-1dbc-4a12-afa9-8440c178dae1" 
containerID="b304ca2c86baaad535e899bf8bf8498aee118b73bc4a2cf875005a607ea43499" exitCode=0 Jan 22 07:26:01 crc kubenswrapper[4933]: I0122 07:26:01.787845 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-rsyslog-bpbrn" event={"ID":"73086f28-1dbc-4a12-afa9-8440c178dae1","Type":"ContainerDied","Data":"b304ca2c86baaad535e899bf8bf8498aee118b73bc4a2cf875005a607ea43499"} Jan 22 07:26:01 crc kubenswrapper[4933]: I0122 07:26:01.791060 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-kws6f" event={"ID":"6a01cbd8-9d77-42b9-9848-bc2329258052","Type":"ContainerStarted","Data":"ec4363925d8bf174e46cffd00cb8c7eec6934d637ce74c1bb8c9bbd7da023f30"} Jan 22 07:26:01 crc kubenswrapper[4933]: I0122 07:26:01.830531 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-db-sync-kws6f" podStartSLOduration=3.830512906 podStartE2EDuration="3.830512906s" podCreationTimestamp="2026-01-22 07:25:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:26:01.823474265 +0000 UTC m=+6009.660599638" watchObservedRunningTime="2026-01-22 07:26:01.830512906 +0000 UTC m=+6009.667638259" Jan 22 07:26:02 crc kubenswrapper[4933]: I0122 07:26:02.051765 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-healthmanager-sd9kx"] Jan 22 07:26:02 crc kubenswrapper[4933]: I0122 07:26:02.055856 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-healthmanager-sd9kx" Jan 22 07:26:02 crc kubenswrapper[4933]: I0122 07:26:02.057983 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-healthmanager-config-data" Jan 22 07:26:02 crc kubenswrapper[4933]: I0122 07:26:02.058802 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-healthmanager-scripts" Jan 22 07:26:02 crc kubenswrapper[4933]: I0122 07:26:02.060122 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-certs-secret" Jan 22 07:26:02 crc kubenswrapper[4933]: I0122 07:26:02.062794 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-healthmanager-sd9kx"] Jan 22 07:26:02 crc kubenswrapper[4933]: I0122 07:26:02.245419 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/12794d9c-3ddc-4003-a7fa-d8aad66d74ac-config-data-merged\") pod \"octavia-healthmanager-sd9kx\" (UID: \"12794d9c-3ddc-4003-a7fa-d8aad66d74ac\") " pod="openstack/octavia-healthmanager-sd9kx" Jan 22 07:26:02 crc kubenswrapper[4933]: I0122 07:26:02.245754 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/12794d9c-3ddc-4003-a7fa-d8aad66d74ac-scripts\") pod \"octavia-healthmanager-sd9kx\" (UID: \"12794d9c-3ddc-4003-a7fa-d8aad66d74ac\") " pod="openstack/octavia-healthmanager-sd9kx" Jan 22 07:26:02 crc kubenswrapper[4933]: I0122 07:26:02.245888 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/12794d9c-3ddc-4003-a7fa-d8aad66d74ac-hm-ports\") pod \"octavia-healthmanager-sd9kx\" (UID: \"12794d9c-3ddc-4003-a7fa-d8aad66d74ac\") " pod="openstack/octavia-healthmanager-sd9kx" Jan 22 07:26:02 crc kubenswrapper[4933]: I0122 07:26:02.246126 4933 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12794d9c-3ddc-4003-a7fa-d8aad66d74ac-combined-ca-bundle\") pod \"octavia-healthmanager-sd9kx\" (UID: \"12794d9c-3ddc-4003-a7fa-d8aad66d74ac\") " pod="openstack/octavia-healthmanager-sd9kx" Jan 22 07:26:02 crc kubenswrapper[4933]: I0122 07:26:02.246216 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12794d9c-3ddc-4003-a7fa-d8aad66d74ac-config-data\") pod \"octavia-healthmanager-sd9kx\" (UID: \"12794d9c-3ddc-4003-a7fa-d8aad66d74ac\") " pod="openstack/octavia-healthmanager-sd9kx" Jan 22 07:26:02 crc kubenswrapper[4933]: I0122 07:26:02.246396 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/12794d9c-3ddc-4003-a7fa-d8aad66d74ac-amphora-certs\") pod \"octavia-healthmanager-sd9kx\" (UID: \"12794d9c-3ddc-4003-a7fa-d8aad66d74ac\") " pod="openstack/octavia-healthmanager-sd9kx" Jan 22 07:26:02 crc kubenswrapper[4933]: I0122 07:26:02.349156 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12794d9c-3ddc-4003-a7fa-d8aad66d74ac-combined-ca-bundle\") pod \"octavia-healthmanager-sd9kx\" (UID: \"12794d9c-3ddc-4003-a7fa-d8aad66d74ac\") " pod="openstack/octavia-healthmanager-sd9kx" Jan 22 07:26:02 crc kubenswrapper[4933]: I0122 07:26:02.349572 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12794d9c-3ddc-4003-a7fa-d8aad66d74ac-config-data\") pod \"octavia-healthmanager-sd9kx\" (UID: \"12794d9c-3ddc-4003-a7fa-d8aad66d74ac\") " pod="openstack/octavia-healthmanager-sd9kx" Jan 22 07:26:02 crc kubenswrapper[4933]: I0122 07:26:02.349655 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/12794d9c-3ddc-4003-a7fa-d8aad66d74ac-amphora-certs\") pod \"octavia-healthmanager-sd9kx\" (UID: \"12794d9c-3ddc-4003-a7fa-d8aad66d74ac\") " pod="openstack/octavia-healthmanager-sd9kx" Jan 22 07:26:02 crc kubenswrapper[4933]: I0122 07:26:02.349735 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/12794d9c-3ddc-4003-a7fa-d8aad66d74ac-config-data-merged\") pod \"octavia-healthmanager-sd9kx\" (UID: \"12794d9c-3ddc-4003-a7fa-d8aad66d74ac\") " pod="openstack/octavia-healthmanager-sd9kx" Jan 22 07:26:02 crc kubenswrapper[4933]: I0122 07:26:02.349826 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/12794d9c-3ddc-4003-a7fa-d8aad66d74ac-scripts\") pod \"octavia-healthmanager-sd9kx\" (UID: \"12794d9c-3ddc-4003-a7fa-d8aad66d74ac\") " pod="openstack/octavia-healthmanager-sd9kx" Jan 22 07:26:02 crc kubenswrapper[4933]: I0122 07:26:02.349849 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/12794d9c-3ddc-4003-a7fa-d8aad66d74ac-hm-ports\") pod \"octavia-healthmanager-sd9kx\" (UID: \"12794d9c-3ddc-4003-a7fa-d8aad66d74ac\") " pod="openstack/octavia-healthmanager-sd9kx" Jan 22 07:26:02 crc kubenswrapper[4933]: I0122 07:26:02.350530 4933 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/12794d9c-3ddc-4003-a7fa-d8aad66d74ac-config-data-merged\") pod \"octavia-healthmanager-sd9kx\" (UID: \"12794d9c-3ddc-4003-a7fa-d8aad66d74ac\") " pod="openstack/octavia-healthmanager-sd9kx" Jan 22 07:26:02 crc kubenswrapper[4933]: I0122 07:26:02.350825 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/12794d9c-3ddc-4003-a7fa-d8aad66d74ac-hm-ports\") pod \"octavia-healthmanager-sd9kx\" (UID: \"12794d9c-3ddc-4003-a7fa-d8aad66d74ac\") " pod="openstack/octavia-healthmanager-sd9kx" Jan 22 07:26:02 crc kubenswrapper[4933]: I0122 07:26:02.355490 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12794d9c-3ddc-4003-a7fa-d8aad66d74ac-combined-ca-bundle\") pod \"octavia-healthmanager-sd9kx\" (UID: \"12794d9c-3ddc-4003-a7fa-d8aad66d74ac\") " pod="openstack/octavia-healthmanager-sd9kx" Jan 22 07:26:02 crc kubenswrapper[4933]: I0122 07:26:02.356053 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12794d9c-3ddc-4003-a7fa-d8aad66d74ac-config-data\") pod \"octavia-healthmanager-sd9kx\" (UID: \"12794d9c-3ddc-4003-a7fa-d8aad66d74ac\") " pod="openstack/octavia-healthmanager-sd9kx" Jan 22 07:26:02 crc kubenswrapper[4933]: I0122 07:26:02.357843 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/12794d9c-3ddc-4003-a7fa-d8aad66d74ac-scripts\") pod \"octavia-healthmanager-sd9kx\" (UID: \"12794d9c-3ddc-4003-a7fa-d8aad66d74ac\") " pod="openstack/octavia-healthmanager-sd9kx" Jan 22 07:26:02 crc kubenswrapper[4933]: I0122 07:26:02.358345 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/12794d9c-3ddc-4003-a7fa-d8aad66d74ac-amphora-certs\") pod \"octavia-healthmanager-sd9kx\" (UID: \"12794d9c-3ddc-4003-a7fa-d8aad66d74ac\") " pod="openstack/octavia-healthmanager-sd9kx" Jan 22 07:26:02 crc kubenswrapper[4933]: I0122 07:26:02.382936 4933 util.go:30] "No sandbox for pod can be found. 
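
The VerifyControllerAttachedVolume / MountVolume.SetUp pairs above are the kubelet volume manager reconciling desired state (the volumes the pod spec requires) against actual state (what is mounted on the node). A minimal sketch of that desired-vs-actual loop, assuming hypothetical volume/reconcile types rather than the real kubelet volumemanager API:

package main

import (
	"fmt"
	"time"
)

// Hypothetical stand-ins for the kubelet's desired/actual state-of-world
// caches; not the real volumemanager types.
type volume struct{ name, pod string }

func reconcile(desired []volume, mounted map[string]bool) {
	for _, v := range desired {
		key := v.pod + "/" + v.name
		if mounted[key] {
			continue // already mounted, nothing to do
		}
		// corresponds to "operationExecutor.MountVolume started ..."
		fmt.Printf("MountVolume started for volume %q pod %q\n", v.name, v.pod)
		mounted[key] = true
		// corresponds to "MountVolume.SetUp succeeded ..."
		fmt.Printf("MountVolume.SetUp succeeded for volume %q\n", v.name)
	}
}

func main() {
	desired := []volume{
		{"config-data", "octavia-healthmanager-sd9kx"},
		{"scripts", "octavia-healthmanager-sd9kx"},
	}
	mounted := map[string]bool{}
	for i := 0; i < 2; i++ { // the real reconciler re-runs periodically
		reconcile(desired, mounted)
		time.Sleep(10 * time.Millisecond)
	}
}

The second pass is a no-op, which is why the log shows each volume mounted exactly once even though the reconciler keeps looping.
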
Need to start a new one" pod="openstack/octavia-healthmanager-sd9kx" Jan 22 07:26:02 crc kubenswrapper[4933]: I0122 07:26:02.933310 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-healthmanager-sd9kx"] Jan 22 07:26:03 crc kubenswrapper[4933]: W0122 07:26:03.216907 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod12794d9c_3ddc_4003_a7fa_d8aad66d74ac.slice/crio-34e488eadd4986971d72f15fe16b0441a1e9291bbbd4a4204fc16185efe79867 WatchSource:0}: Error finding container 34e488eadd4986971d72f15fe16b0441a1e9291bbbd4a4204fc16185efe79867: Status 404 returned error can't find the container with id 34e488eadd4986971d72f15fe16b0441a1e9291bbbd4a4204fc16185efe79867 Jan 22 07:26:03 crc kubenswrapper[4933]: I0122 07:26:03.813583 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-rsyslog-bpbrn" event={"ID":"73086f28-1dbc-4a12-afa9-8440c178dae1","Type":"ContainerStarted","Data":"50b9d734d1ef746f3960380f2a5213557738182257e0d58370ed9679387215e7"} Jan 22 07:26:03 crc kubenswrapper[4933]: I0122 07:26:03.814379 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-rsyslog-bpbrn" Jan 22 07:26:03 crc kubenswrapper[4933]: I0122 07:26:03.821792 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-healthmanager-sd9kx" event={"ID":"12794d9c-3ddc-4003-a7fa-d8aad66d74ac","Type":"ContainerStarted","Data":"97dbe6cf6c3f219d38823483463d52a142265f48fc5f605322f06dc03b161416"} Jan 22 07:26:03 crc kubenswrapper[4933]: I0122 07:26:03.821840 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-healthmanager-sd9kx" event={"ID":"12794d9c-3ddc-4003-a7fa-d8aad66d74ac","Type":"ContainerStarted","Data":"34e488eadd4986971d72f15fe16b0441a1e9291bbbd4a4204fc16185efe79867"} Jan 22 07:26:03 crc kubenswrapper[4933]: I0122 07:26:03.834237 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-rsyslog-bpbrn" podStartSLOduration=2.170506208 podStartE2EDuration="7.834216296s" podCreationTimestamp="2026-01-22 07:25:56 +0000 UTC" firstStartedPulling="2026-01-22 07:25:57.625312294 +0000 UTC m=+6005.462437647" lastFinishedPulling="2026-01-22 07:26:03.289022352 +0000 UTC m=+6011.126147735" observedRunningTime="2026-01-22 07:26:03.832141935 +0000 UTC m=+6011.669267298" watchObservedRunningTime="2026-01-22 07:26:03.834216296 +0000 UTC m=+6011.671341649" Jan 22 07:26:04 crc kubenswrapper[4933]: I0122 07:26:04.137630 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-housekeeping-27ltq"] Jan 22 07:26:04 crc kubenswrapper[4933]: I0122 07:26:04.140960 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-housekeeping-27ltq" Jan 22 07:26:04 crc kubenswrapper[4933]: I0122 07:26:04.144864 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-housekeeping-config-data" Jan 22 07:26:04 crc kubenswrapper[4933]: I0122 07:26:04.145103 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-housekeeping-scripts" Jan 22 07:26:04 crc kubenswrapper[4933]: I0122 07:26:04.147293 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-housekeeping-27ltq"] Jan 22 07:26:04 crc kubenswrapper[4933]: I0122 07:26:04.191806 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2d46e8ea-1174-431e-8fa3-8ffc44be7919-scripts\") pod \"octavia-housekeeping-27ltq\" (UID: \"2d46e8ea-1174-431e-8fa3-8ffc44be7919\") " pod="openstack/octavia-housekeeping-27ltq" Jan 22 07:26:04 crc kubenswrapper[4933]: I0122 07:26:04.192065 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d46e8ea-1174-431e-8fa3-8ffc44be7919-config-data\") pod \"octavia-housekeeping-27ltq\" (UID: \"2d46e8ea-1174-431e-8fa3-8ffc44be7919\") " pod="openstack/octavia-housekeeping-27ltq" Jan 22 07:26:04 crc kubenswrapper[4933]: I0122 07:26:04.192215 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/2d46e8ea-1174-431e-8fa3-8ffc44be7919-config-data-merged\") pod \"octavia-housekeeping-27ltq\" (UID: \"2d46e8ea-1174-431e-8fa3-8ffc44be7919\") " pod="openstack/octavia-housekeeping-27ltq" Jan 22 07:26:04 crc kubenswrapper[4933]: I0122 07:26:04.192276 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/2d46e8ea-1174-431e-8fa3-8ffc44be7919-hm-ports\") pod \"octavia-housekeeping-27ltq\" (UID: \"2d46e8ea-1174-431e-8fa3-8ffc44be7919\") " pod="openstack/octavia-housekeeping-27ltq" Jan 22 07:26:04 crc kubenswrapper[4933]: I0122 07:26:04.192535 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d46e8ea-1174-431e-8fa3-8ffc44be7919-combined-ca-bundle\") pod \"octavia-housekeeping-27ltq\" (UID: \"2d46e8ea-1174-431e-8fa3-8ffc44be7919\") " pod="openstack/octavia-housekeeping-27ltq" Jan 22 07:26:04 crc kubenswrapper[4933]: I0122 07:26:04.192893 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/2d46e8ea-1174-431e-8fa3-8ffc44be7919-amphora-certs\") pod \"octavia-housekeeping-27ltq\" (UID: \"2d46e8ea-1174-431e-8fa3-8ffc44be7919\") " pod="openstack/octavia-housekeeping-27ltq" Jan 22 07:26:04 crc kubenswrapper[4933]: I0122 07:26:04.294638 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/2d46e8ea-1174-431e-8fa3-8ffc44be7919-config-data-merged\") pod \"octavia-housekeeping-27ltq\" (UID: \"2d46e8ea-1174-431e-8fa3-8ffc44be7919\") " pod="openstack/octavia-housekeeping-27ltq" Jan 22 07:26:04 crc kubenswrapper[4933]: I0122 07:26:04.294692 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hm-ports\" (UniqueName: 
\"kubernetes.io/configmap/2d46e8ea-1174-431e-8fa3-8ffc44be7919-hm-ports\") pod \"octavia-housekeeping-27ltq\" (UID: \"2d46e8ea-1174-431e-8fa3-8ffc44be7919\") " pod="openstack/octavia-housekeeping-27ltq" Jan 22 07:26:04 crc kubenswrapper[4933]: I0122 07:26:04.294755 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d46e8ea-1174-431e-8fa3-8ffc44be7919-combined-ca-bundle\") pod \"octavia-housekeeping-27ltq\" (UID: \"2d46e8ea-1174-431e-8fa3-8ffc44be7919\") " pod="openstack/octavia-housekeeping-27ltq" Jan 22 07:26:04 crc kubenswrapper[4933]: I0122 07:26:04.294856 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/2d46e8ea-1174-431e-8fa3-8ffc44be7919-amphora-certs\") pod \"octavia-housekeeping-27ltq\" (UID: \"2d46e8ea-1174-431e-8fa3-8ffc44be7919\") " pod="openstack/octavia-housekeeping-27ltq" Jan 22 07:26:04 crc kubenswrapper[4933]: I0122 07:26:04.294887 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2d46e8ea-1174-431e-8fa3-8ffc44be7919-scripts\") pod \"octavia-housekeeping-27ltq\" (UID: \"2d46e8ea-1174-431e-8fa3-8ffc44be7919\") " pod="openstack/octavia-housekeeping-27ltq" Jan 22 07:26:04 crc kubenswrapper[4933]: I0122 07:26:04.294937 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d46e8ea-1174-431e-8fa3-8ffc44be7919-config-data\") pod \"octavia-housekeeping-27ltq\" (UID: \"2d46e8ea-1174-431e-8fa3-8ffc44be7919\") " pod="openstack/octavia-housekeeping-27ltq" Jan 22 07:26:04 crc kubenswrapper[4933]: I0122 07:26:04.295104 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/2d46e8ea-1174-431e-8fa3-8ffc44be7919-config-data-merged\") pod \"octavia-housekeeping-27ltq\" (UID: \"2d46e8ea-1174-431e-8fa3-8ffc44be7919\") " pod="openstack/octavia-housekeeping-27ltq" Jan 22 07:26:04 crc kubenswrapper[4933]: I0122 07:26:04.295677 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/2d46e8ea-1174-431e-8fa3-8ffc44be7919-hm-ports\") pod \"octavia-housekeeping-27ltq\" (UID: \"2d46e8ea-1174-431e-8fa3-8ffc44be7919\") " pod="openstack/octavia-housekeeping-27ltq" Jan 22 07:26:04 crc kubenswrapper[4933]: I0122 07:26:04.301481 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/2d46e8ea-1174-431e-8fa3-8ffc44be7919-amphora-certs\") pod \"octavia-housekeeping-27ltq\" (UID: \"2d46e8ea-1174-431e-8fa3-8ffc44be7919\") " pod="openstack/octavia-housekeeping-27ltq" Jan 22 07:26:04 crc kubenswrapper[4933]: I0122 07:26:04.301863 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2d46e8ea-1174-431e-8fa3-8ffc44be7919-scripts\") pod \"octavia-housekeeping-27ltq\" (UID: \"2d46e8ea-1174-431e-8fa3-8ffc44be7919\") " pod="openstack/octavia-housekeeping-27ltq" Jan 22 07:26:04 crc kubenswrapper[4933]: I0122 07:26:04.302933 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d46e8ea-1174-431e-8fa3-8ffc44be7919-combined-ca-bundle\") pod \"octavia-housekeeping-27ltq\" (UID: \"2d46e8ea-1174-431e-8fa3-8ffc44be7919\") " 
pod="openstack/octavia-housekeeping-27ltq" Jan 22 07:26:04 crc kubenswrapper[4933]: I0122 07:26:04.303891 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d46e8ea-1174-431e-8fa3-8ffc44be7919-config-data\") pod \"octavia-housekeeping-27ltq\" (UID: \"2d46e8ea-1174-431e-8fa3-8ffc44be7919\") " pod="openstack/octavia-housekeeping-27ltq" Jan 22 07:26:04 crc kubenswrapper[4933]: I0122 07:26:04.467746 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-housekeeping-27ltq" Jan 22 07:26:04 crc kubenswrapper[4933]: I0122 07:26:04.837493 4933 generic.go:334] "Generic (PLEG): container finished" podID="6a01cbd8-9d77-42b9-9848-bc2329258052" containerID="ec4363925d8bf174e46cffd00cb8c7eec6934d637ce74c1bb8c9bbd7da023f30" exitCode=0 Jan 22 07:26:04 crc kubenswrapper[4933]: I0122 07:26:04.837614 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-kws6f" event={"ID":"6a01cbd8-9d77-42b9-9848-bc2329258052","Type":"ContainerDied","Data":"ec4363925d8bf174e46cffd00cb8c7eec6934d637ce74c1bb8c9bbd7da023f30"} Jan 22 07:26:05 crc kubenswrapper[4933]: I0122 07:26:05.313382 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-healthmanager-sd9kx"] Jan 22 07:26:06 crc kubenswrapper[4933]: I0122 07:26:06.780056 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-worker-gkn68"] Jan 22 07:26:06 crc kubenswrapper[4933]: I0122 07:26:06.782390 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-worker-gkn68" Jan 22 07:26:06 crc kubenswrapper[4933]: I0122 07:26:06.785469 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-worker-config-data" Jan 22 07:26:06 crc kubenswrapper[4933]: I0122 07:26:06.785607 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-worker-scripts" Jan 22 07:26:06 crc kubenswrapper[4933]: I0122 07:26:06.803069 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-worker-gkn68"] Jan 22 07:26:06 crc kubenswrapper[4933]: I0122 07:26:06.866524 4933 generic.go:334] "Generic (PLEG): container finished" podID="12794d9c-3ddc-4003-a7fa-d8aad66d74ac" containerID="97dbe6cf6c3f219d38823483463d52a142265f48fc5f605322f06dc03b161416" exitCode=0 Jan 22 07:26:06 crc kubenswrapper[4933]: I0122 07:26:06.866598 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-healthmanager-sd9kx" event={"ID":"12794d9c-3ddc-4003-a7fa-d8aad66d74ac","Type":"ContainerDied","Data":"97dbe6cf6c3f219d38823483463d52a142265f48fc5f605322f06dc03b161416"} Jan 22 07:26:06 crc kubenswrapper[4933]: I0122 07:26:06.965663 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09aef9f6-a8ee-4751-8a6b-64c323cb7bce-combined-ca-bundle\") pod \"octavia-worker-gkn68\" (UID: \"09aef9f6-a8ee-4751-8a6b-64c323cb7bce\") " pod="openstack/octavia-worker-gkn68" Jan 22 07:26:06 crc kubenswrapper[4933]: I0122 07:26:06.966540 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/09aef9f6-a8ee-4751-8a6b-64c323cb7bce-amphora-certs\") pod \"octavia-worker-gkn68\" (UID: \"09aef9f6-a8ee-4751-8a6b-64c323cb7bce\") " pod="openstack/octavia-worker-gkn68" Jan 22 07:26:06 crc kubenswrapper[4933]: I0122 07:26:06.966680 
4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09aef9f6-a8ee-4751-8a6b-64c323cb7bce-scripts\") pod \"octavia-worker-gkn68\" (UID: \"09aef9f6-a8ee-4751-8a6b-64c323cb7bce\") " pod="openstack/octavia-worker-gkn68" Jan 22 07:26:06 crc kubenswrapper[4933]: I0122 07:26:06.966725 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09aef9f6-a8ee-4751-8a6b-64c323cb7bce-config-data\") pod \"octavia-worker-gkn68\" (UID: \"09aef9f6-a8ee-4751-8a6b-64c323cb7bce\") " pod="openstack/octavia-worker-gkn68" Jan 22 07:26:06 crc kubenswrapper[4933]: I0122 07:26:06.966877 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/09aef9f6-a8ee-4751-8a6b-64c323cb7bce-hm-ports\") pod \"octavia-worker-gkn68\" (UID: \"09aef9f6-a8ee-4751-8a6b-64c323cb7bce\") " pod="openstack/octavia-worker-gkn68" Jan 22 07:26:06 crc kubenswrapper[4933]: I0122 07:26:06.967125 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/09aef9f6-a8ee-4751-8a6b-64c323cb7bce-config-data-merged\") pod \"octavia-worker-gkn68\" (UID: \"09aef9f6-a8ee-4751-8a6b-64c323cb7bce\") " pod="openstack/octavia-worker-gkn68" Jan 22 07:26:07 crc kubenswrapper[4933]: I0122 07:26:07.068626 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09aef9f6-a8ee-4751-8a6b-64c323cb7bce-scripts\") pod \"octavia-worker-gkn68\" (UID: \"09aef9f6-a8ee-4751-8a6b-64c323cb7bce\") " pod="openstack/octavia-worker-gkn68" Jan 22 07:26:07 crc kubenswrapper[4933]: I0122 07:26:07.068707 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09aef9f6-a8ee-4751-8a6b-64c323cb7bce-config-data\") pod \"octavia-worker-gkn68\" (UID: \"09aef9f6-a8ee-4751-8a6b-64c323cb7bce\") " pod="openstack/octavia-worker-gkn68" Jan 22 07:26:07 crc kubenswrapper[4933]: I0122 07:26:07.068749 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/09aef9f6-a8ee-4751-8a6b-64c323cb7bce-hm-ports\") pod \"octavia-worker-gkn68\" (UID: \"09aef9f6-a8ee-4751-8a6b-64c323cb7bce\") " pod="openstack/octavia-worker-gkn68" Jan 22 07:26:07 crc kubenswrapper[4933]: I0122 07:26:07.068787 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/09aef9f6-a8ee-4751-8a6b-64c323cb7bce-config-data-merged\") pod \"octavia-worker-gkn68\" (UID: \"09aef9f6-a8ee-4751-8a6b-64c323cb7bce\") " pod="openstack/octavia-worker-gkn68" Jan 22 07:26:07 crc kubenswrapper[4933]: I0122 07:26:07.068825 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09aef9f6-a8ee-4751-8a6b-64c323cb7bce-combined-ca-bundle\") pod \"octavia-worker-gkn68\" (UID: \"09aef9f6-a8ee-4751-8a6b-64c323cb7bce\") " pod="openstack/octavia-worker-gkn68" Jan 22 07:26:07 crc kubenswrapper[4933]: I0122 07:26:07.068963 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-certs\" (UniqueName: 
\"kubernetes.io/secret/09aef9f6-a8ee-4751-8a6b-64c323cb7bce-amphora-certs\") pod \"octavia-worker-gkn68\" (UID: \"09aef9f6-a8ee-4751-8a6b-64c323cb7bce\") " pod="openstack/octavia-worker-gkn68" Jan 22 07:26:07 crc kubenswrapper[4933]: I0122 07:26:07.070123 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/09aef9f6-a8ee-4751-8a6b-64c323cb7bce-config-data-merged\") pod \"octavia-worker-gkn68\" (UID: \"09aef9f6-a8ee-4751-8a6b-64c323cb7bce\") " pod="openstack/octavia-worker-gkn68" Jan 22 07:26:07 crc kubenswrapper[4933]: I0122 07:26:07.070473 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/09aef9f6-a8ee-4751-8a6b-64c323cb7bce-hm-ports\") pod \"octavia-worker-gkn68\" (UID: \"09aef9f6-a8ee-4751-8a6b-64c323cb7bce\") " pod="openstack/octavia-worker-gkn68" Jan 22 07:26:07 crc kubenswrapper[4933]: I0122 07:26:07.075042 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/09aef9f6-a8ee-4751-8a6b-64c323cb7bce-amphora-certs\") pod \"octavia-worker-gkn68\" (UID: \"09aef9f6-a8ee-4751-8a6b-64c323cb7bce\") " pod="openstack/octavia-worker-gkn68" Jan 22 07:26:07 crc kubenswrapper[4933]: I0122 07:26:07.075314 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09aef9f6-a8ee-4751-8a6b-64c323cb7bce-config-data\") pod \"octavia-worker-gkn68\" (UID: \"09aef9f6-a8ee-4751-8a6b-64c323cb7bce\") " pod="openstack/octavia-worker-gkn68" Jan 22 07:26:07 crc kubenswrapper[4933]: I0122 07:26:07.075985 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09aef9f6-a8ee-4751-8a6b-64c323cb7bce-scripts\") pod \"octavia-worker-gkn68\" (UID: \"09aef9f6-a8ee-4751-8a6b-64c323cb7bce\") " pod="openstack/octavia-worker-gkn68" Jan 22 07:26:07 crc kubenswrapper[4933]: I0122 07:26:07.076983 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09aef9f6-a8ee-4751-8a6b-64c323cb7bce-combined-ca-bundle\") pod \"octavia-worker-gkn68\" (UID: \"09aef9f6-a8ee-4751-8a6b-64c323cb7bce\") " pod="openstack/octavia-worker-gkn68" Jan 22 07:26:07 crc kubenswrapper[4933]: I0122 07:26:07.105432 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-worker-gkn68" Jan 22 07:26:10 crc kubenswrapper[4933]: I0122 07:26:10.263031 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-db-sync-kws6f" Jan 22 07:26:10 crc kubenswrapper[4933]: I0122 07:26:10.336044 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a01cbd8-9d77-42b9-9848-bc2329258052-config-data\") pod \"6a01cbd8-9d77-42b9-9848-bc2329258052\" (UID: \"6a01cbd8-9d77-42b9-9848-bc2329258052\") " Jan 22 07:26:10 crc kubenswrapper[4933]: I0122 07:26:10.336232 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6a01cbd8-9d77-42b9-9848-bc2329258052-scripts\") pod \"6a01cbd8-9d77-42b9-9848-bc2329258052\" (UID: \"6a01cbd8-9d77-42b9-9848-bc2329258052\") " Jan 22 07:26:10 crc kubenswrapper[4933]: I0122 07:26:10.336350 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/6a01cbd8-9d77-42b9-9848-bc2329258052-config-data-merged\") pod \"6a01cbd8-9d77-42b9-9848-bc2329258052\" (UID: \"6a01cbd8-9d77-42b9-9848-bc2329258052\") " Jan 22 07:26:10 crc kubenswrapper[4933]: I0122 07:26:10.336425 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a01cbd8-9d77-42b9-9848-bc2329258052-combined-ca-bundle\") pod \"6a01cbd8-9d77-42b9-9848-bc2329258052\" (UID: \"6a01cbd8-9d77-42b9-9848-bc2329258052\") " Jan 22 07:26:10 crc kubenswrapper[4933]: I0122 07:26:10.339825 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a01cbd8-9d77-42b9-9848-bc2329258052-scripts" (OuterVolumeSpecName: "scripts") pod "6a01cbd8-9d77-42b9-9848-bc2329258052" (UID: "6a01cbd8-9d77-42b9-9848-bc2329258052"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:26:10 crc kubenswrapper[4933]: I0122 07:26:10.340021 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a01cbd8-9d77-42b9-9848-bc2329258052-config-data" (OuterVolumeSpecName: "config-data") pod "6a01cbd8-9d77-42b9-9848-bc2329258052" (UID: "6a01cbd8-9d77-42b9-9848-bc2329258052"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:26:10 crc kubenswrapper[4933]: I0122 07:26:10.361897 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a01cbd8-9d77-42b9-9848-bc2329258052-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6a01cbd8-9d77-42b9-9848-bc2329258052" (UID: "6a01cbd8-9d77-42b9-9848-bc2329258052"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:26:10 crc kubenswrapper[4933]: I0122 07:26:10.369998 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6a01cbd8-9d77-42b9-9848-bc2329258052-config-data-merged" (OuterVolumeSpecName: "config-data-merged") pod "6a01cbd8-9d77-42b9-9848-bc2329258052" (UID: "6a01cbd8-9d77-42b9-9848-bc2329258052"). InnerVolumeSpecName "config-data-merged". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:26:10 crc kubenswrapper[4933]: I0122 07:26:10.439095 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6a01cbd8-9d77-42b9-9848-bc2329258052-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:26:10 crc kubenswrapper[4933]: I0122 07:26:10.439131 4933 reconciler_common.go:293] "Volume detached for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/6a01cbd8-9d77-42b9-9848-bc2329258052-config-data-merged\") on node \"crc\" DevicePath \"\"" Jan 22 07:26:10 crc kubenswrapper[4933]: I0122 07:26:10.439143 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a01cbd8-9d77-42b9-9848-bc2329258052-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:26:10 crc kubenswrapper[4933]: I0122 07:26:10.439153 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a01cbd8-9d77-42b9-9848-bc2329258052-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 07:26:10 crc kubenswrapper[4933]: I0122 07:26:10.912164 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-7b97d6bc64-js7m6" event={"ID":"9147baef-7574-4104-83be-e54825e1a277","Type":"ContainerStarted","Data":"a49b26f9f5551d3b9d7700bfb8622fd4477d854cde412ef6af9e2269f89ca31d"} Jan 22 07:26:10 crc kubenswrapper[4933]: I0122 07:26:10.915037 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-kws6f" event={"ID":"6a01cbd8-9d77-42b9-9848-bc2329258052","Type":"ContainerDied","Data":"8ce16a716e81b3aa65339811f07e454ac0758cc6b49c4a1b4581cbbe41148361"} Jan 22 07:26:10 crc kubenswrapper[4933]: I0122 07:26:10.915060 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8ce16a716e81b3aa65339811f07e454ac0758cc6b49c4a1b4581cbbe41148361" Jan 22 07:26:10 crc kubenswrapper[4933]: I0122 07:26:10.915163 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-db-sync-kws6f" Jan 22 07:26:10 crc kubenswrapper[4933]: I0122 07:26:10.944031 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 07:26:10 crc kubenswrapper[4933]: I0122 07:26:10.944098 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 07:26:10 crc kubenswrapper[4933]: I0122 07:26:10.944143 4933 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" Jan 22 07:26:10 crc kubenswrapper[4933]: I0122 07:26:10.944810 4933 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"16fd824342f037f0104eaf6d48b751a02eeec5a84e0c2a32f1a295811edc0336"} pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 07:26:10 crc kubenswrapper[4933]: I0122 07:26:10.944867 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" containerID="cri-o://16fd824342f037f0104eaf6d48b751a02eeec5a84e0c2a32f1a295811edc0336" gracePeriod=600 Jan 22 07:26:11 crc kubenswrapper[4933]: E0122 07:26:11.070105 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.076531 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-housekeeping-27ltq"] Jan 22 07:26:11 crc kubenswrapper[4933]: W0122 07:26:11.177160 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod09aef9f6_a8ee_4751_8a6b_64c323cb7bce.slice/crio-72f939523bc25f455bb1cbb64e6996b7956abe82f338ea775b9257f8bbcacb83 WatchSource:0}: Error finding container 72f939523bc25f455bb1cbb64e6996b7956abe82f338ea775b9257f8bbcacb83: Status 404 returned error can't find the container with id 72f939523bc25f455bb1cbb64e6996b7956abe82f338ea775b9257f8bbcacb83 Jan 22 07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.181391 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-worker-gkn68"] Jan 22 07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.535059 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-api-68d465b9c5-6lv2f"] Jan 22 07:26:11 crc kubenswrapper[4933]: E0122 07:26:11.535460 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a01cbd8-9d77-42b9-9848-bc2329258052" containerName="init" Jan 
22 07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.535471 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a01cbd8-9d77-42b9-9848-bc2329258052" containerName="init" Jan 22 07:26:11 crc kubenswrapper[4933]: E0122 07:26:11.535510 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a01cbd8-9d77-42b9-9848-bc2329258052" containerName="octavia-db-sync" Jan 22 07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.535516 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a01cbd8-9d77-42b9-9848-bc2329258052" containerName="octavia-db-sync" Jan 22 07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.535676 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a01cbd8-9d77-42b9-9848-bc2329258052" containerName="octavia-db-sync" Jan 22 07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.536970 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-api-68d465b9c5-6lv2f" Jan 22 07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.538883 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-octavia-internal-svc" Jan 22 07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.540704 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-octavia-public-svc" Jan 22 07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.560385 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/67ea81f4-bc8e-4883-9053-410468b0f4f6-scripts\") pod \"octavia-api-68d465b9c5-6lv2f\" (UID: \"67ea81f4-bc8e-4883-9053-410468b0f4f6\") " pod="openstack/octavia-api-68d465b9c5-6lv2f" Jan 22 07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.560589 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/67ea81f4-bc8e-4883-9053-410468b0f4f6-octavia-run\") pod \"octavia-api-68d465b9c5-6lv2f\" (UID: \"67ea81f4-bc8e-4883-9053-410468b0f4f6\") " pod="openstack/octavia-api-68d465b9c5-6lv2f" Jan 22 07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.560673 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67ea81f4-bc8e-4883-9053-410468b0f4f6-config-data\") pod \"octavia-api-68d465b9c5-6lv2f\" (UID: \"67ea81f4-bc8e-4883-9053-410468b0f4f6\") " pod="openstack/octavia-api-68d465b9c5-6lv2f" Jan 22 07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.560698 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/67ea81f4-bc8e-4883-9053-410468b0f4f6-config-data-merged\") pod \"octavia-api-68d465b9c5-6lv2f\" (UID: \"67ea81f4-bc8e-4883-9053-410468b0f4f6\") " pod="openstack/octavia-api-68d465b9c5-6lv2f" Jan 22 07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.560725 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/67ea81f4-bc8e-4883-9053-410468b0f4f6-ovndb-tls-certs\") pod \"octavia-api-68d465b9c5-6lv2f\" (UID: \"67ea81f4-bc8e-4883-9053-410468b0f4f6\") " pod="openstack/octavia-api-68d465b9c5-6lv2f" Jan 22 07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.560946 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/67ea81f4-bc8e-4883-9053-410468b0f4f6-combined-ca-bundle\") pod \"octavia-api-68d465b9c5-6lv2f\" (UID: \"67ea81f4-bc8e-4883-9053-410468b0f4f6\") " pod="openstack/octavia-api-68d465b9c5-6lv2f" Jan 22 07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.561037 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/67ea81f4-bc8e-4883-9053-410468b0f4f6-public-tls-certs\") pod \"octavia-api-68d465b9c5-6lv2f\" (UID: \"67ea81f4-bc8e-4883-9053-410468b0f4f6\") " pod="openstack/octavia-api-68d465b9c5-6lv2f" Jan 22 07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.561069 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/67ea81f4-bc8e-4883-9053-410468b0f4f6-internal-tls-certs\") pod \"octavia-api-68d465b9c5-6lv2f\" (UID: \"67ea81f4-bc8e-4883-9053-410468b0f4f6\") " pod="openstack/octavia-api-68d465b9c5-6lv2f" Jan 22 07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.608102 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-api-68d465b9c5-6lv2f"] Jan 22 07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.662465 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67ea81f4-bc8e-4883-9053-410468b0f4f6-config-data\") pod \"octavia-api-68d465b9c5-6lv2f\" (UID: \"67ea81f4-bc8e-4883-9053-410468b0f4f6\") " pod="openstack/octavia-api-68d465b9c5-6lv2f" Jan 22 07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.662521 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/67ea81f4-bc8e-4883-9053-410468b0f4f6-config-data-merged\") pod \"octavia-api-68d465b9c5-6lv2f\" (UID: \"67ea81f4-bc8e-4883-9053-410468b0f4f6\") " pod="openstack/octavia-api-68d465b9c5-6lv2f" Jan 22 07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.662552 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/67ea81f4-bc8e-4883-9053-410468b0f4f6-ovndb-tls-certs\") pod \"octavia-api-68d465b9c5-6lv2f\" (UID: \"67ea81f4-bc8e-4883-9053-410468b0f4f6\") " pod="openstack/octavia-api-68d465b9c5-6lv2f" Jan 22 07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.662833 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67ea81f4-bc8e-4883-9053-410468b0f4f6-combined-ca-bundle\") pod \"octavia-api-68d465b9c5-6lv2f\" (UID: \"67ea81f4-bc8e-4883-9053-410468b0f4f6\") " pod="openstack/octavia-api-68d465b9c5-6lv2f" Jan 22 07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.662898 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/67ea81f4-bc8e-4883-9053-410468b0f4f6-public-tls-certs\") pod \"octavia-api-68d465b9c5-6lv2f\" (UID: \"67ea81f4-bc8e-4883-9053-410468b0f4f6\") " pod="openstack/octavia-api-68d465b9c5-6lv2f" Jan 22 07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.662920 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/67ea81f4-bc8e-4883-9053-410468b0f4f6-internal-tls-certs\") pod \"octavia-api-68d465b9c5-6lv2f\" (UID: \"67ea81f4-bc8e-4883-9053-410468b0f4f6\") " pod="openstack/octavia-api-68d465b9c5-6lv2f" Jan 22 
07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.662999 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/67ea81f4-bc8e-4883-9053-410468b0f4f6-scripts\") pod \"octavia-api-68d465b9c5-6lv2f\" (UID: \"67ea81f4-bc8e-4883-9053-410468b0f4f6\") " pod="openstack/octavia-api-68d465b9c5-6lv2f" Jan 22 07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.663021 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/67ea81f4-bc8e-4883-9053-410468b0f4f6-octavia-run\") pod \"octavia-api-68d465b9c5-6lv2f\" (UID: \"67ea81f4-bc8e-4883-9053-410468b0f4f6\") " pod="openstack/octavia-api-68d465b9c5-6lv2f" Jan 22 07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.663541 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/67ea81f4-bc8e-4883-9053-410468b0f4f6-octavia-run\") pod \"octavia-api-68d465b9c5-6lv2f\" (UID: \"67ea81f4-bc8e-4883-9053-410468b0f4f6\") " pod="openstack/octavia-api-68d465b9c5-6lv2f" Jan 22 07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.663730 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/67ea81f4-bc8e-4883-9053-410468b0f4f6-config-data-merged\") pod \"octavia-api-68d465b9c5-6lv2f\" (UID: \"67ea81f4-bc8e-4883-9053-410468b0f4f6\") " pod="openstack/octavia-api-68d465b9c5-6lv2f" Jan 22 07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.670054 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/67ea81f4-bc8e-4883-9053-410468b0f4f6-scripts\") pod \"octavia-api-68d465b9c5-6lv2f\" (UID: \"67ea81f4-bc8e-4883-9053-410468b0f4f6\") " pod="openstack/octavia-api-68d465b9c5-6lv2f" Jan 22 07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.670107 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/67ea81f4-bc8e-4883-9053-410468b0f4f6-public-tls-certs\") pod \"octavia-api-68d465b9c5-6lv2f\" (UID: \"67ea81f4-bc8e-4883-9053-410468b0f4f6\") " pod="openstack/octavia-api-68d465b9c5-6lv2f" Jan 22 07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.672723 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/67ea81f4-bc8e-4883-9053-410468b0f4f6-internal-tls-certs\") pod \"octavia-api-68d465b9c5-6lv2f\" (UID: \"67ea81f4-bc8e-4883-9053-410468b0f4f6\") " pod="openstack/octavia-api-68d465b9c5-6lv2f" Jan 22 07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.674817 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67ea81f4-bc8e-4883-9053-410468b0f4f6-config-data\") pod \"octavia-api-68d465b9c5-6lv2f\" (UID: \"67ea81f4-bc8e-4883-9053-410468b0f4f6\") " pod="openstack/octavia-api-68d465b9c5-6lv2f" Jan 22 07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.675693 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67ea81f4-bc8e-4883-9053-410468b0f4f6-combined-ca-bundle\") pod \"octavia-api-68d465b9c5-6lv2f\" (UID: \"67ea81f4-bc8e-4883-9053-410468b0f4f6\") " pod="openstack/octavia-api-68d465b9c5-6lv2f" Jan 22 07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.677750 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/67ea81f4-bc8e-4883-9053-410468b0f4f6-ovndb-tls-certs\") pod \"octavia-api-68d465b9c5-6lv2f\" (UID: \"67ea81f4-bc8e-4883-9053-410468b0f4f6\") " pod="openstack/octavia-api-68d465b9c5-6lv2f" Jan 22 07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.853378 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-api-68d465b9c5-6lv2f" Jan 22 07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.942517 4933 generic.go:334] "Generic (PLEG): container finished" podID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerID="16fd824342f037f0104eaf6d48b751a02eeec5a84e0c2a32f1a295811edc0336" exitCode=0 Jan 22 07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.942574 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerDied","Data":"16fd824342f037f0104eaf6d48b751a02eeec5a84e0c2a32f1a295811edc0336"} Jan 22 07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.942695 4933 scope.go:117] "RemoveContainer" containerID="df77120a276e772bee7dad3e476bcf8749c071d28246be8da71e670976f60157" Jan 22 07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.943903 4933 scope.go:117] "RemoveContainer" containerID="16fd824342f037f0104eaf6d48b751a02eeec5a84e0c2a32f1a295811edc0336" Jan 22 07:26:11 crc kubenswrapper[4933]: E0122 07:26:11.945155 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.957146 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-healthmanager-sd9kx" event={"ID":"12794d9c-3ddc-4003-a7fa-d8aad66d74ac","Type":"ContainerStarted","Data":"5d9d9a512217d336a650499d4da2cdc72ddf6dee767eb739b6a4886773513a6c"} Jan 22 07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.963197 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-healthmanager-sd9kx" Jan 22 07:26:11 crc kubenswrapper[4933]: I0122 07:26:11.976821 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-worker-gkn68" event={"ID":"09aef9f6-a8ee-4751-8a6b-64c323cb7bce","Type":"ContainerStarted","Data":"72f939523bc25f455bb1cbb64e6996b7956abe82f338ea775b9257f8bbcacb83"} Jan 22 07:26:12 crc kubenswrapper[4933]: I0122 07:26:12.007816 4933 generic.go:334] "Generic (PLEG): container finished" podID="9147baef-7574-4104-83be-e54825e1a277" containerID="a49b26f9f5551d3b9d7700bfb8622fd4477d854cde412ef6af9e2269f89ca31d" exitCode=0 Jan 22 07:26:12 crc kubenswrapper[4933]: I0122 07:26:12.008245 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-7b97d6bc64-js7m6" event={"ID":"9147baef-7574-4104-83be-e54825e1a277","Type":"ContainerDied","Data":"a49b26f9f5551d3b9d7700bfb8622fd4477d854cde412ef6af9e2269f89ca31d"} Jan 22 07:26:12 crc kubenswrapper[4933]: I0122 07:26:12.009617 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-housekeeping-27ltq" 
event={"ID":"2d46e8ea-1174-431e-8fa3-8ffc44be7919","Type":"ContainerStarted","Data":"4d3e9b7c551945674b35cee6d8c213f45b1add007c7de555d9526a6bd32e1531"} Jan 22 07:26:12 crc kubenswrapper[4933]: I0122 07:26:12.032508 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-healthmanager-sd9kx" podStartSLOduration=10.032434377 podStartE2EDuration="10.032434377s" podCreationTimestamp="2026-01-22 07:26:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:26:12.006165278 +0000 UTC m=+6019.843290651" watchObservedRunningTime="2026-01-22 07:26:12.032434377 +0000 UTC m=+6019.869559740" Jan 22 07:26:12 crc kubenswrapper[4933]: I0122 07:26:12.103468 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-rsyslog-bpbrn" Jan 22 07:26:12 crc kubenswrapper[4933]: I0122 07:26:12.395256 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-api-68d465b9c5-6lv2f"] Jan 22 07:26:12 crc kubenswrapper[4933]: W0122 07:26:12.426270 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod67ea81f4_bc8e_4883_9053_410468b0f4f6.slice/crio-c521df3eabbc62ee77b2220f56deac3bdde3a61114c7305429fff6150e6a56d6 WatchSource:0}: Error finding container c521df3eabbc62ee77b2220f56deac3bdde3a61114c7305429fff6150e6a56d6: Status 404 returned error can't find the container with id c521df3eabbc62ee77b2220f56deac3bdde3a61114c7305429fff6150e6a56d6 Jan 22 07:26:13 crc kubenswrapper[4933]: I0122 07:26:13.031460 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-7b97d6bc64-js7m6" event={"ID":"9147baef-7574-4104-83be-e54825e1a277","Type":"ContainerStarted","Data":"d3434b379477d2edbb3ef250c8a4fbf25adb801438c1fbd4d6f98de826ca0bce"} Jan 22 07:26:13 crc kubenswrapper[4933]: I0122 07:26:13.034238 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-68d465b9c5-6lv2f" event={"ID":"67ea81f4-bc8e-4883-9053-410468b0f4f6","Type":"ContainerStarted","Data":"1428d5256c45e1740c709bfb66fc57e081b93e70f300aefc87d3f525e5add8e0"} Jan 22 07:26:13 crc kubenswrapper[4933]: I0122 07:26:13.034307 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-68d465b9c5-6lv2f" event={"ID":"67ea81f4-bc8e-4883-9053-410468b0f4f6","Type":"ContainerStarted","Data":"c521df3eabbc62ee77b2220f56deac3bdde3a61114c7305429fff6150e6a56d6"} Jan 22 07:26:13 crc kubenswrapper[4933]: I0122 07:26:13.058145 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-image-upload-7b97d6bc64-js7m6" podStartSLOduration=3.514889419 podStartE2EDuration="16.058121293s" podCreationTimestamp="2026-01-22 07:25:57 +0000 UTC" firstStartedPulling="2026-01-22 07:25:58.105670932 +0000 UTC m=+6005.942796285" lastFinishedPulling="2026-01-22 07:26:10.648902806 +0000 UTC m=+6018.486028159" observedRunningTime="2026-01-22 07:26:13.050569288 +0000 UTC m=+6020.887694641" watchObservedRunningTime="2026-01-22 07:26:13.058121293 +0000 UTC m=+6020.895246666" Jan 22 07:26:17 crc kubenswrapper[4933]: I0122 07:26:17.412820 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-healthmanager-sd9kx" Jan 22 07:26:18 crc kubenswrapper[4933]: I0122 07:26:18.086870 4933 generic.go:334] "Generic (PLEG): container finished" podID="67ea81f4-bc8e-4883-9053-410468b0f4f6" 
containerID="1428d5256c45e1740c709bfb66fc57e081b93e70f300aefc87d3f525e5add8e0" exitCode=0 Jan 22 07:26:18 crc kubenswrapper[4933]: I0122 07:26:18.086981 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-68d465b9c5-6lv2f" event={"ID":"67ea81f4-bc8e-4883-9053-410468b0f4f6","Type":"ContainerDied","Data":"1428d5256c45e1740c709bfb66fc57e081b93e70f300aefc87d3f525e5add8e0"} Jan 22 07:26:19 crc kubenswrapper[4933]: I0122 07:26:19.099706 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-68d465b9c5-6lv2f" event={"ID":"67ea81f4-bc8e-4883-9053-410468b0f4f6","Type":"ContainerStarted","Data":"f4e0bfe83290dc5da12b844d580d2d54e65136ca8e839594b3d1393801509546"} Jan 22 07:26:19 crc kubenswrapper[4933]: I0122 07:26:19.100160 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-68d465b9c5-6lv2f" event={"ID":"67ea81f4-bc8e-4883-9053-410468b0f4f6","Type":"ContainerStarted","Data":"b77950d15132a9104c277cafb3ad4d849c0f62579a63807331a8c2b9388ff6b2"} Jan 22 07:26:19 crc kubenswrapper[4933]: I0122 07:26:19.100380 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-api-68d465b9c5-6lv2f" Jan 22 07:26:19 crc kubenswrapper[4933]: I0122 07:26:19.100402 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-api-68d465b9c5-6lv2f" Jan 22 07:26:19 crc kubenswrapper[4933]: I0122 07:26:19.101916 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-worker-gkn68" event={"ID":"09aef9f6-a8ee-4751-8a6b-64c323cb7bce","Type":"ContainerStarted","Data":"63d7344d71ae9ffcd018ceea72ae8f97eb33c903d0be730c5f204b2ef1c8a12d"} Jan 22 07:26:19 crc kubenswrapper[4933]: I0122 07:26:19.105662 4933 generic.go:334] "Generic (PLEG): container finished" podID="2d46e8ea-1174-431e-8fa3-8ffc44be7919" containerID="cc41ba4baf68f57a2a84774619a0da69f66b52a13c8796bd3b6094c12c0ec8ef" exitCode=0 Jan 22 07:26:19 crc kubenswrapper[4933]: I0122 07:26:19.105710 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-housekeeping-27ltq" event={"ID":"2d46e8ea-1174-431e-8fa3-8ffc44be7919","Type":"ContainerDied","Data":"cc41ba4baf68f57a2a84774619a0da69f66b52a13c8796bd3b6094c12c0ec8ef"} Jan 22 07:26:19 crc kubenswrapper[4933]: I0122 07:26:19.143357 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-api-68d465b9c5-6lv2f" podStartSLOduration=8.143334195 podStartE2EDuration="8.143334195s" podCreationTimestamp="2026-01-22 07:26:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:26:19.14231698 +0000 UTC m=+6026.979442333" watchObservedRunningTime="2026-01-22 07:26:19.143334195 +0000 UTC m=+6026.980459558" Jan 22 07:26:20 crc kubenswrapper[4933]: I0122 07:26:20.117298 4933 generic.go:334] "Generic (PLEG): container finished" podID="09aef9f6-a8ee-4751-8a6b-64c323cb7bce" containerID="63d7344d71ae9ffcd018ceea72ae8f97eb33c903d0be730c5f204b2ef1c8a12d" exitCode=0 Jan 22 07:26:20 crc kubenswrapper[4933]: I0122 07:26:20.117352 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-worker-gkn68" event={"ID":"09aef9f6-a8ee-4751-8a6b-64c323cb7bce","Type":"ContainerDied","Data":"63d7344d71ae9ffcd018ceea72ae8f97eb33c903d0be730c5f204b2ef1c8a12d"} Jan 22 07:26:20 crc kubenswrapper[4933]: I0122 07:26:20.117721 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-worker-gkn68" 
event={"ID":"09aef9f6-a8ee-4751-8a6b-64c323cb7bce","Type":"ContainerStarted","Data":"5c70101f6eb4d4ca854f95383a8ad2d9be9c0eaca7c9059063b27a8cdd4b1464"} Jan 22 07:26:20 crc kubenswrapper[4933]: I0122 07:26:20.117750 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-worker-gkn68" Jan 22 07:26:20 crc kubenswrapper[4933]: I0122 07:26:20.120482 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-housekeeping-27ltq" event={"ID":"2d46e8ea-1174-431e-8fa3-8ffc44be7919","Type":"ContainerStarted","Data":"f68ce387a5c0fe6080f3bcf1e8aa17745452760734f2cd16c726118805936214"} Jan 22 07:26:20 crc kubenswrapper[4933]: I0122 07:26:20.148199 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-worker-gkn68" podStartSLOduration=7.624416321 podStartE2EDuration="14.148181623s" podCreationTimestamp="2026-01-22 07:26:06 +0000 UTC" firstStartedPulling="2026-01-22 07:26:11.179067996 +0000 UTC m=+6019.016193349" lastFinishedPulling="2026-01-22 07:26:17.702833298 +0000 UTC m=+6025.539958651" observedRunningTime="2026-01-22 07:26:20.140633249 +0000 UTC m=+6027.977758622" watchObservedRunningTime="2026-01-22 07:26:20.148181623 +0000 UTC m=+6027.985306976" Jan 22 07:26:20 crc kubenswrapper[4933]: I0122 07:26:20.177376 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-housekeeping-27ltq" podStartSLOduration=9.569979236 podStartE2EDuration="16.177352352s" podCreationTimestamp="2026-01-22 07:26:04 +0000 UTC" firstStartedPulling="2026-01-22 07:26:11.091133756 +0000 UTC m=+6018.928259109" lastFinishedPulling="2026-01-22 07:26:17.698506882 +0000 UTC m=+6025.535632225" observedRunningTime="2026-01-22 07:26:20.170713331 +0000 UTC m=+6028.007838694" watchObservedRunningTime="2026-01-22 07:26:20.177352352 +0000 UTC m=+6028.014477705" Jan 22 07:26:21 crc kubenswrapper[4933]: I0122 07:26:21.128991 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-housekeeping-27ltq" Jan 22 07:26:23 crc kubenswrapper[4933]: I0122 07:26:23.491615 4933 scope.go:117] "RemoveContainer" containerID="16fd824342f037f0104eaf6d48b751a02eeec5a84e0c2a32f1a295811edc0336" Jan 22 07:26:23 crc kubenswrapper[4933]: E0122 07:26:23.492047 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:26:30 crc kubenswrapper[4933]: I0122 07:26:30.923522 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-api-68d465b9c5-6lv2f" Jan 22 07:26:31 crc kubenswrapper[4933]: I0122 07:26:31.186565 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-api-68d465b9c5-6lv2f" Jan 22 07:26:31 crc kubenswrapper[4933]: I0122 07:26:31.252973 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-api-d6c5fd9f-kftw2"] Jan 22 07:26:31 crc kubenswrapper[4933]: I0122 07:26:31.253310 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/octavia-api-d6c5fd9f-kftw2" podUID="f10a3846-d1b2-4d43-bd53-b005c791e9c1" containerName="octavia-api" 
containerID="cri-o://c6725f03766c525d9a1a757c71b3ffcda7688f696f77c8cc794e879f820b7d13" gracePeriod=30 Jan 22 07:26:31 crc kubenswrapper[4933]: I0122 07:26:31.253413 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/octavia-api-d6c5fd9f-kftw2" podUID="f10a3846-d1b2-4d43-bd53-b005c791e9c1" containerName="octavia-api-provider-agent" containerID="cri-o://5b4cf48304839e1ebf882c53ca8142875f24359ea8380ddefd58cd54995ae44b" gracePeriod=30 Jan 22 07:26:32 crc kubenswrapper[4933]: I0122 07:26:32.232737 4933 generic.go:334] "Generic (PLEG): container finished" podID="f10a3846-d1b2-4d43-bd53-b005c791e9c1" containerID="5b4cf48304839e1ebf882c53ca8142875f24359ea8380ddefd58cd54995ae44b" exitCode=0 Jan 22 07:26:32 crc kubenswrapper[4933]: I0122 07:26:32.232823 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-d6c5fd9f-kftw2" event={"ID":"f10a3846-d1b2-4d43-bd53-b005c791e9c1","Type":"ContainerDied","Data":"5b4cf48304839e1ebf882c53ca8142875f24359ea8380ddefd58cd54995ae44b"} Jan 22 07:26:34 crc kubenswrapper[4933]: I0122 07:26:34.505685 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-housekeeping-27ltq" Jan 22 07:26:34 crc kubenswrapper[4933]: I0122 07:26:34.945894 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-api-d6c5fd9f-kftw2" Jan 22 07:26:35 crc kubenswrapper[4933]: I0122 07:26:35.056888 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f10a3846-d1b2-4d43-bd53-b005c791e9c1-ovndb-tls-certs\") pod \"f10a3846-d1b2-4d43-bd53-b005c791e9c1\" (UID: \"f10a3846-d1b2-4d43-bd53-b005c791e9c1\") " Jan 22 07:26:35 crc kubenswrapper[4933]: I0122 07:26:35.056952 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f10a3846-d1b2-4d43-bd53-b005c791e9c1-config-data\") pod \"f10a3846-d1b2-4d43-bd53-b005c791e9c1\" (UID: \"f10a3846-d1b2-4d43-bd53-b005c791e9c1\") " Jan 22 07:26:35 crc kubenswrapper[4933]: I0122 07:26:35.057054 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/f10a3846-d1b2-4d43-bd53-b005c791e9c1-octavia-run\") pod \"f10a3846-d1b2-4d43-bd53-b005c791e9c1\" (UID: \"f10a3846-d1b2-4d43-bd53-b005c791e9c1\") " Jan 22 07:26:35 crc kubenswrapper[4933]: I0122 07:26:35.057135 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/f10a3846-d1b2-4d43-bd53-b005c791e9c1-config-data-merged\") pod \"f10a3846-d1b2-4d43-bd53-b005c791e9c1\" (UID: \"f10a3846-d1b2-4d43-bd53-b005c791e9c1\") " Jan 22 07:26:35 crc kubenswrapper[4933]: I0122 07:26:35.057215 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f10a3846-d1b2-4d43-bd53-b005c791e9c1-combined-ca-bundle\") pod \"f10a3846-d1b2-4d43-bd53-b005c791e9c1\" (UID: \"f10a3846-d1b2-4d43-bd53-b005c791e9c1\") " Jan 22 07:26:35 crc kubenswrapper[4933]: I0122 07:26:35.057265 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f10a3846-d1b2-4d43-bd53-b005c791e9c1-scripts\") pod \"f10a3846-d1b2-4d43-bd53-b005c791e9c1\" (UID: \"f10a3846-d1b2-4d43-bd53-b005c791e9c1\") " Jan 22 07:26:35 crc kubenswrapper[4933]: 
Jan 22 07:26:35 crc kubenswrapper[4933]: I0122 07:26:35.057652 4933 reconciler_common.go:293] "Volume detached for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/f10a3846-d1b2-4d43-bd53-b005c791e9c1-octavia-run\") on node \"crc\" DevicePath \"\""
Jan 22 07:26:35 crc kubenswrapper[4933]: I0122 07:26:35.065276 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f10a3846-d1b2-4d43-bd53-b005c791e9c1-scripts" (OuterVolumeSpecName: "scripts") pod "f10a3846-d1b2-4d43-bd53-b005c791e9c1" (UID: "f10a3846-d1b2-4d43-bd53-b005c791e9c1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 07:26:35 crc kubenswrapper[4933]: I0122 07:26:35.082153 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f10a3846-d1b2-4d43-bd53-b005c791e9c1-config-data" (OuterVolumeSpecName: "config-data") pod "f10a3846-d1b2-4d43-bd53-b005c791e9c1" (UID: "f10a3846-d1b2-4d43-bd53-b005c791e9c1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 07:26:35 crc kubenswrapper[4933]: I0122 07:26:35.113674 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f10a3846-d1b2-4d43-bd53-b005c791e9c1-config-data-merged" (OuterVolumeSpecName: "config-data-merged") pod "f10a3846-d1b2-4d43-bd53-b005c791e9c1" (UID: "f10a3846-d1b2-4d43-bd53-b005c791e9c1"). InnerVolumeSpecName "config-data-merged". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 07:26:35 crc kubenswrapper[4933]: I0122 07:26:35.129000 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f10a3846-d1b2-4d43-bd53-b005c791e9c1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f10a3846-d1b2-4d43-bd53-b005c791e9c1" (UID: "f10a3846-d1b2-4d43-bd53-b005c791e9c1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 07:26:35 crc kubenswrapper[4933]: I0122 07:26:35.159799 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f10a3846-d1b2-4d43-bd53-b005c791e9c1-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 22 07:26:35 crc kubenswrapper[4933]: I0122 07:26:35.159833 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f10a3846-d1b2-4d43-bd53-b005c791e9c1-scripts\") on node \"crc\" DevicePath \"\""
Jan 22 07:26:35 crc kubenswrapper[4933]: I0122 07:26:35.159845 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f10a3846-d1b2-4d43-bd53-b005c791e9c1-config-data\") on node \"crc\" DevicePath \"\""
Jan 22 07:26:35 crc kubenswrapper[4933]: I0122 07:26:35.159855 4933 reconciler_common.go:293] "Volume detached for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/f10a3846-d1b2-4d43-bd53-b005c791e9c1-config-data-merged\") on node \"crc\" DevicePath \"\""
Jan 22 07:26:35 crc kubenswrapper[4933]: I0122 07:26:35.215889 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f10a3846-d1b2-4d43-bd53-b005c791e9c1-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "f10a3846-d1b2-4d43-bd53-b005c791e9c1" (UID: "f10a3846-d1b2-4d43-bd53-b005c791e9c1"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 07:26:35 crc kubenswrapper[4933]: I0122 07:26:35.263585 4933 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f10a3846-d1b2-4d43-bd53-b005c791e9c1-ovndb-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 22 07:26:35 crc kubenswrapper[4933]: I0122 07:26:35.279432 4933 generic.go:334] "Generic (PLEG): container finished" podID="f10a3846-d1b2-4d43-bd53-b005c791e9c1" containerID="c6725f03766c525d9a1a757c71b3ffcda7688f696f77c8cc794e879f820b7d13" exitCode=0
Jan 22 07:26:35 crc kubenswrapper[4933]: I0122 07:26:35.279674 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-d6c5fd9f-kftw2" event={"ID":"f10a3846-d1b2-4d43-bd53-b005c791e9c1","Type":"ContainerDied","Data":"c6725f03766c525d9a1a757c71b3ffcda7688f696f77c8cc794e879f820b7d13"}
Jan 22 07:26:35 crc kubenswrapper[4933]: I0122 07:26:35.279759 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-d6c5fd9f-kftw2" event={"ID":"f10a3846-d1b2-4d43-bd53-b005c791e9c1","Type":"ContainerDied","Data":"e03f579a788db902bd3d81f05243aded0ba5e9a2068deaf4680701533f820780"}
Jan 22 07:26:35 crc kubenswrapper[4933]: I0122 07:26:35.279840 4933 scope.go:117] "RemoveContainer" containerID="5b4cf48304839e1ebf882c53ca8142875f24359ea8380ddefd58cd54995ae44b"
Jan 22 07:26:35 crc kubenswrapper[4933]: I0122 07:26:35.280019 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-api-d6c5fd9f-kftw2"
Jan 22 07:26:35 crc kubenswrapper[4933]: I0122 07:26:35.325240 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-api-d6c5fd9f-kftw2"]
Jan 22 07:26:35 crc kubenswrapper[4933]: I0122 07:26:35.333320 4933 scope.go:117] "RemoveContainer" containerID="c6725f03766c525d9a1a757c71b3ffcda7688f696f77c8cc794e879f820b7d13"
Jan 22 07:26:35 crc kubenswrapper[4933]: I0122 07:26:35.335610 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-api-d6c5fd9f-kftw2"]
Jan 22 07:26:35 crc kubenswrapper[4933]: I0122 07:26:35.366924 4933 scope.go:117] "RemoveContainer" containerID="a61a7fbe224fd798ac818dde3fcf7034cff453fb9e3830e32647ea5ea480bcbb"
Jan 22 07:26:35 crc kubenswrapper[4933]: I0122 07:26:35.385427 4933 scope.go:117] "RemoveContainer" containerID="5b4cf48304839e1ebf882c53ca8142875f24359ea8380ddefd58cd54995ae44b"
Jan 22 07:26:35 crc kubenswrapper[4933]: E0122 07:26:35.385850 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5b4cf48304839e1ebf882c53ca8142875f24359ea8380ddefd58cd54995ae44b\": container with ID starting with 5b4cf48304839e1ebf882c53ca8142875f24359ea8380ddefd58cd54995ae44b not found: ID does not exist" containerID="5b4cf48304839e1ebf882c53ca8142875f24359ea8380ddefd58cd54995ae44b"
Jan 22 07:26:35 crc kubenswrapper[4933]: I0122 07:26:35.385893 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5b4cf48304839e1ebf882c53ca8142875f24359ea8380ddefd58cd54995ae44b"} err="failed to get container status \"5b4cf48304839e1ebf882c53ca8142875f24359ea8380ddefd58cd54995ae44b\": rpc error: code = NotFound desc = could not find container \"5b4cf48304839e1ebf882c53ca8142875f24359ea8380ddefd58cd54995ae44b\": container with ID starting with 5b4cf48304839e1ebf882c53ca8142875f24359ea8380ddefd58cd54995ae44b not found: ID does not exist"
Jan 22 07:26:35 crc kubenswrapper[4933]: I0122 07:26:35.385919 4933 scope.go:117] "RemoveContainer" containerID="c6725f03766c525d9a1a757c71b3ffcda7688f696f77c8cc794e879f820b7d13"
Jan 22 07:26:35 crc kubenswrapper[4933]: E0122 07:26:35.386190 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c6725f03766c525d9a1a757c71b3ffcda7688f696f77c8cc794e879f820b7d13\": container with ID starting with c6725f03766c525d9a1a757c71b3ffcda7688f696f77c8cc794e879f820b7d13 not found: ID does not exist" containerID="c6725f03766c525d9a1a757c71b3ffcda7688f696f77c8cc794e879f820b7d13"
Jan 22 07:26:35 crc kubenswrapper[4933]: I0122 07:26:35.386217 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c6725f03766c525d9a1a757c71b3ffcda7688f696f77c8cc794e879f820b7d13"} err="failed to get container status \"c6725f03766c525d9a1a757c71b3ffcda7688f696f77c8cc794e879f820b7d13\": rpc error: code = NotFound desc = could not find container \"c6725f03766c525d9a1a757c71b3ffcda7688f696f77c8cc794e879f820b7d13\": container with ID starting with c6725f03766c525d9a1a757c71b3ffcda7688f696f77c8cc794e879f820b7d13 not found: ID does not exist"
Jan 22 07:26:35 crc kubenswrapper[4933]: I0122 07:26:35.386238 4933 scope.go:117] "RemoveContainer" containerID="a61a7fbe224fd798ac818dde3fcf7034cff453fb9e3830e32647ea5ea480bcbb"
Jan 22 07:26:35 crc kubenswrapper[4933]: E0122 07:26:35.386711 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a61a7fbe224fd798ac818dde3fcf7034cff453fb9e3830e32647ea5ea480bcbb\": container with ID starting with a61a7fbe224fd798ac818dde3fcf7034cff453fb9e3830e32647ea5ea480bcbb not found: ID does not exist" containerID="a61a7fbe224fd798ac818dde3fcf7034cff453fb9e3830e32647ea5ea480bcbb"
Jan 22 07:26:35 crc kubenswrapper[4933]: I0122 07:26:35.386778 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a61a7fbe224fd798ac818dde3fcf7034cff453fb9e3830e32647ea5ea480bcbb"} err="failed to get container status \"a61a7fbe224fd798ac818dde3fcf7034cff453fb9e3830e32647ea5ea480bcbb\": rpc error: code = NotFound desc = could not find container \"a61a7fbe224fd798ac818dde3fcf7034cff453fb9e3830e32647ea5ea480bcbb\": container with ID starting with a61a7fbe224fd798ac818dde3fcf7034cff453fb9e3830e32647ea5ea480bcbb not found: ID does not exist"
Jan 22 07:26:36 crc kubenswrapper[4933]: I0122 07:26:36.500787 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f10a3846-d1b2-4d43-bd53-b005c791e9c1" path="/var/lib/kubelet/pods/f10a3846-d1b2-4d43-bd53-b005c791e9c1/volumes"
Jan 22 07:26:37 crc kubenswrapper[4933]: I0122 07:26:37.040286 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-2vclg"]
Jan 22 07:26:37 crc kubenswrapper[4933]: I0122 07:26:37.047326 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-2vclg"]
Jan 22 07:26:37 crc kubenswrapper[4933]: I0122 07:26:37.136693 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-worker-gkn68"
Jan 22 07:26:38 crc kubenswrapper[4933]: I0122 07:26:38.025285 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-3160-account-create-update-4tvjz"]
Jan 22 07:26:38 crc kubenswrapper[4933]: I0122 07:26:38.039511 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-3160-account-create-update-4tvjz"]
Jan 22 07:26:38 crc kubenswrapper[4933]: I0122 07:26:38.492097 4933 scope.go:117] "RemoveContainer" containerID="16fd824342f037f0104eaf6d48b751a02eeec5a84e0c2a32f1a295811edc0336"
Jan 22 07:26:38 crc kubenswrapper[4933]: E0122 07:26:38.492455 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 07:26:38 crc kubenswrapper[4933]: I0122 07:26:38.507407 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4ab530e0-2186-46ec-a3b7-c89cc912357b" path="/var/lib/kubelet/pods/4ab530e0-2186-46ec-a3b7-c89cc912357b/volumes"
Jan 22 07:26:38 crc kubenswrapper[4933]: I0122 07:26:38.508654 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d4ba1a83-41bb-4111-a346-25041d9476de" path="/var/lib/kubelet/pods/d4ba1a83-41bb-4111-a346-25041d9476de/volumes"
Jan 22 07:26:45 crc kubenswrapper[4933]: I0122 07:26:45.064731 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-fllwb"]
Jan 22 07:26:45 crc kubenswrapper[4933]: I0122 07:26:45.079262 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-fllwb"]
Jan 22 07:26:46 crc kubenswrapper[4933]: I0122 07:26:46.506865 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee" path="/var/lib/kubelet/pods/a8c5fbc4-e5f1-4bc8-9b5f-bd1e2929ddee/volumes"
Jan 22 07:26:51 crc kubenswrapper[4933]: I0122 07:26:51.492200 4933 scope.go:117] "RemoveContainer" containerID="16fd824342f037f0104eaf6d48b751a02eeec5a84e0c2a32f1a295811edc0336"
Jan 22 07:26:51 crc kubenswrapper[4933]: E0122 07:26:51.493060 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 07:27:04 crc kubenswrapper[4933]: I0122 07:27:04.491622 4933 scope.go:117] "RemoveContainer" containerID="16fd824342f037f0104eaf6d48b751a02eeec5a84e0c2a32f1a295811edc0336"
Jan 22 07:27:04 crc kubenswrapper[4933]: E0122 07:27:04.492678 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 07:27:05 crc kubenswrapper[4933]: I0122 07:27:05.434222 4933 scope.go:117] "RemoveContainer" containerID="c9f75adfa66bac22b89b0e8c19109fb40c58e61fe0f1daf7dbb409c4486e321b"
Jan 22 07:27:05 crc kubenswrapper[4933]: I0122 07:27:05.464155 4933 scope.go:117] "RemoveContainer" containerID="a618cb87ee04a6cba951ff64ff371edf55b1b26728fb3dda75de985ac3622e53"
Jan 22 07:27:05 crc kubenswrapper[4933]: I0122 07:27:05.522195 4933 scope.go:117] "RemoveContainer" containerID="d89d6bd1e0d49f851a61145fc1ff7a9d635d3c6276a3e8cece2277d2ccd49749"
Jan 22 07:27:10 crc kubenswrapper[4933]: I0122 07:27:10.064697 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-2dtp5"]
Jan 22 07:27:10 crc kubenswrapper[4933]: I0122 07:27:10.074265 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-2dtp5"]
Jan 22 07:27:10 crc kubenswrapper[4933]: I0122 07:27:10.501243 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="55750da1-d536-4f73-8a46-b599bdf0298c" path="/var/lib/kubelet/pods/55750da1-d536-4f73-8a46-b599bdf0298c/volumes"
Jan 22 07:27:11 crc kubenswrapper[4933]: I0122 07:27:11.034911 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-a91e-account-create-update-2nc7n"]
Jan 22 07:27:11 crc kubenswrapper[4933]: I0122 07:27:11.043954 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-a91e-account-create-update-2nc7n"]
Jan 22 07:27:12 crc kubenswrapper[4933]: I0122 07:27:12.509392 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5419c0b4-6d57-47e9-a285-c7ddf188d895" path="/var/lib/kubelet/pods/5419c0b4-6d57-47e9-a285-c7ddf188d895/volumes"
Jan 22 07:27:15 crc kubenswrapper[4933]: I0122 07:27:15.490805 4933 scope.go:117] "RemoveContainer" containerID="16fd824342f037f0104eaf6d48b751a02eeec5a84e0c2a32f1a295811edc0336"
Jan 22 07:27:15 crc kubenswrapper[4933]: E0122 07:27:15.491877 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 07:27:19 crc kubenswrapper[4933]: I0122 07:27:19.028853 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-bmwtw"]
Jan 22 07:27:19 crc kubenswrapper[4933]: I0122 07:27:19.038247 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-bmwtw"]
Jan 22 07:27:20 crc kubenswrapper[4933]: I0122 07:27:20.501740 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c47ced8e-600a-41b0-a665-338664f8c335" path="/var/lib/kubelet/pods/c47ced8e-600a-41b0-a665-338664f8c335/volumes"
Jan 22 07:27:27 crc kubenswrapper[4933]: I0122 07:27:27.492406 4933 scope.go:117] "RemoveContainer" containerID="16fd824342f037f0104eaf6d48b751a02eeec5a84e0c2a32f1a295811edc0336"
Jan 22 07:27:27 crc kubenswrapper[4933]: E0122 07:27:27.493806 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 07:27:29 crc kubenswrapper[4933]: I0122 07:27:29.802490 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-nmstate/nmstate-handler-z4cfk" podUID="95abb851-2f05-43e0-8c35-92027baf4a2c" containerName="nmstate-handler" probeResult="failure" output="command timed out"
Jan 22 07:27:39 crc kubenswrapper[4933]: I0122 07:27:39.631727 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-image-upload-7b97d6bc64-js7m6"]
Jan 22 07:27:39 crc kubenswrapper[4933]: I0122 07:27:39.632571 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/octavia-image-upload-7b97d6bc64-js7m6" podUID="9147baef-7574-4104-83be-e54825e1a277" containerName="octavia-amphora-httpd" containerID="cri-o://d3434b379477d2edbb3ef250c8a4fbf25adb801438c1fbd4d6f98de826ca0bce" gracePeriod=30
Jan 22 07:27:40 crc kubenswrapper[4933]: I0122 07:27:40.155511 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-image-upload-7b97d6bc64-js7m6"
Need to start a new one" pod="openstack/octavia-image-upload-7b97d6bc64-js7m6" Jan 22 07:27:40 crc kubenswrapper[4933]: I0122 07:27:40.274600 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/9147baef-7574-4104-83be-e54825e1a277-amphora-image\") pod \"9147baef-7574-4104-83be-e54825e1a277\" (UID: \"9147baef-7574-4104-83be-e54825e1a277\") " Jan 22 07:27:40 crc kubenswrapper[4933]: I0122 07:27:40.274867 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/9147baef-7574-4104-83be-e54825e1a277-httpd-config\") pod \"9147baef-7574-4104-83be-e54825e1a277\" (UID: \"9147baef-7574-4104-83be-e54825e1a277\") " Jan 22 07:27:40 crc kubenswrapper[4933]: I0122 07:27:40.307056 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9147baef-7574-4104-83be-e54825e1a277-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "9147baef-7574-4104-83be-e54825e1a277" (UID: "9147baef-7574-4104-83be-e54825e1a277"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:27:40 crc kubenswrapper[4933]: I0122 07:27:40.318930 4933 generic.go:334] "Generic (PLEG): container finished" podID="9147baef-7574-4104-83be-e54825e1a277" containerID="d3434b379477d2edbb3ef250c8a4fbf25adb801438c1fbd4d6f98de826ca0bce" exitCode=0 Jan 22 07:27:40 crc kubenswrapper[4933]: I0122 07:27:40.318971 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-7b97d6bc64-js7m6" event={"ID":"9147baef-7574-4104-83be-e54825e1a277","Type":"ContainerDied","Data":"d3434b379477d2edbb3ef250c8a4fbf25adb801438c1fbd4d6f98de826ca0bce"} Jan 22 07:27:40 crc kubenswrapper[4933]: I0122 07:27:40.319004 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-7b97d6bc64-js7m6" event={"ID":"9147baef-7574-4104-83be-e54825e1a277","Type":"ContainerDied","Data":"f9d2720a51b2c0f4ad7badb55d9d02248de785fb12ced532266b1132d4c3ccce"} Jan 22 07:27:40 crc kubenswrapper[4933]: I0122 07:27:40.319022 4933 scope.go:117] "RemoveContainer" containerID="d3434b379477d2edbb3ef250c8a4fbf25adb801438c1fbd4d6f98de826ca0bce" Jan 22 07:27:40 crc kubenswrapper[4933]: I0122 07:27:40.319384 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-image-upload-7b97d6bc64-js7m6" Jan 22 07:27:40 crc kubenswrapper[4933]: I0122 07:27:40.364782 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9147baef-7574-4104-83be-e54825e1a277-amphora-image" (OuterVolumeSpecName: "amphora-image") pod "9147baef-7574-4104-83be-e54825e1a277" (UID: "9147baef-7574-4104-83be-e54825e1a277"). InnerVolumeSpecName "amphora-image". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:27:40 crc kubenswrapper[4933]: I0122 07:27:40.377347 4933 reconciler_common.go:293] "Volume detached for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/9147baef-7574-4104-83be-e54825e1a277-amphora-image\") on node \"crc\" DevicePath \"\"" Jan 22 07:27:40 crc kubenswrapper[4933]: I0122 07:27:40.377388 4933 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/9147baef-7574-4104-83be-e54825e1a277-httpd-config\") on node \"crc\" DevicePath \"\"" Jan 22 07:27:40 crc kubenswrapper[4933]: I0122 07:27:40.399388 4933 scope.go:117] "RemoveContainer" containerID="a49b26f9f5551d3b9d7700bfb8622fd4477d854cde412ef6af9e2269f89ca31d" Jan 22 07:27:40 crc kubenswrapper[4933]: I0122 07:27:40.419941 4933 scope.go:117] "RemoveContainer" containerID="d3434b379477d2edbb3ef250c8a4fbf25adb801438c1fbd4d6f98de826ca0bce" Jan 22 07:27:40 crc kubenswrapper[4933]: E0122 07:27:40.420515 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d3434b379477d2edbb3ef250c8a4fbf25adb801438c1fbd4d6f98de826ca0bce\": container with ID starting with d3434b379477d2edbb3ef250c8a4fbf25adb801438c1fbd4d6f98de826ca0bce not found: ID does not exist" containerID="d3434b379477d2edbb3ef250c8a4fbf25adb801438c1fbd4d6f98de826ca0bce" Jan 22 07:27:40 crc kubenswrapper[4933]: I0122 07:27:40.420638 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d3434b379477d2edbb3ef250c8a4fbf25adb801438c1fbd4d6f98de826ca0bce"} err="failed to get container status \"d3434b379477d2edbb3ef250c8a4fbf25adb801438c1fbd4d6f98de826ca0bce\": rpc error: code = NotFound desc = could not find container \"d3434b379477d2edbb3ef250c8a4fbf25adb801438c1fbd4d6f98de826ca0bce\": container with ID starting with d3434b379477d2edbb3ef250c8a4fbf25adb801438c1fbd4d6f98de826ca0bce not found: ID does not exist" Jan 22 07:27:40 crc kubenswrapper[4933]: I0122 07:27:40.420749 4933 scope.go:117] "RemoveContainer" containerID="a49b26f9f5551d3b9d7700bfb8622fd4477d854cde412ef6af9e2269f89ca31d" Jan 22 07:27:40 crc kubenswrapper[4933]: E0122 07:27:40.421122 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a49b26f9f5551d3b9d7700bfb8622fd4477d854cde412ef6af9e2269f89ca31d\": container with ID starting with a49b26f9f5551d3b9d7700bfb8622fd4477d854cde412ef6af9e2269f89ca31d not found: ID does not exist" containerID="a49b26f9f5551d3b9d7700bfb8622fd4477d854cde412ef6af9e2269f89ca31d" Jan 22 07:27:40 crc kubenswrapper[4933]: I0122 07:27:40.421151 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a49b26f9f5551d3b9d7700bfb8622fd4477d854cde412ef6af9e2269f89ca31d"} err="failed to get container status \"a49b26f9f5551d3b9d7700bfb8622fd4477d854cde412ef6af9e2269f89ca31d\": rpc error: code = NotFound desc = could not find container \"a49b26f9f5551d3b9d7700bfb8622fd4477d854cde412ef6af9e2269f89ca31d\": container with ID starting with a49b26f9f5551d3b9d7700bfb8622fd4477d854cde412ef6af9e2269f89ca31d not found: ID does not exist" Jan 22 07:27:40 crc kubenswrapper[4933]: I0122 07:27:40.646580 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-image-upload-7b97d6bc64-js7m6"] Jan 22 07:27:40 crc kubenswrapper[4933]: I0122 07:27:40.663915 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/octavia-image-upload-7b97d6bc64-js7m6"] Jan 22 07:27:41 crc kubenswrapper[4933]: I0122 07:27:41.490576 4933 scope.go:117] "RemoveContainer" containerID="16fd824342f037f0104eaf6d48b751a02eeec5a84e0c2a32f1a295811edc0336" Jan 22 07:27:41 crc kubenswrapper[4933]: E0122 07:27:41.491044 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:27:42 crc kubenswrapper[4933]: I0122 07:27:42.511338 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9147baef-7574-4104-83be-e54825e1a277" path="/var/lib/kubelet/pods/9147baef-7574-4104-83be-e54825e1a277/volumes" Jan 22 07:27:42 crc kubenswrapper[4933]: I0122 07:27:42.691364 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-dgj84"] Jan 22 07:27:42 crc kubenswrapper[4933]: E0122 07:27:42.691794 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9147baef-7574-4104-83be-e54825e1a277" containerName="octavia-amphora-httpd" Jan 22 07:27:42 crc kubenswrapper[4933]: I0122 07:27:42.691813 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="9147baef-7574-4104-83be-e54825e1a277" containerName="octavia-amphora-httpd" Jan 22 07:27:42 crc kubenswrapper[4933]: E0122 07:27:42.691826 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f10a3846-d1b2-4d43-bd53-b005c791e9c1" containerName="octavia-api-provider-agent" Jan 22 07:27:42 crc kubenswrapper[4933]: I0122 07:27:42.691833 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="f10a3846-d1b2-4d43-bd53-b005c791e9c1" containerName="octavia-api-provider-agent" Jan 22 07:27:42 crc kubenswrapper[4933]: E0122 07:27:42.691853 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9147baef-7574-4104-83be-e54825e1a277" containerName="init" Jan 22 07:27:42 crc kubenswrapper[4933]: I0122 07:27:42.691872 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="9147baef-7574-4104-83be-e54825e1a277" containerName="init" Jan 22 07:27:42 crc kubenswrapper[4933]: E0122 07:27:42.691888 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f10a3846-d1b2-4d43-bd53-b005c791e9c1" containerName="octavia-api" Jan 22 07:27:42 crc kubenswrapper[4933]: I0122 07:27:42.691894 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="f10a3846-d1b2-4d43-bd53-b005c791e9c1" containerName="octavia-api" Jan 22 07:27:42 crc kubenswrapper[4933]: E0122 07:27:42.691908 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f10a3846-d1b2-4d43-bd53-b005c791e9c1" containerName="init" Jan 22 07:27:42 crc kubenswrapper[4933]: I0122 07:27:42.691913 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="f10a3846-d1b2-4d43-bd53-b005c791e9c1" containerName="init" Jan 22 07:27:42 crc kubenswrapper[4933]: I0122 07:27:42.692116 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="f10a3846-d1b2-4d43-bd53-b005c791e9c1" containerName="octavia-api-provider-agent" Jan 22 07:27:42 crc kubenswrapper[4933]: I0122 07:27:42.692135 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="f10a3846-d1b2-4d43-bd53-b005c791e9c1" containerName="octavia-api" Jan 22 07:27:42 crc 
Jan 22 07:27:42 crc kubenswrapper[4933]: I0122 07:27:42.693440 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dgj84"
Jan 22 07:27:42 crc kubenswrapper[4933]: I0122 07:27:42.730922 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dgj84"]
Jan 22 07:27:42 crc kubenswrapper[4933]: I0122 07:27:42.828542 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee-utilities\") pod \"certified-operators-dgj84\" (UID: \"1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee\") " pod="openshift-marketplace/certified-operators-dgj84"
Jan 22 07:27:42 crc kubenswrapper[4933]: I0122 07:27:42.828764 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j7v68\" (UniqueName: \"kubernetes.io/projected/1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee-kube-api-access-j7v68\") pod \"certified-operators-dgj84\" (UID: \"1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee\") " pod="openshift-marketplace/certified-operators-dgj84"
Jan 22 07:27:42 crc kubenswrapper[4933]: I0122 07:27:42.828815 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee-catalog-content\") pod \"certified-operators-dgj84\" (UID: \"1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee\") " pod="openshift-marketplace/certified-operators-dgj84"
Jan 22 07:27:42 crc kubenswrapper[4933]: I0122 07:27:42.931008 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j7v68\" (UniqueName: \"kubernetes.io/projected/1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee-kube-api-access-j7v68\") pod \"certified-operators-dgj84\" (UID: \"1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee\") " pod="openshift-marketplace/certified-operators-dgj84"
Jan 22 07:27:42 crc kubenswrapper[4933]: I0122 07:27:42.931101 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee-catalog-content\") pod \"certified-operators-dgj84\" (UID: \"1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee\") " pod="openshift-marketplace/certified-operators-dgj84"
Jan 22 07:27:42 crc kubenswrapper[4933]: I0122 07:27:42.931166 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee-utilities\") pod \"certified-operators-dgj84\" (UID: \"1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee\") " pod="openshift-marketplace/certified-operators-dgj84"
Jan 22 07:27:42 crc kubenswrapper[4933]: I0122 07:27:42.931748 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee-catalog-content\") pod \"certified-operators-dgj84\" (UID: \"1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee\") " pod="openshift-marketplace/certified-operators-dgj84"
Jan 22 07:27:42 crc kubenswrapper[4933]: I0122 07:27:42.931806 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee-utilities\") pod \"certified-operators-dgj84\" (UID: \"1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee\") " pod="openshift-marketplace/certified-operators-dgj84"
Jan 22 07:27:42 crc kubenswrapper[4933]: I0122 07:27:42.952697 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j7v68\" (UniqueName: \"kubernetes.io/projected/1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee-kube-api-access-j7v68\") pod \"certified-operators-dgj84\" (UID: \"1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee\") " pod="openshift-marketplace/certified-operators-dgj84"
Jan 22 07:27:43 crc kubenswrapper[4933]: I0122 07:27:43.028980 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dgj84"
Jan 22 07:27:43 crc kubenswrapper[4933]: I0122 07:27:43.631259 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dgj84"]
Jan 22 07:27:43 crc kubenswrapper[4933]: W0122 07:27:43.647459 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1a6bcf9e_2bc6_4710_a1ff_47a3dc3970ee.slice/crio-92ffe684a38933cf1034e7ae2362acc39dd9582ac5da41d89489c29090f24eef WatchSource:0}: Error finding container 92ffe684a38933cf1034e7ae2362acc39dd9582ac5da41d89489c29090f24eef: Status 404 returned error can't find the container with id 92ffe684a38933cf1034e7ae2362acc39dd9582ac5da41d89489c29090f24eef
Jan 22 07:27:44 crc kubenswrapper[4933]: I0122 07:27:44.365958 4933 generic.go:334] "Generic (PLEG): container finished" podID="1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee" containerID="f23d80c7b68563cb2d3781c6da3b236b1457a588f29b6d3d61e27d7fdfe17bdd" exitCode=0
Jan 22 07:27:44 crc kubenswrapper[4933]: I0122 07:27:44.366267 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dgj84" event={"ID":"1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee","Type":"ContainerDied","Data":"f23d80c7b68563cb2d3781c6da3b236b1457a588f29b6d3d61e27d7fdfe17bdd"}
Jan 22 07:27:44 crc kubenswrapper[4933]: I0122 07:27:44.366296 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dgj84" event={"ID":"1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee","Type":"ContainerStarted","Data":"92ffe684a38933cf1034e7ae2362acc39dd9582ac5da41d89489c29090f24eef"}
Jan 22 07:27:45 crc kubenswrapper[4933]: I0122 07:27:45.377169 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dgj84" event={"ID":"1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee","Type":"ContainerStarted","Data":"b2b4a4cc1e03f3f67c8e5f722a896356441bc5822e12f244a6ea5466a99915df"}
Jan 22 07:27:46 crc kubenswrapper[4933]: I0122 07:27:46.388432 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-image-upload-7b97d6bc64-wqqfs"]
Jan 22 07:27:46 crc kubenswrapper[4933]: I0122 07:27:46.391048 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-image-upload-7b97d6bc64-wqqfs"
Jan 22 07:27:46 crc kubenswrapper[4933]: I0122 07:27:46.395432 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-config-data"
Jan 22 07:27:46 crc kubenswrapper[4933]: I0122 07:27:46.397595 4933 generic.go:334] "Generic (PLEG): container finished" podID="1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee" containerID="b2b4a4cc1e03f3f67c8e5f722a896356441bc5822e12f244a6ea5466a99915df" exitCode=0
Jan 22 07:27:46 crc kubenswrapper[4933]: I0122 07:27:46.397640 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dgj84" event={"ID":"1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee","Type":"ContainerDied","Data":"b2b4a4cc1e03f3f67c8e5f722a896356441bc5822e12f244a6ea5466a99915df"}
Jan 22 07:27:46 crc kubenswrapper[4933]: I0122 07:27:46.404560 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-image-upload-7b97d6bc64-wqqfs"]
Jan 22 07:27:46 crc kubenswrapper[4933]: I0122 07:27:46.523639 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/b81027bd-04de-4c5c-a898-7c2d11d62abb-amphora-image\") pod \"octavia-image-upload-7b97d6bc64-wqqfs\" (UID: \"b81027bd-04de-4c5c-a898-7c2d11d62abb\") " pod="openstack/octavia-image-upload-7b97d6bc64-wqqfs"
Jan 22 07:27:46 crc kubenswrapper[4933]: I0122 07:27:46.524161 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b81027bd-04de-4c5c-a898-7c2d11d62abb-httpd-config\") pod \"octavia-image-upload-7b97d6bc64-wqqfs\" (UID: \"b81027bd-04de-4c5c-a898-7c2d11d62abb\") " pod="openstack/octavia-image-upload-7b97d6bc64-wqqfs"
Jan 22 07:27:46 crc kubenswrapper[4933]: I0122 07:27:46.625664 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b81027bd-04de-4c5c-a898-7c2d11d62abb-httpd-config\") pod \"octavia-image-upload-7b97d6bc64-wqqfs\" (UID: \"b81027bd-04de-4c5c-a898-7c2d11d62abb\") " pod="openstack/octavia-image-upload-7b97d6bc64-wqqfs"
Jan 22 07:27:46 crc kubenswrapper[4933]: I0122 07:27:46.625804 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/b81027bd-04de-4c5c-a898-7c2d11d62abb-amphora-image\") pod \"octavia-image-upload-7b97d6bc64-wqqfs\" (UID: \"b81027bd-04de-4c5c-a898-7c2d11d62abb\") " pod="openstack/octavia-image-upload-7b97d6bc64-wqqfs"
Jan 22 07:27:46 crc kubenswrapper[4933]: I0122 07:27:46.630523 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/b81027bd-04de-4c5c-a898-7c2d11d62abb-amphora-image\") pod \"octavia-image-upload-7b97d6bc64-wqqfs\" (UID: \"b81027bd-04de-4c5c-a898-7c2d11d62abb\") " pod="openstack/octavia-image-upload-7b97d6bc64-wqqfs"
Jan 22 07:27:46 crc kubenswrapper[4933]: I0122 07:27:46.638622 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b81027bd-04de-4c5c-a898-7c2d11d62abb-httpd-config\") pod \"octavia-image-upload-7b97d6bc64-wqqfs\" (UID: \"b81027bd-04de-4c5c-a898-7c2d11d62abb\") " pod="openstack/octavia-image-upload-7b97d6bc64-wqqfs"
Jan 22 07:27:46 crc kubenswrapper[4933]: I0122 07:27:46.725545 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-image-upload-7b97d6bc64-wqqfs"
Need to start a new one" pod="openstack/octavia-image-upload-7b97d6bc64-wqqfs" Jan 22 07:27:47 crc kubenswrapper[4933]: I0122 07:27:47.189506 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-image-upload-7b97d6bc64-wqqfs"] Jan 22 07:27:47 crc kubenswrapper[4933]: W0122 07:27:47.195481 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb81027bd_04de_4c5c_a898_7c2d11d62abb.slice/crio-ad383643a5d9d007390370e5d90e941ab2d546cac363457fdc6a359ae7e8eaaa WatchSource:0}: Error finding container ad383643a5d9d007390370e5d90e941ab2d546cac363457fdc6a359ae7e8eaaa: Status 404 returned error can't find the container with id ad383643a5d9d007390370e5d90e941ab2d546cac363457fdc6a359ae7e8eaaa Jan 22 07:27:47 crc kubenswrapper[4933]: I0122 07:27:47.409658 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-7b97d6bc64-wqqfs" event={"ID":"b81027bd-04de-4c5c-a898-7c2d11d62abb","Type":"ContainerStarted","Data":"ad383643a5d9d007390370e5d90e941ab2d546cac363457fdc6a359ae7e8eaaa"} Jan 22 07:27:48 crc kubenswrapper[4933]: I0122 07:27:48.424347 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dgj84" event={"ID":"1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee","Type":"ContainerStarted","Data":"dce7f9ff3162a70075c5f130152cca4a6bfa0d1fc2971338b04994b140e7097e"} Jan 22 07:27:48 crc kubenswrapper[4933]: I0122 07:27:48.427488 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-7b97d6bc64-wqqfs" event={"ID":"b81027bd-04de-4c5c-a898-7c2d11d62abb","Type":"ContainerStarted","Data":"ea67c1dac22715a85e6b81a0cbfdc982dfc8d40a682d4cfc607ecddb57934fad"} Jan 22 07:27:48 crc kubenswrapper[4933]: I0122 07:27:48.455319 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-dgj84" podStartSLOduration=3.536595292 podStartE2EDuration="6.455293804s" podCreationTimestamp="2026-01-22 07:27:42 +0000 UTC" firstStartedPulling="2026-01-22 07:27:44.368725488 +0000 UTC m=+6112.205850861" lastFinishedPulling="2026-01-22 07:27:47.28742402 +0000 UTC m=+6115.124549373" observedRunningTime="2026-01-22 07:27:48.443797465 +0000 UTC m=+6116.280922838" watchObservedRunningTime="2026-01-22 07:27:48.455293804 +0000 UTC m=+6116.292419177" Jan 22 07:27:49 crc kubenswrapper[4933]: I0122 07:27:49.435780 4933 generic.go:334] "Generic (PLEG): container finished" podID="b81027bd-04de-4c5c-a898-7c2d11d62abb" containerID="ea67c1dac22715a85e6b81a0cbfdc982dfc8d40a682d4cfc607ecddb57934fad" exitCode=0 Jan 22 07:27:49 crc kubenswrapper[4933]: I0122 07:27:49.435870 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-7b97d6bc64-wqqfs" event={"ID":"b81027bd-04de-4c5c-a898-7c2d11d62abb","Type":"ContainerDied","Data":"ea67c1dac22715a85e6b81a0cbfdc982dfc8d40a682d4cfc607ecddb57934fad"} Jan 22 07:27:50 crc kubenswrapper[4933]: I0122 07:27:50.454639 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-7b97d6bc64-wqqfs" event={"ID":"b81027bd-04de-4c5c-a898-7c2d11d62abb","Type":"ContainerStarted","Data":"a0f46e3e032042481d0f8d0c02850737d273935babc590a97372b4e6ac6ccbd0"} Jan 22 07:27:50 crc kubenswrapper[4933]: I0122 07:27:50.472370 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-image-upload-7b97d6bc64-wqqfs" podStartSLOduration=3.811462679 
podStartE2EDuration="4.472348118s" podCreationTimestamp="2026-01-22 07:27:46 +0000 UTC" firstStartedPulling="2026-01-22 07:27:47.198068066 +0000 UTC m=+6115.035193419" lastFinishedPulling="2026-01-22 07:27:47.858953495 +0000 UTC m=+6115.696078858" observedRunningTime="2026-01-22 07:27:50.472210335 +0000 UTC m=+6118.309335688" watchObservedRunningTime="2026-01-22 07:27:50.472348118 +0000 UTC m=+6118.309473491" Jan 22 07:27:53 crc kubenswrapper[4933]: I0122 07:27:53.029761 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-dgj84" Jan 22 07:27:53 crc kubenswrapper[4933]: I0122 07:27:53.030176 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-dgj84" Jan 22 07:27:53 crc kubenswrapper[4933]: I0122 07:27:53.113688 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-dgj84" Jan 22 07:27:53 crc kubenswrapper[4933]: I0122 07:27:53.491683 4933 scope.go:117] "RemoveContainer" containerID="16fd824342f037f0104eaf6d48b751a02eeec5a84e0c2a32f1a295811edc0336" Jan 22 07:27:53 crc kubenswrapper[4933]: E0122 07:27:53.492011 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:27:53 crc kubenswrapper[4933]: I0122 07:27:53.548522 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-dgj84" Jan 22 07:27:53 crc kubenswrapper[4933]: I0122 07:27:53.593792 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dgj84"] Jan 22 07:27:55 crc kubenswrapper[4933]: I0122 07:27:55.517232 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-dgj84" podUID="1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee" containerName="registry-server" containerID="cri-o://dce7f9ff3162a70075c5f130152cca4a6bfa0d1fc2971338b04994b140e7097e" gracePeriod=2 Jan 22 07:27:56 crc kubenswrapper[4933]: I0122 07:27:56.528110 4933 generic.go:334] "Generic (PLEG): container finished" podID="1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee" containerID="dce7f9ff3162a70075c5f130152cca4a6bfa0d1fc2971338b04994b140e7097e" exitCode=0 Jan 22 07:27:56 crc kubenswrapper[4933]: I0122 07:27:56.528414 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dgj84" event={"ID":"1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee","Type":"ContainerDied","Data":"dce7f9ff3162a70075c5f130152cca4a6bfa0d1fc2971338b04994b140e7097e"} Jan 22 07:27:57 crc kubenswrapper[4933]: I0122 07:27:57.452464 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dgj84" Jan 22 07:27:57 crc kubenswrapper[4933]: I0122 07:27:57.538863 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dgj84" event={"ID":"1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee","Type":"ContainerDied","Data":"92ffe684a38933cf1034e7ae2362acc39dd9582ac5da41d89489c29090f24eef"} Jan 22 07:27:57 crc kubenswrapper[4933]: I0122 07:27:57.539210 4933 scope.go:117] "RemoveContainer" containerID="dce7f9ff3162a70075c5f130152cca4a6bfa0d1fc2971338b04994b140e7097e" Jan 22 07:27:57 crc kubenswrapper[4933]: I0122 07:27:57.538912 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dgj84" Jan 22 07:27:57 crc kubenswrapper[4933]: I0122 07:27:57.566733 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee-utilities\") pod \"1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee\" (UID: \"1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee\") " Jan 22 07:27:57 crc kubenswrapper[4933]: I0122 07:27:57.567044 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee-catalog-content\") pod \"1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee\" (UID: \"1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee\") " Jan 22 07:27:57 crc kubenswrapper[4933]: I0122 07:27:57.567194 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j7v68\" (UniqueName: \"kubernetes.io/projected/1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee-kube-api-access-j7v68\") pod \"1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee\" (UID: \"1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee\") " Jan 22 07:27:57 crc kubenswrapper[4933]: I0122 07:27:57.567920 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee-utilities" (OuterVolumeSpecName: "utilities") pod "1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee" (UID: "1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:27:57 crc kubenswrapper[4933]: I0122 07:27:57.568363 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 07:27:57 crc kubenswrapper[4933]: I0122 07:27:57.575136 4933 scope.go:117] "RemoveContainer" containerID="b2b4a4cc1e03f3f67c8e5f722a896356441bc5822e12f244a6ea5466a99915df" Jan 22 07:27:57 crc kubenswrapper[4933]: I0122 07:27:57.577999 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee-kube-api-access-j7v68" (OuterVolumeSpecName: "kube-api-access-j7v68") pod "1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee" (UID: "1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee"). InnerVolumeSpecName "kube-api-access-j7v68". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:27:57 crc kubenswrapper[4933]: I0122 07:27:57.612046 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee" (UID: "1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee"). 
InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:27:57 crc kubenswrapper[4933]: I0122 07:27:57.633140 4933 scope.go:117] "RemoveContainer" containerID="f23d80c7b68563cb2d3781c6da3b236b1457a588f29b6d3d61e27d7fdfe17bdd" Jan 22 07:27:57 crc kubenswrapper[4933]: I0122 07:27:57.673424 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 07:27:57 crc kubenswrapper[4933]: I0122 07:27:57.673473 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j7v68\" (UniqueName: \"kubernetes.io/projected/1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee-kube-api-access-j7v68\") on node \"crc\" DevicePath \"\"" Jan 22 07:27:57 crc kubenswrapper[4933]: I0122 07:27:57.878059 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dgj84"] Jan 22 07:27:57 crc kubenswrapper[4933]: I0122 07:27:57.888887 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-dgj84"] Jan 22 07:27:58 crc kubenswrapper[4933]: I0122 07:27:58.501453 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee" path="/var/lib/kubelet/pods/1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee/volumes" Jan 22 07:28:05 crc kubenswrapper[4933]: I0122 07:28:05.634039 4933 scope.go:117] "RemoveContainer" containerID="94aee6d72ff1dd5d8864290ad13f8f73fa04be99ddb8086279485349921dde11" Jan 22 07:28:05 crc kubenswrapper[4933]: I0122 07:28:05.655897 4933 scope.go:117] "RemoveContainer" containerID="256cda9fa1b92127d3c79d4bda117f5969322f17bb444c18c594df78f6c31bad" Jan 22 07:28:05 crc kubenswrapper[4933]: I0122 07:28:05.713320 4933 scope.go:117] "RemoveContainer" containerID="012f5b30b5dfe06df7cad6f76ec25bd9517de258cfbb5f39e51fa5b810d07926" Jan 22 07:28:06 crc kubenswrapper[4933]: I0122 07:28:06.490574 4933 scope.go:117] "RemoveContainer" containerID="16fd824342f037f0104eaf6d48b751a02eeec5a84e0c2a32f1a295811edc0336" Jan 22 07:28:06 crc kubenswrapper[4933]: E0122 07:28:06.491116 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:28:20 crc kubenswrapper[4933]: I0122 07:28:20.492170 4933 scope.go:117] "RemoveContainer" containerID="16fd824342f037f0104eaf6d48b751a02eeec5a84e0c2a32f1a295811edc0336" Jan 22 07:28:20 crc kubenswrapper[4933]: E0122 07:28:20.493892 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.517730 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-78d45b5589-svr2j"] Jan 22 07:28:28 crc kubenswrapper[4933]: E0122 07:28:28.518506 4933 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee" containerName="extract-utilities" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.518518 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee" containerName="extract-utilities" Jan 22 07:28:28 crc kubenswrapper[4933]: E0122 07:28:28.518533 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee" containerName="registry-server" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.518540 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee" containerName="registry-server" Jan 22 07:28:28 crc kubenswrapper[4933]: E0122 07:28:28.518570 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee" containerName="extract-content" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.518577 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee" containerName="extract-content" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.518763 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a6bcf9e-2bc6-4710-a1ff-47a3dc3970ee" containerName="registry-server" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.520313 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-78d45b5589-svr2j" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.523745 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-78d45b5589-svr2j"] Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.523934 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.523946 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.525432 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.525496 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-mkkg8" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.553421 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.553896 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="d4791b14-4baa-4617-8ed9-be7a99cefa10" containerName="glance-log" containerID="cri-o://c9596897b78fcef7e14f3af766be7bf3274b03cc4d6e81701c5db05b4038497f" gracePeriod=30 Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.554367 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="d4791b14-4baa-4617-8ed9-be7a99cefa10" containerName="glance-httpd" containerID="cri-o://7734da35a424a967b843c3fd0e2e954fb00ef47bc9ccae1413f16767fba617d8" gracePeriod=30 Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.613970 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-545f5b546f-zwk44"] Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.615636 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-545f5b546f-zwk44" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.639719 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-545f5b546f-zwk44"] Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.675352 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.675584 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="092e7306-126b-461f-926e-40eb750fe16c" containerName="glance-log" containerID="cri-o://286a9e012307ad919613e2363acff2d6b5283f8ea4ba44cfa8a37bebd45ea682" gracePeriod=30 Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.675759 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="092e7306-126b-461f-926e-40eb750fe16c" containerName="glance-httpd" containerID="cri-o://bbf8107eab3719f42cb265adf56d054bc8486cb67784a51193d0a5839262e8d6" gracePeriod=30 Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.705962 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7537515b-2d77-4197-a172-a34ef69681d8-horizon-secret-key\") pod \"horizon-78d45b5589-svr2j\" (UID: \"7537515b-2d77-4197-a172-a34ef69681d8\") " pod="openstack/horizon-78d45b5589-svr2j" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.706331 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7537515b-2d77-4197-a172-a34ef69681d8-logs\") pod \"horizon-78d45b5589-svr2j\" (UID: \"7537515b-2d77-4197-a172-a34ef69681d8\") " pod="openstack/horizon-78d45b5589-svr2j" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.706430 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7537515b-2d77-4197-a172-a34ef69681d8-scripts\") pod \"horizon-78d45b5589-svr2j\" (UID: \"7537515b-2d77-4197-a172-a34ef69681d8\") " pod="openstack/horizon-78d45b5589-svr2j" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.706570 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xgzcz\" (UniqueName: \"kubernetes.io/projected/7537515b-2d77-4197-a172-a34ef69681d8-kube-api-access-xgzcz\") pod \"horizon-78d45b5589-svr2j\" (UID: \"7537515b-2d77-4197-a172-a34ef69681d8\") " pod="openstack/horizon-78d45b5589-svr2j" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.706762 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7537515b-2d77-4197-a172-a34ef69681d8-config-data\") pod \"horizon-78d45b5589-svr2j\" (UID: \"7537515b-2d77-4197-a172-a34ef69681d8\") " pod="openstack/horizon-78d45b5589-svr2j" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.808941 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7537515b-2d77-4197-a172-a34ef69681d8-logs\") pod \"horizon-78d45b5589-svr2j\" (UID: \"7537515b-2d77-4197-a172-a34ef69681d8\") " pod="openstack/horizon-78d45b5589-svr2j" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.808991 4933 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7537515b-2d77-4197-a172-a34ef69681d8-scripts\") pod \"horizon-78d45b5589-svr2j\" (UID: \"7537515b-2d77-4197-a172-a34ef69681d8\") " pod="openstack/horizon-78d45b5589-svr2j" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.809021 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kslwb\" (UniqueName: \"kubernetes.io/projected/e29bcc09-3449-49eb-83a7-972bbf7ad894-kube-api-access-kslwb\") pod \"horizon-545f5b546f-zwk44\" (UID: \"e29bcc09-3449-49eb-83a7-972bbf7ad894\") " pod="openstack/horizon-545f5b546f-zwk44" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.809058 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xgzcz\" (UniqueName: \"kubernetes.io/projected/7537515b-2d77-4197-a172-a34ef69681d8-kube-api-access-xgzcz\") pod \"horizon-78d45b5589-svr2j\" (UID: \"7537515b-2d77-4197-a172-a34ef69681d8\") " pod="openstack/horizon-78d45b5589-svr2j" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.809102 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e29bcc09-3449-49eb-83a7-972bbf7ad894-scripts\") pod \"horizon-545f5b546f-zwk44\" (UID: \"e29bcc09-3449-49eb-83a7-972bbf7ad894\") " pod="openstack/horizon-545f5b546f-zwk44" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.809143 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e29bcc09-3449-49eb-83a7-972bbf7ad894-config-data\") pod \"horizon-545f5b546f-zwk44\" (UID: \"e29bcc09-3449-49eb-83a7-972bbf7ad894\") " pod="openstack/horizon-545f5b546f-zwk44" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.809170 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/e29bcc09-3449-49eb-83a7-972bbf7ad894-horizon-secret-key\") pod \"horizon-545f5b546f-zwk44\" (UID: \"e29bcc09-3449-49eb-83a7-972bbf7ad894\") " pod="openstack/horizon-545f5b546f-zwk44" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.809197 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e29bcc09-3449-49eb-83a7-972bbf7ad894-logs\") pod \"horizon-545f5b546f-zwk44\" (UID: \"e29bcc09-3449-49eb-83a7-972bbf7ad894\") " pod="openstack/horizon-545f5b546f-zwk44" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.809217 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7537515b-2d77-4197-a172-a34ef69681d8-config-data\") pod \"horizon-78d45b5589-svr2j\" (UID: \"7537515b-2d77-4197-a172-a34ef69681d8\") " pod="openstack/horizon-78d45b5589-svr2j" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.809256 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7537515b-2d77-4197-a172-a34ef69681d8-horizon-secret-key\") pod \"horizon-78d45b5589-svr2j\" (UID: \"7537515b-2d77-4197-a172-a34ef69681d8\") " pod="openstack/horizon-78d45b5589-svr2j" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.809436 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" 
(UniqueName: \"kubernetes.io/empty-dir/7537515b-2d77-4197-a172-a34ef69681d8-logs\") pod \"horizon-78d45b5589-svr2j\" (UID: \"7537515b-2d77-4197-a172-a34ef69681d8\") " pod="openstack/horizon-78d45b5589-svr2j" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.810041 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7537515b-2d77-4197-a172-a34ef69681d8-scripts\") pod \"horizon-78d45b5589-svr2j\" (UID: \"7537515b-2d77-4197-a172-a34ef69681d8\") " pod="openstack/horizon-78d45b5589-svr2j" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.810904 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7537515b-2d77-4197-a172-a34ef69681d8-config-data\") pod \"horizon-78d45b5589-svr2j\" (UID: \"7537515b-2d77-4197-a172-a34ef69681d8\") " pod="openstack/horizon-78d45b5589-svr2j" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.815621 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7537515b-2d77-4197-a172-a34ef69681d8-horizon-secret-key\") pod \"horizon-78d45b5589-svr2j\" (UID: \"7537515b-2d77-4197-a172-a34ef69681d8\") " pod="openstack/horizon-78d45b5589-svr2j" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.828418 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xgzcz\" (UniqueName: \"kubernetes.io/projected/7537515b-2d77-4197-a172-a34ef69681d8-kube-api-access-xgzcz\") pod \"horizon-78d45b5589-svr2j\" (UID: \"7537515b-2d77-4197-a172-a34ef69681d8\") " pod="openstack/horizon-78d45b5589-svr2j" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.849674 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-78d45b5589-svr2j" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.911241 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e29bcc09-3449-49eb-83a7-972bbf7ad894-config-data\") pod \"horizon-545f5b546f-zwk44\" (UID: \"e29bcc09-3449-49eb-83a7-972bbf7ad894\") " pod="openstack/horizon-545f5b546f-zwk44" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.911566 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/e29bcc09-3449-49eb-83a7-972bbf7ad894-horizon-secret-key\") pod \"horizon-545f5b546f-zwk44\" (UID: \"e29bcc09-3449-49eb-83a7-972bbf7ad894\") " pod="openstack/horizon-545f5b546f-zwk44" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.911595 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e29bcc09-3449-49eb-83a7-972bbf7ad894-logs\") pod \"horizon-545f5b546f-zwk44\" (UID: \"e29bcc09-3449-49eb-83a7-972bbf7ad894\") " pod="openstack/horizon-545f5b546f-zwk44" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.911692 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kslwb\" (UniqueName: \"kubernetes.io/projected/e29bcc09-3449-49eb-83a7-972bbf7ad894-kube-api-access-kslwb\") pod \"horizon-545f5b546f-zwk44\" (UID: \"e29bcc09-3449-49eb-83a7-972bbf7ad894\") " pod="openstack/horizon-545f5b546f-zwk44" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.911731 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e29bcc09-3449-49eb-83a7-972bbf7ad894-scripts\") pod \"horizon-545f5b546f-zwk44\" (UID: \"e29bcc09-3449-49eb-83a7-972bbf7ad894\") " pod="openstack/horizon-545f5b546f-zwk44" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.912392 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e29bcc09-3449-49eb-83a7-972bbf7ad894-scripts\") pod \"horizon-545f5b546f-zwk44\" (UID: \"e29bcc09-3449-49eb-83a7-972bbf7ad894\") " pod="openstack/horizon-545f5b546f-zwk44" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.913736 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e29bcc09-3449-49eb-83a7-972bbf7ad894-logs\") pod \"horizon-545f5b546f-zwk44\" (UID: \"e29bcc09-3449-49eb-83a7-972bbf7ad894\") " pod="openstack/horizon-545f5b546f-zwk44" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.916331 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e29bcc09-3449-49eb-83a7-972bbf7ad894-config-data\") pod \"horizon-545f5b546f-zwk44\" (UID: \"e29bcc09-3449-49eb-83a7-972bbf7ad894\") " pod="openstack/horizon-545f5b546f-zwk44" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.921657 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/e29bcc09-3449-49eb-83a7-972bbf7ad894-horizon-secret-key\") pod \"horizon-545f5b546f-zwk44\" (UID: \"e29bcc09-3449-49eb-83a7-972bbf7ad894\") " pod="openstack/horizon-545f5b546f-zwk44" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.923901 4933 generic.go:334] "Generic (PLEG): container finished" 
podID="d4791b14-4baa-4617-8ed9-be7a99cefa10" containerID="c9596897b78fcef7e14f3af766be7bf3274b03cc4d6e81701c5db05b4038497f" exitCode=143 Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.923974 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"d4791b14-4baa-4617-8ed9-be7a99cefa10","Type":"ContainerDied","Data":"c9596897b78fcef7e14f3af766be7bf3274b03cc4d6e81701c5db05b4038497f"} Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.926545 4933 generic.go:334] "Generic (PLEG): container finished" podID="092e7306-126b-461f-926e-40eb750fe16c" containerID="286a9e012307ad919613e2363acff2d6b5283f8ea4ba44cfa8a37bebd45ea682" exitCode=143 Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.926732 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"092e7306-126b-461f-926e-40eb750fe16c","Type":"ContainerDied","Data":"286a9e012307ad919613e2363acff2d6b5283f8ea4ba44cfa8a37bebd45ea682"} Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.929954 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kslwb\" (UniqueName: \"kubernetes.io/projected/e29bcc09-3449-49eb-83a7-972bbf7ad894-kube-api-access-kslwb\") pod \"horizon-545f5b546f-zwk44\" (UID: \"e29bcc09-3449-49eb-83a7-972bbf7ad894\") " pod="openstack/horizon-545f5b546f-zwk44" Jan 22 07:28:28 crc kubenswrapper[4933]: I0122 07:28:28.933460 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-545f5b546f-zwk44" Jan 22 07:28:29 crc kubenswrapper[4933]: I0122 07:28:29.319000 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-78d45b5589-svr2j"] Jan 22 07:28:29 crc kubenswrapper[4933]: I0122 07:28:29.473056 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-545f5b546f-zwk44"] Jan 22 07:28:29 crc kubenswrapper[4933]: W0122 07:28:29.475771 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode29bcc09_3449_49eb_83a7_972bbf7ad894.slice/crio-50d638fe767e52d5de11773dcc3bbdbf8b0f621f3aba5277c25c9d557f9f16e7 WatchSource:0}: Error finding container 50d638fe767e52d5de11773dcc3bbdbf8b0f621f3aba5277c25c9d557f9f16e7: Status 404 returned error can't find the container with id 50d638fe767e52d5de11773dcc3bbdbf8b0f621f3aba5277c25c9d557f9f16e7 Jan 22 07:28:29 crc kubenswrapper[4933]: I0122 07:28:29.938557 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-78d45b5589-svr2j" event={"ID":"7537515b-2d77-4197-a172-a34ef69681d8","Type":"ContainerStarted","Data":"2ba5e39f3230e76aba97cd4f8d3f456bd2112f1b63a1dee49587e679a0c6018f"} Jan 22 07:28:29 crc kubenswrapper[4933]: I0122 07:28:29.939981 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-545f5b546f-zwk44" event={"ID":"e29bcc09-3449-49eb-83a7-972bbf7ad894","Type":"ContainerStarted","Data":"50d638fe767e52d5de11773dcc3bbdbf8b0f621f3aba5277c25c9d557f9f16e7"} Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.217421 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-545f5b546f-zwk44"] Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.263880 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-5c5f49847d-w5xqj"] Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.267237 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5c5f49847d-w5xqj" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.273412 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.284221 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5c5f49847d-w5xqj"] Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.348995 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-78d45b5589-svr2j"] Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.382929 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-scripts\") pod \"horizon-5c5f49847d-w5xqj\" (UID: \"7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9\") " pod="openstack/horizon-5c5f49847d-w5xqj" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.384034 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-config-data\") pod \"horizon-5c5f49847d-w5xqj\" (UID: \"7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9\") " pod="openstack/horizon-5c5f49847d-w5xqj" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.384066 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-combined-ca-bundle\") pod \"horizon-5c5f49847d-w5xqj\" (UID: \"7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9\") " pod="openstack/horizon-5c5f49847d-w5xqj" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.384108 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5jb5j\" (UniqueName: \"kubernetes.io/projected/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-kube-api-access-5jb5j\") pod \"horizon-5c5f49847d-w5xqj\" (UID: \"7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9\") " pod="openstack/horizon-5c5f49847d-w5xqj" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.384174 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-logs\") pod \"horizon-5c5f49847d-w5xqj\" (UID: \"7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9\") " pod="openstack/horizon-5c5f49847d-w5xqj" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.384201 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-horizon-tls-certs\") pod \"horizon-5c5f49847d-w5xqj\" (UID: \"7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9\") " pod="openstack/horizon-5c5f49847d-w5xqj" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.384258 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-horizon-secret-key\") pod \"horizon-5c5f49847d-w5xqj\" (UID: \"7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9\") " pod="openstack/horizon-5c5f49847d-w5xqj" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.400150 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-7f598858d8-sp7f8"] Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.403181 
4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7f598858d8-sp7f8" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.416688 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7f598858d8-sp7f8"] Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.485565 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-config-data\") pod \"horizon-5c5f49847d-w5xqj\" (UID: \"7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9\") " pod="openstack/horizon-5c5f49847d-w5xqj" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.485614 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-combined-ca-bundle\") pod \"horizon-5c5f49847d-w5xqj\" (UID: \"7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9\") " pod="openstack/horizon-5c5f49847d-w5xqj" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.485665 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5jb5j\" (UniqueName: \"kubernetes.io/projected/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-kube-api-access-5jb5j\") pod \"horizon-5c5f49847d-w5xqj\" (UID: \"7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9\") " pod="openstack/horizon-5c5f49847d-w5xqj" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.487201 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-config-data\") pod \"horizon-5c5f49847d-w5xqj\" (UID: \"7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9\") " pod="openstack/horizon-5c5f49847d-w5xqj" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.487357 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-logs\") pod \"horizon-5c5f49847d-w5xqj\" (UID: \"7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9\") " pod="openstack/horizon-5c5f49847d-w5xqj" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.487404 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-horizon-tls-certs\") pod \"horizon-5c5f49847d-w5xqj\" (UID: \"7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9\") " pod="openstack/horizon-5c5f49847d-w5xqj" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.487469 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-horizon-secret-key\") pod \"horizon-5c5f49847d-w5xqj\" (UID: \"7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9\") " pod="openstack/horizon-5c5f49847d-w5xqj" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.487499 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-scripts\") pod \"horizon-5c5f49847d-w5xqj\" (UID: \"7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9\") " pod="openstack/horizon-5c5f49847d-w5xqj" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.487779 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-logs\") pod 
\"horizon-5c5f49847d-w5xqj\" (UID: \"7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9\") " pod="openstack/horizon-5c5f49847d-w5xqj" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.488144 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-scripts\") pod \"horizon-5c5f49847d-w5xqj\" (UID: \"7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9\") " pod="openstack/horizon-5c5f49847d-w5xqj" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.490728 4933 scope.go:117] "RemoveContainer" containerID="16fd824342f037f0104eaf6d48b751a02eeec5a84e0c2a32f1a295811edc0336" Jan 22 07:28:31 crc kubenswrapper[4933]: E0122 07:28:31.491036 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.491650 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-combined-ca-bundle\") pod \"horizon-5c5f49847d-w5xqj\" (UID: \"7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9\") " pod="openstack/horizon-5c5f49847d-w5xqj" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.491923 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-horizon-tls-certs\") pod \"horizon-5c5f49847d-w5xqj\" (UID: \"7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9\") " pod="openstack/horizon-5c5f49847d-w5xqj" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.492354 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-horizon-secret-key\") pod \"horizon-5c5f49847d-w5xqj\" (UID: \"7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9\") " pod="openstack/horizon-5c5f49847d-w5xqj" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.503495 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5jb5j\" (UniqueName: \"kubernetes.io/projected/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-kube-api-access-5jb5j\") pod \"horizon-5c5f49847d-w5xqj\" (UID: \"7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9\") " pod="openstack/horizon-5c5f49847d-w5xqj" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.589676 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-scripts\") pod \"horizon-7f598858d8-sp7f8\" (UID: \"d5a0747b-cb15-4a5a-bf0d-71de8f9592eb\") " pod="openstack/horizon-7f598858d8-sp7f8" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.589915 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-horizon-tls-certs\") pod \"horizon-7f598858d8-sp7f8\" (UID: \"d5a0747b-cb15-4a5a-bf0d-71de8f9592eb\") " pod="openstack/horizon-7f598858d8-sp7f8" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.589958 4933 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-horizon-secret-key\") pod \"horizon-7f598858d8-sp7f8\" (UID: \"d5a0747b-cb15-4a5a-bf0d-71de8f9592eb\") " pod="openstack/horizon-7f598858d8-sp7f8" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.590068 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-combined-ca-bundle\") pod \"horizon-7f598858d8-sp7f8\" (UID: \"d5a0747b-cb15-4a5a-bf0d-71de8f9592eb\") " pod="openstack/horizon-7f598858d8-sp7f8" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.590200 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-config-data\") pod \"horizon-7f598858d8-sp7f8\" (UID: \"d5a0747b-cb15-4a5a-bf0d-71de8f9592eb\") " pod="openstack/horizon-7f598858d8-sp7f8" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.590221 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-logs\") pod \"horizon-7f598858d8-sp7f8\" (UID: \"d5a0747b-cb15-4a5a-bf0d-71de8f9592eb\") " pod="openstack/horizon-7f598858d8-sp7f8" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.590248 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-687mc\" (UniqueName: \"kubernetes.io/projected/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-kube-api-access-687mc\") pod \"horizon-7f598858d8-sp7f8\" (UID: \"d5a0747b-cb15-4a5a-bf0d-71de8f9592eb\") " pod="openstack/horizon-7f598858d8-sp7f8" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.602247 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5c5f49847d-w5xqj" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.693469 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-config-data\") pod \"horizon-7f598858d8-sp7f8\" (UID: \"d5a0747b-cb15-4a5a-bf0d-71de8f9592eb\") " pod="openstack/horizon-7f598858d8-sp7f8" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.693877 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-logs\") pod \"horizon-7f598858d8-sp7f8\" (UID: \"d5a0747b-cb15-4a5a-bf0d-71de8f9592eb\") " pod="openstack/horizon-7f598858d8-sp7f8" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.693915 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-687mc\" (UniqueName: \"kubernetes.io/projected/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-kube-api-access-687mc\") pod \"horizon-7f598858d8-sp7f8\" (UID: \"d5a0747b-cb15-4a5a-bf0d-71de8f9592eb\") " pod="openstack/horizon-7f598858d8-sp7f8" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.693990 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-scripts\") pod \"horizon-7f598858d8-sp7f8\" (UID: \"d5a0747b-cb15-4a5a-bf0d-71de8f9592eb\") " pod="openstack/horizon-7f598858d8-sp7f8" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.694045 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-horizon-tls-certs\") pod \"horizon-7f598858d8-sp7f8\" (UID: \"d5a0747b-cb15-4a5a-bf0d-71de8f9592eb\") " pod="openstack/horizon-7f598858d8-sp7f8" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.694063 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-horizon-secret-key\") pod \"horizon-7f598858d8-sp7f8\" (UID: \"d5a0747b-cb15-4a5a-bf0d-71de8f9592eb\") " pod="openstack/horizon-7f598858d8-sp7f8" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.694233 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-combined-ca-bundle\") pod \"horizon-7f598858d8-sp7f8\" (UID: \"d5a0747b-cb15-4a5a-bf0d-71de8f9592eb\") " pod="openstack/horizon-7f598858d8-sp7f8" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.696508 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-logs\") pod \"horizon-7f598858d8-sp7f8\" (UID: \"d5a0747b-cb15-4a5a-bf0d-71de8f9592eb\") " pod="openstack/horizon-7f598858d8-sp7f8" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.696670 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-config-data\") pod \"horizon-7f598858d8-sp7f8\" (UID: \"d5a0747b-cb15-4a5a-bf0d-71de8f9592eb\") " pod="openstack/horizon-7f598858d8-sp7f8" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.696674 4933 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-scripts\") pod \"horizon-7f598858d8-sp7f8\" (UID: \"d5a0747b-cb15-4a5a-bf0d-71de8f9592eb\") " pod="openstack/horizon-7f598858d8-sp7f8" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.705154 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-horizon-tls-certs\") pod \"horizon-7f598858d8-sp7f8\" (UID: \"d5a0747b-cb15-4a5a-bf0d-71de8f9592eb\") " pod="openstack/horizon-7f598858d8-sp7f8" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.710043 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-combined-ca-bundle\") pod \"horizon-7f598858d8-sp7f8\" (UID: \"d5a0747b-cb15-4a5a-bf0d-71de8f9592eb\") " pod="openstack/horizon-7f598858d8-sp7f8" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.713001 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-horizon-secret-key\") pod \"horizon-7f598858d8-sp7f8\" (UID: \"d5a0747b-cb15-4a5a-bf0d-71de8f9592eb\") " pod="openstack/horizon-7f598858d8-sp7f8" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.713710 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-687mc\" (UniqueName: \"kubernetes.io/projected/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-kube-api-access-687mc\") pod \"horizon-7f598858d8-sp7f8\" (UID: \"d5a0747b-cb15-4a5a-bf0d-71de8f9592eb\") " pod="openstack/horizon-7f598858d8-sp7f8" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.722802 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-7f598858d8-sp7f8" Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.965372 4933 generic.go:334] "Generic (PLEG): container finished" podID="092e7306-126b-461f-926e-40eb750fe16c" containerID="bbf8107eab3719f42cb265adf56d054bc8486cb67784a51193d0a5839262e8d6" exitCode=0 Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.965541 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"092e7306-126b-461f-926e-40eb750fe16c","Type":"ContainerDied","Data":"bbf8107eab3719f42cb265adf56d054bc8486cb67784a51193d0a5839262e8d6"} Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.974131 4933 generic.go:334] "Generic (PLEG): container finished" podID="d4791b14-4baa-4617-8ed9-be7a99cefa10" containerID="7734da35a424a967b843c3fd0e2e954fb00ef47bc9ccae1413f16767fba617d8" exitCode=0 Jan 22 07:28:31 crc kubenswrapper[4933]: I0122 07:28:31.974166 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"d4791b14-4baa-4617-8ed9-be7a99cefa10","Type":"ContainerDied","Data":"7734da35a424a967b843c3fd0e2e954fb00ef47bc9ccae1413f16767fba617d8"} Jan 22 07:28:32 crc kubenswrapper[4933]: I0122 07:28:32.174116 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5c5f49847d-w5xqj"] Jan 22 07:28:32 crc kubenswrapper[4933]: I0122 07:28:32.577526 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7f598858d8-sp7f8"] Jan 22 07:28:36 crc kubenswrapper[4933]: W0122 07:28:36.525537 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd5a0747b_cb15_4a5a_bf0d_71de8f9592eb.slice/crio-773ca2e3da87f5254efe29ebdaa596523a49955616818d727fc3c7ff7ed20e44 WatchSource:0}: Error finding container 773ca2e3da87f5254efe29ebdaa596523a49955616818d727fc3c7ff7ed20e44: Status 404 returned error can't find the container with id 773ca2e3da87f5254efe29ebdaa596523a49955616818d727fc3c7ff7ed20e44 Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.613221 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.622615 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.815961 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d4791b14-4baa-4617-8ed9-be7a99cefa10-logs\") pod \"d4791b14-4baa-4617-8ed9-be7a99cefa10\" (UID: \"d4791b14-4baa-4617-8ed9-be7a99cefa10\") " Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.816273 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4791b14-4baa-4617-8ed9-be7a99cefa10-scripts\") pod \"d4791b14-4baa-4617-8ed9-be7a99cefa10\" (UID: \"d4791b14-4baa-4617-8ed9-be7a99cefa10\") " Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.816474 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4791b14-4baa-4617-8ed9-be7a99cefa10-config-data\") pod \"d4791b14-4baa-4617-8ed9-be7a99cefa10\" (UID: \"d4791b14-4baa-4617-8ed9-be7a99cefa10\") " Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.816568 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/092e7306-126b-461f-926e-40eb750fe16c-httpd-run\") pod \"092e7306-126b-461f-926e-40eb750fe16c\" (UID: \"092e7306-126b-461f-926e-40eb750fe16c\") " Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.816691 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/092e7306-126b-461f-926e-40eb750fe16c-scripts\") pod \"092e7306-126b-461f-926e-40eb750fe16c\" (UID: \"092e7306-126b-461f-926e-40eb750fe16c\") " Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.816788 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/092e7306-126b-461f-926e-40eb750fe16c-config-data\") pod \"092e7306-126b-461f-926e-40eb750fe16c\" (UID: \"092e7306-126b-461f-926e-40eb750fe16c\") " Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.816929 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/092e7306-126b-461f-926e-40eb750fe16c-internal-tls-certs\") pod \"092e7306-126b-461f-926e-40eb750fe16c\" (UID: \"092e7306-126b-461f-926e-40eb750fe16c\") " Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.817113 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d4791b14-4baa-4617-8ed9-be7a99cefa10-httpd-run\") pod \"d4791b14-4baa-4617-8ed9-be7a99cefa10\" (UID: \"d4791b14-4baa-4617-8ed9-be7a99cefa10\") " Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.817265 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/092e7306-126b-461f-926e-40eb750fe16c-logs\") pod \"092e7306-126b-461f-926e-40eb750fe16c\" (UID: \"092e7306-126b-461f-926e-40eb750fe16c\") " Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.817382 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ft9n6\" (UniqueName: \"kubernetes.io/projected/092e7306-126b-461f-926e-40eb750fe16c-kube-api-access-ft9n6\") pod \"092e7306-126b-461f-926e-40eb750fe16c\" (UID: \"092e7306-126b-461f-926e-40eb750fe16c\") " Jan 22 07:28:36 crc 
kubenswrapper[4933]: I0122 07:28:36.817512 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/092e7306-126b-461f-926e-40eb750fe16c-combined-ca-bundle\") pod \"092e7306-126b-461f-926e-40eb750fe16c\" (UID: \"092e7306-126b-461f-926e-40eb750fe16c\") " Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.818268 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4791b14-4baa-4617-8ed9-be7a99cefa10-combined-ca-bundle\") pod \"d4791b14-4baa-4617-8ed9-be7a99cefa10\" (UID: \"d4791b14-4baa-4617-8ed9-be7a99cefa10\") " Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.818399 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w6tl5\" (UniqueName: \"kubernetes.io/projected/d4791b14-4baa-4617-8ed9-be7a99cefa10-kube-api-access-w6tl5\") pod \"d4791b14-4baa-4617-8ed9-be7a99cefa10\" (UID: \"d4791b14-4baa-4617-8ed9-be7a99cefa10\") " Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.818577 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d4791b14-4baa-4617-8ed9-be7a99cefa10-public-tls-certs\") pod \"d4791b14-4baa-4617-8ed9-be7a99cefa10\" (UID: \"d4791b14-4baa-4617-8ed9-be7a99cefa10\") " Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.817901 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d4791b14-4baa-4617-8ed9-be7a99cefa10-logs" (OuterVolumeSpecName: "logs") pod "d4791b14-4baa-4617-8ed9-be7a99cefa10" (UID: "d4791b14-4baa-4617-8ed9-be7a99cefa10"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.818528 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/092e7306-126b-461f-926e-40eb750fe16c-logs" (OuterVolumeSpecName: "logs") pod "092e7306-126b-461f-926e-40eb750fe16c" (UID: "092e7306-126b-461f-926e-40eb750fe16c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.819005 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d4791b14-4baa-4617-8ed9-be7a99cefa10-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "d4791b14-4baa-4617-8ed9-be7a99cefa10" (UID: "d4791b14-4baa-4617-8ed9-be7a99cefa10"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.819224 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/092e7306-126b-461f-926e-40eb750fe16c-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "092e7306-126b-461f-926e-40eb750fe16c" (UID: "092e7306-126b-461f-926e-40eb750fe16c"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.823326 4933 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/092e7306-126b-461f-926e-40eb750fe16c-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.823560 4933 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d4791b14-4baa-4617-8ed9-be7a99cefa10-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.823574 4933 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/092e7306-126b-461f-926e-40eb750fe16c-logs\") on node \"crc\" DevicePath \"\"" Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.823588 4933 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d4791b14-4baa-4617-8ed9-be7a99cefa10-logs\") on node \"crc\" DevicePath \"\"" Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.828385 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d4791b14-4baa-4617-8ed9-be7a99cefa10-kube-api-access-w6tl5" (OuterVolumeSpecName: "kube-api-access-w6tl5") pod "d4791b14-4baa-4617-8ed9-be7a99cefa10" (UID: "d4791b14-4baa-4617-8ed9-be7a99cefa10"). InnerVolumeSpecName "kube-api-access-w6tl5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.831445 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4791b14-4baa-4617-8ed9-be7a99cefa10-scripts" (OuterVolumeSpecName: "scripts") pod "d4791b14-4baa-4617-8ed9-be7a99cefa10" (UID: "d4791b14-4baa-4617-8ed9-be7a99cefa10"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.831782 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/092e7306-126b-461f-926e-40eb750fe16c-kube-api-access-ft9n6" (OuterVolumeSpecName: "kube-api-access-ft9n6") pod "092e7306-126b-461f-926e-40eb750fe16c" (UID: "092e7306-126b-461f-926e-40eb750fe16c"). InnerVolumeSpecName "kube-api-access-ft9n6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.856373 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/092e7306-126b-461f-926e-40eb750fe16c-scripts" (OuterVolumeSpecName: "scripts") pod "092e7306-126b-461f-926e-40eb750fe16c" (UID: "092e7306-126b-461f-926e-40eb750fe16c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.905770 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/092e7306-126b-461f-926e-40eb750fe16c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "092e7306-126b-461f-926e-40eb750fe16c" (UID: "092e7306-126b-461f-926e-40eb750fe16c"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.906332 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4791b14-4baa-4617-8ed9-be7a99cefa10-config-data" (OuterVolumeSpecName: "config-data") pod "d4791b14-4baa-4617-8ed9-be7a99cefa10" (UID: "d4791b14-4baa-4617-8ed9-be7a99cefa10"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.906987 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4791b14-4baa-4617-8ed9-be7a99cefa10-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d4791b14-4baa-4617-8ed9-be7a99cefa10" (UID: "d4791b14-4baa-4617-8ed9-be7a99cefa10"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.907857 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/092e7306-126b-461f-926e-40eb750fe16c-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "092e7306-126b-461f-926e-40eb750fe16c" (UID: "092e7306-126b-461f-926e-40eb750fe16c"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.917155 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4791b14-4baa-4617-8ed9-be7a99cefa10-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "d4791b14-4baa-4617-8ed9-be7a99cefa10" (UID: "d4791b14-4baa-4617-8ed9-be7a99cefa10"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.925526 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/092e7306-126b-461f-926e-40eb750fe16c-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.925552 4933 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/092e7306-126b-461f-926e-40eb750fe16c-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.925562 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ft9n6\" (UniqueName: \"kubernetes.io/projected/092e7306-126b-461f-926e-40eb750fe16c-kube-api-access-ft9n6\") on node \"crc\" DevicePath \"\"" Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.925571 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/092e7306-126b-461f-926e-40eb750fe16c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.925580 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4791b14-4baa-4617-8ed9-be7a99cefa10-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.925588 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w6tl5\" (UniqueName: \"kubernetes.io/projected/d4791b14-4baa-4617-8ed9-be7a99cefa10-kube-api-access-w6tl5\") on node \"crc\" DevicePath \"\"" Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.925598 4933 reconciler_common.go:293] "Volume 
detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d4791b14-4baa-4617-8ed9-be7a99cefa10-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.925606 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4791b14-4baa-4617-8ed9-be7a99cefa10-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.925615 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4791b14-4baa-4617-8ed9-be7a99cefa10-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 07:28:36 crc kubenswrapper[4933]: I0122 07:28:36.927330 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/092e7306-126b-461f-926e-40eb750fe16c-config-data" (OuterVolumeSpecName: "config-data") pod "092e7306-126b-461f-926e-40eb750fe16c" (UID: "092e7306-126b-461f-926e-40eb750fe16c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.018317 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-78d45b5589-svr2j" event={"ID":"7537515b-2d77-4197-a172-a34ef69681d8","Type":"ContainerStarted","Data":"b6d9f9467fef161e98948042a00239c1bfe7cbad89aefd7ad87462024a1bbda4"} Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.019577 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-545f5b546f-zwk44" event={"ID":"e29bcc09-3449-49eb-83a7-972bbf7ad894","Type":"ContainerStarted","Data":"d32c544dba88f1a8a24876b08a2a668863c438e6c612120f84666f06b105243a"} Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.020998 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"d4791b14-4baa-4617-8ed9-be7a99cefa10","Type":"ContainerDied","Data":"811a8828696100867ca7897585fb5b058e70a0ca53671a6abfb71d039a0fa5a1"} Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.021041 4933 scope.go:117] "RemoveContainer" containerID="7734da35a424a967b843c3fd0e2e954fb00ef47bc9ccae1413f16767fba617d8" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.021048 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.023779 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5c5f49847d-w5xqj" event={"ID":"7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9","Type":"ContainerStarted","Data":"848e658d7f74b79529dc94ef4850e37fd66fed5beae6882e6c30e41c4bedb3b6"} Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.027051 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"092e7306-126b-461f-926e-40eb750fe16c","Type":"ContainerDied","Data":"4c9097d9029abafe88ee5edd24b87bf642c77c9d425e8557bfa28c539728f750"} Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.027105 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.027274 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/092e7306-126b-461f-926e-40eb750fe16c-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.029251 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7f598858d8-sp7f8" event={"ID":"d5a0747b-cb15-4a5a-bf0d-71de8f9592eb","Type":"ContainerStarted","Data":"773ca2e3da87f5254efe29ebdaa596523a49955616818d727fc3c7ff7ed20e44"} Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.082874 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.088018 4933 scope.go:117] "RemoveContainer" containerID="c9596897b78fcef7e14f3af766be7bf3274b03cc4d6e81701c5db05b4038497f" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.098675 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.108918 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.112436 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 07:28:37 crc kubenswrapper[4933]: E0122 07:28:37.112946 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4791b14-4baa-4617-8ed9-be7a99cefa10" containerName="glance-httpd" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.113009 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4791b14-4baa-4617-8ed9-be7a99cefa10" containerName="glance-httpd" Jan 22 07:28:37 crc kubenswrapper[4933]: E0122 07:28:37.113029 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="092e7306-126b-461f-926e-40eb750fe16c" containerName="glance-httpd" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.113037 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="092e7306-126b-461f-926e-40eb750fe16c" containerName="glance-httpd" Jan 22 07:28:37 crc kubenswrapper[4933]: E0122 07:28:37.113052 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="092e7306-126b-461f-926e-40eb750fe16c" containerName="glance-log" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.113059 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="092e7306-126b-461f-926e-40eb750fe16c" containerName="glance-log" Jan 22 07:28:37 crc kubenswrapper[4933]: E0122 07:28:37.113126 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4791b14-4baa-4617-8ed9-be7a99cefa10" containerName="glance-log" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.113135 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4791b14-4baa-4617-8ed9-be7a99cefa10" containerName="glance-log" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.113354 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4791b14-4baa-4617-8ed9-be7a99cefa10" containerName="glance-httpd" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.113383 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="092e7306-126b-461f-926e-40eb750fe16c" containerName="glance-log" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.113392 4933 memory_manager.go:354] "RemoveStaleState removing 
state" podUID="d4791b14-4baa-4617-8ed9-be7a99cefa10" containerName="glance-log" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.113411 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="092e7306-126b-461f-926e-40eb750fe16c" containerName="glance-httpd" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.114706 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.121330 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.121609 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-2pv9h" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.121947 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.122329 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.122385 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.134643 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.144101 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.145800 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.148599 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.149220 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.151702 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.194218 4933 scope.go:117] "RemoveContainer" containerID="bbf8107eab3719f42cb265adf56d054bc8486cb67784a51193d0a5839262e8d6" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.231331 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ef4fb634-4e08-44e1-9f52-c0ceeadefbd4-scripts\") pod \"glance-default-external-api-0\" (UID: \"ef4fb634-4e08-44e1-9f52-c0ceeadefbd4\") " pod="openstack/glance-default-external-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.233817 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vcwtx\" (UniqueName: \"kubernetes.io/projected/ef4fb634-4e08-44e1-9f52-c0ceeadefbd4-kube-api-access-vcwtx\") pod \"glance-default-external-api-0\" (UID: \"ef4fb634-4e08-44e1-9f52-c0ceeadefbd4\") " pod="openstack/glance-default-external-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.233973 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/ef4fb634-4e08-44e1-9f52-c0ceeadefbd4-logs\") pod \"glance-default-external-api-0\" (UID: \"ef4fb634-4e08-44e1-9f52-c0ceeadefbd4\") " pod="openstack/glance-default-external-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.234209 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ef4fb634-4e08-44e1-9f52-c0ceeadefbd4-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"ef4fb634-4e08-44e1-9f52-c0ceeadefbd4\") " pod="openstack/glance-default-external-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.234797 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef4fb634-4e08-44e1-9f52-c0ceeadefbd4-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"ef4fb634-4e08-44e1-9f52-c0ceeadefbd4\") " pod="openstack/glance-default-external-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.234911 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ef4fb634-4e08-44e1-9f52-c0ceeadefbd4-config-data\") pod \"glance-default-external-api-0\" (UID: \"ef4fb634-4e08-44e1-9f52-c0ceeadefbd4\") " pod="openstack/glance-default-external-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.235272 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ef4fb634-4e08-44e1-9f52-c0ceeadefbd4-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"ef4fb634-4e08-44e1-9f52-c0ceeadefbd4\") " pod="openstack/glance-default-external-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.241163 4933 scope.go:117] "RemoveContainer" containerID="286a9e012307ad919613e2363acff2d6b5283f8ea4ba44cfa8a37bebd45ea682" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.342886 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t4cw8\" (UniqueName: \"kubernetes.io/projected/8526897f-7543-4b1c-979b-601c17f31f54-kube-api-access-t4cw8\") pod \"glance-default-internal-api-0\" (UID: \"8526897f-7543-4b1c-979b-601c17f31f54\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.343498 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8526897f-7543-4b1c-979b-601c17f31f54-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"8526897f-7543-4b1c-979b-601c17f31f54\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.343563 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8526897f-7543-4b1c-979b-601c17f31f54-scripts\") pod \"glance-default-internal-api-0\" (UID: \"8526897f-7543-4b1c-979b-601c17f31f54\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.345229 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ef4fb634-4e08-44e1-9f52-c0ceeadefbd4-scripts\") pod \"glance-default-external-api-0\" (UID: 
\"ef4fb634-4e08-44e1-9f52-c0ceeadefbd4\") " pod="openstack/glance-default-external-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.345675 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vcwtx\" (UniqueName: \"kubernetes.io/projected/ef4fb634-4e08-44e1-9f52-c0ceeadefbd4-kube-api-access-vcwtx\") pod \"glance-default-external-api-0\" (UID: \"ef4fb634-4e08-44e1-9f52-c0ceeadefbd4\") " pod="openstack/glance-default-external-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.345732 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8526897f-7543-4b1c-979b-601c17f31f54-config-data\") pod \"glance-default-internal-api-0\" (UID: \"8526897f-7543-4b1c-979b-601c17f31f54\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.345789 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ef4fb634-4e08-44e1-9f52-c0ceeadefbd4-logs\") pod \"glance-default-external-api-0\" (UID: \"ef4fb634-4e08-44e1-9f52-c0ceeadefbd4\") " pod="openstack/glance-default-external-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.346194 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ef4fb634-4e08-44e1-9f52-c0ceeadefbd4-logs\") pod \"glance-default-external-api-0\" (UID: \"ef4fb634-4e08-44e1-9f52-c0ceeadefbd4\") " pod="openstack/glance-default-external-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.346386 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ef4fb634-4e08-44e1-9f52-c0ceeadefbd4-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"ef4fb634-4e08-44e1-9f52-c0ceeadefbd4\") " pod="openstack/glance-default-external-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.346438 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef4fb634-4e08-44e1-9f52-c0ceeadefbd4-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"ef4fb634-4e08-44e1-9f52-c0ceeadefbd4\") " pod="openstack/glance-default-external-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.346472 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ef4fb634-4e08-44e1-9f52-c0ceeadefbd4-config-data\") pod \"glance-default-external-api-0\" (UID: \"ef4fb634-4e08-44e1-9f52-c0ceeadefbd4\") " pod="openstack/glance-default-external-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.346498 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8526897f-7543-4b1c-979b-601c17f31f54-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"8526897f-7543-4b1c-979b-601c17f31f54\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.346662 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8526897f-7543-4b1c-979b-601c17f31f54-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: 
\"8526897f-7543-4b1c-979b-601c17f31f54\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.346686 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8526897f-7543-4b1c-979b-601c17f31f54-logs\") pod \"glance-default-internal-api-0\" (UID: \"8526897f-7543-4b1c-979b-601c17f31f54\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.346750 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ef4fb634-4e08-44e1-9f52-c0ceeadefbd4-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"ef4fb634-4e08-44e1-9f52-c0ceeadefbd4\") " pod="openstack/glance-default-external-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.346929 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ef4fb634-4e08-44e1-9f52-c0ceeadefbd4-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"ef4fb634-4e08-44e1-9f52-c0ceeadefbd4\") " pod="openstack/glance-default-external-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.358536 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ef4fb634-4e08-44e1-9f52-c0ceeadefbd4-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"ef4fb634-4e08-44e1-9f52-c0ceeadefbd4\") " pod="openstack/glance-default-external-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.359035 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ef4fb634-4e08-44e1-9f52-c0ceeadefbd4-scripts\") pod \"glance-default-external-api-0\" (UID: \"ef4fb634-4e08-44e1-9f52-c0ceeadefbd4\") " pod="openstack/glance-default-external-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.365003 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef4fb634-4e08-44e1-9f52-c0ceeadefbd4-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"ef4fb634-4e08-44e1-9f52-c0ceeadefbd4\") " pod="openstack/glance-default-external-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.365427 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ef4fb634-4e08-44e1-9f52-c0ceeadefbd4-config-data\") pod \"glance-default-external-api-0\" (UID: \"ef4fb634-4e08-44e1-9f52-c0ceeadefbd4\") " pod="openstack/glance-default-external-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.376700 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vcwtx\" (UniqueName: \"kubernetes.io/projected/ef4fb634-4e08-44e1-9f52-c0ceeadefbd4-kube-api-access-vcwtx\") pod \"glance-default-external-api-0\" (UID: \"ef4fb634-4e08-44e1-9f52-c0ceeadefbd4\") " pod="openstack/glance-default-external-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.448848 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t4cw8\" (UniqueName: \"kubernetes.io/projected/8526897f-7543-4b1c-979b-601c17f31f54-kube-api-access-t4cw8\") pod \"glance-default-internal-api-0\" (UID: \"8526897f-7543-4b1c-979b-601c17f31f54\") " 
pod="openstack/glance-default-internal-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.448917 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8526897f-7543-4b1c-979b-601c17f31f54-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"8526897f-7543-4b1c-979b-601c17f31f54\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.448936 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8526897f-7543-4b1c-979b-601c17f31f54-scripts\") pod \"glance-default-internal-api-0\" (UID: \"8526897f-7543-4b1c-979b-601c17f31f54\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.448967 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8526897f-7543-4b1c-979b-601c17f31f54-config-data\") pod \"glance-default-internal-api-0\" (UID: \"8526897f-7543-4b1c-979b-601c17f31f54\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.449017 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8526897f-7543-4b1c-979b-601c17f31f54-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"8526897f-7543-4b1c-979b-601c17f31f54\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.449097 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8526897f-7543-4b1c-979b-601c17f31f54-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"8526897f-7543-4b1c-979b-601c17f31f54\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.449119 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8526897f-7543-4b1c-979b-601c17f31f54-logs\") pod \"glance-default-internal-api-0\" (UID: \"8526897f-7543-4b1c-979b-601c17f31f54\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.449562 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8526897f-7543-4b1c-979b-601c17f31f54-logs\") pod \"glance-default-internal-api-0\" (UID: \"8526897f-7543-4b1c-979b-601c17f31f54\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.451682 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8526897f-7543-4b1c-979b-601c17f31f54-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"8526897f-7543-4b1c-979b-601c17f31f54\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.453788 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8526897f-7543-4b1c-979b-601c17f31f54-scripts\") pod \"glance-default-internal-api-0\" (UID: \"8526897f-7543-4b1c-979b-601c17f31f54\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.454507 4933 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8526897f-7543-4b1c-979b-601c17f31f54-config-data\") pod \"glance-default-internal-api-0\" (UID: \"8526897f-7543-4b1c-979b-601c17f31f54\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.455853 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8526897f-7543-4b1c-979b-601c17f31f54-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"8526897f-7543-4b1c-979b-601c17f31f54\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.456718 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8526897f-7543-4b1c-979b-601c17f31f54-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"8526897f-7543-4b1c-979b-601c17f31f54\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.457411 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.485094 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t4cw8\" (UniqueName: \"kubernetes.io/projected/8526897f-7543-4b1c-979b-601c17f31f54-kube-api-access-t4cw8\") pod \"glance-default-internal-api-0\" (UID: \"8526897f-7543-4b1c-979b-601c17f31f54\") " pod="openstack/glance-default-internal-api-0" Jan 22 07:28:37 crc kubenswrapper[4933]: I0122 07:28:37.771711 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 22 07:28:38 crc kubenswrapper[4933]: I0122 07:28:38.048930 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5c5f49847d-w5xqj" event={"ID":"7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9","Type":"ContainerStarted","Data":"2090202e08f4df1495dbf60793a8990961529f01c45d63a443c7b1afb1191cea"} Jan 22 07:28:38 crc kubenswrapper[4933]: I0122 07:28:38.048981 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5c5f49847d-w5xqj" event={"ID":"7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9","Type":"ContainerStarted","Data":"e755fb15a1df5046702ecfc8397f2bfcef3891d8d8a141bb7a0ae217f94ea160"} Jan 22 07:28:38 crc kubenswrapper[4933]: I0122 07:28:38.052351 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7f598858d8-sp7f8" event={"ID":"d5a0747b-cb15-4a5a-bf0d-71de8f9592eb","Type":"ContainerStarted","Data":"3ab4f7e221db08ced43307f7f2b631adf3c32ed4365154344de1494821ad4e48"} Jan 22 07:28:38 crc kubenswrapper[4933]: I0122 07:28:38.052403 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7f598858d8-sp7f8" event={"ID":"d5a0747b-cb15-4a5a-bf0d-71de8f9592eb","Type":"ContainerStarted","Data":"3dc911b554e56e3ebc70ae238c7cb0073de5c3341655535625142dd7a09ea6ec"} Jan 22 07:28:38 crc kubenswrapper[4933]: I0122 07:28:38.059155 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-78d45b5589-svr2j" event={"ID":"7537515b-2d77-4197-a172-a34ef69681d8","Type":"ContainerStarted","Data":"da6f6e1987effa0cdd5e73644c4af75b24fddb1bbf679a3af9a5200128223ed1"} Jan 22 07:28:38 crc kubenswrapper[4933]: I0122 07:28:38.059299 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-78d45b5589-svr2j" 
podUID="7537515b-2d77-4197-a172-a34ef69681d8" containerName="horizon-log" containerID="cri-o://b6d9f9467fef161e98948042a00239c1bfe7cbad89aefd7ad87462024a1bbda4" gracePeriod=30 Jan 22 07:28:38 crc kubenswrapper[4933]: I0122 07:28:38.059578 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-78d45b5589-svr2j" podUID="7537515b-2d77-4197-a172-a34ef69681d8" containerName="horizon" containerID="cri-o://da6f6e1987effa0cdd5e73644c4af75b24fddb1bbf679a3af9a5200128223ed1" gracePeriod=30 Jan 22 07:28:38 crc kubenswrapper[4933]: I0122 07:28:38.068341 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-545f5b546f-zwk44" event={"ID":"e29bcc09-3449-49eb-83a7-972bbf7ad894","Type":"ContainerStarted","Data":"05b75c4098ecbaeae7f73496bba95ce8907081c430d632561147a16994762c79"} Jan 22 07:28:38 crc kubenswrapper[4933]: I0122 07:28:38.068466 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-545f5b546f-zwk44" podUID="e29bcc09-3449-49eb-83a7-972bbf7ad894" containerName="horizon-log" containerID="cri-o://d32c544dba88f1a8a24876b08a2a668863c438e6c612120f84666f06b105243a" gracePeriod=30 Jan 22 07:28:38 crc kubenswrapper[4933]: I0122 07:28:38.068480 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-545f5b546f-zwk44" podUID="e29bcc09-3449-49eb-83a7-972bbf7ad894" containerName="horizon" containerID="cri-o://05b75c4098ecbaeae7f73496bba95ce8907081c430d632561147a16994762c79" gracePeriod=30 Jan 22 07:28:38 crc kubenswrapper[4933]: I0122 07:28:38.073888 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-5c5f49847d-w5xqj" podStartSLOduration=6.607632214 podStartE2EDuration="7.073870198s" podCreationTimestamp="2026-01-22 07:28:31 +0000 UTC" firstStartedPulling="2026-01-22 07:28:36.51947587 +0000 UTC m=+6164.356601253" lastFinishedPulling="2026-01-22 07:28:36.985713884 +0000 UTC m=+6164.822839237" observedRunningTime="2026-01-22 07:28:38.067902393 +0000 UTC m=+6165.905027746" watchObservedRunningTime="2026-01-22 07:28:38.073870198 +0000 UTC m=+6165.910995551" Jan 22 07:28:38 crc kubenswrapper[4933]: I0122 07:28:38.094253 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-78d45b5589-svr2j" podStartSLOduration=2.8092611720000003 podStartE2EDuration="10.094227404s" podCreationTimestamp="2026-01-22 07:28:28 +0000 UTC" firstStartedPulling="2026-01-22 07:28:29.322463988 +0000 UTC m=+6157.159589341" lastFinishedPulling="2026-01-22 07:28:36.60743019 +0000 UTC m=+6164.444555573" observedRunningTime="2026-01-22 07:28:38.088028203 +0000 UTC m=+6165.925153556" watchObservedRunningTime="2026-01-22 07:28:38.094227404 +0000 UTC m=+6165.931352757" Jan 22 07:28:38 crc kubenswrapper[4933]: I0122 07:28:38.136609 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-545f5b546f-zwk44" podStartSLOduration=2.9785574500000003 podStartE2EDuration="10.136595235s" podCreationTimestamp="2026-01-22 07:28:28 +0000 UTC" firstStartedPulling="2026-01-22 07:28:29.477612292 +0000 UTC m=+6157.314737665" lastFinishedPulling="2026-01-22 07:28:36.635650067 +0000 UTC m=+6164.472775450" observedRunningTime="2026-01-22 07:28:38.135983469 +0000 UTC m=+6165.973108852" watchObservedRunningTime="2026-01-22 07:28:38.136595235 +0000 UTC m=+6165.973720588" Jan 22 07:28:38 crc kubenswrapper[4933]: I0122 07:28:38.140842 4933 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="openstack/horizon-7f598858d8-sp7f8" podStartSLOduration=6.714229268 podStartE2EDuration="7.140819847s" podCreationTimestamp="2026-01-22 07:28:31 +0000 UTC" firstStartedPulling="2026-01-22 07:28:36.561437341 +0000 UTC m=+6164.398562694" lastFinishedPulling="2026-01-22 07:28:36.98802792 +0000 UTC m=+6164.825153273" observedRunningTime="2026-01-22 07:28:38.112435467 +0000 UTC m=+6165.949560820" watchObservedRunningTime="2026-01-22 07:28:38.140819847 +0000 UTC m=+6165.977945200" Jan 22 07:28:38 crc kubenswrapper[4933]: I0122 07:28:38.243581 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 07:28:38 crc kubenswrapper[4933]: I0122 07:28:38.392213 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 07:28:38 crc kubenswrapper[4933]: I0122 07:28:38.507882 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="092e7306-126b-461f-926e-40eb750fe16c" path="/var/lib/kubelet/pods/092e7306-126b-461f-926e-40eb750fe16c/volumes" Jan 22 07:28:38 crc kubenswrapper[4933]: I0122 07:28:38.510574 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d4791b14-4baa-4617-8ed9-be7a99cefa10" path="/var/lib/kubelet/pods/d4791b14-4baa-4617-8ed9-be7a99cefa10/volumes" Jan 22 07:28:38 crc kubenswrapper[4933]: I0122 07:28:38.850513 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-78d45b5589-svr2j" Jan 22 07:28:38 crc kubenswrapper[4933]: I0122 07:28:38.934143 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-545f5b546f-zwk44" Jan 22 07:28:39 crc kubenswrapper[4933]: I0122 07:28:39.078352 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ef4fb634-4e08-44e1-9f52-c0ceeadefbd4","Type":"ContainerStarted","Data":"f05fb8ab5ce1bc981f859104bdec2f020175bb4db4b0b5595cd1aa01e92b7332"} Jan 22 07:28:39 crc kubenswrapper[4933]: I0122 07:28:39.078397 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ef4fb634-4e08-44e1-9f52-c0ceeadefbd4","Type":"ContainerStarted","Data":"d104563fb6e7aff81ac8b006035c8cc3b274b254330f259bcb921cbec6b8907c"} Jan 22 07:28:39 crc kubenswrapper[4933]: I0122 07:28:39.081099 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8526897f-7543-4b1c-979b-601c17f31f54","Type":"ContainerStarted","Data":"7bacfa6384d6d68ff88f30967031dd961dde8b4dd2cf4258b29e5090fcb8e784"} Jan 22 07:28:39 crc kubenswrapper[4933]: I0122 07:28:39.081142 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8526897f-7543-4b1c-979b-601c17f31f54","Type":"ContainerStarted","Data":"7c83a991bc71840624787048605defa556b4cd8d4f2c80fac5ced13c537bdae1"} Jan 22 07:28:40 crc kubenswrapper[4933]: I0122 07:28:40.090417 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"8526897f-7543-4b1c-979b-601c17f31f54","Type":"ContainerStarted","Data":"9599c573948039ba2e198c3f809e82f5487f713c7c744dfb6f2d6c36c27f5661"} Jan 22 07:28:40 crc kubenswrapper[4933]: I0122 07:28:40.092423 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" 
event={"ID":"ef4fb634-4e08-44e1-9f52-c0ceeadefbd4","Type":"ContainerStarted","Data":"ab7c88759cedfd6af6021e9e2ca53b107c1997b77390dc78ec72aaa4ce819c40"} Jan 22 07:28:40 crc kubenswrapper[4933]: I0122 07:28:40.118287 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.118269518 podStartE2EDuration="3.118269518s" podCreationTimestamp="2026-01-22 07:28:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:28:40.114477666 +0000 UTC m=+6167.951603019" watchObservedRunningTime="2026-01-22 07:28:40.118269518 +0000 UTC m=+6167.955394871" Jan 22 07:28:41 crc kubenswrapper[4933]: I0122 07:28:41.603161 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-5c5f49847d-w5xqj" Jan 22 07:28:41 crc kubenswrapper[4933]: I0122 07:28:41.603705 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-5c5f49847d-w5xqj" Jan 22 07:28:41 crc kubenswrapper[4933]: I0122 07:28:41.725324 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-7f598858d8-sp7f8" Jan 22 07:28:41 crc kubenswrapper[4933]: I0122 07:28:41.725371 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-7f598858d8-sp7f8" Jan 22 07:28:44 crc kubenswrapper[4933]: I0122 07:28:44.491023 4933 scope.go:117] "RemoveContainer" containerID="16fd824342f037f0104eaf6d48b751a02eeec5a84e0c2a32f1a295811edc0336" Jan 22 07:28:44 crc kubenswrapper[4933]: E0122 07:28:44.492120 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:28:47 crc kubenswrapper[4933]: I0122 07:28:47.458625 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 22 07:28:47 crc kubenswrapper[4933]: I0122 07:28:47.459856 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 22 07:28:47 crc kubenswrapper[4933]: I0122 07:28:47.494205 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 22 07:28:47 crc kubenswrapper[4933]: I0122 07:28:47.518099 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 22 07:28:47 crc kubenswrapper[4933]: I0122 07:28:47.518482 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=10.518461754 podStartE2EDuration="10.518461754s" podCreationTimestamp="2026-01-22 07:28:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:28:40.136272437 +0000 UTC m=+6167.973397800" watchObservedRunningTime="2026-01-22 07:28:47.518461754 +0000 UTC m=+6175.355587097" Jan 22 07:28:47 crc kubenswrapper[4933]: I0122 07:28:47.773446 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openstack/glance-default-internal-api-0" Jan 22 07:28:47 crc kubenswrapper[4933]: I0122 07:28:47.773531 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 22 07:28:47 crc kubenswrapper[4933]: I0122 07:28:47.823473 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 22 07:28:47 crc kubenswrapper[4933]: I0122 07:28:47.829960 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 22 07:28:48 crc kubenswrapper[4933]: I0122 07:28:48.198301 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 22 07:28:48 crc kubenswrapper[4933]: I0122 07:28:48.198656 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 22 07:28:48 crc kubenswrapper[4933]: I0122 07:28:48.198674 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 22 07:28:48 crc kubenswrapper[4933]: I0122 07:28:48.198686 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 22 07:28:50 crc kubenswrapper[4933]: I0122 07:28:50.837626 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 22 07:28:50 crc kubenswrapper[4933]: I0122 07:28:50.838048 4933 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 22 07:28:50 crc kubenswrapper[4933]: I0122 07:28:50.879636 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 22 07:28:50 crc kubenswrapper[4933]: I0122 07:28:50.897111 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 22 07:28:50 crc kubenswrapper[4933]: I0122 07:28:50.897234 4933 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 22 07:28:50 crc kubenswrapper[4933]: I0122 07:28:50.898936 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 22 07:28:51 crc kubenswrapper[4933]: I0122 07:28:51.606105 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-5c5f49847d-w5xqj" podUID="7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.119:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.119:8443: connect: connection refused" Jan 22 07:28:51 crc kubenswrapper[4933]: I0122 07:28:51.725829 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-7f598858d8-sp7f8" podUID="d5a0747b-cb15-4a5a-bf0d-71de8f9592eb" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.120:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.120:8443: connect: connection refused" Jan 22 07:28:55 crc kubenswrapper[4933]: I0122 07:28:55.491287 4933 scope.go:117] "RemoveContainer" containerID="16fd824342f037f0104eaf6d48b751a02eeec5a84e0c2a32f1a295811edc0336" Jan 22 07:28:55 crc kubenswrapper[4933]: E0122 07:28:55.492330 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
Jan 22 07:28:55 crc kubenswrapper[4933]: I0122 07:28:55.491287 4933 scope.go:117] "RemoveContainer" containerID="16fd824342f037f0104eaf6d48b751a02eeec5a84e0c2a32f1a295811edc0336"
Jan 22 07:28:55 crc kubenswrapper[4933]: E0122 07:28:55.492330 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 07:29:03 crc kubenswrapper[4933]: I0122 07:29:03.347682 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-7f598858d8-sp7f8"
Jan 22 07:29:03 crc kubenswrapper[4933]: I0122 07:29:03.422267 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-5c5f49847d-w5xqj"
Jan 22 07:29:05 crc kubenswrapper[4933]: I0122 07:29:05.139836 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-5c5f49847d-w5xqj"
Jan 22 07:29:05 crc kubenswrapper[4933]: I0122 07:29:05.152337 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-7f598858d8-sp7f8"
Jan 22 07:29:05 crc kubenswrapper[4933]: I0122 07:29:05.264035 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5c5f49847d-w5xqj"]
Jan 22 07:29:05 crc kubenswrapper[4933]: I0122 07:29:05.359374 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-5c5f49847d-w5xqj" podUID="7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9" containerName="horizon-log" containerID="cri-o://e755fb15a1df5046702ecfc8397f2bfcef3891d8d8a141bb7a0ae217f94ea160" gracePeriod=30
Jan 22 07:29:05 crc kubenswrapper[4933]: I0122 07:29:05.359758 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-5c5f49847d-w5xqj" podUID="7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9" containerName="horizon" containerID="cri-o://2090202e08f4df1495dbf60793a8990961529f01c45d63a443c7b1afb1191cea" gracePeriod=30
Jan 22 07:29:08 crc kubenswrapper[4933]: I0122 07:29:08.391069 4933 generic.go:334] "Generic (PLEG): container finished" podID="7537515b-2d77-4197-a172-a34ef69681d8" containerID="da6f6e1987effa0cdd5e73644c4af75b24fddb1bbf679a3af9a5200128223ed1" exitCode=137
Jan 22 07:29:08 crc kubenswrapper[4933]: I0122 07:29:08.391479 4933 generic.go:334] "Generic (PLEG): container finished" podID="7537515b-2d77-4197-a172-a34ef69681d8" containerID="b6d9f9467fef161e98948042a00239c1bfe7cbad89aefd7ad87462024a1bbda4" exitCode=137
Jan 22 07:29:08 crc kubenswrapper[4933]: I0122 07:29:08.391521 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-78d45b5589-svr2j" event={"ID":"7537515b-2d77-4197-a172-a34ef69681d8","Type":"ContainerDied","Data":"da6f6e1987effa0cdd5e73644c4af75b24fddb1bbf679a3af9a5200128223ed1"}
Jan 22 07:29:08 crc kubenswrapper[4933]: I0122 07:29:08.391551 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-78d45b5589-svr2j" event={"ID":"7537515b-2d77-4197-a172-a34ef69681d8","Type":"ContainerDied","Data":"b6d9f9467fef161e98948042a00239c1bfe7cbad89aefd7ad87462024a1bbda4"}
Jan 22 07:29:08 crc kubenswrapper[4933]: I0122 07:29:08.392862 4933 generic.go:334] "Generic (PLEG): container finished" podID="e29bcc09-3449-49eb-83a7-972bbf7ad894" containerID="05b75c4098ecbaeae7f73496bba95ce8907081c430d632561147a16994762c79" exitCode=137
Jan 22 07:29:08 crc kubenswrapper[4933]: I0122 07:29:08.392875 4933 generic.go:334] "Generic (PLEG): container finished" podID="e29bcc09-3449-49eb-83a7-972bbf7ad894" containerID="d32c544dba88f1a8a24876b08a2a668863c438e6c612120f84666f06b105243a" exitCode=137
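The exitCode=137 values above are what the horizon containers report when the 30s gracePeriod from the earlier "Killing container" entries runs out and the runtime escalates from SIGTERM to SIGKILL: a process killed by a signal exits with 128 plus the signal number. The one-liner below is plain arithmetic using the standard Linux SIGKILL number, not anything from the kubelet.

// exitcode.go - why a SIGKILLed container reports exit code 137.
package main

import "fmt"

func main() {
	const sigkill = 9
	fmt.Println(128 + sigkill) // 137, the exitCode logged for the horizon containers
}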
Jan 22 07:29:08 crc kubenswrapper[4933]: I0122 07:29:08.392887 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-545f5b546f-zwk44" event={"ID":"e29bcc09-3449-49eb-83a7-972bbf7ad894","Type":"ContainerDied","Data":"05b75c4098ecbaeae7f73496bba95ce8907081c430d632561147a16994762c79"}
Jan 22 07:29:08 crc kubenswrapper[4933]: I0122 07:29:08.392939 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-545f5b546f-zwk44" event={"ID":"e29bcc09-3449-49eb-83a7-972bbf7ad894","Type":"ContainerDied","Data":"d32c544dba88f1a8a24876b08a2a668863c438e6c612120f84666f06b105243a"}
Jan 22 07:29:08 crc kubenswrapper[4933]: E0122 07:29:08.426400 4933 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7537515b_2d77_4197_a172_a34ef69681d8.slice/crio-da6f6e1987effa0cdd5e73644c4af75b24fddb1bbf679a3af9a5200128223ed1.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode29bcc09_3449_49eb_83a7_972bbf7ad894.slice/crio-conmon-d32c544dba88f1a8a24876b08a2a668863c438e6c612120f84666f06b105243a.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode29bcc09_3449_49eb_83a7_972bbf7ad894.slice/crio-conmon-05b75c4098ecbaeae7f73496bba95ce8907081c430d632561147a16994762c79.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7537515b_2d77_4197_a172_a34ef69681d8.slice/crio-conmon-da6f6e1987effa0cdd5e73644c4af75b24fddb1bbf679a3af9a5200128223ed1.scope\": RecentStats: unable to find data in memory cache]"
Jan 22 07:29:08 crc kubenswrapper[4933]: I0122 07:29:08.590535 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-78d45b5589-svr2j"
Jan 22 07:29:08 crc kubenswrapper[4933]: I0122 07:29:08.646767 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-545f5b546f-zwk44"
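The cgroup paths in the cadvisor error above follow the systemd slice layout for best-effort pods, with the pod UID's dashes replaced by underscores inside the slice name. The helper below is hypothetical (not a kubelet or cadvisor function), but its output reproduces the "/kubepods.slice/..." strings in the log exactly.

// cgrouppath.go - rebuild a best-effort pod's cri-o cgroup scope path.
package main

import (
	"fmt"
	"strings"
)

func besteffortScope(podUID, containerID string) string {
	uid := strings.ReplaceAll(podUID, "-", "_") // dashes become underscores in the slice name
	return fmt.Sprintf("/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod%s.slice/crio-%s.scope", uid, containerID)
}

func main() {
	// Matches the first path in the cadvisor_stats_provider error above.
	fmt.Println(besteffortScope("7537515b-2d77-4197-a172-a34ef69681d8",
		"da6f6e1987effa0cdd5e73644c4af75b24fddb1bbf679a3af9a5200128223ed1"))
}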
Need to start a new one" pod="openstack/horizon-545f5b546f-zwk44" Jan 22 07:29:08 crc kubenswrapper[4933]: I0122 07:29:08.673736 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xgzcz\" (UniqueName: \"kubernetes.io/projected/7537515b-2d77-4197-a172-a34ef69681d8-kube-api-access-xgzcz\") pod \"7537515b-2d77-4197-a172-a34ef69681d8\" (UID: \"7537515b-2d77-4197-a172-a34ef69681d8\") " Jan 22 07:29:08 crc kubenswrapper[4933]: I0122 07:29:08.673893 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7537515b-2d77-4197-a172-a34ef69681d8-logs\") pod \"7537515b-2d77-4197-a172-a34ef69681d8\" (UID: \"7537515b-2d77-4197-a172-a34ef69681d8\") " Jan 22 07:29:08 crc kubenswrapper[4933]: I0122 07:29:08.673931 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7537515b-2d77-4197-a172-a34ef69681d8-config-data\") pod \"7537515b-2d77-4197-a172-a34ef69681d8\" (UID: \"7537515b-2d77-4197-a172-a34ef69681d8\") " Jan 22 07:29:08 crc kubenswrapper[4933]: I0122 07:29:08.674005 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7537515b-2d77-4197-a172-a34ef69681d8-horizon-secret-key\") pod \"7537515b-2d77-4197-a172-a34ef69681d8\" (UID: \"7537515b-2d77-4197-a172-a34ef69681d8\") " Jan 22 07:29:08 crc kubenswrapper[4933]: I0122 07:29:08.674066 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7537515b-2d77-4197-a172-a34ef69681d8-scripts\") pod \"7537515b-2d77-4197-a172-a34ef69681d8\" (UID: \"7537515b-2d77-4197-a172-a34ef69681d8\") " Jan 22 07:29:08 crc kubenswrapper[4933]: I0122 07:29:08.675232 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7537515b-2d77-4197-a172-a34ef69681d8-logs" (OuterVolumeSpecName: "logs") pod "7537515b-2d77-4197-a172-a34ef69681d8" (UID: "7537515b-2d77-4197-a172-a34ef69681d8"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:29:08 crc kubenswrapper[4933]: I0122 07:29:08.702351 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7537515b-2d77-4197-a172-a34ef69681d8-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "7537515b-2d77-4197-a172-a34ef69681d8" (UID: "7537515b-2d77-4197-a172-a34ef69681d8"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:29:08 crc kubenswrapper[4933]: I0122 07:29:08.702555 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7537515b-2d77-4197-a172-a34ef69681d8-kube-api-access-xgzcz" (OuterVolumeSpecName: "kube-api-access-xgzcz") pod "7537515b-2d77-4197-a172-a34ef69681d8" (UID: "7537515b-2d77-4197-a172-a34ef69681d8"). InnerVolumeSpecName "kube-api-access-xgzcz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:29:08 crc kubenswrapper[4933]: I0122 07:29:08.719865 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7537515b-2d77-4197-a172-a34ef69681d8-config-data" (OuterVolumeSpecName: "config-data") pod "7537515b-2d77-4197-a172-a34ef69681d8" (UID: "7537515b-2d77-4197-a172-a34ef69681d8"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:29:08 crc kubenswrapper[4933]: I0122 07:29:08.776891 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e29bcc09-3449-49eb-83a7-972bbf7ad894-logs\") pod \"e29bcc09-3449-49eb-83a7-972bbf7ad894\" (UID: \"e29bcc09-3449-49eb-83a7-972bbf7ad894\") " Jan 22 07:29:08 crc kubenswrapper[4933]: I0122 07:29:08.777000 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e29bcc09-3449-49eb-83a7-972bbf7ad894-config-data\") pod \"e29bcc09-3449-49eb-83a7-972bbf7ad894\" (UID: \"e29bcc09-3449-49eb-83a7-972bbf7ad894\") " Jan 22 07:29:08 crc kubenswrapper[4933]: I0122 07:29:08.777071 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kslwb\" (UniqueName: \"kubernetes.io/projected/e29bcc09-3449-49eb-83a7-972bbf7ad894-kube-api-access-kslwb\") pod \"e29bcc09-3449-49eb-83a7-972bbf7ad894\" (UID: \"e29bcc09-3449-49eb-83a7-972bbf7ad894\") " Jan 22 07:29:08 crc kubenswrapper[4933]: I0122 07:29:08.777156 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e29bcc09-3449-49eb-83a7-972bbf7ad894-scripts\") pod \"e29bcc09-3449-49eb-83a7-972bbf7ad894\" (UID: \"e29bcc09-3449-49eb-83a7-972bbf7ad894\") " Jan 22 07:29:08 crc kubenswrapper[4933]: I0122 07:29:08.777306 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e29bcc09-3449-49eb-83a7-972bbf7ad894-logs" (OuterVolumeSpecName: "logs") pod "e29bcc09-3449-49eb-83a7-972bbf7ad894" (UID: "e29bcc09-3449-49eb-83a7-972bbf7ad894"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:29:08 crc kubenswrapper[4933]: I0122 07:29:08.777338 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/e29bcc09-3449-49eb-83a7-972bbf7ad894-horizon-secret-key\") pod \"e29bcc09-3449-49eb-83a7-972bbf7ad894\" (UID: \"e29bcc09-3449-49eb-83a7-972bbf7ad894\") " Jan 22 07:29:08 crc kubenswrapper[4933]: I0122 07:29:08.778185 4933 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7537515b-2d77-4197-a172-a34ef69681d8-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Jan 22 07:29:08 crc kubenswrapper[4933]: I0122 07:29:08.778203 4933 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e29bcc09-3449-49eb-83a7-972bbf7ad894-logs\") on node \"crc\" DevicePath \"\"" Jan 22 07:29:08 crc kubenswrapper[4933]: I0122 07:29:08.778213 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xgzcz\" (UniqueName: \"kubernetes.io/projected/7537515b-2d77-4197-a172-a34ef69681d8-kube-api-access-xgzcz\") on node \"crc\" DevicePath \"\"" Jan 22 07:29:08 crc kubenswrapper[4933]: I0122 07:29:08.778221 4933 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7537515b-2d77-4197-a172-a34ef69681d8-logs\") on node \"crc\" DevicePath \"\"" Jan 22 07:29:08 crc kubenswrapper[4933]: I0122 07:29:08.778229 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7537515b-2d77-4197-a172-a34ef69681d8-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 07:29:08 crc kubenswrapper[4933]: I0122 07:29:08.783617 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e29bcc09-3449-49eb-83a7-972bbf7ad894-kube-api-access-kslwb" (OuterVolumeSpecName: "kube-api-access-kslwb") pod "e29bcc09-3449-49eb-83a7-972bbf7ad894" (UID: "e29bcc09-3449-49eb-83a7-972bbf7ad894"). InnerVolumeSpecName "kube-api-access-kslwb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:29:08 crc kubenswrapper[4933]: I0122 07:29:08.787611 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7537515b-2d77-4197-a172-a34ef69681d8-scripts" (OuterVolumeSpecName: "scripts") pod "7537515b-2d77-4197-a172-a34ef69681d8" (UID: "7537515b-2d77-4197-a172-a34ef69681d8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:29:08 crc kubenswrapper[4933]: I0122 07:29:08.789315 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e29bcc09-3449-49eb-83a7-972bbf7ad894-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "e29bcc09-3449-49eb-83a7-972bbf7ad894" (UID: "e29bcc09-3449-49eb-83a7-972bbf7ad894"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:29:08 crc kubenswrapper[4933]: I0122 07:29:08.847478 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e29bcc09-3449-49eb-83a7-972bbf7ad894-scripts" (OuterVolumeSpecName: "scripts") pod "e29bcc09-3449-49eb-83a7-972bbf7ad894" (UID: "e29bcc09-3449-49eb-83a7-972bbf7ad894"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:29:08 crc kubenswrapper[4933]: I0122 07:29:08.849672 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e29bcc09-3449-49eb-83a7-972bbf7ad894-config-data" (OuterVolumeSpecName: "config-data") pod "e29bcc09-3449-49eb-83a7-972bbf7ad894" (UID: "e29bcc09-3449-49eb-83a7-972bbf7ad894"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:29:08 crc kubenswrapper[4933]: I0122 07:29:08.880263 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7537515b-2d77-4197-a172-a34ef69681d8-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:29:08 crc kubenswrapper[4933]: I0122 07:29:08.880299 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e29bcc09-3449-49eb-83a7-972bbf7ad894-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 07:29:08 crc kubenswrapper[4933]: I0122 07:29:08.880312 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kslwb\" (UniqueName: \"kubernetes.io/projected/e29bcc09-3449-49eb-83a7-972bbf7ad894-kube-api-access-kslwb\") on node \"crc\" DevicePath \"\"" Jan 22 07:29:08 crc kubenswrapper[4933]: I0122 07:29:08.880322 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e29bcc09-3449-49eb-83a7-972bbf7ad894-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:29:08 crc kubenswrapper[4933]: I0122 07:29:08.880332 4933 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/e29bcc09-3449-49eb-83a7-972bbf7ad894-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Jan 22 07:29:09 crc kubenswrapper[4933]: I0122 07:29:09.404982 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-78d45b5589-svr2j" event={"ID":"7537515b-2d77-4197-a172-a34ef69681d8","Type":"ContainerDied","Data":"2ba5e39f3230e76aba97cd4f8d3f456bd2112f1b63a1dee49587e679a0c6018f"} Jan 22 07:29:09 crc kubenswrapper[4933]: I0122 07:29:09.405310 4933 scope.go:117] "RemoveContainer" containerID="da6f6e1987effa0cdd5e73644c4af75b24fddb1bbf679a3af9a5200128223ed1" Jan 22 07:29:09 crc kubenswrapper[4933]: I0122 07:29:09.405429 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-78d45b5589-svr2j" Jan 22 07:29:09 crc kubenswrapper[4933]: I0122 07:29:09.415656 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-545f5b546f-zwk44" event={"ID":"e29bcc09-3449-49eb-83a7-972bbf7ad894","Type":"ContainerDied","Data":"50d638fe767e52d5de11773dcc3bbdbf8b0f621f3aba5277c25c9d557f9f16e7"} Jan 22 07:29:09 crc kubenswrapper[4933]: I0122 07:29:09.416196 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-545f5b546f-zwk44" Jan 22 07:29:09 crc kubenswrapper[4933]: I0122 07:29:09.424309 4933 generic.go:334] "Generic (PLEG): container finished" podID="7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9" containerID="2090202e08f4df1495dbf60793a8990961529f01c45d63a443c7b1afb1191cea" exitCode=0 Jan 22 07:29:09 crc kubenswrapper[4933]: I0122 07:29:09.424366 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5c5f49847d-w5xqj" event={"ID":"7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9","Type":"ContainerDied","Data":"2090202e08f4df1495dbf60793a8990961529f01c45d63a443c7b1afb1191cea"} Jan 22 07:29:09 crc kubenswrapper[4933]: I0122 07:29:09.450665 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-78d45b5589-svr2j"] Jan 22 07:29:09 crc kubenswrapper[4933]: I0122 07:29:09.463701 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-78d45b5589-svr2j"] Jan 22 07:29:09 crc kubenswrapper[4933]: I0122 07:29:09.478157 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-545f5b546f-zwk44"] Jan 22 07:29:09 crc kubenswrapper[4933]: I0122 07:29:09.489450 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-545f5b546f-zwk44"] Jan 22 07:29:09 crc kubenswrapper[4933]: I0122 07:29:09.493371 4933 scope.go:117] "RemoveContainer" containerID="16fd824342f037f0104eaf6d48b751a02eeec5a84e0c2a32f1a295811edc0336" Jan 22 07:29:09 crc kubenswrapper[4933]: E0122 07:29:09.493633 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:29:09 crc kubenswrapper[4933]: I0122 07:29:09.600037 4933 scope.go:117] "RemoveContainer" containerID="b6d9f9467fef161e98948042a00239c1bfe7cbad89aefd7ad87462024a1bbda4" Jan 22 07:29:09 crc kubenswrapper[4933]: I0122 07:29:09.622503 4933 scope.go:117] "RemoveContainer" containerID="05b75c4098ecbaeae7f73496bba95ce8907081c430d632561147a16994762c79" Jan 22 07:29:09 crc kubenswrapper[4933]: I0122 07:29:09.810001 4933 scope.go:117] "RemoveContainer" containerID="d32c544dba88f1a8a24876b08a2a668863c438e6c612120f84666f06b105243a" Jan 22 07:29:10 crc kubenswrapper[4933]: I0122 07:29:10.510653 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7537515b-2d77-4197-a172-a34ef69681d8" path="/var/lib/kubelet/pods/7537515b-2d77-4197-a172-a34ef69681d8/volumes" Jan 22 07:29:10 crc kubenswrapper[4933]: I0122 07:29:10.512606 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e29bcc09-3449-49eb-83a7-972bbf7ad894" path="/var/lib/kubelet/pods/e29bcc09-3449-49eb-83a7-972bbf7ad894/volumes" Jan 22 07:29:11 crc kubenswrapper[4933]: I0122 07:29:11.604143 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-5c5f49847d-w5xqj" podUID="7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.119:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.119:8443: connect: connection refused" Jan 22 07:29:17 crc kubenswrapper[4933]: I0122 07:29:17.058006 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-5e60-account-create-update-jwn9r"] Jan 22 
07:29:17 crc kubenswrapper[4933]: I0122 07:29:17.074566 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-lgf6v"] Jan 22 07:29:17 crc kubenswrapper[4933]: I0122 07:29:17.084510 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-5e60-account-create-update-jwn9r"] Jan 22 07:29:17 crc kubenswrapper[4933]: I0122 07:29:17.093626 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-lgf6v"] Jan 22 07:29:18 crc kubenswrapper[4933]: I0122 07:29:18.502811 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="265ce84e-0c9e-4e32-9eaa-8821b19bc29d" path="/var/lib/kubelet/pods/265ce84e-0c9e-4e32-9eaa-8821b19bc29d/volumes" Jan 22 07:29:18 crc kubenswrapper[4933]: I0122 07:29:18.504088 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="79198723-552a-4002-8ac2-f66a008a26ae" path="/var/lib/kubelet/pods/79198723-552a-4002-8ac2-f66a008a26ae/volumes" Jan 22 07:29:20 crc kubenswrapper[4933]: I0122 07:29:20.491118 4933 scope.go:117] "RemoveContainer" containerID="16fd824342f037f0104eaf6d48b751a02eeec5a84e0c2a32f1a295811edc0336" Jan 22 07:29:20 crc kubenswrapper[4933]: E0122 07:29:20.491966 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:29:21 crc kubenswrapper[4933]: I0122 07:29:21.604041 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-5c5f49847d-w5xqj" podUID="7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.119:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.119:8443: connect: connection refused" Jan 22 07:29:25 crc kubenswrapper[4933]: I0122 07:29:25.037533 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-x2t8l"] Jan 22 07:29:25 crc kubenswrapper[4933]: I0122 07:29:25.053318 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-x2t8l"] Jan 22 07:29:26 crc kubenswrapper[4933]: I0122 07:29:26.512689 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a51acc4-f217-4041-8538-dd03b67531a1" path="/var/lib/kubelet/pods/3a51acc4-f217-4041-8538-dd03b67531a1/volumes" Jan 22 07:29:31 crc kubenswrapper[4933]: I0122 07:29:31.604155 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-5c5f49847d-w5xqj" podUID="7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.119:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.119:8443: connect: connection refused" Jan 22 07:29:31 crc kubenswrapper[4933]: I0122 07:29:31.604930 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-5c5f49847d-w5xqj" Jan 22 07:29:34 crc kubenswrapper[4933]: I0122 07:29:34.491387 4933 scope.go:117] "RemoveContainer" containerID="16fd824342f037f0104eaf6d48b751a02eeec5a84e0c2a32f1a295811edc0336" Jan 22 07:29:34 crc kubenswrapper[4933]: E0122 07:29:34.492353 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:29:35 crc kubenswrapper[4933]: I0122 07:29:35.716362 4933 generic.go:334] "Generic (PLEG): container finished" podID="7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9" containerID="e755fb15a1df5046702ecfc8397f2bfcef3891d8d8a141bb7a0ae217f94ea160" exitCode=137 Jan 22 07:29:35 crc kubenswrapper[4933]: I0122 07:29:35.716524 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5c5f49847d-w5xqj" event={"ID":"7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9","Type":"ContainerDied","Data":"e755fb15a1df5046702ecfc8397f2bfcef3891d8d8a141bb7a0ae217f94ea160"} Jan 22 07:29:35 crc kubenswrapper[4933]: I0122 07:29:35.716550 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5c5f49847d-w5xqj" event={"ID":"7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9","Type":"ContainerDied","Data":"848e658d7f74b79529dc94ef4850e37fd66fed5beae6882e6c30e41c4bedb3b6"} Jan 22 07:29:35 crc kubenswrapper[4933]: I0122 07:29:35.716560 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="848e658d7f74b79529dc94ef4850e37fd66fed5beae6882e6c30e41c4bedb3b6" Jan 22 07:29:35 crc kubenswrapper[4933]: I0122 07:29:35.845356 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5c5f49847d-w5xqj" Jan 22 07:29:35 crc kubenswrapper[4933]: I0122 07:29:35.964444 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5jb5j\" (UniqueName: \"kubernetes.io/projected/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-kube-api-access-5jb5j\") pod \"7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9\" (UID: \"7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9\") " Jan 22 07:29:35 crc kubenswrapper[4933]: I0122 07:29:35.964526 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-logs\") pod \"7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9\" (UID: \"7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9\") " Jan 22 07:29:35 crc kubenswrapper[4933]: I0122 07:29:35.964660 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-horizon-tls-certs\") pod \"7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9\" (UID: \"7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9\") " Jan 22 07:29:35 crc kubenswrapper[4933]: I0122 07:29:35.964766 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-scripts\") pod \"7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9\" (UID: \"7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9\") " Jan 22 07:29:35 crc kubenswrapper[4933]: I0122 07:29:35.964832 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-config-data\") pod \"7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9\" (UID: \"7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9\") " Jan 22 07:29:35 crc kubenswrapper[4933]: I0122 07:29:35.964979 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-combined-ca-bundle\") pod \"7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9\" (UID: \"7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9\") " Jan 22 07:29:35 crc kubenswrapper[4933]: I0122 07:29:35.965171 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-horizon-secret-key\") pod \"7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9\" (UID: \"7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9\") " Jan 22 07:29:35 crc kubenswrapper[4933]: I0122 07:29:35.965870 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-logs" (OuterVolumeSpecName: "logs") pod "7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9" (UID: "7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:29:35 crc kubenswrapper[4933]: I0122 07:29:35.966291 4933 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-logs\") on node \"crc\" DevicePath \"\"" Jan 22 07:29:35 crc kubenswrapper[4933]: I0122 07:29:35.969926 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-kube-api-access-5jb5j" (OuterVolumeSpecName: "kube-api-access-5jb5j") pod "7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9" (UID: "7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9"). InnerVolumeSpecName "kube-api-access-5jb5j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:29:35 crc kubenswrapper[4933]: I0122 07:29:35.970014 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9" (UID: "7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:29:35 crc kubenswrapper[4933]: I0122 07:29:35.988419 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-scripts" (OuterVolumeSpecName: "scripts") pod "7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9" (UID: "7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:29:35 crc kubenswrapper[4933]: I0122 07:29:35.990726 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9" (UID: "7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:29:35 crc kubenswrapper[4933]: I0122 07:29:35.996729 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-config-data" (OuterVolumeSpecName: "config-data") pod "7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9" (UID: "7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:29:36 crc kubenswrapper[4933]: I0122 07:29:36.044166 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9" (UID: "7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9"). InnerVolumeSpecName "horizon-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:29:36 crc kubenswrapper[4933]: I0122 07:29:36.068403 4933 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-horizon-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 07:29:36 crc kubenswrapper[4933]: I0122 07:29:36.068709 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:29:36 crc kubenswrapper[4933]: I0122 07:29:36.068837 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 07:29:36 crc kubenswrapper[4933]: I0122 07:29:36.068953 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:29:36 crc kubenswrapper[4933]: I0122 07:29:36.069065 4933 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Jan 22 07:29:36 crc kubenswrapper[4933]: I0122 07:29:36.069296 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5jb5j\" (UniqueName: \"kubernetes.io/projected/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9-kube-api-access-5jb5j\") on node \"crc\" DevicePath \"\"" Jan 22 07:29:36 crc kubenswrapper[4933]: I0122 07:29:36.728606 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5c5f49847d-w5xqj" Jan 22 07:29:36 crc kubenswrapper[4933]: I0122 07:29:36.768559 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5c5f49847d-w5xqj"] Jan 22 07:29:36 crc kubenswrapper[4933]: I0122 07:29:36.779774 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-5c5f49847d-w5xqj"] Jan 22 07:29:38 crc kubenswrapper[4933]: I0122 07:29:38.509780 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9" path="/var/lib/kubelet/pods/7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9/volumes" Jan 22 07:29:45 crc kubenswrapper[4933]: I0122 07:29:45.910727 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-6d688745d-jhzmc"] Jan 22 07:29:45 crc kubenswrapper[4933]: E0122 07:29:45.911869 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9" containerName="horizon-log" Jan 22 07:29:45 crc kubenswrapper[4933]: I0122 07:29:45.911886 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9" containerName="horizon-log" Jan 22 07:29:45 crc kubenswrapper[4933]: E0122 07:29:45.911908 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e29bcc09-3449-49eb-83a7-972bbf7ad894" containerName="horizon-log" Jan 22 07:29:45 crc kubenswrapper[4933]: I0122 07:29:45.911914 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="e29bcc09-3449-49eb-83a7-972bbf7ad894" containerName="horizon-log" Jan 22 07:29:45 crc kubenswrapper[4933]: E0122 07:29:45.911929 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7537515b-2d77-4197-a172-a34ef69681d8" containerName="horizon-log" Jan 22 07:29:45 crc kubenswrapper[4933]: I0122 07:29:45.911937 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="7537515b-2d77-4197-a172-a34ef69681d8" containerName="horizon-log" Jan 22 07:29:45 crc kubenswrapper[4933]: E0122 07:29:45.911950 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7537515b-2d77-4197-a172-a34ef69681d8" containerName="horizon" Jan 22 07:29:45 crc kubenswrapper[4933]: I0122 07:29:45.911956 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="7537515b-2d77-4197-a172-a34ef69681d8" containerName="horizon" Jan 22 07:29:45 crc kubenswrapper[4933]: E0122 07:29:45.911977 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9" containerName="horizon" Jan 22 07:29:45 crc kubenswrapper[4933]: I0122 07:29:45.911982 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9" containerName="horizon" Jan 22 07:29:45 crc kubenswrapper[4933]: E0122 07:29:45.911993 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e29bcc09-3449-49eb-83a7-972bbf7ad894" containerName="horizon" Jan 22 07:29:45 crc kubenswrapper[4933]: I0122 07:29:45.911999 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="e29bcc09-3449-49eb-83a7-972bbf7ad894" containerName="horizon" Jan 22 07:29:45 crc kubenswrapper[4933]: I0122 07:29:45.912197 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="e29bcc09-3449-49eb-83a7-972bbf7ad894" containerName="horizon-log" Jan 22 07:29:45 crc kubenswrapper[4933]: I0122 07:29:45.912212 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="e29bcc09-3449-49eb-83a7-972bbf7ad894" containerName="horizon" Jan 22 07:29:45 crc kubenswrapper[4933]: I0122 
07:29:45.912227 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9" containerName="horizon-log" Jan 22 07:29:45 crc kubenswrapper[4933]: I0122 07:29:45.912237 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b6c2b9c-89b8-4e41-8e24-ca35e2c722f9" containerName="horizon" Jan 22 07:29:45 crc kubenswrapper[4933]: I0122 07:29:45.912248 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="7537515b-2d77-4197-a172-a34ef69681d8" containerName="horizon-log" Jan 22 07:29:45 crc kubenswrapper[4933]: I0122 07:29:45.912258 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="7537515b-2d77-4197-a172-a34ef69681d8" containerName="horizon" Jan 22 07:29:45 crc kubenswrapper[4933]: I0122 07:29:45.913372 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6d688745d-jhzmc" Jan 22 07:29:45 crc kubenswrapper[4933]: I0122 07:29:45.930986 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6d688745d-jhzmc"] Jan 22 07:29:45 crc kubenswrapper[4933]: I0122 07:29:45.934777 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7cgr6\" (UniqueName: \"kubernetes.io/projected/d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa-kube-api-access-7cgr6\") pod \"horizon-6d688745d-jhzmc\" (UID: \"d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa\") " pod="openstack/horizon-6d688745d-jhzmc" Jan 22 07:29:45 crc kubenswrapper[4933]: I0122 07:29:45.934855 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa-horizon-secret-key\") pod \"horizon-6d688745d-jhzmc\" (UID: \"d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa\") " pod="openstack/horizon-6d688745d-jhzmc" Jan 22 07:29:45 crc kubenswrapper[4933]: I0122 07:29:45.934881 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa-config-data\") pod \"horizon-6d688745d-jhzmc\" (UID: \"d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa\") " pod="openstack/horizon-6d688745d-jhzmc" Jan 22 07:29:45 crc kubenswrapper[4933]: I0122 07:29:45.934965 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa-combined-ca-bundle\") pod \"horizon-6d688745d-jhzmc\" (UID: \"d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa\") " pod="openstack/horizon-6d688745d-jhzmc" Jan 22 07:29:45 crc kubenswrapper[4933]: I0122 07:29:45.934982 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa-horizon-tls-certs\") pod \"horizon-6d688745d-jhzmc\" (UID: \"d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa\") " pod="openstack/horizon-6d688745d-jhzmc" Jan 22 07:29:45 crc kubenswrapper[4933]: I0122 07:29:45.935012 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa-scripts\") pod \"horizon-6d688745d-jhzmc\" (UID: \"d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa\") " pod="openstack/horizon-6d688745d-jhzmc" Jan 22 07:29:45 crc kubenswrapper[4933]: I0122 07:29:45.935039 4933 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa-logs\") pod \"horizon-6d688745d-jhzmc\" (UID: \"d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa\") " pod="openstack/horizon-6d688745d-jhzmc" Jan 22 07:29:46 crc kubenswrapper[4933]: I0122 07:29:46.036668 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa-horizon-secret-key\") pod \"horizon-6d688745d-jhzmc\" (UID: \"d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa\") " pod="openstack/horizon-6d688745d-jhzmc" Jan 22 07:29:46 crc kubenswrapper[4933]: I0122 07:29:46.036741 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa-config-data\") pod \"horizon-6d688745d-jhzmc\" (UID: \"d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa\") " pod="openstack/horizon-6d688745d-jhzmc" Jan 22 07:29:46 crc kubenswrapper[4933]: I0122 07:29:46.036857 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa-combined-ca-bundle\") pod \"horizon-6d688745d-jhzmc\" (UID: \"d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa\") " pod="openstack/horizon-6d688745d-jhzmc" Jan 22 07:29:46 crc kubenswrapper[4933]: I0122 07:29:46.038262 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa-config-data\") pod \"horizon-6d688745d-jhzmc\" (UID: \"d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa\") " pod="openstack/horizon-6d688745d-jhzmc" Jan 22 07:29:46 crc kubenswrapper[4933]: I0122 07:29:46.038316 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa-horizon-tls-certs\") pod \"horizon-6d688745d-jhzmc\" (UID: \"d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa\") " pod="openstack/horizon-6d688745d-jhzmc" Jan 22 07:29:46 crc kubenswrapper[4933]: I0122 07:29:46.038391 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa-scripts\") pod \"horizon-6d688745d-jhzmc\" (UID: \"d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa\") " pod="openstack/horizon-6d688745d-jhzmc" Jan 22 07:29:46 crc kubenswrapper[4933]: I0122 07:29:46.038426 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa-logs\") pod \"horizon-6d688745d-jhzmc\" (UID: \"d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa\") " pod="openstack/horizon-6d688745d-jhzmc" Jan 22 07:29:46 crc kubenswrapper[4933]: I0122 07:29:46.038466 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7cgr6\" (UniqueName: \"kubernetes.io/projected/d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa-kube-api-access-7cgr6\") pod \"horizon-6d688745d-jhzmc\" (UID: \"d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa\") " pod="openstack/horizon-6d688745d-jhzmc" Jan 22 07:29:46 crc kubenswrapper[4933]: I0122 07:29:46.039335 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa-logs\") pod 
\"horizon-6d688745d-jhzmc\" (UID: \"d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa\") " pod="openstack/horizon-6d688745d-jhzmc" Jan 22 07:29:46 crc kubenswrapper[4933]: I0122 07:29:46.039438 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa-scripts\") pod \"horizon-6d688745d-jhzmc\" (UID: \"d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa\") " pod="openstack/horizon-6d688745d-jhzmc" Jan 22 07:29:46 crc kubenswrapper[4933]: I0122 07:29:46.043257 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa-combined-ca-bundle\") pod \"horizon-6d688745d-jhzmc\" (UID: \"d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa\") " pod="openstack/horizon-6d688745d-jhzmc" Jan 22 07:29:46 crc kubenswrapper[4933]: I0122 07:29:46.043258 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa-horizon-secret-key\") pod \"horizon-6d688745d-jhzmc\" (UID: \"d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa\") " pod="openstack/horizon-6d688745d-jhzmc" Jan 22 07:29:46 crc kubenswrapper[4933]: I0122 07:29:46.043443 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa-horizon-tls-certs\") pod \"horizon-6d688745d-jhzmc\" (UID: \"d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa\") " pod="openstack/horizon-6d688745d-jhzmc" Jan 22 07:29:46 crc kubenswrapper[4933]: I0122 07:29:46.055414 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7cgr6\" (UniqueName: \"kubernetes.io/projected/d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa-kube-api-access-7cgr6\") pod \"horizon-6d688745d-jhzmc\" (UID: \"d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa\") " pod="openstack/horizon-6d688745d-jhzmc" Jan 22 07:29:46 crc kubenswrapper[4933]: I0122 07:29:46.234386 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-6d688745d-jhzmc" Jan 22 07:29:46 crc kubenswrapper[4933]: I0122 07:29:46.493137 4933 scope.go:117] "RemoveContainer" containerID="16fd824342f037f0104eaf6d48b751a02eeec5a84e0c2a32f1a295811edc0336" Jan 22 07:29:46 crc kubenswrapper[4933]: E0122 07:29:46.493518 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:29:46 crc kubenswrapper[4933]: I0122 07:29:46.750938 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6d688745d-jhzmc"] Jan 22 07:29:46 crc kubenswrapper[4933]: I0122 07:29:46.845930 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6d688745d-jhzmc" event={"ID":"d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa","Type":"ContainerStarted","Data":"47ba2e97ce5e55f1e5cbb4b334ce5248a63b45a82f3eb43c348b48ec58367b95"} Jan 22 07:29:47 crc kubenswrapper[4933]: I0122 07:29:47.341248 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-create-4b4lx"] Jan 22 07:29:47 crc kubenswrapper[4933]: I0122 07:29:47.343142 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-4b4lx" Jan 22 07:29:47 crc kubenswrapper[4933]: I0122 07:29:47.363797 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-5e82-account-create-update-vs4tx"] Jan 22 07:29:47 crc kubenswrapper[4933]: I0122 07:29:47.365106 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-5e82-account-create-update-vs4tx" Jan 22 07:29:47 crc kubenswrapper[4933]: I0122 07:29:47.372155 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-db-secret" Jan 22 07:29:47 crc kubenswrapper[4933]: I0122 07:29:47.376208 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-4b4lx"] Jan 22 07:29:47 crc kubenswrapper[4933]: I0122 07:29:47.382043 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vkxjw\" (UniqueName: \"kubernetes.io/projected/cbf7bf50-e1b7-4fb5-af73-b949ae25b1e4-kube-api-access-vkxjw\") pod \"heat-db-create-4b4lx\" (UID: \"cbf7bf50-e1b7-4fb5-af73-b949ae25b1e4\") " pod="openstack/heat-db-create-4b4lx" Jan 22 07:29:47 crc kubenswrapper[4933]: I0122 07:29:47.382298 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cbf7bf50-e1b7-4fb5-af73-b949ae25b1e4-operator-scripts\") pod \"heat-db-create-4b4lx\" (UID: \"cbf7bf50-e1b7-4fb5-af73-b949ae25b1e4\") " pod="openstack/heat-db-create-4b4lx" Jan 22 07:29:47 crc kubenswrapper[4933]: I0122 07:29:47.398277 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-5e82-account-create-update-vs4tx"] Jan 22 07:29:47 crc kubenswrapper[4933]: I0122 07:29:47.483981 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vkxjw\" (UniqueName: \"kubernetes.io/projected/cbf7bf50-e1b7-4fb5-af73-b949ae25b1e4-kube-api-access-vkxjw\") pod \"heat-db-create-4b4lx\" (UID: \"cbf7bf50-e1b7-4fb5-af73-b949ae25b1e4\") " pod="openstack/heat-db-create-4b4lx" Jan 22 07:29:47 crc kubenswrapper[4933]: I0122 07:29:47.484166 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pjhbc\" (UniqueName: \"kubernetes.io/projected/9cde1ab7-883c-49c7-9d99-970ac204daf1-kube-api-access-pjhbc\") pod \"heat-5e82-account-create-update-vs4tx\" (UID: \"9cde1ab7-883c-49c7-9d99-970ac204daf1\") " pod="openstack/heat-5e82-account-create-update-vs4tx" Jan 22 07:29:47 crc kubenswrapper[4933]: I0122 07:29:47.484219 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9cde1ab7-883c-49c7-9d99-970ac204daf1-operator-scripts\") pod \"heat-5e82-account-create-update-vs4tx\" (UID: \"9cde1ab7-883c-49c7-9d99-970ac204daf1\") " pod="openstack/heat-5e82-account-create-update-vs4tx" Jan 22 07:29:47 crc kubenswrapper[4933]: I0122 07:29:47.484251 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cbf7bf50-e1b7-4fb5-af73-b949ae25b1e4-operator-scripts\") pod \"heat-db-create-4b4lx\" (UID: \"cbf7bf50-e1b7-4fb5-af73-b949ae25b1e4\") " pod="openstack/heat-db-create-4b4lx" Jan 22 07:29:47 crc kubenswrapper[4933]: I0122 07:29:47.485400 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cbf7bf50-e1b7-4fb5-af73-b949ae25b1e4-operator-scripts\") pod \"heat-db-create-4b4lx\" (UID: \"cbf7bf50-e1b7-4fb5-af73-b949ae25b1e4\") " pod="openstack/heat-db-create-4b4lx" Jan 22 07:29:47 crc kubenswrapper[4933]: I0122 07:29:47.502390 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vkxjw\" (UniqueName: 
\"kubernetes.io/projected/cbf7bf50-e1b7-4fb5-af73-b949ae25b1e4-kube-api-access-vkxjw\") pod \"heat-db-create-4b4lx\" (UID: \"cbf7bf50-e1b7-4fb5-af73-b949ae25b1e4\") " pod="openstack/heat-db-create-4b4lx" Jan 22 07:29:47 crc kubenswrapper[4933]: I0122 07:29:47.586755 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pjhbc\" (UniqueName: \"kubernetes.io/projected/9cde1ab7-883c-49c7-9d99-970ac204daf1-kube-api-access-pjhbc\") pod \"heat-5e82-account-create-update-vs4tx\" (UID: \"9cde1ab7-883c-49c7-9d99-970ac204daf1\") " pod="openstack/heat-5e82-account-create-update-vs4tx" Jan 22 07:29:47 crc kubenswrapper[4933]: I0122 07:29:47.587158 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9cde1ab7-883c-49c7-9d99-970ac204daf1-operator-scripts\") pod \"heat-5e82-account-create-update-vs4tx\" (UID: \"9cde1ab7-883c-49c7-9d99-970ac204daf1\") " pod="openstack/heat-5e82-account-create-update-vs4tx" Jan 22 07:29:47 crc kubenswrapper[4933]: I0122 07:29:47.589242 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9cde1ab7-883c-49c7-9d99-970ac204daf1-operator-scripts\") pod \"heat-5e82-account-create-update-vs4tx\" (UID: \"9cde1ab7-883c-49c7-9d99-970ac204daf1\") " pod="openstack/heat-5e82-account-create-update-vs4tx" Jan 22 07:29:47 crc kubenswrapper[4933]: I0122 07:29:47.605892 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pjhbc\" (UniqueName: \"kubernetes.io/projected/9cde1ab7-883c-49c7-9d99-970ac204daf1-kube-api-access-pjhbc\") pod \"heat-5e82-account-create-update-vs4tx\" (UID: \"9cde1ab7-883c-49c7-9d99-970ac204daf1\") " pod="openstack/heat-5e82-account-create-update-vs4tx" Jan 22 07:29:47 crc kubenswrapper[4933]: I0122 07:29:47.664596 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-4b4lx" Jan 22 07:29:47 crc kubenswrapper[4933]: I0122 07:29:47.689102 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-5e82-account-create-update-vs4tx" Jan 22 07:29:47 crc kubenswrapper[4933]: I0122 07:29:47.859747 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6d688745d-jhzmc" event={"ID":"d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa","Type":"ContainerStarted","Data":"2da9aa8d47841eaf6d53d70fed748d3eb42241498941fa67929f04074d814bf6"} Jan 22 07:29:47 crc kubenswrapper[4933]: I0122 07:29:47.860161 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6d688745d-jhzmc" event={"ID":"d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa","Type":"ContainerStarted","Data":"e05b37fdd204015535df7f75eac14a1259caa9d5c82c5c7864350f9b94f04a47"} Jan 22 07:29:48 crc kubenswrapper[4933]: I0122 07:29:48.213402 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-6d688745d-jhzmc" podStartSLOduration=3.213385604 podStartE2EDuration="3.213385604s" podCreationTimestamp="2026-01-22 07:29:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:29:47.884687137 +0000 UTC m=+6235.721812560" watchObservedRunningTime="2026-01-22 07:29:48.213385604 +0000 UTC m=+6236.050510957" Jan 22 07:29:48 crc kubenswrapper[4933]: I0122 07:29:48.218351 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-4b4lx"] Jan 22 07:29:48 crc kubenswrapper[4933]: I0122 07:29:48.284839 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-5e82-account-create-update-vs4tx"] Jan 22 07:29:48 crc kubenswrapper[4933]: W0122 07:29:48.295368 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9cde1ab7_883c_49c7_9d99_970ac204daf1.slice/crio-1851e32bc44fcfb51a846579ed8fe41bcc4b50bc95432fa815c03cbf1671ffdf WatchSource:0}: Error finding container 1851e32bc44fcfb51a846579ed8fe41bcc4b50bc95432fa815c03cbf1671ffdf: Status 404 returned error can't find the container with id 1851e32bc44fcfb51a846579ed8fe41bcc4b50bc95432fa815c03cbf1671ffdf Jan 22 07:29:48 crc kubenswrapper[4933]: I0122 07:29:48.882894 4933 generic.go:334] "Generic (PLEG): container finished" podID="cbf7bf50-e1b7-4fb5-af73-b949ae25b1e4" containerID="7ef209d0698bfe81ba7c846ab6c3525756cd905f0c2ad77c2a1e66f05cd87dfb" exitCode=0 Jan 22 07:29:48 crc kubenswrapper[4933]: I0122 07:29:48.883246 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-4b4lx" event={"ID":"cbf7bf50-e1b7-4fb5-af73-b949ae25b1e4","Type":"ContainerDied","Data":"7ef209d0698bfe81ba7c846ab6c3525756cd905f0c2ad77c2a1e66f05cd87dfb"} Jan 22 07:29:48 crc kubenswrapper[4933]: I0122 07:29:48.883294 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-4b4lx" event={"ID":"cbf7bf50-e1b7-4fb5-af73-b949ae25b1e4","Type":"ContainerStarted","Data":"915d06f36edaaace81bb4f4da2f6c42245503e0edfdcccf9017672681f7fc587"} Jan 22 07:29:48 crc kubenswrapper[4933]: I0122 07:29:48.887029 4933 generic.go:334] "Generic (PLEG): container finished" podID="9cde1ab7-883c-49c7-9d99-970ac204daf1" containerID="a07ca5909ec1cef50965ab17eeb78ddcb34df7fea965528dbe7874b10123d663" exitCode=0 Jan 22 07:29:48 crc kubenswrapper[4933]: I0122 07:29:48.887108 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-5e82-account-create-update-vs4tx" 
event={"ID":"9cde1ab7-883c-49c7-9d99-970ac204daf1","Type":"ContainerDied","Data":"a07ca5909ec1cef50965ab17eeb78ddcb34df7fea965528dbe7874b10123d663"} Jan 22 07:29:48 crc kubenswrapper[4933]: I0122 07:29:48.887143 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-5e82-account-create-update-vs4tx" event={"ID":"9cde1ab7-883c-49c7-9d99-970ac204daf1","Type":"ContainerStarted","Data":"1851e32bc44fcfb51a846579ed8fe41bcc4b50bc95432fa815c03cbf1671ffdf"} Jan 22 07:29:50 crc kubenswrapper[4933]: I0122 07:29:50.349618 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-4b4lx" Jan 22 07:29:50 crc kubenswrapper[4933]: I0122 07:29:50.360018 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-5e82-account-create-update-vs4tx" Jan 22 07:29:50 crc kubenswrapper[4933]: I0122 07:29:50.483832 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9cde1ab7-883c-49c7-9d99-970ac204daf1-operator-scripts\") pod \"9cde1ab7-883c-49c7-9d99-970ac204daf1\" (UID: \"9cde1ab7-883c-49c7-9d99-970ac204daf1\") " Jan 22 07:29:50 crc kubenswrapper[4933]: I0122 07:29:50.484041 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cbf7bf50-e1b7-4fb5-af73-b949ae25b1e4-operator-scripts\") pod \"cbf7bf50-e1b7-4fb5-af73-b949ae25b1e4\" (UID: \"cbf7bf50-e1b7-4fb5-af73-b949ae25b1e4\") " Jan 22 07:29:50 crc kubenswrapper[4933]: I0122 07:29:50.484124 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjhbc\" (UniqueName: \"kubernetes.io/projected/9cde1ab7-883c-49c7-9d99-970ac204daf1-kube-api-access-pjhbc\") pod \"9cde1ab7-883c-49c7-9d99-970ac204daf1\" (UID: \"9cde1ab7-883c-49c7-9d99-970ac204daf1\") " Jan 22 07:29:50 crc kubenswrapper[4933]: I0122 07:29:50.484153 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vkxjw\" (UniqueName: \"kubernetes.io/projected/cbf7bf50-e1b7-4fb5-af73-b949ae25b1e4-kube-api-access-vkxjw\") pod \"cbf7bf50-e1b7-4fb5-af73-b949ae25b1e4\" (UID: \"cbf7bf50-e1b7-4fb5-af73-b949ae25b1e4\") " Jan 22 07:29:50 crc kubenswrapper[4933]: I0122 07:29:50.484395 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9cde1ab7-883c-49c7-9d99-970ac204daf1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9cde1ab7-883c-49c7-9d99-970ac204daf1" (UID: "9cde1ab7-883c-49c7-9d99-970ac204daf1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:29:50 crc kubenswrapper[4933]: I0122 07:29:50.484986 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9cde1ab7-883c-49c7-9d99-970ac204daf1-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:29:50 crc kubenswrapper[4933]: I0122 07:29:50.485702 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cbf7bf50-e1b7-4fb5-af73-b949ae25b1e4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "cbf7bf50-e1b7-4fb5-af73-b949ae25b1e4" (UID: "cbf7bf50-e1b7-4fb5-af73-b949ae25b1e4"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:29:50 crc kubenswrapper[4933]: I0122 07:29:50.493269 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cbf7bf50-e1b7-4fb5-af73-b949ae25b1e4-kube-api-access-vkxjw" (OuterVolumeSpecName: "kube-api-access-vkxjw") pod "cbf7bf50-e1b7-4fb5-af73-b949ae25b1e4" (UID: "cbf7bf50-e1b7-4fb5-af73-b949ae25b1e4"). InnerVolumeSpecName "kube-api-access-vkxjw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:29:50 crc kubenswrapper[4933]: I0122 07:29:50.495284 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9cde1ab7-883c-49c7-9d99-970ac204daf1-kube-api-access-pjhbc" (OuterVolumeSpecName: "kube-api-access-pjhbc") pod "9cde1ab7-883c-49c7-9d99-970ac204daf1" (UID: "9cde1ab7-883c-49c7-9d99-970ac204daf1"). InnerVolumeSpecName "kube-api-access-pjhbc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:29:50 crc kubenswrapper[4933]: I0122 07:29:50.586640 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cbf7bf50-e1b7-4fb5-af73-b949ae25b1e4-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:29:50 crc kubenswrapper[4933]: I0122 07:29:50.586669 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjhbc\" (UniqueName: \"kubernetes.io/projected/9cde1ab7-883c-49c7-9d99-970ac204daf1-kube-api-access-pjhbc\") on node \"crc\" DevicePath \"\"" Jan 22 07:29:50 crc kubenswrapper[4933]: I0122 07:29:50.586678 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vkxjw\" (UniqueName: \"kubernetes.io/projected/cbf7bf50-e1b7-4fb5-af73-b949ae25b1e4-kube-api-access-vkxjw\") on node \"crc\" DevicePath \"\"" Jan 22 07:29:50 crc kubenswrapper[4933]: I0122 07:29:50.903976 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-4b4lx" event={"ID":"cbf7bf50-e1b7-4fb5-af73-b949ae25b1e4","Type":"ContainerDied","Data":"915d06f36edaaace81bb4f4da2f6c42245503e0edfdcccf9017672681f7fc587"} Jan 22 07:29:50 crc kubenswrapper[4933]: I0122 07:29:50.904303 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="915d06f36edaaace81bb4f4da2f6c42245503e0edfdcccf9017672681f7fc587" Jan 22 07:29:50 crc kubenswrapper[4933]: I0122 07:29:50.904366 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-4b4lx" Jan 22 07:29:50 crc kubenswrapper[4933]: I0122 07:29:50.906661 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-5e82-account-create-update-vs4tx" event={"ID":"9cde1ab7-883c-49c7-9d99-970ac204daf1","Type":"ContainerDied","Data":"1851e32bc44fcfb51a846579ed8fe41bcc4b50bc95432fa815c03cbf1671ffdf"} Jan 22 07:29:50 crc kubenswrapper[4933]: I0122 07:29:50.906889 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1851e32bc44fcfb51a846579ed8fe41bcc4b50bc95432fa815c03cbf1671ffdf" Jan 22 07:29:50 crc kubenswrapper[4933]: I0122 07:29:50.906865 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-5e82-account-create-update-vs4tx" Jan 22 07:29:52 crc kubenswrapper[4933]: I0122 07:29:52.505546 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-2vflz"] Jan 22 07:29:52 crc kubenswrapper[4933]: E0122 07:29:52.506322 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9cde1ab7-883c-49c7-9d99-970ac204daf1" containerName="mariadb-account-create-update" Jan 22 07:29:52 crc kubenswrapper[4933]: I0122 07:29:52.506340 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="9cde1ab7-883c-49c7-9d99-970ac204daf1" containerName="mariadb-account-create-update" Jan 22 07:29:52 crc kubenswrapper[4933]: E0122 07:29:52.506358 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbf7bf50-e1b7-4fb5-af73-b949ae25b1e4" containerName="mariadb-database-create" Jan 22 07:29:52 crc kubenswrapper[4933]: I0122 07:29:52.506367 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbf7bf50-e1b7-4fb5-af73-b949ae25b1e4" containerName="mariadb-database-create" Jan 22 07:29:52 crc kubenswrapper[4933]: I0122 07:29:52.506611 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="9cde1ab7-883c-49c7-9d99-970ac204daf1" containerName="mariadb-account-create-update" Jan 22 07:29:52 crc kubenswrapper[4933]: I0122 07:29:52.506635 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="cbf7bf50-e1b7-4fb5-af73-b949ae25b1e4" containerName="mariadb-database-create" Jan 22 07:29:52 crc kubenswrapper[4933]: I0122 07:29:52.507543 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-2vflz" Jan 22 07:29:52 crc kubenswrapper[4933]: I0122 07:29:52.511778 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-wmmwz" Jan 22 07:29:52 crc kubenswrapper[4933]: I0122 07:29:52.512044 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Jan 22 07:29:52 crc kubenswrapper[4933]: I0122 07:29:52.516833 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-2vflz"] Jan 22 07:29:52 crc kubenswrapper[4933]: I0122 07:29:52.626763 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a9a2014a-9cf1-447f-ba46-0bd389003bf7-config-data\") pod \"heat-db-sync-2vflz\" (UID: \"a9a2014a-9cf1-447f-ba46-0bd389003bf7\") " pod="openstack/heat-db-sync-2vflz" Jan 22 07:29:52 crc kubenswrapper[4933]: I0122 07:29:52.626862 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9a2014a-9cf1-447f-ba46-0bd389003bf7-combined-ca-bundle\") pod \"heat-db-sync-2vflz\" (UID: \"a9a2014a-9cf1-447f-ba46-0bd389003bf7\") " pod="openstack/heat-db-sync-2vflz" Jan 22 07:29:52 crc kubenswrapper[4933]: I0122 07:29:52.626938 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-45gw8\" (UniqueName: \"kubernetes.io/projected/a9a2014a-9cf1-447f-ba46-0bd389003bf7-kube-api-access-45gw8\") pod \"heat-db-sync-2vflz\" (UID: \"a9a2014a-9cf1-447f-ba46-0bd389003bf7\") " pod="openstack/heat-db-sync-2vflz" Jan 22 07:29:52 crc kubenswrapper[4933]: I0122 07:29:52.729400 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/a9a2014a-9cf1-447f-ba46-0bd389003bf7-config-data\") pod \"heat-db-sync-2vflz\" (UID: \"a9a2014a-9cf1-447f-ba46-0bd389003bf7\") " pod="openstack/heat-db-sync-2vflz" Jan 22 07:29:52 crc kubenswrapper[4933]: I0122 07:29:52.729468 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9a2014a-9cf1-447f-ba46-0bd389003bf7-combined-ca-bundle\") pod \"heat-db-sync-2vflz\" (UID: \"a9a2014a-9cf1-447f-ba46-0bd389003bf7\") " pod="openstack/heat-db-sync-2vflz" Jan 22 07:29:52 crc kubenswrapper[4933]: I0122 07:29:52.729538 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-45gw8\" (UniqueName: \"kubernetes.io/projected/a9a2014a-9cf1-447f-ba46-0bd389003bf7-kube-api-access-45gw8\") pod \"heat-db-sync-2vflz\" (UID: \"a9a2014a-9cf1-447f-ba46-0bd389003bf7\") " pod="openstack/heat-db-sync-2vflz" Jan 22 07:29:52 crc kubenswrapper[4933]: I0122 07:29:52.734865 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9a2014a-9cf1-447f-ba46-0bd389003bf7-combined-ca-bundle\") pod \"heat-db-sync-2vflz\" (UID: \"a9a2014a-9cf1-447f-ba46-0bd389003bf7\") " pod="openstack/heat-db-sync-2vflz" Jan 22 07:29:52 crc kubenswrapper[4933]: I0122 07:29:52.735848 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a9a2014a-9cf1-447f-ba46-0bd389003bf7-config-data\") pod \"heat-db-sync-2vflz\" (UID: \"a9a2014a-9cf1-447f-ba46-0bd389003bf7\") " pod="openstack/heat-db-sync-2vflz" Jan 22 07:29:52 crc kubenswrapper[4933]: I0122 07:29:52.756097 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-45gw8\" (UniqueName: \"kubernetes.io/projected/a9a2014a-9cf1-447f-ba46-0bd389003bf7-kube-api-access-45gw8\") pod \"heat-db-sync-2vflz\" (UID: \"a9a2014a-9cf1-447f-ba46-0bd389003bf7\") " pod="openstack/heat-db-sync-2vflz" Jan 22 07:29:52 crc kubenswrapper[4933]: I0122 07:29:52.827647 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-2vflz" Jan 22 07:29:53 crc kubenswrapper[4933]: I0122 07:29:53.356230 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-2vflz"] Jan 22 07:29:53 crc kubenswrapper[4933]: I0122 07:29:53.940023 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-2vflz" event={"ID":"a9a2014a-9cf1-447f-ba46-0bd389003bf7","Type":"ContainerStarted","Data":"21edec93cee5efd21277f47c2eba95d1c6a838c29eb308d36187dac2f2fdf8c1"} Jan 22 07:29:56 crc kubenswrapper[4933]: I0122 07:29:56.235162 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-6d688745d-jhzmc" Jan 22 07:29:56 crc kubenswrapper[4933]: I0122 07:29:56.235744 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-6d688745d-jhzmc" Jan 22 07:30:00 crc kubenswrapper[4933]: I0122 07:30:00.145472 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484450-pc2c4"] Jan 22 07:30:00 crc kubenswrapper[4933]: I0122 07:30:00.147740 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484450-pc2c4" Jan 22 07:30:00 crc kubenswrapper[4933]: I0122 07:30:00.150938 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 22 07:30:00 crc kubenswrapper[4933]: I0122 07:30:00.150938 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 22 07:30:00 crc kubenswrapper[4933]: I0122 07:30:00.165697 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484450-pc2c4"] Jan 22 07:30:00 crc kubenswrapper[4933]: I0122 07:30:00.212778 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/58d48b6b-6a4a-4ab6-866c-251ea91606b4-secret-volume\") pod \"collect-profiles-29484450-pc2c4\" (UID: \"58d48b6b-6a4a-4ab6-866c-251ea91606b4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484450-pc2c4" Jan 22 07:30:00 crc kubenswrapper[4933]: I0122 07:30:00.213012 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-phl74\" (UniqueName: \"kubernetes.io/projected/58d48b6b-6a4a-4ab6-866c-251ea91606b4-kube-api-access-phl74\") pod \"collect-profiles-29484450-pc2c4\" (UID: \"58d48b6b-6a4a-4ab6-866c-251ea91606b4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484450-pc2c4" Jan 22 07:30:00 crc kubenswrapper[4933]: I0122 07:30:00.213046 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/58d48b6b-6a4a-4ab6-866c-251ea91606b4-config-volume\") pod \"collect-profiles-29484450-pc2c4\" (UID: \"58d48b6b-6a4a-4ab6-866c-251ea91606b4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484450-pc2c4" Jan 22 07:30:00 crc kubenswrapper[4933]: I0122 07:30:00.314549 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/58d48b6b-6a4a-4ab6-866c-251ea91606b4-secret-volume\") pod \"collect-profiles-29484450-pc2c4\" (UID: \"58d48b6b-6a4a-4ab6-866c-251ea91606b4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484450-pc2c4" Jan 22 07:30:00 crc kubenswrapper[4933]: I0122 07:30:00.314754 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-phl74\" (UniqueName: \"kubernetes.io/projected/58d48b6b-6a4a-4ab6-866c-251ea91606b4-kube-api-access-phl74\") pod \"collect-profiles-29484450-pc2c4\" (UID: \"58d48b6b-6a4a-4ab6-866c-251ea91606b4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484450-pc2c4" Jan 22 07:30:00 crc kubenswrapper[4933]: I0122 07:30:00.314783 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/58d48b6b-6a4a-4ab6-866c-251ea91606b4-config-volume\") pod \"collect-profiles-29484450-pc2c4\" (UID: \"58d48b6b-6a4a-4ab6-866c-251ea91606b4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484450-pc2c4" Jan 22 07:30:00 crc kubenswrapper[4933]: I0122 07:30:00.315937 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/58d48b6b-6a4a-4ab6-866c-251ea91606b4-config-volume\") pod 
\"collect-profiles-29484450-pc2c4\" (UID: \"58d48b6b-6a4a-4ab6-866c-251ea91606b4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484450-pc2c4" Jan 22 07:30:00 crc kubenswrapper[4933]: I0122 07:30:00.336664 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-phl74\" (UniqueName: \"kubernetes.io/projected/58d48b6b-6a4a-4ab6-866c-251ea91606b4-kube-api-access-phl74\") pod \"collect-profiles-29484450-pc2c4\" (UID: \"58d48b6b-6a4a-4ab6-866c-251ea91606b4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484450-pc2c4" Jan 22 07:30:00 crc kubenswrapper[4933]: I0122 07:30:00.344378 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/58d48b6b-6a4a-4ab6-866c-251ea91606b4-secret-volume\") pod \"collect-profiles-29484450-pc2c4\" (UID: \"58d48b6b-6a4a-4ab6-866c-251ea91606b4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484450-pc2c4" Jan 22 07:30:00 crc kubenswrapper[4933]: I0122 07:30:00.476511 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484450-pc2c4" Jan 22 07:30:01 crc kubenswrapper[4933]: I0122 07:30:01.491834 4933 scope.go:117] "RemoveContainer" containerID="16fd824342f037f0104eaf6d48b751a02eeec5a84e0c2a32f1a295811edc0336" Jan 22 07:30:01 crc kubenswrapper[4933]: E0122 07:30:01.492725 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:30:01 crc kubenswrapper[4933]: I0122 07:30:01.793561 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484450-pc2c4"] Jan 22 07:30:02 crc kubenswrapper[4933]: I0122 07:30:02.031891 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484450-pc2c4" event={"ID":"58d48b6b-6a4a-4ab6-866c-251ea91606b4","Type":"ContainerStarted","Data":"d3b501225a3eb635dd64b971b927905a74c467676a62a38ede5d5e5bba6e04ed"} Jan 22 07:30:02 crc kubenswrapper[4933]: I0122 07:30:02.031946 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484450-pc2c4" event={"ID":"58d48b6b-6a4a-4ab6-866c-251ea91606b4","Type":"ContainerStarted","Data":"9e60840e51f0af82fce76021783c88c82a1daab52910080cdbaee68af1eb8fcb"} Jan 22 07:30:02 crc kubenswrapper[4933]: I0122 07:30:02.034267 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-2vflz" event={"ID":"a9a2014a-9cf1-447f-ba46-0bd389003bf7","Type":"ContainerStarted","Data":"f34b904e1b86a2167f4c6c0644d56c72e7f6b7cdc82d89991ec84ae1f04aa5c6"} Jan 22 07:30:02 crc kubenswrapper[4933]: I0122 07:30:02.071347 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29484450-pc2c4" podStartSLOduration=2.071326516 podStartE2EDuration="2.071326516s" podCreationTimestamp="2026-01-22 07:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 
07:30:02.067017301 +0000 UTC m=+6249.904142664" watchObservedRunningTime="2026-01-22 07:30:02.071326516 +0000 UTC m=+6249.908451869" Jan 22 07:30:02 crc kubenswrapper[4933]: I0122 07:30:02.090690 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-sync-2vflz" podStartSLOduration=2.259941476 podStartE2EDuration="10.090671387s" podCreationTimestamp="2026-01-22 07:29:52 +0000 UTC" firstStartedPulling="2026-01-22 07:29:53.391614249 +0000 UTC m=+6241.228739602" lastFinishedPulling="2026-01-22 07:30:01.22234412 +0000 UTC m=+6249.059469513" observedRunningTime="2026-01-22 07:30:02.080326126 +0000 UTC m=+6249.917451499" watchObservedRunningTime="2026-01-22 07:30:02.090671387 +0000 UTC m=+6249.927796740" Jan 22 07:30:03 crc kubenswrapper[4933]: I0122 07:30:03.047027 4933 generic.go:334] "Generic (PLEG): container finished" podID="58d48b6b-6a4a-4ab6-866c-251ea91606b4" containerID="d3b501225a3eb635dd64b971b927905a74c467676a62a38ede5d5e5bba6e04ed" exitCode=0 Jan 22 07:30:03 crc kubenswrapper[4933]: I0122 07:30:03.047066 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484450-pc2c4" event={"ID":"58d48b6b-6a4a-4ab6-866c-251ea91606b4","Type":"ContainerDied","Data":"d3b501225a3eb635dd64b971b927905a74c467676a62a38ede5d5e5bba6e04ed"} Jan 22 07:30:04 crc kubenswrapper[4933]: I0122 07:30:04.057920 4933 generic.go:334] "Generic (PLEG): container finished" podID="a9a2014a-9cf1-447f-ba46-0bd389003bf7" containerID="f34b904e1b86a2167f4c6c0644d56c72e7f6b7cdc82d89991ec84ae1f04aa5c6" exitCode=0 Jan 22 07:30:04 crc kubenswrapper[4933]: I0122 07:30:04.058257 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-2vflz" event={"ID":"a9a2014a-9cf1-447f-ba46-0bd389003bf7","Type":"ContainerDied","Data":"f34b904e1b86a2167f4c6c0644d56c72e7f6b7cdc82d89991ec84ae1f04aa5c6"} Jan 22 07:30:04 crc kubenswrapper[4933]: I0122 07:30:04.409527 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484450-pc2c4" Jan 22 07:30:04 crc kubenswrapper[4933]: I0122 07:30:04.515416 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-phl74\" (UniqueName: \"kubernetes.io/projected/58d48b6b-6a4a-4ab6-866c-251ea91606b4-kube-api-access-phl74\") pod \"58d48b6b-6a4a-4ab6-866c-251ea91606b4\" (UID: \"58d48b6b-6a4a-4ab6-866c-251ea91606b4\") " Jan 22 07:30:04 crc kubenswrapper[4933]: I0122 07:30:04.515483 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/58d48b6b-6a4a-4ab6-866c-251ea91606b4-config-volume\") pod \"58d48b6b-6a4a-4ab6-866c-251ea91606b4\" (UID: \"58d48b6b-6a4a-4ab6-866c-251ea91606b4\") " Jan 22 07:30:04 crc kubenswrapper[4933]: I0122 07:30:04.515644 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/58d48b6b-6a4a-4ab6-866c-251ea91606b4-secret-volume\") pod \"58d48b6b-6a4a-4ab6-866c-251ea91606b4\" (UID: \"58d48b6b-6a4a-4ab6-866c-251ea91606b4\") " Jan 22 07:30:04 crc kubenswrapper[4933]: I0122 07:30:04.517965 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/58d48b6b-6a4a-4ab6-866c-251ea91606b4-config-volume" (OuterVolumeSpecName: "config-volume") pod "58d48b6b-6a4a-4ab6-866c-251ea91606b4" (UID: "58d48b6b-6a4a-4ab6-866c-251ea91606b4"). 
InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:30:04 crc kubenswrapper[4933]: I0122 07:30:04.521774 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/58d48b6b-6a4a-4ab6-866c-251ea91606b4-kube-api-access-phl74" (OuterVolumeSpecName: "kube-api-access-phl74") pod "58d48b6b-6a4a-4ab6-866c-251ea91606b4" (UID: "58d48b6b-6a4a-4ab6-866c-251ea91606b4"). InnerVolumeSpecName "kube-api-access-phl74". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:30:04 crc kubenswrapper[4933]: I0122 07:30:04.522175 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/58d48b6b-6a4a-4ab6-866c-251ea91606b4-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "58d48b6b-6a4a-4ab6-866c-251ea91606b4" (UID: "58d48b6b-6a4a-4ab6-866c-251ea91606b4"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:30:04 crc kubenswrapper[4933]: I0122 07:30:04.621333 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-phl74\" (UniqueName: \"kubernetes.io/projected/58d48b6b-6a4a-4ab6-866c-251ea91606b4-kube-api-access-phl74\") on node \"crc\" DevicePath \"\"" Jan 22 07:30:04 crc kubenswrapper[4933]: I0122 07:30:04.621420 4933 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/58d48b6b-6a4a-4ab6-866c-251ea91606b4-config-volume\") on node \"crc\" DevicePath \"\"" Jan 22 07:30:04 crc kubenswrapper[4933]: I0122 07:30:04.621447 4933 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/58d48b6b-6a4a-4ab6-866c-251ea91606b4-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 22 07:30:04 crc kubenswrapper[4933]: I0122 07:30:04.872191 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484405-bvspt"] Jan 22 07:30:04 crc kubenswrapper[4933]: I0122 07:30:04.884886 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484405-bvspt"] Jan 22 07:30:05 crc kubenswrapper[4933]: I0122 07:30:05.070202 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484450-pc2c4" Jan 22 07:30:05 crc kubenswrapper[4933]: I0122 07:30:05.070317 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484450-pc2c4" event={"ID":"58d48b6b-6a4a-4ab6-866c-251ea91606b4","Type":"ContainerDied","Data":"9e60840e51f0af82fce76021783c88c82a1daab52910080cdbaee68af1eb8fcb"} Jan 22 07:30:05 crc kubenswrapper[4933]: I0122 07:30:05.073349 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9e60840e51f0af82fce76021783c88c82a1daab52910080cdbaee68af1eb8fcb" Jan 22 07:30:05 crc kubenswrapper[4933]: I0122 07:30:05.497161 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-2vflz" Jan 22 07:30:05 crc kubenswrapper[4933]: I0122 07:30:05.657023 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a9a2014a-9cf1-447f-ba46-0bd389003bf7-config-data\") pod \"a9a2014a-9cf1-447f-ba46-0bd389003bf7\" (UID: \"a9a2014a-9cf1-447f-ba46-0bd389003bf7\") " Jan 22 07:30:05 crc kubenswrapper[4933]: I0122 07:30:05.657200 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-45gw8\" (UniqueName: \"kubernetes.io/projected/a9a2014a-9cf1-447f-ba46-0bd389003bf7-kube-api-access-45gw8\") pod \"a9a2014a-9cf1-447f-ba46-0bd389003bf7\" (UID: \"a9a2014a-9cf1-447f-ba46-0bd389003bf7\") " Jan 22 07:30:05 crc kubenswrapper[4933]: I0122 07:30:05.657328 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9a2014a-9cf1-447f-ba46-0bd389003bf7-combined-ca-bundle\") pod \"a9a2014a-9cf1-447f-ba46-0bd389003bf7\" (UID: \"a9a2014a-9cf1-447f-ba46-0bd389003bf7\") " Jan 22 07:30:05 crc kubenswrapper[4933]: I0122 07:30:05.661878 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a9a2014a-9cf1-447f-ba46-0bd389003bf7-kube-api-access-45gw8" (OuterVolumeSpecName: "kube-api-access-45gw8") pod "a9a2014a-9cf1-447f-ba46-0bd389003bf7" (UID: "a9a2014a-9cf1-447f-ba46-0bd389003bf7"). InnerVolumeSpecName "kube-api-access-45gw8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:30:05 crc kubenswrapper[4933]: I0122 07:30:05.683190 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a9a2014a-9cf1-447f-ba46-0bd389003bf7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a9a2014a-9cf1-447f-ba46-0bd389003bf7" (UID: "a9a2014a-9cf1-447f-ba46-0bd389003bf7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:30:05 crc kubenswrapper[4933]: I0122 07:30:05.759737 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a9a2014a-9cf1-447f-ba46-0bd389003bf7-config-data" (OuterVolumeSpecName: "config-data") pod "a9a2014a-9cf1-447f-ba46-0bd389003bf7" (UID: "a9a2014a-9cf1-447f-ba46-0bd389003bf7"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:30:05 crc kubenswrapper[4933]: I0122 07:30:05.760436 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9a2014a-9cf1-447f-ba46-0bd389003bf7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:30:05 crc kubenswrapper[4933]: I0122 07:30:05.760467 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a9a2014a-9cf1-447f-ba46-0bd389003bf7-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 07:30:05 crc kubenswrapper[4933]: I0122 07:30:05.760479 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-45gw8\" (UniqueName: \"kubernetes.io/projected/a9a2014a-9cf1-447f-ba46-0bd389003bf7-kube-api-access-45gw8\") on node \"crc\" DevicePath \"\"" Jan 22 07:30:05 crc kubenswrapper[4933]: I0122 07:30:05.965163 4933 scope.go:117] "RemoveContainer" containerID="012d065a57804f23df9a5ceb4e2d90f446ec846602b84adb74e9c66dfbabc23b" Jan 22 07:30:05 crc kubenswrapper[4933]: I0122 07:30:05.987232 4933 scope.go:117] "RemoveContainer" containerID="5b73a1e83346b1af58e93d754aad46f652bd9a279b47cb32e2c19b8e6d8f9c17" Jan 22 07:30:06 crc kubenswrapper[4933]: I0122 07:30:06.018431 4933 scope.go:117] "RemoveContainer" containerID="375fda8391f5b73ad69840756f1f1e095578a60dc7d58d1370d16cb7acb33b37" Jan 22 07:30:06 crc kubenswrapper[4933]: I0122 07:30:06.093820 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-2vflz" event={"ID":"a9a2014a-9cf1-447f-ba46-0bd389003bf7","Type":"ContainerDied","Data":"21edec93cee5efd21277f47c2eba95d1c6a838c29eb308d36187dac2f2fdf8c1"} Jan 22 07:30:06 crc kubenswrapper[4933]: I0122 07:30:06.093896 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="21edec93cee5efd21277f47c2eba95d1c6a838c29eb308d36187dac2f2fdf8c1" Jan 22 07:30:06 crc kubenswrapper[4933]: I0122 07:30:06.093854 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-2vflz" Jan 22 07:30:06 crc kubenswrapper[4933]: I0122 07:30:06.504862 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cc8f64f-7f52-4255-b849-b2bad1f4dd09" path="/var/lib/kubelet/pods/8cc8f64f-7f52-4255-b849-b2bad1f4dd09/volumes" Jan 22 07:30:07 crc kubenswrapper[4933]: I0122 07:30:07.631453 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-6c58df6f69-mk9r8"] Jan 22 07:30:07 crc kubenswrapper[4933]: E0122 07:30:07.632270 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9a2014a-9cf1-447f-ba46-0bd389003bf7" containerName="heat-db-sync" Jan 22 07:30:07 crc kubenswrapper[4933]: I0122 07:30:07.632289 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9a2014a-9cf1-447f-ba46-0bd389003bf7" containerName="heat-db-sync" Jan 22 07:30:07 crc kubenswrapper[4933]: E0122 07:30:07.632316 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58d48b6b-6a4a-4ab6-866c-251ea91606b4" containerName="collect-profiles" Jan 22 07:30:07 crc kubenswrapper[4933]: I0122 07:30:07.632324 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="58d48b6b-6a4a-4ab6-866c-251ea91606b4" containerName="collect-profiles" Jan 22 07:30:07 crc kubenswrapper[4933]: I0122 07:30:07.632554 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9a2014a-9cf1-447f-ba46-0bd389003bf7" containerName="heat-db-sync" Jan 22 07:30:07 crc kubenswrapper[4933]: I0122 07:30:07.632584 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="58d48b6b-6a4a-4ab6-866c-251ea91606b4" containerName="collect-profiles" Jan 22 07:30:07 crc kubenswrapper[4933]: I0122 07:30:07.637287 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-6c58df6f69-mk9r8" Jan 22 07:30:07 crc kubenswrapper[4933]: I0122 07:30:07.645490 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-wmmwz" Jan 22 07:30:07 crc kubenswrapper[4933]: I0122 07:30:07.648117 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-engine-config-data" Jan 22 07:30:07 crc kubenswrapper[4933]: I0122 07:30:07.662910 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-6c58df6f69-mk9r8"] Jan 22 07:30:07 crc kubenswrapper[4933]: I0122 07:30:07.664250 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Jan 22 07:30:07 crc kubenswrapper[4933]: I0122 07:30:07.698849 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cjm2s\" (UniqueName: \"kubernetes.io/projected/29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c-kube-api-access-cjm2s\") pod \"heat-engine-6c58df6f69-mk9r8\" (UID: \"29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c\") " pod="openstack/heat-engine-6c58df6f69-mk9r8" Jan 22 07:30:07 crc kubenswrapper[4933]: I0122 07:30:07.699006 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c-combined-ca-bundle\") pod \"heat-engine-6c58df6f69-mk9r8\" (UID: \"29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c\") " pod="openstack/heat-engine-6c58df6f69-mk9r8" Jan 22 07:30:07 crc kubenswrapper[4933]: I0122 07:30:07.699067 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c-config-data-custom\") pod \"heat-engine-6c58df6f69-mk9r8\" (UID: \"29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c\") " pod="openstack/heat-engine-6c58df6f69-mk9r8" Jan 22 07:30:07 crc kubenswrapper[4933]: I0122 07:30:07.699148 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c-config-data\") pod \"heat-engine-6c58df6f69-mk9r8\" (UID: \"29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c\") " pod="openstack/heat-engine-6c58df6f69-mk9r8" Jan 22 07:30:07 crc kubenswrapper[4933]: I0122 07:30:07.744991 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-6f9f966dff-f4v2x"] Jan 22 07:30:07 crc kubenswrapper[4933]: I0122 07:30:07.760455 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-6f9f966dff-f4v2x" Jan 22 07:30:07 crc kubenswrapper[4933]: I0122 07:30:07.765994 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-6f9f966dff-f4v2x"] Jan 22 07:30:07 crc kubenswrapper[4933]: I0122 07:30:07.769647 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-cfnapi-config-data" Jan 22 07:30:07 crc kubenswrapper[4933]: I0122 07:30:07.790137 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-5496d89d78-ntrvh"] Jan 22 07:30:07 crc kubenswrapper[4933]: I0122 07:30:07.795617 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-5496d89d78-ntrvh" Jan 22 07:30:07 crc kubenswrapper[4933]: I0122 07:30:07.800293 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-api-config-data" Jan 22 07:30:07 crc kubenswrapper[4933]: I0122 07:30:07.801746 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c-combined-ca-bundle\") pod \"heat-engine-6c58df6f69-mk9r8\" (UID: \"29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c\") " pod="openstack/heat-engine-6c58df6f69-mk9r8" Jan 22 07:30:07 crc kubenswrapper[4933]: I0122 07:30:07.801822 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c-config-data-custom\") pod \"heat-engine-6c58df6f69-mk9r8\" (UID: \"29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c\") " pod="openstack/heat-engine-6c58df6f69-mk9r8" Jan 22 07:30:07 crc kubenswrapper[4933]: I0122 07:30:07.808084 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c-config-data\") pod \"heat-engine-6c58df6f69-mk9r8\" (UID: \"29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c\") " pod="openstack/heat-engine-6c58df6f69-mk9r8" Jan 22 07:30:07 crc kubenswrapper[4933]: I0122 07:30:07.808134 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cjm2s\" (UniqueName: \"kubernetes.io/projected/29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c-kube-api-access-cjm2s\") pod \"heat-engine-6c58df6f69-mk9r8\" (UID: \"29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c\") " pod="openstack/heat-engine-6c58df6f69-mk9r8" Jan 22 07:30:07 crc kubenswrapper[4933]: I0122 07:30:07.816008 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c-config-data-custom\") pod \"heat-engine-6c58df6f69-mk9r8\" (UID: \"29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c\") " pod="openstack/heat-engine-6c58df6f69-mk9r8" Jan 22 07:30:07 crc kubenswrapper[4933]: I0122 07:30:07.819785 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c-combined-ca-bundle\") pod \"heat-engine-6c58df6f69-mk9r8\" (UID: \"29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c\") " pod="openstack/heat-engine-6c58df6f69-mk9r8" Jan 22 07:30:07 crc kubenswrapper[4933]: I0122 07:30:07.825136 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-5496d89d78-ntrvh"] Jan 22 07:30:07 crc kubenswrapper[4933]: I0122 07:30:07.827303 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c-config-data\") pod \"heat-engine-6c58df6f69-mk9r8\" (UID: \"29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c\") " pod="openstack/heat-engine-6c58df6f69-mk9r8" Jan 22 07:30:07 crc kubenswrapper[4933]: I0122 07:30:07.835612 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cjm2s\" (UniqueName: \"kubernetes.io/projected/29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c-kube-api-access-cjm2s\") pod \"heat-engine-6c58df6f69-mk9r8\" (UID: \"29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c\") " pod="openstack/heat-engine-6c58df6f69-mk9r8" Jan 22 07:30:07 crc kubenswrapper[4933]: I0122 
Jan 22 07:30:07 crc kubenswrapper[4933]: I0122 07:30:07.910386 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a40190a-d7c0-42cf-86b0-10eb2ff37d24-combined-ca-bundle\") pod \"heat-cfnapi-6f9f966dff-f4v2x\" (UID: \"1a40190a-d7c0-42cf-86b0-10eb2ff37d24\") " pod="openstack/heat-cfnapi-6f9f966dff-f4v2x"
Jan 22 07:30:07 crc kubenswrapper[4933]: I0122 07:30:07.910426 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a95daff6-8bb9-4963-bfdd-52c7b8050f24-combined-ca-bundle\") pod \"heat-api-5496d89d78-ntrvh\" (UID: \"a95daff6-8bb9-4963-bfdd-52c7b8050f24\") " pod="openstack/heat-api-5496d89d78-ntrvh"
Jan 22 07:30:07 crc kubenswrapper[4933]: I0122 07:30:07.910504 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9j2zh\" (UniqueName: \"kubernetes.io/projected/1a40190a-d7c0-42cf-86b0-10eb2ff37d24-kube-api-access-9j2zh\") pod \"heat-cfnapi-6f9f966dff-f4v2x\" (UID: \"1a40190a-d7c0-42cf-86b0-10eb2ff37d24\") " pod="openstack/heat-cfnapi-6f9f966dff-f4v2x"
Jan 22 07:30:07 crc kubenswrapper[4933]: I0122 07:30:07.910538 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fln4n\" (UniqueName: \"kubernetes.io/projected/a95daff6-8bb9-4963-bfdd-52c7b8050f24-kube-api-access-fln4n\") pod \"heat-api-5496d89d78-ntrvh\" (UID: \"a95daff6-8bb9-4963-bfdd-52c7b8050f24\") " pod="openstack/heat-api-5496d89d78-ntrvh"
Jan 22 07:30:07 crc kubenswrapper[4933]: I0122 07:30:07.910588 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a95daff6-8bb9-4963-bfdd-52c7b8050f24-config-data-custom\") pod \"heat-api-5496d89d78-ntrvh\" (UID: \"a95daff6-8bb9-4963-bfdd-52c7b8050f24\") " pod="openstack/heat-api-5496d89d78-ntrvh"
Jan 22 07:30:07 crc kubenswrapper[4933]: I0122 07:30:07.910617 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a95daff6-8bb9-4963-bfdd-52c7b8050f24-config-data\") pod \"heat-api-5496d89d78-ntrvh\" (UID: \"a95daff6-8bb9-4963-bfdd-52c7b8050f24\") " pod="openstack/heat-api-5496d89d78-ntrvh"
Jan 22 07:30:07 crc kubenswrapper[4933]: I0122 07:30:07.910673 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1a40190a-d7c0-42cf-86b0-10eb2ff37d24-config-data-custom\") pod \"heat-cfnapi-6f9f966dff-f4v2x\" (UID: \"1a40190a-d7c0-42cf-86b0-10eb2ff37d24\") " pod="openstack/heat-cfnapi-6f9f966dff-f4v2x"
Jan 22 07:30:07 crc kubenswrapper[4933]: I0122 07:30:07.971858 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-6c58df6f69-mk9r8"
Jan 22 07:30:08 crc kubenswrapper[4933]: I0122 07:30:08.011986 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a40190a-d7c0-42cf-86b0-10eb2ff37d24-config-data\") pod \"heat-cfnapi-6f9f966dff-f4v2x\" (UID: \"1a40190a-d7c0-42cf-86b0-10eb2ff37d24\") " pod="openstack/heat-cfnapi-6f9f966dff-f4v2x"
Jan 22 07:30:08 crc kubenswrapper[4933]: I0122 07:30:08.012047 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a40190a-d7c0-42cf-86b0-10eb2ff37d24-combined-ca-bundle\") pod \"heat-cfnapi-6f9f966dff-f4v2x\" (UID: \"1a40190a-d7c0-42cf-86b0-10eb2ff37d24\") " pod="openstack/heat-cfnapi-6f9f966dff-f4v2x"
Jan 22 07:30:08 crc kubenswrapper[4933]: I0122 07:30:08.012105 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a95daff6-8bb9-4963-bfdd-52c7b8050f24-combined-ca-bundle\") pod \"heat-api-5496d89d78-ntrvh\" (UID: \"a95daff6-8bb9-4963-bfdd-52c7b8050f24\") " pod="openstack/heat-api-5496d89d78-ntrvh"
Jan 22 07:30:08 crc kubenswrapper[4933]: I0122 07:30:08.012165 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9j2zh\" (UniqueName: \"kubernetes.io/projected/1a40190a-d7c0-42cf-86b0-10eb2ff37d24-kube-api-access-9j2zh\") pod \"heat-cfnapi-6f9f966dff-f4v2x\" (UID: \"1a40190a-d7c0-42cf-86b0-10eb2ff37d24\") " pod="openstack/heat-cfnapi-6f9f966dff-f4v2x"
Jan 22 07:30:08 crc kubenswrapper[4933]: I0122 07:30:08.012189 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fln4n\" (UniqueName: \"kubernetes.io/projected/a95daff6-8bb9-4963-bfdd-52c7b8050f24-kube-api-access-fln4n\") pod \"heat-api-5496d89d78-ntrvh\" (UID: \"a95daff6-8bb9-4963-bfdd-52c7b8050f24\") " pod="openstack/heat-api-5496d89d78-ntrvh"
Jan 22 07:30:08 crc kubenswrapper[4933]: I0122 07:30:08.012263 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a95daff6-8bb9-4963-bfdd-52c7b8050f24-config-data-custom\") pod \"heat-api-5496d89d78-ntrvh\" (UID: \"a95daff6-8bb9-4963-bfdd-52c7b8050f24\") " pod="openstack/heat-api-5496d89d78-ntrvh"
Jan 22 07:30:08 crc kubenswrapper[4933]: I0122 07:30:08.012285 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a95daff6-8bb9-4963-bfdd-52c7b8050f24-config-data\") pod \"heat-api-5496d89d78-ntrvh\" (UID: \"a95daff6-8bb9-4963-bfdd-52c7b8050f24\") " pod="openstack/heat-api-5496d89d78-ntrvh"
Jan 22 07:30:08 crc kubenswrapper[4933]: I0122 07:30:08.012318 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1a40190a-d7c0-42cf-86b0-10eb2ff37d24-config-data-custom\") pod \"heat-cfnapi-6f9f966dff-f4v2x\" (UID: \"1a40190a-d7c0-42cf-86b0-10eb2ff37d24\") " pod="openstack/heat-cfnapi-6f9f966dff-f4v2x"
Jan 22 07:30:08 crc kubenswrapper[4933]: I0122 07:30:08.019198 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1a40190a-d7c0-42cf-86b0-10eb2ff37d24-config-data-custom\") pod \"heat-cfnapi-6f9f966dff-f4v2x\" (UID: \"1a40190a-d7c0-42cf-86b0-10eb2ff37d24\") " pod="openstack/heat-cfnapi-6f9f966dff-f4v2x"
Jan 22 07:30:08 crc kubenswrapper[4933]: I0122 07:30:08.021891 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a95daff6-8bb9-4963-bfdd-52c7b8050f24-config-data-custom\") pod \"heat-api-5496d89d78-ntrvh\" (UID: \"a95daff6-8bb9-4963-bfdd-52c7b8050f24\") " pod="openstack/heat-api-5496d89d78-ntrvh"
Jan 22 07:30:08 crc kubenswrapper[4933]: I0122 07:30:08.022514 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a95daff6-8bb9-4963-bfdd-52c7b8050f24-combined-ca-bundle\") pod \"heat-api-5496d89d78-ntrvh\" (UID: \"a95daff6-8bb9-4963-bfdd-52c7b8050f24\") " pod="openstack/heat-api-5496d89d78-ntrvh"
Jan 22 07:30:08 crc kubenswrapper[4933]: I0122 07:30:08.023178 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a40190a-d7c0-42cf-86b0-10eb2ff37d24-combined-ca-bundle\") pod \"heat-cfnapi-6f9f966dff-f4v2x\" (UID: \"1a40190a-d7c0-42cf-86b0-10eb2ff37d24\") " pod="openstack/heat-cfnapi-6f9f966dff-f4v2x"
Jan 22 07:30:08 crc kubenswrapper[4933]: I0122 07:30:08.023321 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a95daff6-8bb9-4963-bfdd-52c7b8050f24-config-data\") pod \"heat-api-5496d89d78-ntrvh\" (UID: \"a95daff6-8bb9-4963-bfdd-52c7b8050f24\") " pod="openstack/heat-api-5496d89d78-ntrvh"
Jan 22 07:30:08 crc kubenswrapper[4933]: I0122 07:30:08.024401 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a40190a-d7c0-42cf-86b0-10eb2ff37d24-config-data\") pod \"heat-cfnapi-6f9f966dff-f4v2x\" (UID: \"1a40190a-d7c0-42cf-86b0-10eb2ff37d24\") " pod="openstack/heat-cfnapi-6f9f966dff-f4v2x"
Jan 22 07:30:08 crc kubenswrapper[4933]: I0122 07:30:08.032732 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9j2zh\" (UniqueName: \"kubernetes.io/projected/1a40190a-d7c0-42cf-86b0-10eb2ff37d24-kube-api-access-9j2zh\") pod \"heat-cfnapi-6f9f966dff-f4v2x\" (UID: \"1a40190a-d7c0-42cf-86b0-10eb2ff37d24\") " pod="openstack/heat-cfnapi-6f9f966dff-f4v2x"
Jan 22 07:30:08 crc kubenswrapper[4933]: I0122 07:30:08.033278 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fln4n\" (UniqueName: \"kubernetes.io/projected/a95daff6-8bb9-4963-bfdd-52c7b8050f24-kube-api-access-fln4n\") pod \"heat-api-5496d89d78-ntrvh\" (UID: \"a95daff6-8bb9-4963-bfdd-52c7b8050f24\") " pod="openstack/heat-api-5496d89d78-ntrvh"
Jan 22 07:30:08 crc kubenswrapper[4933]: I0122 07:30:08.113572 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-6f9f966dff-f4v2x"
Jan 22 07:30:08 crc kubenswrapper[4933]: I0122 07:30:08.164659 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-6d688745d-jhzmc"
Jan 22 07:30:08 crc kubenswrapper[4933]: I0122 07:30:08.225694 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-5496d89d78-ntrvh"
Need to start a new one" pod="openstack/heat-api-5496d89d78-ntrvh" Jan 22 07:30:08 crc kubenswrapper[4933]: I0122 07:30:08.513909 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-6c58df6f69-mk9r8"] Jan 22 07:30:08 crc kubenswrapper[4933]: I0122 07:30:08.725882 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-6f9f966dff-f4v2x"] Jan 22 07:30:08 crc kubenswrapper[4933]: I0122 07:30:08.819272 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-5496d89d78-ntrvh"] Jan 22 07:30:08 crc kubenswrapper[4933]: W0122 07:30:08.824878 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda95daff6_8bb9_4963_bfdd_52c7b8050f24.slice/crio-dd7e545d20ab121b7f0fa7286da11547198b3ba0c8770a779178315acdf826d0 WatchSource:0}: Error finding container dd7e545d20ab121b7f0fa7286da11547198b3ba0c8770a779178315acdf826d0: Status 404 returned error can't find the container with id dd7e545d20ab121b7f0fa7286da11547198b3ba0c8770a779178315acdf826d0 Jan 22 07:30:09 crc kubenswrapper[4933]: I0122 07:30:09.133773 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-6f9f966dff-f4v2x" event={"ID":"1a40190a-d7c0-42cf-86b0-10eb2ff37d24","Type":"ContainerStarted","Data":"1286c8f12b5e1586422132332cd4affeb79e948e476669573a28b5edfbc1f080"} Jan 22 07:30:09 crc kubenswrapper[4933]: I0122 07:30:09.135868 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-6c58df6f69-mk9r8" event={"ID":"29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c","Type":"ContainerStarted","Data":"588e27176aa729aee553f8d7df5223a01c3896d81e28650696d64cc1cb0b9a7c"} Jan 22 07:30:09 crc kubenswrapper[4933]: I0122 07:30:09.135898 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-6c58df6f69-mk9r8" event={"ID":"29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c","Type":"ContainerStarted","Data":"eb45099d2caec69e462616d40390e24e8a26d6e143486c42031c48e37b48c4f6"} Jan 22 07:30:09 crc kubenswrapper[4933]: I0122 07:30:09.137410 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-6c58df6f69-mk9r8" Jan 22 07:30:09 crc kubenswrapper[4933]: I0122 07:30:09.139861 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5496d89d78-ntrvh" event={"ID":"a95daff6-8bb9-4963-bfdd-52c7b8050f24","Type":"ContainerStarted","Data":"dd7e545d20ab121b7f0fa7286da11547198b3ba0c8770a779178315acdf826d0"} Jan 22 07:30:09 crc kubenswrapper[4933]: I0122 07:30:09.163004 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-6c58df6f69-mk9r8" podStartSLOduration=2.162979995 podStartE2EDuration="2.162979995s" podCreationTimestamp="2026-01-22 07:30:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:30:09.157745387 +0000 UTC m=+6256.994870760" watchObservedRunningTime="2026-01-22 07:30:09.162979995 +0000 UTC m=+6257.000105358" Jan 22 07:30:09 crc kubenswrapper[4933]: I0122 07:30:09.957141 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-6d688745d-jhzmc" Jan 22 07:30:10 crc kubenswrapper[4933]: I0122 07:30:10.034931 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-7f598858d8-sp7f8"] Jan 22 07:30:10 crc kubenswrapper[4933]: I0122 07:30:10.035572 4933 kuberuntime_container.go:808] "Killing container 
with a grace period" pod="openstack/horizon-7f598858d8-sp7f8" podUID="d5a0747b-cb15-4a5a-bf0d-71de8f9592eb" containerName="horizon-log" containerID="cri-o://3dc911b554e56e3ebc70ae238c7cb0073de5c3341655535625142dd7a09ea6ec" gracePeriod=30 Jan 22 07:30:10 crc kubenswrapper[4933]: I0122 07:30:10.036069 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-7f598858d8-sp7f8" podUID="d5a0747b-cb15-4a5a-bf0d-71de8f9592eb" containerName="horizon" containerID="cri-o://3ab4f7e221db08ced43307f7f2b631adf3c32ed4365154344de1494821ad4e48" gracePeriod=30 Jan 22 07:30:13 crc kubenswrapper[4933]: I0122 07:30:13.209578 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5496d89d78-ntrvh" event={"ID":"a95daff6-8bb9-4963-bfdd-52c7b8050f24","Type":"ContainerStarted","Data":"38fcbd10171679c896a7fa0d0d3a6d8e5735eb4df02c387f4a819a63671552bd"} Jan 22 07:30:13 crc kubenswrapper[4933]: I0122 07:30:13.210290 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-5496d89d78-ntrvh" Jan 22 07:30:13 crc kubenswrapper[4933]: I0122 07:30:13.212841 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-6f9f966dff-f4v2x" event={"ID":"1a40190a-d7c0-42cf-86b0-10eb2ff37d24","Type":"ContainerStarted","Data":"ca56d3a356b6d37b7a37ea0a5c70709948d480bc04088e6b6676728ae701c298"} Jan 22 07:30:13 crc kubenswrapper[4933]: I0122 07:30:13.213273 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-6f9f966dff-f4v2x" Jan 22 07:30:13 crc kubenswrapper[4933]: I0122 07:30:13.229287 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-5496d89d78-ntrvh" podStartSLOduration=2.646465108 podStartE2EDuration="6.229267387s" podCreationTimestamp="2026-01-22 07:30:07 +0000 UTC" firstStartedPulling="2026-01-22 07:30:08.837737682 +0000 UTC m=+6256.674863035" lastFinishedPulling="2026-01-22 07:30:12.420539961 +0000 UTC m=+6260.257665314" observedRunningTime="2026-01-22 07:30:13.226606662 +0000 UTC m=+6261.063732015" watchObservedRunningTime="2026-01-22 07:30:13.229267387 +0000 UTC m=+6261.066392740" Jan 22 07:30:13 crc kubenswrapper[4933]: I0122 07:30:13.244678 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-6f9f966dff-f4v2x" podStartSLOduration=2.56932733 podStartE2EDuration="6.244655611s" podCreationTimestamp="2026-01-22 07:30:07 +0000 UTC" firstStartedPulling="2026-01-22 07:30:08.741284705 +0000 UTC m=+6256.578410058" lastFinishedPulling="2026-01-22 07:30:12.416612986 +0000 UTC m=+6260.253738339" observedRunningTime="2026-01-22 07:30:13.239720752 +0000 UTC m=+6261.076846115" watchObservedRunningTime="2026-01-22 07:30:13.244655611 +0000 UTC m=+6261.081780964" Jan 22 07:30:13 crc kubenswrapper[4933]: I0122 07:30:13.387614 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-7f598858d8-sp7f8" podUID="d5a0747b-cb15-4a5a-bf0d-71de8f9592eb" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.120:8443/dashboard/auth/login/?next=/dashboard/\": read tcp 10.217.0.2:56016->10.217.1.120:8443: read: connection reset by peer" Jan 22 07:30:14 crc kubenswrapper[4933]: I0122 07:30:14.222043 4933 generic.go:334] "Generic (PLEG): container finished" podID="d5a0747b-cb15-4a5a-bf0d-71de8f9592eb" containerID="3ab4f7e221db08ced43307f7f2b631adf3c32ed4365154344de1494821ad4e48" exitCode=0 Jan 22 07:30:14 crc kubenswrapper[4933]: I0122 07:30:14.222109 4933 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7f598858d8-sp7f8" event={"ID":"d5a0747b-cb15-4a5a-bf0d-71de8f9592eb","Type":"ContainerDied","Data":"3ab4f7e221db08ced43307f7f2b631adf3c32ed4365154344de1494821ad4e48"} Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.082908 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-577d6f656f-2c5lp"] Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.084448 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-577d6f656f-2c5lp" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.109839 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-7598674657-gtnxh"] Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.111406 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7598674657-gtnxh" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.124695 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-577d6f656f-2c5lp"] Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.134603 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-7598674657-gtnxh"] Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.159407 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-5d6bcfd56f-7cl29"] Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.164381 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-5d6bcfd56f-7cl29" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.168946 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-5d6bcfd56f-7cl29"] Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.173415 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/547965cc-5259-4daa-b4f0-96a52472e48f-config-data\") pod \"heat-cfnapi-7598674657-gtnxh\" (UID: \"547965cc-5259-4daa-b4f0-96a52472e48f\") " pod="openstack/heat-cfnapi-7598674657-gtnxh" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.173464 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-shnd7\" (UniqueName: \"kubernetes.io/projected/547965cc-5259-4daa-b4f0-96a52472e48f-kube-api-access-shnd7\") pod \"heat-cfnapi-7598674657-gtnxh\" (UID: \"547965cc-5259-4daa-b4f0-96a52472e48f\") " pod="openstack/heat-cfnapi-7598674657-gtnxh" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.173549 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/547965cc-5259-4daa-b4f0-96a52472e48f-config-data-custom\") pod \"heat-cfnapi-7598674657-gtnxh\" (UID: \"547965cc-5259-4daa-b4f0-96a52472e48f\") " pod="openstack/heat-cfnapi-7598674657-gtnxh" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.173581 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/657490c7-ddc0-439c-bf19-ebffbfc535bb-combined-ca-bundle\") pod \"heat-engine-577d6f656f-2c5lp\" (UID: \"657490c7-ddc0-439c-bf19-ebffbfc535bb\") " pod="openstack/heat-engine-577d6f656f-2c5lp" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.173605 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/547965cc-5259-4daa-b4f0-96a52472e48f-combined-ca-bundle\") pod \"heat-cfnapi-7598674657-gtnxh\" (UID: \"547965cc-5259-4daa-b4f0-96a52472e48f\") " pod="openstack/heat-cfnapi-7598674657-gtnxh" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.173648 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t6l6v\" (UniqueName: \"kubernetes.io/projected/657490c7-ddc0-439c-bf19-ebffbfc535bb-kube-api-access-t6l6v\") pod \"heat-engine-577d6f656f-2c5lp\" (UID: \"657490c7-ddc0-439c-bf19-ebffbfc535bb\") " pod="openstack/heat-engine-577d6f656f-2c5lp" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.173702 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/657490c7-ddc0-439c-bf19-ebffbfc535bb-config-data\") pod \"heat-engine-577d6f656f-2c5lp\" (UID: \"657490c7-ddc0-439c-bf19-ebffbfc535bb\") " pod="openstack/heat-engine-577d6f656f-2c5lp" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.173740 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/657490c7-ddc0-439c-bf19-ebffbfc535bb-config-data-custom\") pod \"heat-engine-577d6f656f-2c5lp\" (UID: \"657490c7-ddc0-439c-bf19-ebffbfc535bb\") " pod="openstack/heat-engine-577d6f656f-2c5lp" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.274965 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/547965cc-5259-4daa-b4f0-96a52472e48f-config-data-custom\") pod \"heat-cfnapi-7598674657-gtnxh\" (UID: \"547965cc-5259-4daa-b4f0-96a52472e48f\") " pod="openstack/heat-cfnapi-7598674657-gtnxh" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.275021 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/657490c7-ddc0-439c-bf19-ebffbfc535bb-combined-ca-bundle\") pod \"heat-engine-577d6f656f-2c5lp\" (UID: \"657490c7-ddc0-439c-bf19-ebffbfc535bb\") " pod="openstack/heat-engine-577d6f656f-2c5lp" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.275049 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/547965cc-5259-4daa-b4f0-96a52472e48f-combined-ca-bundle\") pod \"heat-cfnapi-7598674657-gtnxh\" (UID: \"547965cc-5259-4daa-b4f0-96a52472e48f\") " pod="openstack/heat-cfnapi-7598674657-gtnxh" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.275107 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t6l6v\" (UniqueName: \"kubernetes.io/projected/657490c7-ddc0-439c-bf19-ebffbfc535bb-kube-api-access-t6l6v\") pod \"heat-engine-577d6f656f-2c5lp\" (UID: \"657490c7-ddc0-439c-bf19-ebffbfc535bb\") " pod="openstack/heat-engine-577d6f656f-2c5lp" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.275157 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24ee8df1-f55e-4d06-8642-c4ac760a512c-config-data\") pod \"heat-api-5d6bcfd56f-7cl29\" (UID: \"24ee8df1-f55e-4d06-8642-c4ac760a512c\") " pod="openstack/heat-api-5d6bcfd56f-7cl29" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.275185 4933 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/657490c7-ddc0-439c-bf19-ebffbfc535bb-config-data\") pod \"heat-engine-577d6f656f-2c5lp\" (UID: \"657490c7-ddc0-439c-bf19-ebffbfc535bb\") " pod="openstack/heat-engine-577d6f656f-2c5lp" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.275212 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24ee8df1-f55e-4d06-8642-c4ac760a512c-combined-ca-bundle\") pod \"heat-api-5d6bcfd56f-7cl29\" (UID: \"24ee8df1-f55e-4d06-8642-c4ac760a512c\") " pod="openstack/heat-api-5d6bcfd56f-7cl29" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.275229 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kdnhp\" (UniqueName: \"kubernetes.io/projected/24ee8df1-f55e-4d06-8642-c4ac760a512c-kube-api-access-kdnhp\") pod \"heat-api-5d6bcfd56f-7cl29\" (UID: \"24ee8df1-f55e-4d06-8642-c4ac760a512c\") " pod="openstack/heat-api-5d6bcfd56f-7cl29" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.275250 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/657490c7-ddc0-439c-bf19-ebffbfc535bb-config-data-custom\") pod \"heat-engine-577d6f656f-2c5lp\" (UID: \"657490c7-ddc0-439c-bf19-ebffbfc535bb\") " pod="openstack/heat-engine-577d6f656f-2c5lp" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.275270 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/24ee8df1-f55e-4d06-8642-c4ac760a512c-config-data-custom\") pod \"heat-api-5d6bcfd56f-7cl29\" (UID: \"24ee8df1-f55e-4d06-8642-c4ac760a512c\") " pod="openstack/heat-api-5d6bcfd56f-7cl29" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.275289 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/547965cc-5259-4daa-b4f0-96a52472e48f-config-data\") pod \"heat-cfnapi-7598674657-gtnxh\" (UID: \"547965cc-5259-4daa-b4f0-96a52472e48f\") " pod="openstack/heat-cfnapi-7598674657-gtnxh" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.275306 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-shnd7\" (UniqueName: \"kubernetes.io/projected/547965cc-5259-4daa-b4f0-96a52472e48f-kube-api-access-shnd7\") pod \"heat-cfnapi-7598674657-gtnxh\" (UID: \"547965cc-5259-4daa-b4f0-96a52472e48f\") " pod="openstack/heat-cfnapi-7598674657-gtnxh" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.284761 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/547965cc-5259-4daa-b4f0-96a52472e48f-config-data\") pod \"heat-cfnapi-7598674657-gtnxh\" (UID: \"547965cc-5259-4daa-b4f0-96a52472e48f\") " pod="openstack/heat-cfnapi-7598674657-gtnxh" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.284893 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/547965cc-5259-4daa-b4f0-96a52472e48f-config-data-custom\") pod \"heat-cfnapi-7598674657-gtnxh\" (UID: \"547965cc-5259-4daa-b4f0-96a52472e48f\") " pod="openstack/heat-cfnapi-7598674657-gtnxh" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.289128 4933 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/657490c7-ddc0-439c-bf19-ebffbfc535bb-config-data\") pod \"heat-engine-577d6f656f-2c5lp\" (UID: \"657490c7-ddc0-439c-bf19-ebffbfc535bb\") " pod="openstack/heat-engine-577d6f656f-2c5lp" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.289678 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/657490c7-ddc0-439c-bf19-ebffbfc535bb-config-data-custom\") pod \"heat-engine-577d6f656f-2c5lp\" (UID: \"657490c7-ddc0-439c-bf19-ebffbfc535bb\") " pod="openstack/heat-engine-577d6f656f-2c5lp" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.294859 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/547965cc-5259-4daa-b4f0-96a52472e48f-combined-ca-bundle\") pod \"heat-cfnapi-7598674657-gtnxh\" (UID: \"547965cc-5259-4daa-b4f0-96a52472e48f\") " pod="openstack/heat-cfnapi-7598674657-gtnxh" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.295819 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/657490c7-ddc0-439c-bf19-ebffbfc535bb-combined-ca-bundle\") pod \"heat-engine-577d6f656f-2c5lp\" (UID: \"657490c7-ddc0-439c-bf19-ebffbfc535bb\") " pod="openstack/heat-engine-577d6f656f-2c5lp" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.296556 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-shnd7\" (UniqueName: \"kubernetes.io/projected/547965cc-5259-4daa-b4f0-96a52472e48f-kube-api-access-shnd7\") pod \"heat-cfnapi-7598674657-gtnxh\" (UID: \"547965cc-5259-4daa-b4f0-96a52472e48f\") " pod="openstack/heat-cfnapi-7598674657-gtnxh" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.332153 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t6l6v\" (UniqueName: \"kubernetes.io/projected/657490c7-ddc0-439c-bf19-ebffbfc535bb-kube-api-access-t6l6v\") pod \"heat-engine-577d6f656f-2c5lp\" (UID: \"657490c7-ddc0-439c-bf19-ebffbfc535bb\") " pod="openstack/heat-engine-577d6f656f-2c5lp" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.377168 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24ee8df1-f55e-4d06-8642-c4ac760a512c-config-data\") pod \"heat-api-5d6bcfd56f-7cl29\" (UID: \"24ee8df1-f55e-4d06-8642-c4ac760a512c\") " pod="openstack/heat-api-5d6bcfd56f-7cl29" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.377242 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24ee8df1-f55e-4d06-8642-c4ac760a512c-combined-ca-bundle\") pod \"heat-api-5d6bcfd56f-7cl29\" (UID: \"24ee8df1-f55e-4d06-8642-c4ac760a512c\") " pod="openstack/heat-api-5d6bcfd56f-7cl29" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.377265 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kdnhp\" (UniqueName: \"kubernetes.io/projected/24ee8df1-f55e-4d06-8642-c4ac760a512c-kube-api-access-kdnhp\") pod \"heat-api-5d6bcfd56f-7cl29\" (UID: \"24ee8df1-f55e-4d06-8642-c4ac760a512c\") " pod="openstack/heat-api-5d6bcfd56f-7cl29" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.377296 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config-data-custom\" (UniqueName: \"kubernetes.io/secret/24ee8df1-f55e-4d06-8642-c4ac760a512c-config-data-custom\") pod \"heat-api-5d6bcfd56f-7cl29\" (UID: \"24ee8df1-f55e-4d06-8642-c4ac760a512c\") " pod="openstack/heat-api-5d6bcfd56f-7cl29" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.390359 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24ee8df1-f55e-4d06-8642-c4ac760a512c-config-data\") pod \"heat-api-5d6bcfd56f-7cl29\" (UID: \"24ee8df1-f55e-4d06-8642-c4ac760a512c\") " pod="openstack/heat-api-5d6bcfd56f-7cl29" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.391900 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/24ee8df1-f55e-4d06-8642-c4ac760a512c-config-data-custom\") pod \"heat-api-5d6bcfd56f-7cl29\" (UID: \"24ee8df1-f55e-4d06-8642-c4ac760a512c\") " pod="openstack/heat-api-5d6bcfd56f-7cl29" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.399845 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24ee8df1-f55e-4d06-8642-c4ac760a512c-combined-ca-bundle\") pod \"heat-api-5d6bcfd56f-7cl29\" (UID: \"24ee8df1-f55e-4d06-8642-c4ac760a512c\") " pod="openstack/heat-api-5d6bcfd56f-7cl29" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.405566 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-577d6f656f-2c5lp" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.412695 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kdnhp\" (UniqueName: \"kubernetes.io/projected/24ee8df1-f55e-4d06-8642-c4ac760a512c-kube-api-access-kdnhp\") pod \"heat-api-5d6bcfd56f-7cl29\" (UID: \"24ee8df1-f55e-4d06-8642-c4ac760a512c\") " pod="openstack/heat-api-5d6bcfd56f-7cl29" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.444799 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7598674657-gtnxh" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.489200 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-5d6bcfd56f-7cl29" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.490797 4933 scope.go:117] "RemoveContainer" containerID="16fd824342f037f0104eaf6d48b751a02eeec5a84e0c2a32f1a295811edc0336" Jan 22 07:30:15 crc kubenswrapper[4933]: E0122 07:30:15.491200 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:30:15 crc kubenswrapper[4933]: I0122 07:30:15.929980 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-577d6f656f-2c5lp"] Jan 22 07:30:15 crc kubenswrapper[4933]: W0122 07:30:15.935710 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod657490c7_ddc0_439c_bf19_ebffbfc535bb.slice/crio-73aa03add1f20e9e74e1bb54df41e08221a764ede4eae844ee1a4e2798e9cf64 WatchSource:0}: Error finding container 73aa03add1f20e9e74e1bb54df41e08221a764ede4eae844ee1a4e2798e9cf64: Status 404 returned error can't find the container with id 73aa03add1f20e9e74e1bb54df41e08221a764ede4eae844ee1a4e2798e9cf64 Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.040445 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-7598674657-gtnxh"] Jan 22 07:30:16 crc kubenswrapper[4933]: W0122 07:30:16.051937 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod547965cc_5259_4daa_b4f0_96a52472e48f.slice/crio-b91e2aa06407c47c0460685986fec6901bc663b9039de0c51050f9c1e6069dbe WatchSource:0}: Error finding container b91e2aa06407c47c0460685986fec6901bc663b9039de0c51050f9c1e6069dbe: Status 404 returned error can't find the container with id b91e2aa06407c47c0460685986fec6901bc663b9039de0c51050f9c1e6069dbe Jan 22 07:30:16 crc kubenswrapper[4933]: W0122 07:30:16.140105 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod24ee8df1_f55e_4d06_8642_c4ac760a512c.slice/crio-78e136fe08bd9c6338df631b62b69e539ba9e1fca461709268293b0d5a80796c WatchSource:0}: Error finding container 78e136fe08bd9c6338df631b62b69e539ba9e1fca461709268293b0d5a80796c: Status 404 returned error can't find the container with id 78e136fe08bd9c6338df631b62b69e539ba9e1fca461709268293b0d5a80796c Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.142471 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-5d6bcfd56f-7cl29"] Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.256387 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-577d6f656f-2c5lp" event={"ID":"657490c7-ddc0-439c-bf19-ebffbfc535bb","Type":"ContainerStarted","Data":"73aa03add1f20e9e74e1bb54df41e08221a764ede4eae844ee1a4e2798e9cf64"} Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.258152 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5d6bcfd56f-7cl29" event={"ID":"24ee8df1-f55e-4d06-8642-c4ac760a512c","Type":"ContainerStarted","Data":"78e136fe08bd9c6338df631b62b69e539ba9e1fca461709268293b0d5a80796c"} Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.260196 4933 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7598674657-gtnxh" event={"ID":"547965cc-5259-4daa-b4f0-96a52472e48f","Type":"ContainerStarted","Data":"b91e2aa06407c47c0460685986fec6901bc663b9039de0c51050f9c1e6069dbe"} Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.630298 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-5496d89d78-ntrvh"] Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.630502 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-api-5496d89d78-ntrvh" podUID="a95daff6-8bb9-4963-bfdd-52c7b8050f24" containerName="heat-api" containerID="cri-o://38fcbd10171679c896a7fa0d0d3a6d8e5735eb4df02c387f4a819a63671552bd" gracePeriod=60 Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.645505 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-6f9f966dff-f4v2x"] Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.645703 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-cfnapi-6f9f966dff-f4v2x" podUID="1a40190a-d7c0-42cf-86b0-10eb2ff37d24" containerName="heat-cfnapi" containerID="cri-o://ca56d3a356b6d37b7a37ea0a5c70709948d480bc04088e6b6676728ae701c298" gracePeriod=60 Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.697912 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-5649c5cc46-vf4xq"] Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.699742 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-5649c5cc46-vf4xq" Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.701789 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-public-svc" Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.706546 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-76899c7dc6-c4kht"] Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.707060 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-internal-svc" Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.709132 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-76899c7dc6-c4kht" Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.720685 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-public-svc" Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.720923 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-internal-svc" Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.769106 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-76899c7dc6-c4kht"] Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.797158 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-5649c5cc46-vf4xq"] Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.831021 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8524beec-3d96-43ec-9a41-0ce2a961831a-combined-ca-bundle\") pod \"heat-cfnapi-5649c5cc46-vf4xq\" (UID: \"8524beec-3d96-43ec-9a41-0ce2a961831a\") " pod="openstack/heat-cfnapi-5649c5cc46-vf4xq" Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.831160 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0ae1c552-787d-4702-bbe7-2019f8f35738-public-tls-certs\") pod \"heat-api-76899c7dc6-c4kht\" (UID: \"0ae1c552-787d-4702-bbe7-2019f8f35738\") " pod="openstack/heat-api-76899c7dc6-c4kht" Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.831196 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8524beec-3d96-43ec-9a41-0ce2a961831a-internal-tls-certs\") pod \"heat-cfnapi-5649c5cc46-vf4xq\" (UID: \"8524beec-3d96-43ec-9a41-0ce2a961831a\") " pod="openstack/heat-cfnapi-5649c5cc46-vf4xq" Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.831266 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dvg9t\" (UniqueName: \"kubernetes.io/projected/0ae1c552-787d-4702-bbe7-2019f8f35738-kube-api-access-dvg9t\") pod \"heat-api-76899c7dc6-c4kht\" (UID: \"0ae1c552-787d-4702-bbe7-2019f8f35738\") " pod="openstack/heat-api-76899c7dc6-c4kht" Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.831335 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ae1c552-787d-4702-bbe7-2019f8f35738-combined-ca-bundle\") pod \"heat-api-76899c7dc6-c4kht\" (UID: \"0ae1c552-787d-4702-bbe7-2019f8f35738\") " pod="openstack/heat-api-76899c7dc6-c4kht" Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.831396 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8524beec-3d96-43ec-9a41-0ce2a961831a-public-tls-certs\") pod \"heat-cfnapi-5649c5cc46-vf4xq\" (UID: \"8524beec-3d96-43ec-9a41-0ce2a961831a\") " pod="openstack/heat-cfnapi-5649c5cc46-vf4xq" Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.831505 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8524beec-3d96-43ec-9a41-0ce2a961831a-config-data\") pod \"heat-cfnapi-5649c5cc46-vf4xq\" (UID: \"8524beec-3d96-43ec-9a41-0ce2a961831a\") " 
pod="openstack/heat-cfnapi-5649c5cc46-vf4xq" Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.831618 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8524beec-3d96-43ec-9a41-0ce2a961831a-config-data-custom\") pod \"heat-cfnapi-5649c5cc46-vf4xq\" (UID: \"8524beec-3d96-43ec-9a41-0ce2a961831a\") " pod="openstack/heat-cfnapi-5649c5cc46-vf4xq" Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.831644 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0ae1c552-787d-4702-bbe7-2019f8f35738-internal-tls-certs\") pod \"heat-api-76899c7dc6-c4kht\" (UID: \"0ae1c552-787d-4702-bbe7-2019f8f35738\") " pod="openstack/heat-api-76899c7dc6-c4kht" Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.831743 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x7qf8\" (UniqueName: \"kubernetes.io/projected/8524beec-3d96-43ec-9a41-0ce2a961831a-kube-api-access-x7qf8\") pod \"heat-cfnapi-5649c5cc46-vf4xq\" (UID: \"8524beec-3d96-43ec-9a41-0ce2a961831a\") " pod="openstack/heat-cfnapi-5649c5cc46-vf4xq" Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.831804 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0ae1c552-787d-4702-bbe7-2019f8f35738-config-data-custom\") pod \"heat-api-76899c7dc6-c4kht\" (UID: \"0ae1c552-787d-4702-bbe7-2019f8f35738\") " pod="openstack/heat-api-76899c7dc6-c4kht" Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.831818 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ae1c552-787d-4702-bbe7-2019f8f35738-config-data\") pod \"heat-api-76899c7dc6-c4kht\" (UID: \"0ae1c552-787d-4702-bbe7-2019f8f35738\") " pod="openstack/heat-api-76899c7dc6-c4kht" Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.933672 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8524beec-3d96-43ec-9a41-0ce2a961831a-config-data-custom\") pod \"heat-cfnapi-5649c5cc46-vf4xq\" (UID: \"8524beec-3d96-43ec-9a41-0ce2a961831a\") " pod="openstack/heat-cfnapi-5649c5cc46-vf4xq" Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.933721 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0ae1c552-787d-4702-bbe7-2019f8f35738-internal-tls-certs\") pod \"heat-api-76899c7dc6-c4kht\" (UID: \"0ae1c552-787d-4702-bbe7-2019f8f35738\") " pod="openstack/heat-api-76899c7dc6-c4kht" Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.933772 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x7qf8\" (UniqueName: \"kubernetes.io/projected/8524beec-3d96-43ec-9a41-0ce2a961831a-kube-api-access-x7qf8\") pod \"heat-cfnapi-5649c5cc46-vf4xq\" (UID: \"8524beec-3d96-43ec-9a41-0ce2a961831a\") " pod="openstack/heat-cfnapi-5649c5cc46-vf4xq" Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.933813 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0ae1c552-787d-4702-bbe7-2019f8f35738-config-data-custom\") pod 
\"heat-api-76899c7dc6-c4kht\" (UID: \"0ae1c552-787d-4702-bbe7-2019f8f35738\") " pod="openstack/heat-api-76899c7dc6-c4kht" Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.933830 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ae1c552-787d-4702-bbe7-2019f8f35738-config-data\") pod \"heat-api-76899c7dc6-c4kht\" (UID: \"0ae1c552-787d-4702-bbe7-2019f8f35738\") " pod="openstack/heat-api-76899c7dc6-c4kht" Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.933881 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8524beec-3d96-43ec-9a41-0ce2a961831a-combined-ca-bundle\") pod \"heat-cfnapi-5649c5cc46-vf4xq\" (UID: \"8524beec-3d96-43ec-9a41-0ce2a961831a\") " pod="openstack/heat-cfnapi-5649c5cc46-vf4xq" Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.933899 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0ae1c552-787d-4702-bbe7-2019f8f35738-public-tls-certs\") pod \"heat-api-76899c7dc6-c4kht\" (UID: \"0ae1c552-787d-4702-bbe7-2019f8f35738\") " pod="openstack/heat-api-76899c7dc6-c4kht" Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.933916 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8524beec-3d96-43ec-9a41-0ce2a961831a-internal-tls-certs\") pod \"heat-cfnapi-5649c5cc46-vf4xq\" (UID: \"8524beec-3d96-43ec-9a41-0ce2a961831a\") " pod="openstack/heat-cfnapi-5649c5cc46-vf4xq" Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.933972 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dvg9t\" (UniqueName: \"kubernetes.io/projected/0ae1c552-787d-4702-bbe7-2019f8f35738-kube-api-access-dvg9t\") pod \"heat-api-76899c7dc6-c4kht\" (UID: \"0ae1c552-787d-4702-bbe7-2019f8f35738\") " pod="openstack/heat-api-76899c7dc6-c4kht" Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.933988 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ae1c552-787d-4702-bbe7-2019f8f35738-combined-ca-bundle\") pod \"heat-api-76899c7dc6-c4kht\" (UID: \"0ae1c552-787d-4702-bbe7-2019f8f35738\") " pod="openstack/heat-api-76899c7dc6-c4kht" Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.934014 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8524beec-3d96-43ec-9a41-0ce2a961831a-public-tls-certs\") pod \"heat-cfnapi-5649c5cc46-vf4xq\" (UID: \"8524beec-3d96-43ec-9a41-0ce2a961831a\") " pod="openstack/heat-cfnapi-5649c5cc46-vf4xq" Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.934062 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8524beec-3d96-43ec-9a41-0ce2a961831a-config-data\") pod \"heat-cfnapi-5649c5cc46-vf4xq\" (UID: \"8524beec-3d96-43ec-9a41-0ce2a961831a\") " pod="openstack/heat-cfnapi-5649c5cc46-vf4xq" Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.951660 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0ae1c552-787d-4702-bbe7-2019f8f35738-config-data-custom\") pod \"heat-api-76899c7dc6-c4kht\" (UID: 
\"0ae1c552-787d-4702-bbe7-2019f8f35738\") " pod="openstack/heat-api-76899c7dc6-c4kht" Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.953271 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8524beec-3d96-43ec-9a41-0ce2a961831a-config-data\") pod \"heat-cfnapi-5649c5cc46-vf4xq\" (UID: \"8524beec-3d96-43ec-9a41-0ce2a961831a\") " pod="openstack/heat-cfnapi-5649c5cc46-vf4xq" Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.954690 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0ae1c552-787d-4702-bbe7-2019f8f35738-internal-tls-certs\") pod \"heat-api-76899c7dc6-c4kht\" (UID: \"0ae1c552-787d-4702-bbe7-2019f8f35738\") " pod="openstack/heat-api-76899c7dc6-c4kht" Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.958288 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8524beec-3d96-43ec-9a41-0ce2a961831a-config-data-custom\") pod \"heat-cfnapi-5649c5cc46-vf4xq\" (UID: \"8524beec-3d96-43ec-9a41-0ce2a961831a\") " pod="openstack/heat-cfnapi-5649c5cc46-vf4xq" Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.958937 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8524beec-3d96-43ec-9a41-0ce2a961831a-internal-tls-certs\") pod \"heat-cfnapi-5649c5cc46-vf4xq\" (UID: \"8524beec-3d96-43ec-9a41-0ce2a961831a\") " pod="openstack/heat-cfnapi-5649c5cc46-vf4xq" Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.959967 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ae1c552-787d-4702-bbe7-2019f8f35738-combined-ca-bundle\") pod \"heat-api-76899c7dc6-c4kht\" (UID: \"0ae1c552-787d-4702-bbe7-2019f8f35738\") " pod="openstack/heat-api-76899c7dc6-c4kht" Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.960921 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dvg9t\" (UniqueName: \"kubernetes.io/projected/0ae1c552-787d-4702-bbe7-2019f8f35738-kube-api-access-dvg9t\") pod \"heat-api-76899c7dc6-c4kht\" (UID: \"0ae1c552-787d-4702-bbe7-2019f8f35738\") " pod="openstack/heat-api-76899c7dc6-c4kht" Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.965015 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x7qf8\" (UniqueName: \"kubernetes.io/projected/8524beec-3d96-43ec-9a41-0ce2a961831a-kube-api-access-x7qf8\") pod \"heat-cfnapi-5649c5cc46-vf4xq\" (UID: \"8524beec-3d96-43ec-9a41-0ce2a961831a\") " pod="openstack/heat-cfnapi-5649c5cc46-vf4xq" Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.966394 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8524beec-3d96-43ec-9a41-0ce2a961831a-combined-ca-bundle\") pod \"heat-cfnapi-5649c5cc46-vf4xq\" (UID: \"8524beec-3d96-43ec-9a41-0ce2a961831a\") " pod="openstack/heat-cfnapi-5649c5cc46-vf4xq" Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.968115 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0ae1c552-787d-4702-bbe7-2019f8f35738-public-tls-certs\") pod \"heat-api-76899c7dc6-c4kht\" (UID: \"0ae1c552-787d-4702-bbe7-2019f8f35738\") " pod="openstack/heat-api-76899c7dc6-c4kht" Jan 22 07:30:16 
crc kubenswrapper[4933]: I0122 07:30:16.968772 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8524beec-3d96-43ec-9a41-0ce2a961831a-public-tls-certs\") pod \"heat-cfnapi-5649c5cc46-vf4xq\" (UID: \"8524beec-3d96-43ec-9a41-0ce2a961831a\") " pod="openstack/heat-cfnapi-5649c5cc46-vf4xq" Jan 22 07:30:16 crc kubenswrapper[4933]: I0122 07:30:16.969204 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ae1c552-787d-4702-bbe7-2019f8f35738-config-data\") pod \"heat-api-76899c7dc6-c4kht\" (UID: \"0ae1c552-787d-4702-bbe7-2019f8f35738\") " pod="openstack/heat-api-76899c7dc6-c4kht" Jan 22 07:30:17 crc kubenswrapper[4933]: I0122 07:30:17.033228 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-5649c5cc46-vf4xq" Jan 22 07:30:17 crc kubenswrapper[4933]: I0122 07:30:17.049477 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-76899c7dc6-c4kht" Jan 22 07:30:17 crc kubenswrapper[4933]: I0122 07:30:17.361250 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-577d6f656f-2c5lp" event={"ID":"657490c7-ddc0-439c-bf19-ebffbfc535bb","Type":"ContainerStarted","Data":"86258ecd68c51b7ad9c8fc1d67c59ab7cfd15b8207fab50827044c9bc8014a07"} Jan 22 07:30:17 crc kubenswrapper[4933]: I0122 07:30:17.361613 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-577d6f656f-2c5lp" Jan 22 07:30:17 crc kubenswrapper[4933]: I0122 07:30:17.367849 4933 generic.go:334] "Generic (PLEG): container finished" podID="24ee8df1-f55e-4d06-8642-c4ac760a512c" containerID="fd2a32bf58862bc3d31b499855568490b97d38764458c0002d3650824e783edb" exitCode=1 Jan 22 07:30:17 crc kubenswrapper[4933]: I0122 07:30:17.368053 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5d6bcfd56f-7cl29" event={"ID":"24ee8df1-f55e-4d06-8642-c4ac760a512c","Type":"ContainerDied","Data":"fd2a32bf58862bc3d31b499855568490b97d38764458c0002d3650824e783edb"} Jan 22 07:30:17 crc kubenswrapper[4933]: I0122 07:30:17.368428 4933 scope.go:117] "RemoveContainer" containerID="fd2a32bf58862bc3d31b499855568490b97d38764458c0002d3650824e783edb" Jan 22 07:30:17 crc kubenswrapper[4933]: I0122 07:30:17.392195 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-577d6f656f-2c5lp" podStartSLOduration=2.3921791040000002 podStartE2EDuration="2.392179104s" podCreationTimestamp="2026-01-22 07:30:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:30:17.384142638 +0000 UTC m=+6265.221267991" watchObservedRunningTime="2026-01-22 07:30:17.392179104 +0000 UTC m=+6265.229304457" Jan 22 07:30:17 crc kubenswrapper[4933]: I0122 07:30:17.400128 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-6f9f966dff-f4v2x" event={"ID":"1a40190a-d7c0-42cf-86b0-10eb2ff37d24","Type":"ContainerDied","Data":"ca56d3a356b6d37b7a37ea0a5c70709948d480bc04088e6b6676728ae701c298"} Jan 22 07:30:17 crc kubenswrapper[4933]: I0122 07:30:17.400904 4933 generic.go:334] "Generic (PLEG): container finished" podID="1a40190a-d7c0-42cf-86b0-10eb2ff37d24" containerID="ca56d3a356b6d37b7a37ea0a5c70709948d480bc04088e6b6676728ae701c298" exitCode=0 Jan 22 07:30:17 crc kubenswrapper[4933]: I0122 07:30:17.424948 4933 
generic.go:334] "Generic (PLEG): container finished" podID="547965cc-5259-4daa-b4f0-96a52472e48f" containerID="691d85cac1e188d688e8fcacc320c0e7f9f56c20877aaa617a155e8084278802" exitCode=1 Jan 22 07:30:17 crc kubenswrapper[4933]: I0122 07:30:17.424994 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7598674657-gtnxh" event={"ID":"547965cc-5259-4daa-b4f0-96a52472e48f","Type":"ContainerDied","Data":"691d85cac1e188d688e8fcacc320c0e7f9f56c20877aaa617a155e8084278802"} Jan 22 07:30:17 crc kubenswrapper[4933]: I0122 07:30:17.425645 4933 scope.go:117] "RemoveContainer" containerID="691d85cac1e188d688e8fcacc320c0e7f9f56c20877aaa617a155e8084278802" Jan 22 07:30:17 crc kubenswrapper[4933]: I0122 07:30:17.685413 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-6f9f966dff-f4v2x" Jan 22 07:30:17 crc kubenswrapper[4933]: I0122 07:30:17.706445 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-5649c5cc46-vf4xq"] Jan 22 07:30:17 crc kubenswrapper[4933]: W0122 07:30:17.736644 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8524beec_3d96_43ec_9a41_0ce2a961831a.slice/crio-b4ee4d71a277760d754986599be04c86aceee19d7569a8f97e03bb4a45442fea WatchSource:0}: Error finding container b4ee4d71a277760d754986599be04c86aceee19d7569a8f97e03bb4a45442fea: Status 404 returned error can't find the container with id b4ee4d71a277760d754986599be04c86aceee19d7569a8f97e03bb4a45442fea Jan 22 07:30:17 crc kubenswrapper[4933]: I0122 07:30:17.764085 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9j2zh\" (UniqueName: \"kubernetes.io/projected/1a40190a-d7c0-42cf-86b0-10eb2ff37d24-kube-api-access-9j2zh\") pod \"1a40190a-d7c0-42cf-86b0-10eb2ff37d24\" (UID: \"1a40190a-d7c0-42cf-86b0-10eb2ff37d24\") " Jan 22 07:30:17 crc kubenswrapper[4933]: I0122 07:30:17.764133 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a40190a-d7c0-42cf-86b0-10eb2ff37d24-config-data\") pod \"1a40190a-d7c0-42cf-86b0-10eb2ff37d24\" (UID: \"1a40190a-d7c0-42cf-86b0-10eb2ff37d24\") " Jan 22 07:30:17 crc kubenswrapper[4933]: I0122 07:30:17.764154 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1a40190a-d7c0-42cf-86b0-10eb2ff37d24-config-data-custom\") pod \"1a40190a-d7c0-42cf-86b0-10eb2ff37d24\" (UID: \"1a40190a-d7c0-42cf-86b0-10eb2ff37d24\") " Jan 22 07:30:17 crc kubenswrapper[4933]: I0122 07:30:17.764207 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a40190a-d7c0-42cf-86b0-10eb2ff37d24-combined-ca-bundle\") pod \"1a40190a-d7c0-42cf-86b0-10eb2ff37d24\" (UID: \"1a40190a-d7c0-42cf-86b0-10eb2ff37d24\") " Jan 22 07:30:17 crc kubenswrapper[4933]: I0122 07:30:17.779167 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a40190a-d7c0-42cf-86b0-10eb2ff37d24-kube-api-access-9j2zh" (OuterVolumeSpecName: "kube-api-access-9j2zh") pod "1a40190a-d7c0-42cf-86b0-10eb2ff37d24" (UID: "1a40190a-d7c0-42cf-86b0-10eb2ff37d24"). InnerVolumeSpecName "kube-api-access-9j2zh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:30:17 crc kubenswrapper[4933]: I0122 07:30:17.781572 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a40190a-d7c0-42cf-86b0-10eb2ff37d24-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "1a40190a-d7c0-42cf-86b0-10eb2ff37d24" (UID: "1a40190a-d7c0-42cf-86b0-10eb2ff37d24"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:30:17 crc kubenswrapper[4933]: I0122 07:30:17.827121 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-76899c7dc6-c4kht"] Jan 22 07:30:17 crc kubenswrapper[4933]: I0122 07:30:17.867402 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9j2zh\" (UniqueName: \"kubernetes.io/projected/1a40190a-d7c0-42cf-86b0-10eb2ff37d24-kube-api-access-9j2zh\") on node \"crc\" DevicePath \"\"" Jan 22 07:30:17 crc kubenswrapper[4933]: I0122 07:30:17.867445 4933 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1a40190a-d7c0-42cf-86b0-10eb2ff37d24-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:17.993021 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a40190a-d7c0-42cf-86b0-10eb2ff37d24-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1a40190a-d7c0-42cf-86b0-10eb2ff37d24" (UID: "1a40190a-d7c0-42cf-86b0-10eb2ff37d24"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:18.052306 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a40190a-d7c0-42cf-86b0-10eb2ff37d24-config-data" (OuterVolumeSpecName: "config-data") pod "1a40190a-d7c0-42cf-86b0-10eb2ff37d24" (UID: "1a40190a-d7c0-42cf-86b0-10eb2ff37d24"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:18.072087 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a40190a-d7c0-42cf-86b0-10eb2ff37d24-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:18.072115 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a40190a-d7c0-42cf-86b0-10eb2ff37d24-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:18.134737 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-5496d89d78-ntrvh" Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:18.275534 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a95daff6-8bb9-4963-bfdd-52c7b8050f24-config-data-custom\") pod \"a95daff6-8bb9-4963-bfdd-52c7b8050f24\" (UID: \"a95daff6-8bb9-4963-bfdd-52c7b8050f24\") " Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:18.275687 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fln4n\" (UniqueName: \"kubernetes.io/projected/a95daff6-8bb9-4963-bfdd-52c7b8050f24-kube-api-access-fln4n\") pod \"a95daff6-8bb9-4963-bfdd-52c7b8050f24\" (UID: \"a95daff6-8bb9-4963-bfdd-52c7b8050f24\") " Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:18.275824 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a95daff6-8bb9-4963-bfdd-52c7b8050f24-config-data\") pod \"a95daff6-8bb9-4963-bfdd-52c7b8050f24\" (UID: \"a95daff6-8bb9-4963-bfdd-52c7b8050f24\") " Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:18.276065 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a95daff6-8bb9-4963-bfdd-52c7b8050f24-combined-ca-bundle\") pod \"a95daff6-8bb9-4963-bfdd-52c7b8050f24\" (UID: \"a95daff6-8bb9-4963-bfdd-52c7b8050f24\") " Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:18.282482 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a95daff6-8bb9-4963-bfdd-52c7b8050f24-kube-api-access-fln4n" (OuterVolumeSpecName: "kube-api-access-fln4n") pod "a95daff6-8bb9-4963-bfdd-52c7b8050f24" (UID: "a95daff6-8bb9-4963-bfdd-52c7b8050f24"). InnerVolumeSpecName "kube-api-access-fln4n". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:18.283936 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a95daff6-8bb9-4963-bfdd-52c7b8050f24-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "a95daff6-8bb9-4963-bfdd-52c7b8050f24" (UID: "a95daff6-8bb9-4963-bfdd-52c7b8050f24"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:18.317772 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a95daff6-8bb9-4963-bfdd-52c7b8050f24-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a95daff6-8bb9-4963-bfdd-52c7b8050f24" (UID: "a95daff6-8bb9-4963-bfdd-52c7b8050f24"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:18.352248 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a95daff6-8bb9-4963-bfdd-52c7b8050f24-config-data" (OuterVolumeSpecName: "config-data") pod "a95daff6-8bb9-4963-bfdd-52c7b8050f24" (UID: "a95daff6-8bb9-4963-bfdd-52c7b8050f24"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:18.378868 4933 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a95daff6-8bb9-4963-bfdd-52c7b8050f24-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:18.378897 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fln4n\" (UniqueName: \"kubernetes.io/projected/a95daff6-8bb9-4963-bfdd-52c7b8050f24-kube-api-access-fln4n\") on node \"crc\" DevicePath \"\"" Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:18.378907 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a95daff6-8bb9-4963-bfdd-52c7b8050f24-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:18.378915 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a95daff6-8bb9-4963-bfdd-52c7b8050f24-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:18.449212 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-76899c7dc6-c4kht" event={"ID":"0ae1c552-787d-4702-bbe7-2019f8f35738","Type":"ContainerStarted","Data":"52eb9602d8bb6be469715d1721be15a4b8ccc98a1d4c926029a85a9cad09736f"} Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:18.453312 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7598674657-gtnxh" event={"ID":"547965cc-5259-4daa-b4f0-96a52472e48f","Type":"ContainerStarted","Data":"3988486d694467a8893f40c6317266afc708893ce85786c97cff1d3b494887ab"} Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:18.453389 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-7598674657-gtnxh" Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:18.456127 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-5649c5cc46-vf4xq" event={"ID":"8524beec-3d96-43ec-9a41-0ce2a961831a","Type":"ContainerStarted","Data":"28e125f07f18d3a21911c776ce12ade28b98ea2a8fec297aa32d98b829cb40ed"} Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:18.456181 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-5649c5cc46-vf4xq" event={"ID":"8524beec-3d96-43ec-9a41-0ce2a961831a","Type":"ContainerStarted","Data":"b4ee4d71a277760d754986599be04c86aceee19d7569a8f97e03bb4a45442fea"} Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:18.456709 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-5649c5cc46-vf4xq" Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:18.491995 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-7598674657-gtnxh" podStartSLOduration=3.491972594 podStartE2EDuration="3.491972594s" podCreationTimestamp="2026-01-22 07:30:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:30:18.47705336 +0000 UTC m=+6266.314178713" watchObservedRunningTime="2026-01-22 07:30:18.491972594 +0000 UTC m=+6266.329097957" Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:18.493324 4933 generic.go:334] "Generic (PLEG): container finished" podID="a95daff6-8bb9-4963-bfdd-52c7b8050f24" 
containerID="38fcbd10171679c896a7fa0d0d3a6d8e5735eb4df02c387f4a819a63671552bd" exitCode=0 Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:18.493415 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-5496d89d78-ntrvh" Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:18.505363 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-6f9f966dff-f4v2x" Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:18.509357 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5496d89d78-ntrvh" event={"ID":"a95daff6-8bb9-4963-bfdd-52c7b8050f24","Type":"ContainerDied","Data":"38fcbd10171679c896a7fa0d0d3a6d8e5735eb4df02c387f4a819a63671552bd"} Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:18.509389 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-5d6bcfd56f-7cl29" Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:18.509402 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5496d89d78-ntrvh" event={"ID":"a95daff6-8bb9-4963-bfdd-52c7b8050f24","Type":"ContainerDied","Data":"dd7e545d20ab121b7f0fa7286da11547198b3ba0c8770a779178315acdf826d0"} Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:18.509413 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5d6bcfd56f-7cl29" event={"ID":"24ee8df1-f55e-4d06-8642-c4ac760a512c","Type":"ContainerStarted","Data":"00fe2a03f26c035ad2e95bc1172eb1b099c89a8be464c13a3462dbbb6e6fdb6d"} Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:18.509422 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-6f9f966dff-f4v2x" event={"ID":"1a40190a-d7c0-42cf-86b0-10eb2ff37d24","Type":"ContainerDied","Data":"1286c8f12b5e1586422132332cd4affeb79e948e476669573a28b5edfbc1f080"} Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:18.510829 4933 scope.go:117] "RemoveContainer" containerID="38fcbd10171679c896a7fa0d0d3a6d8e5735eb4df02c387f4a819a63671552bd" Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:18.522357 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-5649c5cc46-vf4xq" podStartSLOduration=2.522339024 podStartE2EDuration="2.522339024s" podCreationTimestamp="2026-01-22 07:30:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:30:18.499633901 +0000 UTC m=+6266.336759254" watchObservedRunningTime="2026-01-22 07:30:18.522339024 +0000 UTC m=+6266.359464377" Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:18.543390 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-5d6bcfd56f-7cl29" podStartSLOduration=3.543372667 podStartE2EDuration="3.543372667s" podCreationTimestamp="2026-01-22 07:30:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:30:18.524053956 +0000 UTC m=+6266.361179319" watchObservedRunningTime="2026-01-22 07:30:18.543372667 +0000 UTC m=+6266.380498020" Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:18.609550 4933 scope.go:117] "RemoveContainer" containerID="38fcbd10171679c896a7fa0d0d3a6d8e5735eb4df02c387f4a819a63671552bd" Jan 22 07:30:19 crc kubenswrapper[4933]: E0122 07:30:18.610185 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not 
find container \"38fcbd10171679c896a7fa0d0d3a6d8e5735eb4df02c387f4a819a63671552bd\": container with ID starting with 38fcbd10171679c896a7fa0d0d3a6d8e5735eb4df02c387f4a819a63671552bd not found: ID does not exist" containerID="38fcbd10171679c896a7fa0d0d3a6d8e5735eb4df02c387f4a819a63671552bd" Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:18.610210 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38fcbd10171679c896a7fa0d0d3a6d8e5735eb4df02c387f4a819a63671552bd"} err="failed to get container status \"38fcbd10171679c896a7fa0d0d3a6d8e5735eb4df02c387f4a819a63671552bd\": rpc error: code = NotFound desc = could not find container \"38fcbd10171679c896a7fa0d0d3a6d8e5735eb4df02c387f4a819a63671552bd\": container with ID starting with 38fcbd10171679c896a7fa0d0d3a6d8e5735eb4df02c387f4a819a63671552bd not found: ID does not exist" Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:18.610228 4933 scope.go:117] "RemoveContainer" containerID="ca56d3a356b6d37b7a37ea0a5c70709948d480bc04088e6b6676728ae701c298" Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:18.615853 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-6f9f966dff-f4v2x"] Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:18.629030 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-6f9f966dff-f4v2x"] Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:18.644448 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-5496d89d78-ntrvh"] Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:18.656179 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-5496d89d78-ntrvh"] Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:19.515167 4933 generic.go:334] "Generic (PLEG): container finished" podID="24ee8df1-f55e-4d06-8642-c4ac760a512c" containerID="00fe2a03f26c035ad2e95bc1172eb1b099c89a8be464c13a3462dbbb6e6fdb6d" exitCode=1 Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:19.515270 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5d6bcfd56f-7cl29" event={"ID":"24ee8df1-f55e-4d06-8642-c4ac760a512c","Type":"ContainerDied","Data":"00fe2a03f26c035ad2e95bc1172eb1b099c89a8be464c13a3462dbbb6e6fdb6d"} Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:19.515329 4933 scope.go:117] "RemoveContainer" containerID="fd2a32bf58862bc3d31b499855568490b97d38764458c0002d3650824e783edb" Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:19.515900 4933 scope.go:117] "RemoveContainer" containerID="00fe2a03f26c035ad2e95bc1172eb1b099c89a8be464c13a3462dbbb6e6fdb6d" Jan 22 07:30:19 crc kubenswrapper[4933]: E0122 07:30:19.516153 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-5d6bcfd56f-7cl29_openstack(24ee8df1-f55e-4d06-8642-c4ac760a512c)\"" pod="openstack/heat-api-5d6bcfd56f-7cl29" podUID="24ee8df1-f55e-4d06-8642-c4ac760a512c" Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:19.524953 4933 generic.go:334] "Generic (PLEG): container finished" podID="547965cc-5259-4daa-b4f0-96a52472e48f" containerID="3988486d694467a8893f40c6317266afc708893ce85786c97cff1d3b494887ab" exitCode=1 Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:19.524999 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7598674657-gtnxh" 
event={"ID":"547965cc-5259-4daa-b4f0-96a52472e48f","Type":"ContainerDied","Data":"3988486d694467a8893f40c6317266afc708893ce85786c97cff1d3b494887ab"} Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:19.525694 4933 scope.go:117] "RemoveContainer" containerID="3988486d694467a8893f40c6317266afc708893ce85786c97cff1d3b494887ab" Jan 22 07:30:19 crc kubenswrapper[4933]: E0122 07:30:19.525995 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-7598674657-gtnxh_openstack(547965cc-5259-4daa-b4f0-96a52472e48f)\"" pod="openstack/heat-cfnapi-7598674657-gtnxh" podUID="547965cc-5259-4daa-b4f0-96a52472e48f" Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:19.526634 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-76899c7dc6-c4kht" event={"ID":"0ae1c552-787d-4702-bbe7-2019f8f35738","Type":"ContainerStarted","Data":"224c24dee3aa44c5e441c37a9a8e766f3e98bd47275f01bf8a927f3380d1e003"} Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:19.526781 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-76899c7dc6-c4kht" Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:19.559704 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-76899c7dc6-c4kht" podStartSLOduration=3.559682213 podStartE2EDuration="3.559682213s" podCreationTimestamp="2026-01-22 07:30:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:30:19.55425988 +0000 UTC m=+6267.391385233" watchObservedRunningTime="2026-01-22 07:30:19.559682213 +0000 UTC m=+6267.396807566" Jan 22 07:30:19 crc kubenswrapper[4933]: I0122 07:30:19.650910 4933 scope.go:117] "RemoveContainer" containerID="691d85cac1e188d688e8fcacc320c0e7f9f56c20877aaa617a155e8084278802" Jan 22 07:30:20 crc kubenswrapper[4933]: I0122 07:30:20.446436 4933 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-cfnapi-7598674657-gtnxh" Jan 22 07:30:20 crc kubenswrapper[4933]: I0122 07:30:20.504364 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1a40190a-d7c0-42cf-86b0-10eb2ff37d24" path="/var/lib/kubelet/pods/1a40190a-d7c0-42cf-86b0-10eb2ff37d24/volumes" Jan 22 07:30:20 crc kubenswrapper[4933]: I0122 07:30:20.504921 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a95daff6-8bb9-4963-bfdd-52c7b8050f24" path="/var/lib/kubelet/pods/a95daff6-8bb9-4963-bfdd-52c7b8050f24/volumes" Jan 22 07:30:20 crc kubenswrapper[4933]: I0122 07:30:20.505453 4933 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-api-5d6bcfd56f-7cl29" Jan 22 07:30:20 crc kubenswrapper[4933]: I0122 07:30:20.540807 4933 scope.go:117] "RemoveContainer" containerID="00fe2a03f26c035ad2e95bc1172eb1b099c89a8be464c13a3462dbbb6e6fdb6d" Jan 22 07:30:20 crc kubenswrapper[4933]: E0122 07:30:20.541202 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-5d6bcfd56f-7cl29_openstack(24ee8df1-f55e-4d06-8642-c4ac760a512c)\"" pod="openstack/heat-api-5d6bcfd56f-7cl29" podUID="24ee8df1-f55e-4d06-8642-c4ac760a512c" Jan 22 07:30:20 crc kubenswrapper[4933]: I0122 07:30:20.542464 4933 scope.go:117] "RemoveContainer" 
containerID="3988486d694467a8893f40c6317266afc708893ce85786c97cff1d3b494887ab" Jan 22 07:30:20 crc kubenswrapper[4933]: E0122 07:30:20.542683 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-7598674657-gtnxh_openstack(547965cc-5259-4daa-b4f0-96a52472e48f)\"" pod="openstack/heat-cfnapi-7598674657-gtnxh" podUID="547965cc-5259-4daa-b4f0-96a52472e48f" Jan 22 07:30:21 crc kubenswrapper[4933]: I0122 07:30:21.551953 4933 scope.go:117] "RemoveContainer" containerID="00fe2a03f26c035ad2e95bc1172eb1b099c89a8be464c13a3462dbbb6e6fdb6d" Jan 22 07:30:21 crc kubenswrapper[4933]: I0122 07:30:21.552475 4933 scope.go:117] "RemoveContainer" containerID="3988486d694467a8893f40c6317266afc708893ce85786c97cff1d3b494887ab" Jan 22 07:30:21 crc kubenswrapper[4933]: E0122 07:30:21.552827 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-7598674657-gtnxh_openstack(547965cc-5259-4daa-b4f0-96a52472e48f)\"" pod="openstack/heat-cfnapi-7598674657-gtnxh" podUID="547965cc-5259-4daa-b4f0-96a52472e48f" Jan 22 07:30:21 crc kubenswrapper[4933]: E0122 07:30:21.552843 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-5d6bcfd56f-7cl29_openstack(24ee8df1-f55e-4d06-8642-c4ac760a512c)\"" pod="openstack/heat-api-5d6bcfd56f-7cl29" podUID="24ee8df1-f55e-4d06-8642-c4ac760a512c" Jan 22 07:30:21 crc kubenswrapper[4933]: I0122 07:30:21.724694 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-7f598858d8-sp7f8" podUID="d5a0747b-cb15-4a5a-bf0d-71de8f9592eb" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.120:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.120:8443: connect: connection refused" Jan 22 07:30:27 crc kubenswrapper[4933]: I0122 07:30:27.491127 4933 scope.go:117] "RemoveContainer" containerID="16fd824342f037f0104eaf6d48b751a02eeec5a84e0c2a32f1a295811edc0336" Jan 22 07:30:27 crc kubenswrapper[4933]: E0122 07:30:27.492064 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:30:28 crc kubenswrapper[4933]: I0122 07:30:28.001338 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-6c58df6f69-mk9r8" Jan 22 07:30:28 crc kubenswrapper[4933]: I0122 07:30:28.370766 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-76899c7dc6-c4kht" Jan 22 07:30:28 crc kubenswrapper[4933]: I0122 07:30:28.412410 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-5649c5cc46-vf4xq" Jan 22 07:30:28 crc kubenswrapper[4933]: I0122 07:30:28.432561 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-5d6bcfd56f-7cl29"] Jan 22 07:30:28 crc kubenswrapper[4933]: I0122 07:30:28.524120 4933 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-7598674657-gtnxh"] Jan 22 07:30:29 crc kubenswrapper[4933]: I0122 07:30:29.016412 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-5d6bcfd56f-7cl29" Jan 22 07:30:29 crc kubenswrapper[4933]: I0122 07:30:29.023430 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7598674657-gtnxh" Jan 22 07:30:29 crc kubenswrapper[4933]: I0122 07:30:29.141740 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24ee8df1-f55e-4d06-8642-c4ac760a512c-config-data\") pod \"24ee8df1-f55e-4d06-8642-c4ac760a512c\" (UID: \"24ee8df1-f55e-4d06-8642-c4ac760a512c\") " Jan 22 07:30:29 crc kubenswrapper[4933]: I0122 07:30:29.141882 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/24ee8df1-f55e-4d06-8642-c4ac760a512c-config-data-custom\") pod \"24ee8df1-f55e-4d06-8642-c4ac760a512c\" (UID: \"24ee8df1-f55e-4d06-8642-c4ac760a512c\") " Jan 22 07:30:29 crc kubenswrapper[4933]: I0122 07:30:29.141915 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/547965cc-5259-4daa-b4f0-96a52472e48f-config-data-custom\") pod \"547965cc-5259-4daa-b4f0-96a52472e48f\" (UID: \"547965cc-5259-4daa-b4f0-96a52472e48f\") " Jan 22 07:30:29 crc kubenswrapper[4933]: I0122 07:30:29.142147 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kdnhp\" (UniqueName: \"kubernetes.io/projected/24ee8df1-f55e-4d06-8642-c4ac760a512c-kube-api-access-kdnhp\") pod \"24ee8df1-f55e-4d06-8642-c4ac760a512c\" (UID: \"24ee8df1-f55e-4d06-8642-c4ac760a512c\") " Jan 22 07:30:29 crc kubenswrapper[4933]: I0122 07:30:29.142249 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-shnd7\" (UniqueName: \"kubernetes.io/projected/547965cc-5259-4daa-b4f0-96a52472e48f-kube-api-access-shnd7\") pod \"547965cc-5259-4daa-b4f0-96a52472e48f\" (UID: \"547965cc-5259-4daa-b4f0-96a52472e48f\") " Jan 22 07:30:29 crc kubenswrapper[4933]: I0122 07:30:29.142309 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/547965cc-5259-4daa-b4f0-96a52472e48f-config-data\") pod \"547965cc-5259-4daa-b4f0-96a52472e48f\" (UID: \"547965cc-5259-4daa-b4f0-96a52472e48f\") " Jan 22 07:30:29 crc kubenswrapper[4933]: I0122 07:30:29.142342 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24ee8df1-f55e-4d06-8642-c4ac760a512c-combined-ca-bundle\") pod \"24ee8df1-f55e-4d06-8642-c4ac760a512c\" (UID: \"24ee8df1-f55e-4d06-8642-c4ac760a512c\") " Jan 22 07:30:29 crc kubenswrapper[4933]: I0122 07:30:29.142379 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/547965cc-5259-4daa-b4f0-96a52472e48f-combined-ca-bundle\") pod \"547965cc-5259-4daa-b4f0-96a52472e48f\" (UID: \"547965cc-5259-4daa-b4f0-96a52472e48f\") " Jan 22 07:30:29 crc kubenswrapper[4933]: I0122 07:30:29.154961 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/projected/24ee8df1-f55e-4d06-8642-c4ac760a512c-kube-api-access-kdnhp" (OuterVolumeSpecName: "kube-api-access-kdnhp") pod "24ee8df1-f55e-4d06-8642-c4ac760a512c" (UID: "24ee8df1-f55e-4d06-8642-c4ac760a512c"). InnerVolumeSpecName "kube-api-access-kdnhp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:30:29 crc kubenswrapper[4933]: I0122 07:30:29.159371 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/547965cc-5259-4daa-b4f0-96a52472e48f-kube-api-access-shnd7" (OuterVolumeSpecName: "kube-api-access-shnd7") pod "547965cc-5259-4daa-b4f0-96a52472e48f" (UID: "547965cc-5259-4daa-b4f0-96a52472e48f"). InnerVolumeSpecName "kube-api-access-shnd7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:30:29 crc kubenswrapper[4933]: I0122 07:30:29.172330 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/547965cc-5259-4daa-b4f0-96a52472e48f-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "547965cc-5259-4daa-b4f0-96a52472e48f" (UID: "547965cc-5259-4daa-b4f0-96a52472e48f"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:30:29 crc kubenswrapper[4933]: I0122 07:30:29.196251 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24ee8df1-f55e-4d06-8642-c4ac760a512c-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "24ee8df1-f55e-4d06-8642-c4ac760a512c" (UID: "24ee8df1-f55e-4d06-8642-c4ac760a512c"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:30:29 crc kubenswrapper[4933]: I0122 07:30:29.247764 4933 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/24ee8df1-f55e-4d06-8642-c4ac760a512c-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 22 07:30:29 crc kubenswrapper[4933]: I0122 07:30:29.247803 4933 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/547965cc-5259-4daa-b4f0-96a52472e48f-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 22 07:30:29 crc kubenswrapper[4933]: I0122 07:30:29.247816 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kdnhp\" (UniqueName: \"kubernetes.io/projected/24ee8df1-f55e-4d06-8642-c4ac760a512c-kube-api-access-kdnhp\") on node \"crc\" DevicePath \"\"" Jan 22 07:30:29 crc kubenswrapper[4933]: I0122 07:30:29.247826 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-shnd7\" (UniqueName: \"kubernetes.io/projected/547965cc-5259-4daa-b4f0-96a52472e48f-kube-api-access-shnd7\") on node \"crc\" DevicePath \"\"" Jan 22 07:30:29 crc kubenswrapper[4933]: I0122 07:30:29.254957 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24ee8df1-f55e-4d06-8642-c4ac760a512c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "24ee8df1-f55e-4d06-8642-c4ac760a512c" (UID: "24ee8df1-f55e-4d06-8642-c4ac760a512c"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:30:29 crc kubenswrapper[4933]: I0122 07:30:29.272465 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/547965cc-5259-4daa-b4f0-96a52472e48f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "547965cc-5259-4daa-b4f0-96a52472e48f" (UID: "547965cc-5259-4daa-b4f0-96a52472e48f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:30:29 crc kubenswrapper[4933]: I0122 07:30:29.293768 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24ee8df1-f55e-4d06-8642-c4ac760a512c-config-data" (OuterVolumeSpecName: "config-data") pod "24ee8df1-f55e-4d06-8642-c4ac760a512c" (UID: "24ee8df1-f55e-4d06-8642-c4ac760a512c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:30:29 crc kubenswrapper[4933]: I0122 07:30:29.294269 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/547965cc-5259-4daa-b4f0-96a52472e48f-config-data" (OuterVolumeSpecName: "config-data") pod "547965cc-5259-4daa-b4f0-96a52472e48f" (UID: "547965cc-5259-4daa-b4f0-96a52472e48f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:30:29 crc kubenswrapper[4933]: I0122 07:30:29.350288 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/547965cc-5259-4daa-b4f0-96a52472e48f-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 07:30:29 crc kubenswrapper[4933]: I0122 07:30:29.350326 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24ee8df1-f55e-4d06-8642-c4ac760a512c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:30:29 crc kubenswrapper[4933]: I0122 07:30:29.350341 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/547965cc-5259-4daa-b4f0-96a52472e48f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:30:29 crc kubenswrapper[4933]: I0122 07:30:29.350350 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24ee8df1-f55e-4d06-8642-c4ac760a512c-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 07:30:29 crc kubenswrapper[4933]: I0122 07:30:29.634218 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5d6bcfd56f-7cl29" event={"ID":"24ee8df1-f55e-4d06-8642-c4ac760a512c","Type":"ContainerDied","Data":"78e136fe08bd9c6338df631b62b69e539ba9e1fca461709268293b0d5a80796c"} Jan 22 07:30:29 crc kubenswrapper[4933]: I0122 07:30:29.635542 4933 scope.go:117] "RemoveContainer" containerID="00fe2a03f26c035ad2e95bc1172eb1b099c89a8be464c13a3462dbbb6e6fdb6d" Jan 22 07:30:29 crc kubenswrapper[4933]: I0122 07:30:29.635838 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-5d6bcfd56f-7cl29" Jan 22 07:30:29 crc kubenswrapper[4933]: I0122 07:30:29.643544 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7598674657-gtnxh" event={"ID":"547965cc-5259-4daa-b4f0-96a52472e48f","Type":"ContainerDied","Data":"b91e2aa06407c47c0460685986fec6901bc663b9039de0c51050f9c1e6069dbe"} Jan 22 07:30:29 crc kubenswrapper[4933]: I0122 07:30:29.643605 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-7598674657-gtnxh" Jan 22 07:30:29 crc kubenswrapper[4933]: I0122 07:30:29.669582 4933 scope.go:117] "RemoveContainer" containerID="3988486d694467a8893f40c6317266afc708893ce85786c97cff1d3b494887ab" Jan 22 07:30:29 crc kubenswrapper[4933]: I0122 07:30:29.692672 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-7598674657-gtnxh"] Jan 22 07:30:29 crc kubenswrapper[4933]: I0122 07:30:29.719924 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-7598674657-gtnxh"] Jan 22 07:30:29 crc kubenswrapper[4933]: I0122 07:30:29.738200 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-5d6bcfd56f-7cl29"] Jan 22 07:30:29 crc kubenswrapper[4933]: I0122 07:30:29.750427 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-5d6bcfd56f-7cl29"] Jan 22 07:30:30 crc kubenswrapper[4933]: I0122 07:30:30.501210 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="24ee8df1-f55e-4d06-8642-c4ac760a512c" path="/var/lib/kubelet/pods/24ee8df1-f55e-4d06-8642-c4ac760a512c/volumes" Jan 22 07:30:30 crc kubenswrapper[4933]: I0122 07:30:30.502160 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="547965cc-5259-4daa-b4f0-96a52472e48f" path="/var/lib/kubelet/pods/547965cc-5259-4daa-b4f0-96a52472e48f/volumes" Jan 22 07:30:31 crc kubenswrapper[4933]: I0122 07:30:31.724352 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-7f598858d8-sp7f8" podUID="d5a0747b-cb15-4a5a-bf0d-71de8f9592eb" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.120:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.120:8443: connect: connection refused" Jan 22 07:30:31 crc kubenswrapper[4933]: I0122 07:30:31.724777 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-7f598858d8-sp7f8" Jan 22 07:30:35 crc kubenswrapper[4933]: I0122 07:30:35.440790 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-577d6f656f-2c5lp" Jan 22 07:30:35 crc kubenswrapper[4933]: I0122 07:30:35.489938 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-6c58df6f69-mk9r8"] Jan 22 07:30:35 crc kubenswrapper[4933]: I0122 07:30:35.490319 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-engine-6c58df6f69-mk9r8" podUID="29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c" containerName="heat-engine" containerID="cri-o://588e27176aa729aee553f8d7df5223a01c3896d81e28650696d64cc1cb0b9a7c" gracePeriod=60 Jan 22 07:30:37 crc kubenswrapper[4933]: E0122 07:30:37.974898 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="588e27176aa729aee553f8d7df5223a01c3896d81e28650696d64cc1cb0b9a7c" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 22 07:30:37 crc kubenswrapper[4933]: E0122 07:30:37.977308 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="588e27176aa729aee553f8d7df5223a01c3896d81e28650696d64cc1cb0b9a7c" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 22 07:30:37 crc kubenswrapper[4933]: E0122 07:30:37.979026 4933 log.go:32] "ExecSync cmd from runtime 
service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="588e27176aa729aee553f8d7df5223a01c3896d81e28650696d64cc1cb0b9a7c" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 22 07:30:37 crc kubenswrapper[4933]: E0122 07:30:37.979064 4933 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-6c58df6f69-mk9r8" podUID="29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c" containerName="heat-engine" Jan 22 07:30:40 crc kubenswrapper[4933]: I0122 07:30:40.504158 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7f598858d8-sp7f8" Jan 22 07:30:40 crc kubenswrapper[4933]: I0122 07:30:40.590092 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-logs\") pod \"d5a0747b-cb15-4a5a-bf0d-71de8f9592eb\" (UID: \"d5a0747b-cb15-4a5a-bf0d-71de8f9592eb\") " Jan 22 07:30:40 crc kubenswrapper[4933]: I0122 07:30:40.590212 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-horizon-secret-key\") pod \"d5a0747b-cb15-4a5a-bf0d-71de8f9592eb\" (UID: \"d5a0747b-cb15-4a5a-bf0d-71de8f9592eb\") " Jan 22 07:30:40 crc kubenswrapper[4933]: I0122 07:30:40.590279 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-config-data\") pod \"d5a0747b-cb15-4a5a-bf0d-71de8f9592eb\" (UID: \"d5a0747b-cb15-4a5a-bf0d-71de8f9592eb\") " Jan 22 07:30:40 crc kubenswrapper[4933]: I0122 07:30:40.590322 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-scripts\") pod \"d5a0747b-cb15-4a5a-bf0d-71de8f9592eb\" (UID: \"d5a0747b-cb15-4a5a-bf0d-71de8f9592eb\") " Jan 22 07:30:40 crc kubenswrapper[4933]: I0122 07:30:40.590361 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-horizon-tls-certs\") pod \"d5a0747b-cb15-4a5a-bf0d-71de8f9592eb\" (UID: \"d5a0747b-cb15-4a5a-bf0d-71de8f9592eb\") " Jan 22 07:30:40 crc kubenswrapper[4933]: I0122 07:30:40.590378 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-combined-ca-bundle\") pod \"d5a0747b-cb15-4a5a-bf0d-71de8f9592eb\" (UID: \"d5a0747b-cb15-4a5a-bf0d-71de8f9592eb\") " Jan 22 07:30:40 crc kubenswrapper[4933]: I0122 07:30:40.590544 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-logs" (OuterVolumeSpecName: "logs") pod "d5a0747b-cb15-4a5a-bf0d-71de8f9592eb" (UID: "d5a0747b-cb15-4a5a-bf0d-71de8f9592eb"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:30:40 crc kubenswrapper[4933]: I0122 07:30:40.591120 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-687mc\" (UniqueName: \"kubernetes.io/projected/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-kube-api-access-687mc\") pod \"d5a0747b-cb15-4a5a-bf0d-71de8f9592eb\" (UID: \"d5a0747b-cb15-4a5a-bf0d-71de8f9592eb\") " Jan 22 07:30:40 crc kubenswrapper[4933]: I0122 07:30:40.592644 4933 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-logs\") on node \"crc\" DevicePath \"\"" Jan 22 07:30:40 crc kubenswrapper[4933]: I0122 07:30:40.595859 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-kube-api-access-687mc" (OuterVolumeSpecName: "kube-api-access-687mc") pod "d5a0747b-cb15-4a5a-bf0d-71de8f9592eb" (UID: "d5a0747b-cb15-4a5a-bf0d-71de8f9592eb"). InnerVolumeSpecName "kube-api-access-687mc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:30:40 crc kubenswrapper[4933]: I0122 07:30:40.596063 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "d5a0747b-cb15-4a5a-bf0d-71de8f9592eb" (UID: "d5a0747b-cb15-4a5a-bf0d-71de8f9592eb"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:30:40 crc kubenswrapper[4933]: I0122 07:30:40.626955 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-config-data" (OuterVolumeSpecName: "config-data") pod "d5a0747b-cb15-4a5a-bf0d-71de8f9592eb" (UID: "d5a0747b-cb15-4a5a-bf0d-71de8f9592eb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:30:40 crc kubenswrapper[4933]: I0122 07:30:40.631744 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-scripts" (OuterVolumeSpecName: "scripts") pod "d5a0747b-cb15-4a5a-bf0d-71de8f9592eb" (UID: "d5a0747b-cb15-4a5a-bf0d-71de8f9592eb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:30:40 crc kubenswrapper[4933]: I0122 07:30:40.639026 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d5a0747b-cb15-4a5a-bf0d-71de8f9592eb" (UID: "d5a0747b-cb15-4a5a-bf0d-71de8f9592eb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:30:40 crc kubenswrapper[4933]: I0122 07:30:40.652304 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "d5a0747b-cb15-4a5a-bf0d-71de8f9592eb" (UID: "d5a0747b-cb15-4a5a-bf0d-71de8f9592eb"). InnerVolumeSpecName "horizon-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:30:40 crc kubenswrapper[4933]: I0122 07:30:40.694736 4933 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Jan 22 07:30:40 crc kubenswrapper[4933]: I0122 07:30:40.694777 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 07:30:40 crc kubenswrapper[4933]: I0122 07:30:40.694787 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:30:40 crc kubenswrapper[4933]: I0122 07:30:40.694840 4933 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-horizon-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 07:30:40 crc kubenswrapper[4933]: I0122 07:30:40.694849 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:30:40 crc kubenswrapper[4933]: I0122 07:30:40.694916 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-687mc\" (UniqueName: \"kubernetes.io/projected/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb-kube-api-access-687mc\") on node \"crc\" DevicePath \"\"" Jan 22 07:30:40 crc kubenswrapper[4933]: I0122 07:30:40.750857 4933 generic.go:334] "Generic (PLEG): container finished" podID="d5a0747b-cb15-4a5a-bf0d-71de8f9592eb" containerID="3dc911b554e56e3ebc70ae238c7cb0073de5c3341655535625142dd7a09ea6ec" exitCode=137 Jan 22 07:30:40 crc kubenswrapper[4933]: I0122 07:30:40.750974 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-7f598858d8-sp7f8" Jan 22 07:30:40 crc kubenswrapper[4933]: I0122 07:30:40.751000 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7f598858d8-sp7f8" event={"ID":"d5a0747b-cb15-4a5a-bf0d-71de8f9592eb","Type":"ContainerDied","Data":"3dc911b554e56e3ebc70ae238c7cb0073de5c3341655535625142dd7a09ea6ec"} Jan 22 07:30:40 crc kubenswrapper[4933]: I0122 07:30:40.751322 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7f598858d8-sp7f8" event={"ID":"d5a0747b-cb15-4a5a-bf0d-71de8f9592eb","Type":"ContainerDied","Data":"773ca2e3da87f5254efe29ebdaa596523a49955616818d727fc3c7ff7ed20e44"} Jan 22 07:30:40 crc kubenswrapper[4933]: I0122 07:30:40.751347 4933 scope.go:117] "RemoveContainer" containerID="3ab4f7e221db08ced43307f7f2b631adf3c32ed4365154344de1494821ad4e48" Jan 22 07:30:40 crc kubenswrapper[4933]: I0122 07:30:40.789685 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-7f598858d8-sp7f8"] Jan 22 07:30:40 crc kubenswrapper[4933]: I0122 07:30:40.802181 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-7f598858d8-sp7f8"] Jan 22 07:30:40 crc kubenswrapper[4933]: I0122 07:30:40.924187 4933 scope.go:117] "RemoveContainer" containerID="3dc911b554e56e3ebc70ae238c7cb0073de5c3341655535625142dd7a09ea6ec" Jan 22 07:30:40 crc kubenswrapper[4933]: I0122 07:30:40.944558 4933 scope.go:117] "RemoveContainer" containerID="3ab4f7e221db08ced43307f7f2b631adf3c32ed4365154344de1494821ad4e48" Jan 22 07:30:40 crc kubenswrapper[4933]: E0122 07:30:40.945093 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3ab4f7e221db08ced43307f7f2b631adf3c32ed4365154344de1494821ad4e48\": container with ID starting with 3ab4f7e221db08ced43307f7f2b631adf3c32ed4365154344de1494821ad4e48 not found: ID does not exist" containerID="3ab4f7e221db08ced43307f7f2b631adf3c32ed4365154344de1494821ad4e48" Jan 22 07:30:40 crc kubenswrapper[4933]: I0122 07:30:40.945134 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3ab4f7e221db08ced43307f7f2b631adf3c32ed4365154344de1494821ad4e48"} err="failed to get container status \"3ab4f7e221db08ced43307f7f2b631adf3c32ed4365154344de1494821ad4e48\": rpc error: code = NotFound desc = could not find container \"3ab4f7e221db08ced43307f7f2b631adf3c32ed4365154344de1494821ad4e48\": container with ID starting with 3ab4f7e221db08ced43307f7f2b631adf3c32ed4365154344de1494821ad4e48 not found: ID does not exist" Jan 22 07:30:40 crc kubenswrapper[4933]: I0122 07:30:40.945158 4933 scope.go:117] "RemoveContainer" containerID="3dc911b554e56e3ebc70ae238c7cb0073de5c3341655535625142dd7a09ea6ec" Jan 22 07:30:40 crc kubenswrapper[4933]: E0122 07:30:40.945439 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3dc911b554e56e3ebc70ae238c7cb0073de5c3341655535625142dd7a09ea6ec\": container with ID starting with 3dc911b554e56e3ebc70ae238c7cb0073de5c3341655535625142dd7a09ea6ec not found: ID does not exist" containerID="3dc911b554e56e3ebc70ae238c7cb0073de5c3341655535625142dd7a09ea6ec" Jan 22 07:30:40 crc kubenswrapper[4933]: I0122 07:30:40.945463 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3dc911b554e56e3ebc70ae238c7cb0073de5c3341655535625142dd7a09ea6ec"} err="failed to get container status 
\"3dc911b554e56e3ebc70ae238c7cb0073de5c3341655535625142dd7a09ea6ec\": rpc error: code = NotFound desc = could not find container \"3dc911b554e56e3ebc70ae238c7cb0073de5c3341655535625142dd7a09ea6ec\": container with ID starting with 3dc911b554e56e3ebc70ae238c7cb0073de5c3341655535625142dd7a09ea6ec not found: ID does not exist" Jan 22 07:30:42 crc kubenswrapper[4933]: I0122 07:30:42.497730 4933 scope.go:117] "RemoveContainer" containerID="16fd824342f037f0104eaf6d48b751a02eeec5a84e0c2a32f1a295811edc0336" Jan 22 07:30:42 crc kubenswrapper[4933]: E0122 07:30:42.498323 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:30:42 crc kubenswrapper[4933]: I0122 07:30:42.502352 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d5a0747b-cb15-4a5a-bf0d-71de8f9592eb" path="/var/lib/kubelet/pods/d5a0747b-cb15-4a5a-bf0d-71de8f9592eb/volumes" Jan 22 07:30:47 crc kubenswrapper[4933]: E0122 07:30:47.981237 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="588e27176aa729aee553f8d7df5223a01c3896d81e28650696d64cc1cb0b9a7c" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 22 07:30:47 crc kubenswrapper[4933]: E0122 07:30:47.983521 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="588e27176aa729aee553f8d7df5223a01c3896d81e28650696d64cc1cb0b9a7c" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 22 07:30:47 crc kubenswrapper[4933]: E0122 07:30:47.985312 4933 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="588e27176aa729aee553f8d7df5223a01c3896d81e28650696d64cc1cb0b9a7c" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 22 07:30:47 crc kubenswrapper[4933]: E0122 07:30:47.985378 4933 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-6c58df6f69-mk9r8" podUID="29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c" containerName="heat-engine" Jan 22 07:30:48 crc kubenswrapper[4933]: I0122 07:30:48.629507 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-6c58df6f69-mk9r8" Jan 22 07:30:48 crc kubenswrapper[4933]: I0122 07:30:48.673430 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cjm2s\" (UniqueName: \"kubernetes.io/projected/29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c-kube-api-access-cjm2s\") pod \"29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c\" (UID: \"29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c\") " Jan 22 07:30:48 crc kubenswrapper[4933]: I0122 07:30:48.673522 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c-combined-ca-bundle\") pod \"29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c\" (UID: \"29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c\") " Jan 22 07:30:48 crc kubenswrapper[4933]: I0122 07:30:48.673726 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c-config-data-custom\") pod \"29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c\" (UID: \"29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c\") " Jan 22 07:30:48 crc kubenswrapper[4933]: I0122 07:30:48.673860 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c-config-data\") pod \"29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c\" (UID: \"29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c\") " Jan 22 07:30:48 crc kubenswrapper[4933]: I0122 07:30:48.681464 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c-kube-api-access-cjm2s" (OuterVolumeSpecName: "kube-api-access-cjm2s") pod "29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c" (UID: "29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c"). InnerVolumeSpecName "kube-api-access-cjm2s". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:30:48 crc kubenswrapper[4933]: I0122 07:30:48.682363 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c" (UID: "29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:30:48 crc kubenswrapper[4933]: I0122 07:30:48.708922 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c" (UID: "29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:30:48 crc kubenswrapper[4933]: I0122 07:30:48.739492 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c-config-data" (OuterVolumeSpecName: "config-data") pod "29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c" (UID: "29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:30:48 crc kubenswrapper[4933]: I0122 07:30:48.776115 4933 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 22 07:30:48 crc kubenswrapper[4933]: I0122 07:30:48.776163 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 07:30:48 crc kubenswrapper[4933]: I0122 07:30:48.776174 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cjm2s\" (UniqueName: \"kubernetes.io/projected/29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c-kube-api-access-cjm2s\") on node \"crc\" DevicePath \"\"" Jan 22 07:30:48 crc kubenswrapper[4933]: I0122 07:30:48.776188 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:30:48 crc kubenswrapper[4933]: I0122 07:30:48.841304 4933 generic.go:334] "Generic (PLEG): container finished" podID="29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c" containerID="588e27176aa729aee553f8d7df5223a01c3896d81e28650696d64cc1cb0b9a7c" exitCode=0 Jan 22 07:30:48 crc kubenswrapper[4933]: I0122 07:30:48.841347 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-6c58df6f69-mk9r8" event={"ID":"29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c","Type":"ContainerDied","Data":"588e27176aa729aee553f8d7df5223a01c3896d81e28650696d64cc1cb0b9a7c"} Jan 22 07:30:48 crc kubenswrapper[4933]: I0122 07:30:48.841366 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-6c58df6f69-mk9r8" Jan 22 07:30:48 crc kubenswrapper[4933]: I0122 07:30:48.841384 4933 scope.go:117] "RemoveContainer" containerID="588e27176aa729aee553f8d7df5223a01c3896d81e28650696d64cc1cb0b9a7c" Jan 22 07:30:48 crc kubenswrapper[4933]: I0122 07:30:48.841372 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-6c58df6f69-mk9r8" event={"ID":"29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c","Type":"ContainerDied","Data":"eb45099d2caec69e462616d40390e24e8a26d6e143486c42031c48e37b48c4f6"} Jan 22 07:30:48 crc kubenswrapper[4933]: I0122 07:30:48.878134 4933 scope.go:117] "RemoveContainer" containerID="588e27176aa729aee553f8d7df5223a01c3896d81e28650696d64cc1cb0b9a7c" Jan 22 07:30:48 crc kubenswrapper[4933]: E0122 07:30:48.878545 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"588e27176aa729aee553f8d7df5223a01c3896d81e28650696d64cc1cb0b9a7c\": container with ID starting with 588e27176aa729aee553f8d7df5223a01c3896d81e28650696d64cc1cb0b9a7c not found: ID does not exist" containerID="588e27176aa729aee553f8d7df5223a01c3896d81e28650696d64cc1cb0b9a7c" Jan 22 07:30:48 crc kubenswrapper[4933]: I0122 07:30:48.878589 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"588e27176aa729aee553f8d7df5223a01c3896d81e28650696d64cc1cb0b9a7c"} err="failed to get container status \"588e27176aa729aee553f8d7df5223a01c3896d81e28650696d64cc1cb0b9a7c\": rpc error: code = NotFound desc = could not find container \"588e27176aa729aee553f8d7df5223a01c3896d81e28650696d64cc1cb0b9a7c\": container with ID starting with 588e27176aa729aee553f8d7df5223a01c3896d81e28650696d64cc1cb0b9a7c not found: ID does not exist" Jan 22 07:30:48 crc kubenswrapper[4933]: I0122 07:30:48.890874 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-6c58df6f69-mk9r8"] Jan 22 07:30:48 crc kubenswrapper[4933]: I0122 07:30:48.902382 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-engine-6c58df6f69-mk9r8"] Jan 22 07:30:50 crc kubenswrapper[4933]: I0122 07:30:50.501407 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c" path="/var/lib/kubelet/pods/29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c/volumes" Jan 22 07:30:57 crc kubenswrapper[4933]: I0122 07:30:57.490890 4933 scope.go:117] "RemoveContainer" containerID="16fd824342f037f0104eaf6d48b751a02eeec5a84e0c2a32f1a295811edc0336" Jan 22 07:30:57 crc kubenswrapper[4933]: E0122 07:30:57.491624 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:30:59 crc kubenswrapper[4933]: I0122 07:30:59.043316 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-gzkdg"] Jan 22 07:30:59 crc kubenswrapper[4933]: I0122 07:30:59.056603 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-4c14-account-create-update-jprrg"] Jan 22 07:30:59 crc kubenswrapper[4933]: I0122 07:30:59.065246 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-gzkdg"] Jan 22 07:30:59 
Jan 22 07:31:00 crc kubenswrapper[4933]: I0122 07:31:00.509062 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9fa99816-1176-4475-ba6d-a910e9008aa1" path="/var/lib/kubelet/pods/9fa99816-1176-4475-ba6d-a910e9008aa1/volumes"
Jan 22 07:31:00 crc kubenswrapper[4933]: I0122 07:31:00.510551 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a9a64653-d223-46b3-9c35-dd91036c310a" path="/var/lib/kubelet/pods/a9a64653-d223-46b3-9c35-dd91036c310a/volumes"
Jan 22 07:31:06 crc kubenswrapper[4933]: I0122 07:31:06.201425 4933 scope.go:117] "RemoveContainer" containerID="d7881c8672504e8aabb87d5ab3f0a9d504b4f1eeff09bf0cd46c45e5d454eb11"
Jan 22 07:31:06 crc kubenswrapper[4933]: I0122 07:31:06.291908 4933 scope.go:117] "RemoveContainer" containerID="04e389c31c38bd2ffd009fd698864582de82b945539be83e06ff7b6f5f3b106e"
Jan 22 07:31:06 crc kubenswrapper[4933]: I0122 07:31:06.336908 4933 scope.go:117] "RemoveContainer" containerID="92fbc4d1d10495a04e56e0b82627140050fc1c17732f59379de7dd97ecfa4be9"
Jan 22 07:31:07 crc kubenswrapper[4933]: I0122 07:31:07.054813 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-5n6cg"]
Jan 22 07:31:07 crc kubenswrapper[4933]: I0122 07:31:07.081059 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-5n6cg"]
Jan 22 07:31:08 crc kubenswrapper[4933]: I0122 07:31:08.510326 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="94de467d-6c82-41bc-bc7c-63cbe5a0aea4" path="/var/lib/kubelet/pods/94de467d-6c82-41bc-bc7c-63cbe5a0aea4/volumes"
Jan 22 07:31:08 crc kubenswrapper[4933]: I0122 07:31:08.960432 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f088jdw5"]
Jan 22 07:31:08 crc kubenswrapper[4933]: E0122 07:31:08.960860 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24ee8df1-f55e-4d06-8642-c4ac760a512c" containerName="heat-api"
Jan 22 07:31:08 crc kubenswrapper[4933]: I0122 07:31:08.960881 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="24ee8df1-f55e-4d06-8642-c4ac760a512c" containerName="heat-api"
Jan 22 07:31:08 crc kubenswrapper[4933]: E0122 07:31:08.960891 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c" containerName="heat-engine"
Jan 22 07:31:08 crc kubenswrapper[4933]: I0122 07:31:08.960899 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c" containerName="heat-engine"
Jan 22 07:31:08 crc kubenswrapper[4933]: E0122 07:31:08.960914 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a95daff6-8bb9-4963-bfdd-52c7b8050f24" containerName="heat-api"
Jan 22 07:31:08 crc kubenswrapper[4933]: I0122 07:31:08.960920 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="a95daff6-8bb9-4963-bfdd-52c7b8050f24" containerName="heat-api"
Jan 22 07:31:08 crc kubenswrapper[4933]: E0122 07:31:08.960928 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="547965cc-5259-4daa-b4f0-96a52472e48f" containerName="heat-cfnapi"
Jan 22 07:31:08 crc kubenswrapper[4933]: I0122 07:31:08.960934 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="547965cc-5259-4daa-b4f0-96a52472e48f" containerName="heat-cfnapi"
Jan 22 07:31:08 crc kubenswrapper[4933]: E0122 07:31:08.960953 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5a0747b-cb15-4a5a-bf0d-71de8f9592eb" containerName="horizon-log"
Jan 22 07:31:08 crc kubenswrapper[4933]: I0122 07:31:08.960959 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5a0747b-cb15-4a5a-bf0d-71de8f9592eb" containerName="horizon-log"
Jan 22 07:31:08 crc kubenswrapper[4933]: E0122 07:31:08.960977 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a40190a-d7c0-42cf-86b0-10eb2ff37d24" containerName="heat-cfnapi"
Jan 22 07:31:08 crc kubenswrapper[4933]: I0122 07:31:08.960982 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a40190a-d7c0-42cf-86b0-10eb2ff37d24" containerName="heat-cfnapi"
Jan 22 07:31:08 crc kubenswrapper[4933]: E0122 07:31:08.960993 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="547965cc-5259-4daa-b4f0-96a52472e48f" containerName="heat-cfnapi"
Jan 22 07:31:08 crc kubenswrapper[4933]: I0122 07:31:08.960998 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="547965cc-5259-4daa-b4f0-96a52472e48f" containerName="heat-cfnapi"
Jan 22 07:31:08 crc kubenswrapper[4933]: E0122 07:31:08.961009 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5a0747b-cb15-4a5a-bf0d-71de8f9592eb" containerName="horizon"
Jan 22 07:31:08 crc kubenswrapper[4933]: I0122 07:31:08.961014 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5a0747b-cb15-4a5a-bf0d-71de8f9592eb" containerName="horizon"
Jan 22 07:31:08 crc kubenswrapper[4933]: I0122 07:31:08.961257 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="a95daff6-8bb9-4963-bfdd-52c7b8050f24" containerName="heat-api"
Jan 22 07:31:08 crc kubenswrapper[4933]: I0122 07:31:08.961272 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5a0747b-cb15-4a5a-bf0d-71de8f9592eb" containerName="horizon-log"
Jan 22 07:31:08 crc kubenswrapper[4933]: I0122 07:31:08.961280 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a40190a-d7c0-42cf-86b0-10eb2ff37d24" containerName="heat-cfnapi"
Jan 22 07:31:08 crc kubenswrapper[4933]: I0122 07:31:08.961299 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="24ee8df1-f55e-4d06-8642-c4ac760a512c" containerName="heat-api"
Jan 22 07:31:08 crc kubenswrapper[4933]: I0122 07:31:08.961309 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="29b19ef8-cef1-492a-8c8b-9bd21ef2dd2c" containerName="heat-engine"
Jan 22 07:31:08 crc kubenswrapper[4933]: I0122 07:31:08.961321 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="547965cc-5259-4daa-b4f0-96a52472e48f" containerName="heat-cfnapi"
Jan 22 07:31:08 crc kubenswrapper[4933]: I0122 07:31:08.961331 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5a0747b-cb15-4a5a-bf0d-71de8f9592eb" containerName="horizon"
Jan 22 07:31:08 crc kubenswrapper[4933]: I0122 07:31:08.961343 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="24ee8df1-f55e-4d06-8642-c4ac760a512c" containerName="heat-api"
Jan 22 07:31:08 crc kubenswrapper[4933]: I0122 07:31:08.961354 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="547965cc-5259-4daa-b4f0-96a52472e48f" containerName="heat-cfnapi"
Jan 22 07:31:08 crc kubenswrapper[4933]: E0122 07:31:08.961517 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24ee8df1-f55e-4d06-8642-c4ac760a512c" containerName="heat-api"
Jan 22 07:31:08 crc kubenswrapper[4933]: I0122 07:31:08.961525 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="24ee8df1-f55e-4d06-8642-c4ac760a512c" containerName="heat-api"
Jan 22 07:31:08 crc kubenswrapper[4933]: I0122 07:31:08.962982 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f088jdw5"
Jan 22 07:31:08 crc kubenswrapper[4933]: I0122 07:31:08.968874 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Jan 22 07:31:08 crc kubenswrapper[4933]: I0122 07:31:08.972784 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f088jdw5"]
Jan 22 07:31:09 crc kubenswrapper[4933]: I0122 07:31:09.112413 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/627af3d3-01cc-49d7-833f-7a9f5f313302-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f088jdw5\" (UID: \"627af3d3-01cc-49d7-833f-7a9f5f313302\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f088jdw5"
Jan 22 07:31:09 crc kubenswrapper[4933]: I0122 07:31:09.113019 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/627af3d3-01cc-49d7-833f-7a9f5f313302-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f088jdw5\" (UID: \"627af3d3-01cc-49d7-833f-7a9f5f313302\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f088jdw5"
Jan 22 07:31:09 crc kubenswrapper[4933]: I0122 07:31:09.113454 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nprjh\" (UniqueName: \"kubernetes.io/projected/627af3d3-01cc-49d7-833f-7a9f5f313302-kube-api-access-nprjh\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f088jdw5\" (UID: \"627af3d3-01cc-49d7-833f-7a9f5f313302\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f088jdw5"
Jan 22 07:31:09 crc kubenswrapper[4933]: I0122 07:31:09.216065 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/627af3d3-01cc-49d7-833f-7a9f5f313302-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f088jdw5\" (UID: \"627af3d3-01cc-49d7-833f-7a9f5f313302\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f088jdw5"
Jan 22 07:31:09 crc kubenswrapper[4933]: I0122 07:31:09.216231 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/627af3d3-01cc-49d7-833f-7a9f5f313302-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f088jdw5\" (UID: \"627af3d3-01cc-49d7-833f-7a9f5f313302\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f088jdw5"
Jan 22 07:31:09 crc kubenswrapper[4933]: I0122 07:31:09.216370 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nprjh\" (UniqueName: \"kubernetes.io/projected/627af3d3-01cc-49d7-833f-7a9f5f313302-kube-api-access-nprjh\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f088jdw5\" (UID: \"627af3d3-01cc-49d7-833f-7a9f5f313302\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f088jdw5"
pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f088jdw5" Jan 22 07:31:09 crc kubenswrapper[4933]: I0122 07:31:09.216668 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/627af3d3-01cc-49d7-833f-7a9f5f313302-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f088jdw5\" (UID: \"627af3d3-01cc-49d7-833f-7a9f5f313302\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f088jdw5" Jan 22 07:31:09 crc kubenswrapper[4933]: I0122 07:31:09.216783 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/627af3d3-01cc-49d7-833f-7a9f5f313302-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f088jdw5\" (UID: \"627af3d3-01cc-49d7-833f-7a9f5f313302\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f088jdw5" Jan 22 07:31:09 crc kubenswrapper[4933]: I0122 07:31:09.244857 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nprjh\" (UniqueName: \"kubernetes.io/projected/627af3d3-01cc-49d7-833f-7a9f5f313302-kube-api-access-nprjh\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f088jdw5\" (UID: \"627af3d3-01cc-49d7-833f-7a9f5f313302\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f088jdw5" Jan 22 07:31:09 crc kubenswrapper[4933]: I0122 07:31:09.290663 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f088jdw5" Jan 22 07:31:09 crc kubenswrapper[4933]: I0122 07:31:09.762808 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f088jdw5"] Jan 22 07:31:09 crc kubenswrapper[4933]: W0122 07:31:09.775441 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod627af3d3_01cc_49d7_833f_7a9f5f313302.slice/crio-79f13d4279ce32d4a5ba635124c2aca792545fdd7cc20cdbba7fbd36b753040d WatchSource:0}: Error finding container 79f13d4279ce32d4a5ba635124c2aca792545fdd7cc20cdbba7fbd36b753040d: Status 404 returned error can't find the container with id 79f13d4279ce32d4a5ba635124c2aca792545fdd7cc20cdbba7fbd36b753040d Jan 22 07:31:10 crc kubenswrapper[4933]: I0122 07:31:10.047416 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f088jdw5" event={"ID":"627af3d3-01cc-49d7-833f-7a9f5f313302","Type":"ContainerStarted","Data":"abba15764801925e3ee9104e520efd80142f0e17fb5a822a0b1d8289ccd85083"} Jan 22 07:31:10 crc kubenswrapper[4933]: I0122 07:31:10.047792 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f088jdw5" event={"ID":"627af3d3-01cc-49d7-833f-7a9f5f313302","Type":"ContainerStarted","Data":"79f13d4279ce32d4a5ba635124c2aca792545fdd7cc20cdbba7fbd36b753040d"} Jan 22 07:31:10 crc kubenswrapper[4933]: I0122 07:31:10.491413 4933 scope.go:117] "RemoveContainer" containerID="16fd824342f037f0104eaf6d48b751a02eeec5a84e0c2a32f1a295811edc0336" Jan 22 07:31:10 crc kubenswrapper[4933]: E0122 07:31:10.491742 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:31:11 crc kubenswrapper[4933]: I0122 07:31:11.060525 4933 generic.go:334] "Generic (PLEG): container finished" podID="627af3d3-01cc-49d7-833f-7a9f5f313302" containerID="abba15764801925e3ee9104e520efd80142f0e17fb5a822a0b1d8289ccd85083" exitCode=0 Jan 22 07:31:11 crc kubenswrapper[4933]: I0122 07:31:11.060642 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f088jdw5" event={"ID":"627af3d3-01cc-49d7-833f-7a9f5f313302","Type":"ContainerDied","Data":"abba15764801925e3ee9104e520efd80142f0e17fb5a822a0b1d8289ccd85083"} Jan 22 07:31:11 crc kubenswrapper[4933]: I0122 07:31:11.063393 4933 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 07:31:13 crc kubenswrapper[4933]: I0122 07:31:13.083570 4933 generic.go:334] "Generic (PLEG): container finished" podID="627af3d3-01cc-49d7-833f-7a9f5f313302" containerID="1fbfed49f037f72277a94801e6c3a7e99aad16751b8ba3bd1d0e55b1ba2dbf09" exitCode=0 Jan 22 07:31:13 crc kubenswrapper[4933]: I0122 07:31:13.083740 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f088jdw5" event={"ID":"627af3d3-01cc-49d7-833f-7a9f5f313302","Type":"ContainerDied","Data":"1fbfed49f037f72277a94801e6c3a7e99aad16751b8ba3bd1d0e55b1ba2dbf09"} Jan 22 07:31:14 crc kubenswrapper[4933]: I0122 07:31:14.093756 4933 generic.go:334] "Generic (PLEG): container finished" podID="627af3d3-01cc-49d7-833f-7a9f5f313302" containerID="4274e670877a64c74e801127122630c562cbd1ffd18f73f18bbeada938ebdc0f" exitCode=0 Jan 22 07:31:14 crc kubenswrapper[4933]: I0122 07:31:14.093798 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f088jdw5" event={"ID":"627af3d3-01cc-49d7-833f-7a9f5f313302","Type":"ContainerDied","Data":"4274e670877a64c74e801127122630c562cbd1ffd18f73f18bbeada938ebdc0f"} Jan 22 07:31:15 crc kubenswrapper[4933]: I0122 07:31:15.505681 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f088jdw5" Jan 22 07:31:15 crc kubenswrapper[4933]: I0122 07:31:15.644797 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/627af3d3-01cc-49d7-833f-7a9f5f313302-util\") pod \"627af3d3-01cc-49d7-833f-7a9f5f313302\" (UID: \"627af3d3-01cc-49d7-833f-7a9f5f313302\") " Jan 22 07:31:15 crc kubenswrapper[4933]: I0122 07:31:15.645132 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/627af3d3-01cc-49d7-833f-7a9f5f313302-bundle\") pod \"627af3d3-01cc-49d7-833f-7a9f5f313302\" (UID: \"627af3d3-01cc-49d7-833f-7a9f5f313302\") " Jan 22 07:31:15 crc kubenswrapper[4933]: I0122 07:31:15.645201 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nprjh\" (UniqueName: \"kubernetes.io/projected/627af3d3-01cc-49d7-833f-7a9f5f313302-kube-api-access-nprjh\") pod \"627af3d3-01cc-49d7-833f-7a9f5f313302\" (UID: \"627af3d3-01cc-49d7-833f-7a9f5f313302\") " Jan 22 07:31:15 crc kubenswrapper[4933]: I0122 07:31:15.647282 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/627af3d3-01cc-49d7-833f-7a9f5f313302-bundle" (OuterVolumeSpecName: "bundle") pod "627af3d3-01cc-49d7-833f-7a9f5f313302" (UID: "627af3d3-01cc-49d7-833f-7a9f5f313302"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:31:15 crc kubenswrapper[4933]: I0122 07:31:15.653887 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/627af3d3-01cc-49d7-833f-7a9f5f313302-kube-api-access-nprjh" (OuterVolumeSpecName: "kube-api-access-nprjh") pod "627af3d3-01cc-49d7-833f-7a9f5f313302" (UID: "627af3d3-01cc-49d7-833f-7a9f5f313302"). InnerVolumeSpecName "kube-api-access-nprjh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:31:15 crc kubenswrapper[4933]: I0122 07:31:15.667021 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/627af3d3-01cc-49d7-833f-7a9f5f313302-util" (OuterVolumeSpecName: "util") pod "627af3d3-01cc-49d7-833f-7a9f5f313302" (UID: "627af3d3-01cc-49d7-833f-7a9f5f313302"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:31:15 crc kubenswrapper[4933]: I0122 07:31:15.748689 4933 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/627af3d3-01cc-49d7-833f-7a9f5f313302-util\") on node \"crc\" DevicePath \"\"" Jan 22 07:31:15 crc kubenswrapper[4933]: I0122 07:31:15.748735 4933 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/627af3d3-01cc-49d7-833f-7a9f5f313302-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:31:15 crc kubenswrapper[4933]: I0122 07:31:15.748745 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nprjh\" (UniqueName: \"kubernetes.io/projected/627af3d3-01cc-49d7-833f-7a9f5f313302-kube-api-access-nprjh\") on node \"crc\" DevicePath \"\"" Jan 22 07:31:16 crc kubenswrapper[4933]: I0122 07:31:16.117744 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f088jdw5" event={"ID":"627af3d3-01cc-49d7-833f-7a9f5f313302","Type":"ContainerDied","Data":"79f13d4279ce32d4a5ba635124c2aca792545fdd7cc20cdbba7fbd36b753040d"} Jan 22 07:31:16 crc kubenswrapper[4933]: I0122 07:31:16.117821 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="79f13d4279ce32d4a5ba635124c2aca792545fdd7cc20cdbba7fbd36b753040d" Jan 22 07:31:16 crc kubenswrapper[4933]: I0122 07:31:16.117848 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f088jdw5" Jan 22 07:31:21 crc kubenswrapper[4933]: I0122 07:31:21.491266 4933 scope.go:117] "RemoveContainer" containerID="16fd824342f037f0104eaf6d48b751a02eeec5a84e0c2a32f1a295811edc0336" Jan 22 07:31:22 crc kubenswrapper[4933]: I0122 07:31:22.197836 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerStarted","Data":"1b32e660a15a3b4635250c60c37311d6f65b1e17832e99f37c6d9d25bd8e5087"} Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.239548 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-jxw4c"] Jan 22 07:31:28 crc kubenswrapper[4933]: E0122 07:31:28.240485 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="627af3d3-01cc-49d7-833f-7a9f5f313302" containerName="pull" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.240503 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="627af3d3-01cc-49d7-833f-7a9f5f313302" containerName="pull" Jan 22 07:31:28 crc kubenswrapper[4933]: E0122 07:31:28.240532 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="627af3d3-01cc-49d7-833f-7a9f5f313302" containerName="extract" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.240541 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="627af3d3-01cc-49d7-833f-7a9f5f313302" containerName="extract" Jan 22 07:31:28 crc kubenswrapper[4933]: E0122 07:31:28.240560 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="627af3d3-01cc-49d7-833f-7a9f5f313302" containerName="util" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.240567 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="627af3d3-01cc-49d7-833f-7a9f5f313302" containerName="util" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.240843 4933 
memory_manager.go:354] "RemoveStaleState removing state" podUID="627af3d3-01cc-49d7-833f-7a9f5f313302" containerName="extract" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.241796 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-jxw4c" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.244710 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.244773 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.245218 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-s4w6f" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.257230 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-jxw4c"] Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.351720 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nkj87\" (UniqueName: \"kubernetes.io/projected/a79423c3-5348-464f-9866-425625f2a8f1-kube-api-access-nkj87\") pod \"obo-prometheus-operator-68bc856cb9-jxw4c\" (UID: \"a79423c3-5348-464f-9866-425625f2a8f1\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-jxw4c" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.362154 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-697444475f-jv7mp"] Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.363867 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-697444475f-jv7mp" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.368287 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-l489f" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.368489 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.370179 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-697444475f-hzfjv"] Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.375461 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-697444475f-hzfjv" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.441002 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-697444475f-jv7mp"] Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.453404 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f4cb4026-e4bf-45f2-9373-71f24b0ecd16-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-697444475f-hzfjv\" (UID: \"f4cb4026-e4bf-45f2-9373-71f24b0ecd16\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-697444475f-hzfjv" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.453451 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f6675820-e9ac-4e84-8e2f-2971304513ea-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-697444475f-jv7mp\" (UID: \"f6675820-e9ac-4e84-8e2f-2971304513ea\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-697444475f-jv7mp" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.453497 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nkj87\" (UniqueName: \"kubernetes.io/projected/a79423c3-5348-464f-9866-425625f2a8f1-kube-api-access-nkj87\") pod \"obo-prometheus-operator-68bc856cb9-jxw4c\" (UID: \"a79423c3-5348-464f-9866-425625f2a8f1\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-jxw4c" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.453539 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f4cb4026-e4bf-45f2-9373-71f24b0ecd16-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-697444475f-hzfjv\" (UID: \"f4cb4026-e4bf-45f2-9373-71f24b0ecd16\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-697444475f-hzfjv" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.453617 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f6675820-e9ac-4e84-8e2f-2971304513ea-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-697444475f-jv7mp\" (UID: \"f6675820-e9ac-4e84-8e2f-2971304513ea\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-697444475f-jv7mp" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.475345 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nkj87\" (UniqueName: \"kubernetes.io/projected/a79423c3-5348-464f-9866-425625f2a8f1-kube-api-access-nkj87\") pod \"obo-prometheus-operator-68bc856cb9-jxw4c\" (UID: \"a79423c3-5348-464f-9866-425625f2a8f1\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-jxw4c" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.502038 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-697444475f-hzfjv"] Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.554629 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-2n7hh"] Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.555718 4933 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f4cb4026-e4bf-45f2-9373-71f24b0ecd16-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-697444475f-hzfjv\" (UID: \"f4cb4026-e4bf-45f2-9373-71f24b0ecd16\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-697444475f-hzfjv" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.555775 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f6675820-e9ac-4e84-8e2f-2971304513ea-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-697444475f-jv7mp\" (UID: \"f6675820-e9ac-4e84-8e2f-2971304513ea\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-697444475f-jv7mp" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.555874 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f4cb4026-e4bf-45f2-9373-71f24b0ecd16-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-697444475f-hzfjv\" (UID: \"f4cb4026-e4bf-45f2-9373-71f24b0ecd16\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-697444475f-hzfjv" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.556135 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f6675820-e9ac-4e84-8e2f-2971304513ea-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-697444475f-jv7mp\" (UID: \"f6675820-e9ac-4e84-8e2f-2971304513ea\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-697444475f-jv7mp" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.558230 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-2n7hh" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.563423 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f6675820-e9ac-4e84-8e2f-2971304513ea-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-697444475f-jv7mp\" (UID: \"f6675820-e9ac-4e84-8e2f-2971304513ea\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-697444475f-jv7mp" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.563738 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f4cb4026-e4bf-45f2-9373-71f24b0ecd16-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-697444475f-hzfjv\" (UID: \"f4cb4026-e4bf-45f2-9373-71f24b0ecd16\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-697444475f-hzfjv" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.564165 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-jxw4c" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.570451 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.570529 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f4cb4026-e4bf-45f2-9373-71f24b0ecd16-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-697444475f-hzfjv\" (UID: \"f4cb4026-e4bf-45f2-9373-71f24b0ecd16\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-697444475f-hzfjv" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.570576 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-98kkm" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.576070 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f6675820-e9ac-4e84-8e2f-2971304513ea-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-697444475f-jv7mp\" (UID: \"f6675820-e9ac-4e84-8e2f-2971304513ea\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-697444475f-jv7mp" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.586760 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-2n7hh"] Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.658719 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/1ec7b8e4-e17f-4d56-8f1c-b04751ad3bf8-observability-operator-tls\") pod \"observability-operator-59bdc8b94-2n7hh\" (UID: \"1ec7b8e4-e17f-4d56-8f1c-b04751ad3bf8\") " pod="openshift-operators/observability-operator-59bdc8b94-2n7hh" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.659075 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-knblz\" (UniqueName: \"kubernetes.io/projected/1ec7b8e4-e17f-4d56-8f1c-b04751ad3bf8-kube-api-access-knblz\") pod \"observability-operator-59bdc8b94-2n7hh\" (UID: \"1ec7b8e4-e17f-4d56-8f1c-b04751ad3bf8\") " pod="openshift-operators/observability-operator-59bdc8b94-2n7hh" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.695075 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-697444475f-jv7mp" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.711819 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-697444475f-hzfjv" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.770024 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/1ec7b8e4-e17f-4d56-8f1c-b04751ad3bf8-observability-operator-tls\") pod \"observability-operator-59bdc8b94-2n7hh\" (UID: \"1ec7b8e4-e17f-4d56-8f1c-b04751ad3bf8\") " pod="openshift-operators/observability-operator-59bdc8b94-2n7hh" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.770371 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-knblz\" (UniqueName: \"kubernetes.io/projected/1ec7b8e4-e17f-4d56-8f1c-b04751ad3bf8-kube-api-access-knblz\") pod \"observability-operator-59bdc8b94-2n7hh\" (UID: \"1ec7b8e4-e17f-4d56-8f1c-b04751ad3bf8\") " pod="openshift-operators/observability-operator-59bdc8b94-2n7hh" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.773783 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/1ec7b8e4-e17f-4d56-8f1c-b04751ad3bf8-observability-operator-tls\") pod \"observability-operator-59bdc8b94-2n7hh\" (UID: \"1ec7b8e4-e17f-4d56-8f1c-b04751ad3bf8\") " pod="openshift-operators/observability-operator-59bdc8b94-2n7hh" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.794112 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-knblz\" (UniqueName: \"kubernetes.io/projected/1ec7b8e4-e17f-4d56-8f1c-b04751ad3bf8-kube-api-access-knblz\") pod \"observability-operator-59bdc8b94-2n7hh\" (UID: \"1ec7b8e4-e17f-4d56-8f1c-b04751ad3bf8\") " pod="openshift-operators/observability-operator-59bdc8b94-2n7hh" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.809778 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-5b8t8"] Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.811333 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-5b8t8" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.830375 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-wdh82" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.857642 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-5b8t8"] Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.876918 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/cc705ce5-1fcd-4528-b638-c5e1d062c035-openshift-service-ca\") pod \"perses-operator-5bf474d74f-5b8t8\" (UID: \"cc705ce5-1fcd-4528-b638-c5e1d062c035\") " pod="openshift-operators/perses-operator-5bf474d74f-5b8t8" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.877033 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2f5nx\" (UniqueName: \"kubernetes.io/projected/cc705ce5-1fcd-4528-b638-c5e1d062c035-kube-api-access-2f5nx\") pod \"perses-operator-5bf474d74f-5b8t8\" (UID: \"cc705ce5-1fcd-4528-b638-c5e1d062c035\") " pod="openshift-operators/perses-operator-5bf474d74f-5b8t8" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.979790 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/cc705ce5-1fcd-4528-b638-c5e1d062c035-openshift-service-ca\") pod \"perses-operator-5bf474d74f-5b8t8\" (UID: \"cc705ce5-1fcd-4528-b638-c5e1d062c035\") " pod="openshift-operators/perses-operator-5bf474d74f-5b8t8" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.979897 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2f5nx\" (UniqueName: \"kubernetes.io/projected/cc705ce5-1fcd-4528-b638-c5e1d062c035-kube-api-access-2f5nx\") pod \"perses-operator-5bf474d74f-5b8t8\" (UID: \"cc705ce5-1fcd-4528-b638-c5e1d062c035\") " pod="openshift-operators/perses-operator-5bf474d74f-5b8t8" Jan 22 07:31:28 crc kubenswrapper[4933]: I0122 07:31:28.981314 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/cc705ce5-1fcd-4528-b638-c5e1d062c035-openshift-service-ca\") pod \"perses-operator-5bf474d74f-5b8t8\" (UID: \"cc705ce5-1fcd-4528-b638-c5e1d062c035\") " pod="openshift-operators/perses-operator-5bf474d74f-5b8t8" Jan 22 07:31:29 crc kubenswrapper[4933]: I0122 07:31:29.004797 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2f5nx\" (UniqueName: \"kubernetes.io/projected/cc705ce5-1fcd-4528-b638-c5e1d062c035-kube-api-access-2f5nx\") pod \"perses-operator-5bf474d74f-5b8t8\" (UID: \"cc705ce5-1fcd-4528-b638-c5e1d062c035\") " pod="openshift-operators/perses-operator-5bf474d74f-5b8t8" Jan 22 07:31:29 crc kubenswrapper[4933]: I0122 07:31:29.038653 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-2n7hh" Jan 22 07:31:29 crc kubenswrapper[4933]: I0122 07:31:29.178154 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-5b8t8" Jan 22 07:31:29 crc kubenswrapper[4933]: I0122 07:31:29.420967 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-jxw4c"] Jan 22 07:31:29 crc kubenswrapper[4933]: W0122 07:31:29.432096 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf6675820_e9ac_4e84_8e2f_2971304513ea.slice/crio-a27ae3750504dcd2a2f63f1fcb0684639130103536b568dfa607110e02d26cd5 WatchSource:0}: Error finding container a27ae3750504dcd2a2f63f1fcb0684639130103536b568dfa607110e02d26cd5: Status 404 returned error can't find the container with id a27ae3750504dcd2a2f63f1fcb0684639130103536b568dfa607110e02d26cd5 Jan 22 07:31:29 crc kubenswrapper[4933]: I0122 07:31:29.435364 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-697444475f-jv7mp"] Jan 22 07:31:29 crc kubenswrapper[4933]: I0122 07:31:29.613462 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-697444475f-hzfjv"] Jan 22 07:31:29 crc kubenswrapper[4933]: I0122 07:31:29.823511 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-2n7hh"] Jan 22 07:31:29 crc kubenswrapper[4933]: I0122 07:31:29.960479 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-5b8t8"] Jan 22 07:31:30 crc kubenswrapper[4933]: I0122 07:31:30.314315 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-jxw4c" event={"ID":"a79423c3-5348-464f-9866-425625f2a8f1","Type":"ContainerStarted","Data":"b155abf6825869212b06158aa2f245405803280ef0f5ea57fb65ab8231bd59f3"} Jan 22 07:31:30 crc kubenswrapper[4933]: I0122 07:31:30.315385 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-697444475f-hzfjv" event={"ID":"f4cb4026-e4bf-45f2-9373-71f24b0ecd16","Type":"ContainerStarted","Data":"f0ae5052dc82c9b5c5275c5ffe42afdc99d4b4cb9bf3f4e0285c0602f929200a"} Jan 22 07:31:30 crc kubenswrapper[4933]: I0122 07:31:30.317309 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5bf474d74f-5b8t8" event={"ID":"cc705ce5-1fcd-4528-b638-c5e1d062c035","Type":"ContainerStarted","Data":"b805fcf6558c2c6ef8992babf3dcde70d88f69cb1d98e269e22882a483b1604b"} Jan 22 07:31:30 crc kubenswrapper[4933]: I0122 07:31:30.319098 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-697444475f-jv7mp" event={"ID":"f6675820-e9ac-4e84-8e2f-2971304513ea","Type":"ContainerStarted","Data":"a27ae3750504dcd2a2f63f1fcb0684639130103536b568dfa607110e02d26cd5"} Jan 22 07:31:30 crc kubenswrapper[4933]: I0122 07:31:30.320753 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-59bdc8b94-2n7hh" event={"ID":"1ec7b8e4-e17f-4d56-8f1c-b04751ad3bf8","Type":"ContainerStarted","Data":"bd3cd50d8e28e0c0a2897bfd377c388fed5a711ebde234706c316e8a6ef363bb"} Jan 22 07:31:40 crc kubenswrapper[4933]: I0122 07:31:40.057427 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-6182-account-create-update-5kkbv"] Jan 22 07:31:40 crc kubenswrapper[4933]: I0122 07:31:40.066012 4933 kubelet.go:2437] 
"SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-5z6vc"] Jan 22 07:31:40 crc kubenswrapper[4933]: I0122 07:31:40.080434 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-6182-account-create-update-5kkbv"] Jan 22 07:31:40 crc kubenswrapper[4933]: I0122 07:31:40.103869 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-5z6vc"] Jan 22 07:31:40 crc kubenswrapper[4933]: I0122 07:31:40.501180 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00647053-41cd-46da-8ca5-c85caba22fa1" path="/var/lib/kubelet/pods/00647053-41cd-46da-8ca5-c85caba22fa1/volumes" Jan 22 07:31:40 crc kubenswrapper[4933]: I0122 07:31:40.501823 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e0b51acb-e0fb-43c4-a533-bdd60e20b081" path="/var/lib/kubelet/pods/e0b51acb-e0fb-43c4-a533-bdd60e20b081/volumes" Jan 22 07:31:43 crc kubenswrapper[4933]: I0122 07:31:43.457367 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-jxw4c" event={"ID":"a79423c3-5348-464f-9866-425625f2a8f1","Type":"ContainerStarted","Data":"c748e729685b69651148e114a98f3b330627784b72efa89a2ade1752c323a40c"} Jan 22 07:31:43 crc kubenswrapper[4933]: I0122 07:31:43.459820 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-697444475f-hzfjv" event={"ID":"f4cb4026-e4bf-45f2-9373-71f24b0ecd16","Type":"ContainerStarted","Data":"fec5b10e0dd62dd768c40ef3a3856e87d453b6ff8bb5a3961a0fd725f9cd7e9f"} Jan 22 07:31:43 crc kubenswrapper[4933]: I0122 07:31:43.461897 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5bf474d74f-5b8t8" event={"ID":"cc705ce5-1fcd-4528-b638-c5e1d062c035","Type":"ContainerStarted","Data":"ce71f0b5bea58b5329504d19e1bac91e7fb1f35989ba17d26ee9bf974b5d72f2"} Jan 22 07:31:43 crc kubenswrapper[4933]: I0122 07:31:43.462636 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-5bf474d74f-5b8t8" Jan 22 07:31:43 crc kubenswrapper[4933]: I0122 07:31:43.464103 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-697444475f-jv7mp" event={"ID":"f6675820-e9ac-4e84-8e2f-2971304513ea","Type":"ContainerStarted","Data":"1ca4571bec9b98394558f7a7be2e7785f052e171a593b5b755e7a988feb0c1a5"} Jan 22 07:31:43 crc kubenswrapper[4933]: I0122 07:31:43.465552 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-59bdc8b94-2n7hh" event={"ID":"1ec7b8e4-e17f-4d56-8f1c-b04751ad3bf8","Type":"ContainerStarted","Data":"782bf777a08ae1c9fe78ca1c28fbb0941836e27e4fefc7e9647100a787941bc0"} Jan 22 07:31:43 crc kubenswrapper[4933]: I0122 07:31:43.466037 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-59bdc8b94-2n7hh" Jan 22 07:31:43 crc kubenswrapper[4933]: I0122 07:31:43.469443 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-59bdc8b94-2n7hh" Jan 22 07:31:43 crc kubenswrapper[4933]: I0122 07:31:43.477618 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-jxw4c" podStartSLOduration=2.974601092 podStartE2EDuration="15.47760432s" podCreationTimestamp="2026-01-22 07:31:28 +0000 UTC" 
firstStartedPulling="2026-01-22 07:31:29.42579806 +0000 UTC m=+6337.262923403" lastFinishedPulling="2026-01-22 07:31:41.928801268 +0000 UTC m=+6349.765926631" observedRunningTime="2026-01-22 07:31:43.474190307 +0000 UTC m=+6351.311315660" watchObservedRunningTime="2026-01-22 07:31:43.47760432 +0000 UTC m=+6351.314729673" Jan 22 07:31:43 crc kubenswrapper[4933]: I0122 07:31:43.502038 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-697444475f-hzfjv" podStartSLOduration=3.202963936 podStartE2EDuration="15.502011774s" podCreationTimestamp="2026-01-22 07:31:28 +0000 UTC" firstStartedPulling="2026-01-22 07:31:29.62731073 +0000 UTC m=+6337.464436083" lastFinishedPulling="2026-01-22 07:31:41.926358568 +0000 UTC m=+6349.763483921" observedRunningTime="2026-01-22 07:31:43.493976029 +0000 UTC m=+6351.331101402" watchObservedRunningTime="2026-01-22 07:31:43.502011774 +0000 UTC m=+6351.339137127" Jan 22 07:31:43 crc kubenswrapper[4933]: I0122 07:31:43.541521 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-5bf474d74f-5b8t8" podStartSLOduration=3.60364224 podStartE2EDuration="15.541506427s" podCreationTimestamp="2026-01-22 07:31:28 +0000 UTC" firstStartedPulling="2026-01-22 07:31:29.989284601 +0000 UTC m=+6337.826409964" lastFinishedPulling="2026-01-22 07:31:41.927148798 +0000 UTC m=+6349.764274151" observedRunningTime="2026-01-22 07:31:43.53752013 +0000 UTC m=+6351.374645483" watchObservedRunningTime="2026-01-22 07:31:43.541506427 +0000 UTC m=+6351.378631780" Jan 22 07:31:43 crc kubenswrapper[4933]: I0122 07:31:43.569868 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-697444475f-jv7mp" podStartSLOduration=3.120683512 podStartE2EDuration="15.569849718s" podCreationTimestamp="2026-01-22 07:31:28 +0000 UTC" firstStartedPulling="2026-01-22 07:31:29.454596142 +0000 UTC m=+6337.291721495" lastFinishedPulling="2026-01-22 07:31:41.903762338 +0000 UTC m=+6349.740887701" observedRunningTime="2026-01-22 07:31:43.562515959 +0000 UTC m=+6351.399641312" watchObservedRunningTime="2026-01-22 07:31:43.569849718 +0000 UTC m=+6351.406975071" Jan 22 07:31:43 crc kubenswrapper[4933]: I0122 07:31:43.618324 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-operator-59bdc8b94-2n7hh" podStartSLOduration=3.061886238 podStartE2EDuration="15.618308118s" podCreationTimestamp="2026-01-22 07:31:28 +0000 UTC" firstStartedPulling="2026-01-22 07:31:29.855252595 +0000 UTC m=+6337.692377948" lastFinishedPulling="2026-01-22 07:31:42.411674485 +0000 UTC m=+6350.248799828" observedRunningTime="2026-01-22 07:31:43.615054819 +0000 UTC m=+6351.452180182" watchObservedRunningTime="2026-01-22 07:31:43.618308118 +0000 UTC m=+6351.455433471" Jan 22 07:31:49 crc kubenswrapper[4933]: I0122 07:31:49.181557 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-5bf474d74f-5b8t8" Jan 22 07:31:51 crc kubenswrapper[4933]: I0122 07:31:51.045211 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-xt5hm"] Jan 22 07:31:51 crc kubenswrapper[4933]: I0122 07:31:51.053333 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-xt5hm"] Jan 22 07:31:51 crc kubenswrapper[4933]: I0122 07:31:51.935229 4933 kubelet.go:2437] "SyncLoop 
DELETE" source="api" pods=["openstack/openstackclient"] Jan 22 07:31:51 crc kubenswrapper[4933]: I0122 07:31:51.935533 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstackclient" podUID="35e35bac-ac97-4f82-8358-218c35ada9a5" containerName="openstackclient" containerID="cri-o://52a30d5525917fa1ffcbe11dce16fb1f52d3e80dd2aacf9785848d1522c30246" gracePeriod=2 Jan 22 07:31:51 crc kubenswrapper[4933]: I0122 07:31:51.948411 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Jan 22 07:31:52 crc kubenswrapper[4933]: I0122 07:31:52.002098 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Jan 22 07:31:52 crc kubenswrapper[4933]: E0122 07:31:52.002518 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35e35bac-ac97-4f82-8358-218c35ada9a5" containerName="openstackclient" Jan 22 07:31:52 crc kubenswrapper[4933]: I0122 07:31:52.002534 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="35e35bac-ac97-4f82-8358-218c35ada9a5" containerName="openstackclient" Jan 22 07:31:52 crc kubenswrapper[4933]: I0122 07:31:52.002781 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="35e35bac-ac97-4f82-8358-218c35ada9a5" containerName="openstackclient" Jan 22 07:31:52 crc kubenswrapper[4933]: I0122 07:31:52.003468 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 22 07:31:52 crc kubenswrapper[4933]: I0122 07:31:52.021655 4933 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="35e35bac-ac97-4f82-8358-218c35ada9a5" podUID="21b201de-5a21-4492-ae8c-7cef39a89972" Jan 22 07:31:52 crc kubenswrapper[4933]: I0122 07:31:52.029610 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 22 07:31:52 crc kubenswrapper[4933]: I0122 07:31:52.121490 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sq8gx\" (UniqueName: \"kubernetes.io/projected/21b201de-5a21-4492-ae8c-7cef39a89972-kube-api-access-sq8gx\") pod \"openstackclient\" (UID: \"21b201de-5a21-4492-ae8c-7cef39a89972\") " pod="openstack/openstackclient" Jan 22 07:31:52 crc kubenswrapper[4933]: I0122 07:31:52.121727 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/21b201de-5a21-4492-ae8c-7cef39a89972-openstack-config\") pod \"openstackclient\" (UID: \"21b201de-5a21-4492-ae8c-7cef39a89972\") " pod="openstack/openstackclient" Jan 22 07:31:52 crc kubenswrapper[4933]: I0122 07:31:52.121754 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/21b201de-5a21-4492-ae8c-7cef39a89972-openstack-config-secret\") pod \"openstackclient\" (UID: \"21b201de-5a21-4492-ae8c-7cef39a89972\") " pod="openstack/openstackclient" Jan 22 07:31:52 crc kubenswrapper[4933]: I0122 07:31:52.121796 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21b201de-5a21-4492-ae8c-7cef39a89972-combined-ca-bundle\") pod \"openstackclient\" (UID: \"21b201de-5a21-4492-ae8c-7cef39a89972\") " pod="openstack/openstackclient" Jan 22 07:31:52 crc kubenswrapper[4933]: I0122 07:31:52.207016 4933 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Jan 22 07:31:52 crc kubenswrapper[4933]: I0122 07:31:52.209290 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 22 07:31:52 crc kubenswrapper[4933]: I0122 07:31:52.220510 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-9gflv" Jan 22 07:31:52 crc kubenswrapper[4933]: I0122 07:31:52.220580 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 22 07:31:52 crc kubenswrapper[4933]: I0122 07:31:52.223211 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/21b201de-5a21-4492-ae8c-7cef39a89972-openstack-config\") pod \"openstackclient\" (UID: \"21b201de-5a21-4492-ae8c-7cef39a89972\") " pod="openstack/openstackclient" Jan 22 07:31:52 crc kubenswrapper[4933]: I0122 07:31:52.223263 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/21b201de-5a21-4492-ae8c-7cef39a89972-openstack-config-secret\") pod \"openstackclient\" (UID: \"21b201de-5a21-4492-ae8c-7cef39a89972\") " pod="openstack/openstackclient" Jan 22 07:31:52 crc kubenswrapper[4933]: I0122 07:31:52.223313 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21b201de-5a21-4492-ae8c-7cef39a89972-combined-ca-bundle\") pod \"openstackclient\" (UID: \"21b201de-5a21-4492-ae8c-7cef39a89972\") " pod="openstack/openstackclient" Jan 22 07:31:52 crc kubenswrapper[4933]: I0122 07:31:52.223376 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sq8gx\" (UniqueName: \"kubernetes.io/projected/21b201de-5a21-4492-ae8c-7cef39a89972-kube-api-access-sq8gx\") pod \"openstackclient\" (UID: \"21b201de-5a21-4492-ae8c-7cef39a89972\") " pod="openstack/openstackclient" Jan 22 07:31:52 crc kubenswrapper[4933]: I0122 07:31:52.224766 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/21b201de-5a21-4492-ae8c-7cef39a89972-openstack-config\") pod \"openstackclient\" (UID: \"21b201de-5a21-4492-ae8c-7cef39a89972\") " pod="openstack/openstackclient" Jan 22 07:31:52 crc kubenswrapper[4933]: I0122 07:31:52.230949 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/21b201de-5a21-4492-ae8c-7cef39a89972-openstack-config-secret\") pod \"openstackclient\" (UID: \"21b201de-5a21-4492-ae8c-7cef39a89972\") " pod="openstack/openstackclient" Jan 22 07:31:52 crc kubenswrapper[4933]: I0122 07:31:52.250679 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21b201de-5a21-4492-ae8c-7cef39a89972-combined-ca-bundle\") pod \"openstackclient\" (UID: \"21b201de-5a21-4492-ae8c-7cef39a89972\") " pod="openstack/openstackclient" Jan 22 07:31:52 crc kubenswrapper[4933]: I0122 07:31:52.279872 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sq8gx\" (UniqueName: \"kubernetes.io/projected/21b201de-5a21-4492-ae8c-7cef39a89972-kube-api-access-sq8gx\") pod \"openstackclient\" (UID: \"21b201de-5a21-4492-ae8c-7cef39a89972\") " pod="openstack/openstackclient" Jan 22 
07:31:52 crc kubenswrapper[4933]: I0122 07:31:52.326321 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jr29t\" (UniqueName: \"kubernetes.io/projected/0143bbca-b0b4-47c0-8c6a-4088fdc688a8-kube-api-access-jr29t\") pod \"kube-state-metrics-0\" (UID: \"0143bbca-b0b4-47c0-8c6a-4088fdc688a8\") " pod="openstack/kube-state-metrics-0" Jan 22 07:31:52 crc kubenswrapper[4933]: I0122 07:31:52.358920 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 22 07:31:52 crc kubenswrapper[4933]: I0122 07:31:52.428545 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jr29t\" (UniqueName: \"kubernetes.io/projected/0143bbca-b0b4-47c0-8c6a-4088fdc688a8-kube-api-access-jr29t\") pod \"kube-state-metrics-0\" (UID: \"0143bbca-b0b4-47c0-8c6a-4088fdc688a8\") " pod="openstack/kube-state-metrics-0" Jan 22 07:31:52 crc kubenswrapper[4933]: I0122 07:31:52.480985 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jr29t\" (UniqueName: \"kubernetes.io/projected/0143bbca-b0b4-47c0-8c6a-4088fdc688a8-kube-api-access-jr29t\") pod \"kube-state-metrics-0\" (UID: \"0143bbca-b0b4-47c0-8c6a-4088fdc688a8\") " pod="openstack/kube-state-metrics-0" Jan 22 07:31:52 crc kubenswrapper[4933]: I0122 07:31:52.534257 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="adfa05f5-5e61-4cc6-8879-2d298697becf" path="/var/lib/kubelet/pods/adfa05f5-5e61-4cc6-8879-2d298697becf/volumes" Jan 22 07:31:52 crc kubenswrapper[4933]: I0122 07:31:52.567342 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-9gflv" Jan 22 07:31:52 crc kubenswrapper[4933]: I0122 07:31:52.575217 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.051156 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/alertmanager-metric-storage-0"] Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.055718 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/alertmanager-metric-storage-0" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.062946 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-cluster-tls-config" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.063044 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-tls-assets-0" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.063173 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-web-config" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.063257 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-generated" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.066940 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-alertmanager-dockercfg-dcff7" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.087185 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/alertmanager-metric-storage-0"] Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.166454 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/d1dd44ad-b83c-4130-8d7f-d42bca2a3113-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"d1dd44ad-b83c-4130-8d7f-d42bca2a3113\") " pod="openstack/alertmanager-metric-storage-0" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.166529 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/d1dd44ad-b83c-4130-8d7f-d42bca2a3113-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"d1dd44ad-b83c-4130-8d7f-d42bca2a3113\") " pod="openstack/alertmanager-metric-storage-0" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.166568 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g5926\" (UniqueName: \"kubernetes.io/projected/d1dd44ad-b83c-4130-8d7f-d42bca2a3113-kube-api-access-g5926\") pod \"alertmanager-metric-storage-0\" (UID: \"d1dd44ad-b83c-4130-8d7f-d42bca2a3113\") " pod="openstack/alertmanager-metric-storage-0" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.166650 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/d1dd44ad-b83c-4130-8d7f-d42bca2a3113-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"d1dd44ad-b83c-4130-8d7f-d42bca2a3113\") " pod="openstack/alertmanager-metric-storage-0" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.166683 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/d1dd44ad-b83c-4130-8d7f-d42bca2a3113-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"d1dd44ad-b83c-4130-8d7f-d42bca2a3113\") " pod="openstack/alertmanager-metric-storage-0" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.166699 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/d1dd44ad-b83c-4130-8d7f-d42bca2a3113-config-out\") pod 
\"alertmanager-metric-storage-0\" (UID: \"d1dd44ad-b83c-4130-8d7f-d42bca2a3113\") " pod="openstack/alertmanager-metric-storage-0" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.166728 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/d1dd44ad-b83c-4130-8d7f-d42bca2a3113-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"d1dd44ad-b83c-4130-8d7f-d42bca2a3113\") " pod="openstack/alertmanager-metric-storage-0" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.268443 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/d1dd44ad-b83c-4130-8d7f-d42bca2a3113-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"d1dd44ad-b83c-4130-8d7f-d42bca2a3113\") " pod="openstack/alertmanager-metric-storage-0" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.268530 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/d1dd44ad-b83c-4130-8d7f-d42bca2a3113-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"d1dd44ad-b83c-4130-8d7f-d42bca2a3113\") " pod="openstack/alertmanager-metric-storage-0" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.268556 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/d1dd44ad-b83c-4130-8d7f-d42bca2a3113-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"d1dd44ad-b83c-4130-8d7f-d42bca2a3113\") " pod="openstack/alertmanager-metric-storage-0" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.268603 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/d1dd44ad-b83c-4130-8d7f-d42bca2a3113-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"d1dd44ad-b83c-4130-8d7f-d42bca2a3113\") " pod="openstack/alertmanager-metric-storage-0" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.268741 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/d1dd44ad-b83c-4130-8d7f-d42bca2a3113-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"d1dd44ad-b83c-4130-8d7f-d42bca2a3113\") " pod="openstack/alertmanager-metric-storage-0" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.268773 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/d1dd44ad-b83c-4130-8d7f-d42bca2a3113-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"d1dd44ad-b83c-4130-8d7f-d42bca2a3113\") " pod="openstack/alertmanager-metric-storage-0" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.268812 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g5926\" (UniqueName: \"kubernetes.io/projected/d1dd44ad-b83c-4130-8d7f-d42bca2a3113-kube-api-access-g5926\") pod \"alertmanager-metric-storage-0\" (UID: \"d1dd44ad-b83c-4130-8d7f-d42bca2a3113\") " pod="openstack/alertmanager-metric-storage-0" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.285762 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/d1dd44ad-b83c-4130-8d7f-d42bca2a3113-web-config\") pod 
\"alertmanager-metric-storage-0\" (UID: \"d1dd44ad-b83c-4130-8d7f-d42bca2a3113\") " pod="openstack/alertmanager-metric-storage-0" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.291480 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/d1dd44ad-b83c-4130-8d7f-d42bca2a3113-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"d1dd44ad-b83c-4130-8d7f-d42bca2a3113\") " pod="openstack/alertmanager-metric-storage-0" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.291769 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/d1dd44ad-b83c-4130-8d7f-d42bca2a3113-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"d1dd44ad-b83c-4130-8d7f-d42bca2a3113\") " pod="openstack/alertmanager-metric-storage-0" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.293876 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/d1dd44ad-b83c-4130-8d7f-d42bca2a3113-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"d1dd44ad-b83c-4130-8d7f-d42bca2a3113\") " pod="openstack/alertmanager-metric-storage-0" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.295593 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/d1dd44ad-b83c-4130-8d7f-d42bca2a3113-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"d1dd44ad-b83c-4130-8d7f-d42bca2a3113\") " pod="openstack/alertmanager-metric-storage-0" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.295962 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/d1dd44ad-b83c-4130-8d7f-d42bca2a3113-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"d1dd44ad-b83c-4130-8d7f-d42bca2a3113\") " pod="openstack/alertmanager-metric-storage-0" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.314910 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g5926\" (UniqueName: \"kubernetes.io/projected/d1dd44ad-b83c-4130-8d7f-d42bca2a3113-kube-api-access-g5926\") pod \"alertmanager-metric-storage-0\" (UID: \"d1dd44ad-b83c-4130-8d7f-d42bca2a3113\") " pod="openstack/alertmanager-metric-storage-0" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.395409 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.428776 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/alertmanager-metric-storage-0" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.602426 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.612406 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"21b201de-5a21-4492-ae8c-7cef39a89972","Type":"ContainerStarted","Data":"b0712a62f0fef1b3f0e8b118ab69df5a6c7e55d420ce010f71613f635eaf5120"} Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.812132 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.814610 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.825550 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.825831 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.825951 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-1" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.826104 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-2" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.826240 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-8xvpr" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.827810 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.828127 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.846832 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.870152 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.903324 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-66fa1f70-96e8-49d8-b7b8-810dd338947b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-66fa1f70-96e8-49d8-b7b8-810dd338947b\") pod \"prometheus-metric-storage-0\" (UID: \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.903414 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.903470 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.903516 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-config\") pod \"prometheus-metric-storage-0\" (UID: \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.903542 4933 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.903622 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.903690 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.903713 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.903742 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:31:53 crc kubenswrapper[4933]: I0122 07:31:53.903794 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dbr2m\" (UniqueName: \"kubernetes.io/projected/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-kube-api-access-dbr2m\") pod \"prometheus-metric-storage-0\" (UID: \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.005374 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.005432 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.005460 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-config\") pod \"prometheus-metric-storage-0\" 
(UID: \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.005477 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.005529 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.005573 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.005590 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.005608 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.005643 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dbr2m\" (UniqueName: \"kubernetes.io/projected/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-kube-api-access-dbr2m\") pod \"prometheus-metric-storage-0\" (UID: \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.005683 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-66fa1f70-96e8-49d8-b7b8-810dd338947b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-66fa1f70-96e8-49d8-b7b8-810dd338947b\") pod \"prometheus-metric-storage-0\" (UID: \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.018664 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.019215 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: 
\"kubernetes.io/configmap/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.049616 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.117414 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.119516 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-config\") pod \"prometheus-metric-storage-0\" (UID: \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.127853 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.128919 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.129498 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dbr2m\" (UniqueName: \"kubernetes.io/projected/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-kube-api-access-dbr2m\") pod \"prometheus-metric-storage-0\" (UID: \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.138695 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.153552 4933 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.153812 4933 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-66fa1f70-96e8-49d8-b7b8-810dd338947b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-66fa1f70-96e8-49d8-b7b8-810dd338947b\") pod \"prometheus-metric-storage-0\" (UID: \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/48f41e36b639b6ad7130d50b4930f9463389b7725003e1b861dcccda0a87916e/globalmount\"" pod="openstack/prometheus-metric-storage-0" Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.285666 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/alertmanager-metric-storage-0"] Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.411138 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-66fa1f70-96e8-49d8-b7b8-810dd338947b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-66fa1f70-96e8-49d8-b7b8-810dd338947b\") pod \"prometheus-metric-storage-0\" (UID: \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.525853 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.623382 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"21b201de-5a21-4492-ae8c-7cef39a89972","Type":"ContainerStarted","Data":"3fda0c6df071902d2252a8767a0a8984e245c44a9fa8472a37c0c54a0c09cbb4"} Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.636809 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"d1dd44ad-b83c-4130-8d7f-d42bca2a3113","Type":"ContainerStarted","Data":"e1de9a1aaca87999431ffecc0a28c3be08a99c2cdb4f78042e401f34e1b5f0be"} Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.639185 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"0143bbca-b0b4-47c0-8c6a-4088fdc688a8","Type":"ContainerStarted","Data":"8b05025130a8183f4899481489d5d8545ac4d7bf9b618cb4443f45f6365270ff"} Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.644908 4933 generic.go:334] "Generic (PLEG): container finished" podID="35e35bac-ac97-4f82-8358-218c35ada9a5" containerID="52a30d5525917fa1ffcbe11dce16fb1f52d3e80dd2aacf9785848d1522c30246" exitCode=137 Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.647113 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=3.647096132 podStartE2EDuration="3.647096132s" podCreationTimestamp="2026-01-22 07:31:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:31:54.64004724 +0000 UTC m=+6362.477172593" watchObservedRunningTime="2026-01-22 07:31:54.647096132 +0000 UTC m=+6362.484221485" Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.707826 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.710865 4933 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="35e35bac-ac97-4f82-8358-218c35ada9a5" podUID="21b201de-5a21-4492-ae8c-7cef39a89972" Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.838188 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35e35bac-ac97-4f82-8358-218c35ada9a5-combined-ca-bundle\") pod \"35e35bac-ac97-4f82-8358-218c35ada9a5\" (UID: \"35e35bac-ac97-4f82-8358-218c35ada9a5\") " Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.838631 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r7l7t\" (UniqueName: \"kubernetes.io/projected/35e35bac-ac97-4f82-8358-218c35ada9a5-kube-api-access-r7l7t\") pod \"35e35bac-ac97-4f82-8358-218c35ada9a5\" (UID: \"35e35bac-ac97-4f82-8358-218c35ada9a5\") " Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.838699 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/35e35bac-ac97-4f82-8358-218c35ada9a5-openstack-config\") pod \"35e35bac-ac97-4f82-8358-218c35ada9a5\" (UID: \"35e35bac-ac97-4f82-8358-218c35ada9a5\") " Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.838841 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/35e35bac-ac97-4f82-8358-218c35ada9a5-openstack-config-secret\") pod \"35e35bac-ac97-4f82-8358-218c35ada9a5\" (UID: \"35e35bac-ac97-4f82-8358-218c35ada9a5\") " Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.853384 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/35e35bac-ac97-4f82-8358-218c35ada9a5-kube-api-access-r7l7t" (OuterVolumeSpecName: "kube-api-access-r7l7t") pod "35e35bac-ac97-4f82-8358-218c35ada9a5" (UID: "35e35bac-ac97-4f82-8358-218c35ada9a5"). InnerVolumeSpecName "kube-api-access-r7l7t". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.902583 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/35e35bac-ac97-4f82-8358-218c35ada9a5-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "35e35bac-ac97-4f82-8358-218c35ada9a5" (UID: "35e35bac-ac97-4f82-8358-218c35ada9a5"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.906296 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/35e35bac-ac97-4f82-8358-218c35ada9a5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "35e35bac-ac97-4f82-8358-218c35ada9a5" (UID: "35e35bac-ac97-4f82-8358-218c35ada9a5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.934511 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/35e35bac-ac97-4f82-8358-218c35ada9a5-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "35e35bac-ac97-4f82-8358-218c35ada9a5" (UID: "35e35bac-ac97-4f82-8358-218c35ada9a5"). 
InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.942967 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35e35bac-ac97-4f82-8358-218c35ada9a5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.943023 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r7l7t\" (UniqueName: \"kubernetes.io/projected/35e35bac-ac97-4f82-8358-218c35ada9a5-kube-api-access-r7l7t\") on node \"crc\" DevicePath \"\"" Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.943033 4933 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/35e35bac-ac97-4f82-8358-218c35ada9a5-openstack-config\") on node \"crc\" DevicePath \"\"" Jan 22 07:31:54 crc kubenswrapper[4933]: I0122 07:31:54.943042 4933 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/35e35bac-ac97-4f82-8358-218c35ada9a5-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Jan 22 07:31:55 crc kubenswrapper[4933]: I0122 07:31:55.131551 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 22 07:31:55 crc kubenswrapper[4933]: W0122 07:31:55.132444 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7856abfd_5a56_4dd4_9be1_21f176ffe4b6.slice/crio-9533febaeefb505052cb85a9e676d4c88f4c5af8da74a516ff1982361113fcde WatchSource:0}: Error finding container 9533febaeefb505052cb85a9e676d4c88f4c5af8da74a516ff1982361113fcde: Status 404 returned error can't find the container with id 9533febaeefb505052cb85a9e676d4c88f4c5af8da74a516ff1982361113fcde Jan 22 07:31:55 crc kubenswrapper[4933]: I0122 07:31:55.655913 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 22 07:31:55 crc kubenswrapper[4933]: I0122 07:31:55.655936 4933 scope.go:117] "RemoveContainer" containerID="52a30d5525917fa1ffcbe11dce16fb1f52d3e80dd2aacf9785848d1522c30246" Jan 22 07:31:55 crc kubenswrapper[4933]: I0122 07:31:55.657541 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"7856abfd-5a56-4dd4-9be1-21f176ffe4b6","Type":"ContainerStarted","Data":"9533febaeefb505052cb85a9e676d4c88f4c5af8da74a516ff1982361113fcde"} Jan 22 07:31:55 crc kubenswrapper[4933]: I0122 07:31:55.661275 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"0143bbca-b0b4-47c0-8c6a-4088fdc688a8","Type":"ContainerStarted","Data":"8d8b92233ede147874ef163b3aaee4086a656d61d8851c2a5ed341e3a35ff7de"} Jan 22 07:31:55 crc kubenswrapper[4933]: I0122 07:31:55.661540 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 22 07:31:55 crc kubenswrapper[4933]: I0122 07:31:55.661747 4933 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="35e35bac-ac97-4f82-8358-218c35ada9a5" podUID="21b201de-5a21-4492-ae8c-7cef39a89972" Jan 22 07:31:55 crc kubenswrapper[4933]: I0122 07:31:55.683035 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.801377522 podStartE2EDuration="3.683014476s" podCreationTimestamp="2026-01-22 07:31:52 +0000 UTC" firstStartedPulling="2026-01-22 07:31:53.634112358 +0000 UTC m=+6361.471237711" lastFinishedPulling="2026-01-22 07:31:54.515749312 +0000 UTC m=+6362.352874665" observedRunningTime="2026-01-22 07:31:55.682711478 +0000 UTC m=+6363.519836831" watchObservedRunningTime="2026-01-22 07:31:55.683014476 +0000 UTC m=+6363.520139829" Jan 22 07:31:55 crc kubenswrapper[4933]: I0122 07:31:55.685844 4933 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="35e35bac-ac97-4f82-8358-218c35ada9a5" podUID="21b201de-5a21-4492-ae8c-7cef39a89972" Jan 22 07:31:56 crc kubenswrapper[4933]: I0122 07:31:56.519374 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="35e35bac-ac97-4f82-8358-218c35ada9a5" path="/var/lib/kubelet/pods/35e35bac-ac97-4f82-8358-218c35ada9a5/volumes" Jan 22 07:32:00 crc kubenswrapper[4933]: I0122 07:32:00.715129 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"d1dd44ad-b83c-4130-8d7f-d42bca2a3113","Type":"ContainerStarted","Data":"2a448de302eba8b2f326b382e8466a437c1e2e173143ddbf1a1076cf83836bbb"} Jan 22 07:32:01 crc kubenswrapper[4933]: I0122 07:32:01.728662 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"7856abfd-5a56-4dd4-9be1-21f176ffe4b6","Type":"ContainerStarted","Data":"3b904712a2c4325e777a7ddca16a1f4aa3aec58706a8508077b526246b3cef87"} Jan 22 07:32:02 crc kubenswrapper[4933]: I0122 07:32:02.580449 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 22 07:32:06 crc kubenswrapper[4933]: I0122 07:32:06.531470 4933 scope.go:117] "RemoveContainer" containerID="864b928f4ac7e129ad8497ee7751c0b7b4094b4a7fa6e582f926dbf1c0287ba8" Jan 22 07:32:06 crc kubenswrapper[4933]: I0122 07:32:06.565361 4933 scope.go:117] "RemoveContainer" 
containerID="77b387574fb21c0b4f690e5c746ffd27323bb7355a884140f938fe4da4b9a95c" Jan 22 07:32:06 crc kubenswrapper[4933]: I0122 07:32:06.624335 4933 scope.go:117] "RemoveContainer" containerID="d3317c261af8a1169715f5844843b1cbb6549d86a511a7e28aab6eda8c5adbfb" Jan 22 07:32:06 crc kubenswrapper[4933]: I0122 07:32:06.674023 4933 scope.go:117] "RemoveContainer" containerID="f9c77645df4c0d171284c077c8d127a11e7c9a134e382b9c2a54b517e00e863b" Jan 22 07:32:06 crc kubenswrapper[4933]: I0122 07:32:06.797207 4933 generic.go:334] "Generic (PLEG): container finished" podID="7856abfd-5a56-4dd4-9be1-21f176ffe4b6" containerID="3b904712a2c4325e777a7ddca16a1f4aa3aec58706a8508077b526246b3cef87" exitCode=0 Jan 22 07:32:06 crc kubenswrapper[4933]: I0122 07:32:06.797280 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"7856abfd-5a56-4dd4-9be1-21f176ffe4b6","Type":"ContainerDied","Data":"3b904712a2c4325e777a7ddca16a1f4aa3aec58706a8508077b526246b3cef87"} Jan 22 07:32:06 crc kubenswrapper[4933]: I0122 07:32:06.805210 4933 scope.go:117] "RemoveContainer" containerID="25bd4394692a554f3012bc97b2acde6aed5e4f34547a4eee1d724b8560e9c010" Jan 22 07:32:06 crc kubenswrapper[4933]: I0122 07:32:06.857940 4933 scope.go:117] "RemoveContainer" containerID="1ad721049ec2aa2dd596c4a27d5a9689bf2452ffb9cc7625611aabd67f515f00" Jan 22 07:32:07 crc kubenswrapper[4933]: I0122 07:32:07.840996 4933 generic.go:334] "Generic (PLEG): container finished" podID="d1dd44ad-b83c-4130-8d7f-d42bca2a3113" containerID="2a448de302eba8b2f326b382e8466a437c1e2e173143ddbf1a1076cf83836bbb" exitCode=0 Jan 22 07:32:07 crc kubenswrapper[4933]: I0122 07:32:07.841066 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"d1dd44ad-b83c-4130-8d7f-d42bca2a3113","Type":"ContainerDied","Data":"2a448de302eba8b2f326b382e8466a437c1e2e173143ddbf1a1076cf83836bbb"} Jan 22 07:32:13 crc kubenswrapper[4933]: I0122 07:32:13.714238 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-vzrp7"] Jan 22 07:32:13 crc kubenswrapper[4933]: I0122 07:32:13.717614 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-vzrp7" Jan 22 07:32:13 crc kubenswrapper[4933]: I0122 07:32:13.726550 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vzrp7"] Jan 22 07:32:13 crc kubenswrapper[4933]: I0122 07:32:13.762687 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-77wxt\" (UniqueName: \"kubernetes.io/projected/40716a4c-9462-42af-9aea-1e01e79ceeb2-kube-api-access-77wxt\") pod \"redhat-operators-vzrp7\" (UID: \"40716a4c-9462-42af-9aea-1e01e79ceeb2\") " pod="openshift-marketplace/redhat-operators-vzrp7" Jan 22 07:32:13 crc kubenswrapper[4933]: I0122 07:32:13.763313 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40716a4c-9462-42af-9aea-1e01e79ceeb2-utilities\") pod \"redhat-operators-vzrp7\" (UID: \"40716a4c-9462-42af-9aea-1e01e79ceeb2\") " pod="openshift-marketplace/redhat-operators-vzrp7" Jan 22 07:32:13 crc kubenswrapper[4933]: I0122 07:32:13.763364 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40716a4c-9462-42af-9aea-1e01e79ceeb2-catalog-content\") pod \"redhat-operators-vzrp7\" (UID: \"40716a4c-9462-42af-9aea-1e01e79ceeb2\") " pod="openshift-marketplace/redhat-operators-vzrp7" Jan 22 07:32:13 crc kubenswrapper[4933]: I0122 07:32:13.864893 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40716a4c-9462-42af-9aea-1e01e79ceeb2-utilities\") pod \"redhat-operators-vzrp7\" (UID: \"40716a4c-9462-42af-9aea-1e01e79ceeb2\") " pod="openshift-marketplace/redhat-operators-vzrp7" Jan 22 07:32:13 crc kubenswrapper[4933]: I0122 07:32:13.864931 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40716a4c-9462-42af-9aea-1e01e79ceeb2-catalog-content\") pod \"redhat-operators-vzrp7\" (UID: \"40716a4c-9462-42af-9aea-1e01e79ceeb2\") " pod="openshift-marketplace/redhat-operators-vzrp7" Jan 22 07:32:13 crc kubenswrapper[4933]: I0122 07:32:13.865601 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40716a4c-9462-42af-9aea-1e01e79ceeb2-utilities\") pod \"redhat-operators-vzrp7\" (UID: \"40716a4c-9462-42af-9aea-1e01e79ceeb2\") " pod="openshift-marketplace/redhat-operators-vzrp7" Jan 22 07:32:13 crc kubenswrapper[4933]: I0122 07:32:13.865635 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40716a4c-9462-42af-9aea-1e01e79ceeb2-catalog-content\") pod \"redhat-operators-vzrp7\" (UID: \"40716a4c-9462-42af-9aea-1e01e79ceeb2\") " pod="openshift-marketplace/redhat-operators-vzrp7" Jan 22 07:32:13 crc kubenswrapper[4933]: I0122 07:32:13.865671 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-77wxt\" (UniqueName: \"kubernetes.io/projected/40716a4c-9462-42af-9aea-1e01e79ceeb2-kube-api-access-77wxt\") pod \"redhat-operators-vzrp7\" (UID: \"40716a4c-9462-42af-9aea-1e01e79ceeb2\") " pod="openshift-marketplace/redhat-operators-vzrp7" Jan 22 07:32:13 crc kubenswrapper[4933]: I0122 07:32:13.885687 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-77wxt\" (UniqueName: \"kubernetes.io/projected/40716a4c-9462-42af-9aea-1e01e79ceeb2-kube-api-access-77wxt\") pod \"redhat-operators-vzrp7\" (UID: \"40716a4c-9462-42af-9aea-1e01e79ceeb2\") " pod="openshift-marketplace/redhat-operators-vzrp7" Jan 22 07:32:14 crc kubenswrapper[4933]: I0122 07:32:14.046466 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vzrp7" Jan 22 07:32:17 crc kubenswrapper[4933]: I0122 07:32:17.855227 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vzrp7"] Jan 22 07:32:17 crc kubenswrapper[4933]: W0122 07:32:17.860215 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40716a4c_9462_42af_9aea_1e01e79ceeb2.slice/crio-3aef5953e71b53be4e5679f5d55fd90a56d45b14cf829120df957e971f3520fa WatchSource:0}: Error finding container 3aef5953e71b53be4e5679f5d55fd90a56d45b14cf829120df957e971f3520fa: Status 404 returned error can't find the container with id 3aef5953e71b53be4e5679f5d55fd90a56d45b14cf829120df957e971f3520fa Jan 22 07:32:17 crc kubenswrapper[4933]: I0122 07:32:17.938197 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"7856abfd-5a56-4dd4-9be1-21f176ffe4b6","Type":"ContainerStarted","Data":"73d7bbc62427eddff6c3304f7853b72aae4a1adbadb05536d46a20a64eab5dd1"} Jan 22 07:32:17 crc kubenswrapper[4933]: I0122 07:32:17.939276 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vzrp7" event={"ID":"40716a4c-9462-42af-9aea-1e01e79ceeb2","Type":"ContainerStarted","Data":"3aef5953e71b53be4e5679f5d55fd90a56d45b14cf829120df957e971f3520fa"} Jan 22 07:32:17 crc kubenswrapper[4933]: I0122 07:32:17.941732 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"d1dd44ad-b83c-4130-8d7f-d42bca2a3113","Type":"ContainerStarted","Data":"c23c8ccdbb8fdd8212bfbfa4d2308ec9a832b0403a22c44ff82da06d2f06bf2b"} Jan 22 07:32:18 crc kubenswrapper[4933]: I0122 07:32:18.955217 4933 generic.go:334] "Generic (PLEG): container finished" podID="40716a4c-9462-42af-9aea-1e01e79ceeb2" containerID="9ef81824d74ab4c9689f3150a27d369dbb3a3e60248e80ecb2bef2507796ed06" exitCode=0 Jan 22 07:32:18 crc kubenswrapper[4933]: I0122 07:32:18.955278 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vzrp7" event={"ID":"40716a4c-9462-42af-9aea-1e01e79ceeb2","Type":"ContainerDied","Data":"9ef81824d74ab4c9689f3150a27d369dbb3a3e60248e80ecb2bef2507796ed06"} Jan 22 07:32:19 crc kubenswrapper[4933]: I0122 07:32:19.966144 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vzrp7" event={"ID":"40716a4c-9462-42af-9aea-1e01e79ceeb2","Type":"ContainerStarted","Data":"f1305a14d669dd6ed9d8fe7d706166f6296fb138ccd592de6c235c2eeba46bef"} Jan 22 07:32:22 crc kubenswrapper[4933]: I0122 07:32:22.995865 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"7856abfd-5a56-4dd4-9be1-21f176ffe4b6","Type":"ContainerStarted","Data":"7a8f2c60856a03c115e036817a37d4e6d0c0efe44270ce1cae8841a995f06405"} Jan 22 07:32:22 crc kubenswrapper[4933]: I0122 07:32:22.998769 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" 
event={"ID":"d1dd44ad-b83c-4130-8d7f-d42bca2a3113","Type":"ContainerStarted","Data":"a7c49a5e01f7b21dccdc89f292c54b3699801b2f7d16b8c50e7d2aacdce58167"} Jan 22 07:32:23 crc kubenswrapper[4933]: I0122 07:32:23.000162 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/alertmanager-metric-storage-0" Jan 22 07:32:23 crc kubenswrapper[4933]: I0122 07:32:23.004284 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/alertmanager-metric-storage-0" Jan 22 07:32:23 crc kubenswrapper[4933]: I0122 07:32:23.028844 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/alertmanager-metric-storage-0" podStartSLOduration=8.127826459 podStartE2EDuration="31.02883008s" podCreationTimestamp="2026-01-22 07:31:52 +0000 UTC" firstStartedPulling="2026-01-22 07:31:54.511942639 +0000 UTC m=+6362.349067982" lastFinishedPulling="2026-01-22 07:32:17.41294625 +0000 UTC m=+6385.250071603" observedRunningTime="2026-01-22 07:32:23.021091141 +0000 UTC m=+6390.858216494" watchObservedRunningTime="2026-01-22 07:32:23.02883008 +0000 UTC m=+6390.865955433" Jan 22 07:32:27 crc kubenswrapper[4933]: I0122 07:32:27.052668 4933 generic.go:334] "Generic (PLEG): container finished" podID="40716a4c-9462-42af-9aea-1e01e79ceeb2" containerID="f1305a14d669dd6ed9d8fe7d706166f6296fb138ccd592de6c235c2eeba46bef" exitCode=0 Jan 22 07:32:27 crc kubenswrapper[4933]: I0122 07:32:27.052707 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vzrp7" event={"ID":"40716a4c-9462-42af-9aea-1e01e79ceeb2","Type":"ContainerDied","Data":"f1305a14d669dd6ed9d8fe7d706166f6296fb138ccd592de6c235c2eeba46bef"} Jan 22 07:32:29 crc kubenswrapper[4933]: I0122 07:32:29.077603 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"7856abfd-5a56-4dd4-9be1-21f176ffe4b6","Type":"ContainerStarted","Data":"a6236b947f9aeb5b29f8f41c2ae54b3bfae2777e7bfa0a4ee30b4fb292cee1db"} Jan 22 07:32:29 crc kubenswrapper[4933]: I0122 07:32:29.082632 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vzrp7" event={"ID":"40716a4c-9462-42af-9aea-1e01e79ceeb2","Type":"ContainerStarted","Data":"0e4643db2046516b294768cd2db4fc2fd773b5d18025ec88dbb912bf614dd3a9"} Jan 22 07:32:29 crc kubenswrapper[4933]: I0122 07:32:29.126972 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=4.70496664 podStartE2EDuration="37.126945852s" podCreationTimestamp="2026-01-22 07:31:52 +0000 UTC" firstStartedPulling="2026-01-22 07:31:55.143316345 +0000 UTC m=+6362.980441698" lastFinishedPulling="2026-01-22 07:32:27.565295547 +0000 UTC m=+6395.402420910" observedRunningTime="2026-01-22 07:32:29.102281451 +0000 UTC m=+6396.939406824" watchObservedRunningTime="2026-01-22 07:32:29.126945852 +0000 UTC m=+6396.964071205" Jan 22 07:32:29 crc kubenswrapper[4933]: I0122 07:32:29.150549 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-vzrp7" podStartSLOduration=6.729842279 podStartE2EDuration="16.150525406s" podCreationTimestamp="2026-01-22 07:32:13 +0000 UTC" firstStartedPulling="2026-01-22 07:32:18.957724024 +0000 UTC m=+6386.794849417" lastFinishedPulling="2026-01-22 07:32:28.378407191 +0000 UTC m=+6396.215532544" observedRunningTime="2026-01-22 07:32:29.126163543 +0000 UTC m=+6396.963288916" 
watchObservedRunningTime="2026-01-22 07:32:29.150525406 +0000 UTC m=+6396.987650759" Jan 22 07:32:29 crc kubenswrapper[4933]: I0122 07:32:29.526921 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:34 crc kubenswrapper[4933]: I0122 07:32:34.047636 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-vzrp7" Jan 22 07:32:34 crc kubenswrapper[4933]: I0122 07:32:34.048415 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-vzrp7" Jan 22 07:32:35 crc kubenswrapper[4933]: I0122 07:32:35.095721 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-vzrp7" podUID="40716a4c-9462-42af-9aea-1e01e79ceeb2" containerName="registry-server" probeResult="failure" output=< Jan 22 07:32:35 crc kubenswrapper[4933]: timeout: failed to connect service ":50051" within 1s Jan 22 07:32:35 crc kubenswrapper[4933]: > Jan 22 07:32:37 crc kubenswrapper[4933]: I0122 07:32:37.388537 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 22 07:32:37 crc kubenswrapper[4933]: I0122 07:32:37.413656 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 07:32:37 crc kubenswrapper[4933]: I0122 07:32:37.418131 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 22 07:32:37 crc kubenswrapper[4933]: I0122 07:32:37.423151 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 22 07:32:37 crc kubenswrapper[4933]: I0122 07:32:37.446470 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 07:32:37 crc kubenswrapper[4933]: I0122 07:32:37.466211 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jfsf9\" (UniqueName: \"kubernetes.io/projected/04c71974-04d0-4998-ab3a-e29b4403e920-kube-api-access-jfsf9\") pod \"ceilometer-0\" (UID: \"04c71974-04d0-4998-ab3a-e29b4403e920\") " pod="openstack/ceilometer-0" Jan 22 07:32:37 crc kubenswrapper[4933]: I0122 07:32:37.466393 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04c71974-04d0-4998-ab3a-e29b4403e920-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"04c71974-04d0-4998-ab3a-e29b4403e920\") " pod="openstack/ceilometer-0" Jan 22 07:32:37 crc kubenswrapper[4933]: I0122 07:32:37.466441 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/04c71974-04d0-4998-ab3a-e29b4403e920-run-httpd\") pod \"ceilometer-0\" (UID: \"04c71974-04d0-4998-ab3a-e29b4403e920\") " pod="openstack/ceilometer-0" Jan 22 07:32:37 crc kubenswrapper[4933]: I0122 07:32:37.466479 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/04c71974-04d0-4998-ab3a-e29b4403e920-log-httpd\") pod \"ceilometer-0\" (UID: \"04c71974-04d0-4998-ab3a-e29b4403e920\") " pod="openstack/ceilometer-0" Jan 22 07:32:37 crc kubenswrapper[4933]: I0122 07:32:37.466499 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/04c71974-04d0-4998-ab3a-e29b4403e920-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"04c71974-04d0-4998-ab3a-e29b4403e920\") " pod="openstack/ceilometer-0" Jan 22 07:32:37 crc kubenswrapper[4933]: I0122 07:32:37.466609 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04c71974-04d0-4998-ab3a-e29b4403e920-config-data\") pod \"ceilometer-0\" (UID: \"04c71974-04d0-4998-ab3a-e29b4403e920\") " pod="openstack/ceilometer-0" Jan 22 07:32:37 crc kubenswrapper[4933]: I0122 07:32:37.466748 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04c71974-04d0-4998-ab3a-e29b4403e920-scripts\") pod \"ceilometer-0\" (UID: \"04c71974-04d0-4998-ab3a-e29b4403e920\") " pod="openstack/ceilometer-0" Jan 22 07:32:37 crc kubenswrapper[4933]: I0122 07:32:37.568733 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04c71974-04d0-4998-ab3a-e29b4403e920-config-data\") pod \"ceilometer-0\" (UID: \"04c71974-04d0-4998-ab3a-e29b4403e920\") " pod="openstack/ceilometer-0" Jan 22 07:32:37 crc kubenswrapper[4933]: I0122 07:32:37.568907 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04c71974-04d0-4998-ab3a-e29b4403e920-scripts\") pod \"ceilometer-0\" (UID: \"04c71974-04d0-4998-ab3a-e29b4403e920\") " pod="openstack/ceilometer-0" Jan 22 07:32:37 crc kubenswrapper[4933]: I0122 07:32:37.569044 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jfsf9\" (UniqueName: \"kubernetes.io/projected/04c71974-04d0-4998-ab3a-e29b4403e920-kube-api-access-jfsf9\") pod \"ceilometer-0\" (UID: \"04c71974-04d0-4998-ab3a-e29b4403e920\") " pod="openstack/ceilometer-0" Jan 22 07:32:37 crc kubenswrapper[4933]: I0122 07:32:37.569171 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04c71974-04d0-4998-ab3a-e29b4403e920-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"04c71974-04d0-4998-ab3a-e29b4403e920\") " pod="openstack/ceilometer-0" Jan 22 07:32:37 crc kubenswrapper[4933]: I0122 07:32:37.569203 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/04c71974-04d0-4998-ab3a-e29b4403e920-run-httpd\") pod \"ceilometer-0\" (UID: \"04c71974-04d0-4998-ab3a-e29b4403e920\") " pod="openstack/ceilometer-0" Jan 22 07:32:37 crc kubenswrapper[4933]: I0122 07:32:37.569238 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/04c71974-04d0-4998-ab3a-e29b4403e920-log-httpd\") pod \"ceilometer-0\" (UID: \"04c71974-04d0-4998-ab3a-e29b4403e920\") " pod="openstack/ceilometer-0" Jan 22 07:32:37 crc kubenswrapper[4933]: I0122 07:32:37.569261 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/04c71974-04d0-4998-ab3a-e29b4403e920-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"04c71974-04d0-4998-ab3a-e29b4403e920\") " pod="openstack/ceilometer-0" Jan 22 07:32:37 crc kubenswrapper[4933]: I0122 07:32:37.570315 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/04c71974-04d0-4998-ab3a-e29b4403e920-run-httpd\") pod \"ceilometer-0\" (UID: \"04c71974-04d0-4998-ab3a-e29b4403e920\") " pod="openstack/ceilometer-0" Jan 22 07:32:37 crc kubenswrapper[4933]: I0122 07:32:37.570378 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/04c71974-04d0-4998-ab3a-e29b4403e920-log-httpd\") pod \"ceilometer-0\" (UID: \"04c71974-04d0-4998-ab3a-e29b4403e920\") " pod="openstack/ceilometer-0" Jan 22 07:32:37 crc kubenswrapper[4933]: I0122 07:32:37.576912 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04c71974-04d0-4998-ab3a-e29b4403e920-scripts\") pod \"ceilometer-0\" (UID: \"04c71974-04d0-4998-ab3a-e29b4403e920\") " pod="openstack/ceilometer-0" Jan 22 07:32:37 crc kubenswrapper[4933]: I0122 07:32:37.577026 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/04c71974-04d0-4998-ab3a-e29b4403e920-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"04c71974-04d0-4998-ab3a-e29b4403e920\") " pod="openstack/ceilometer-0" Jan 22 07:32:37 crc kubenswrapper[4933]: I0122 07:32:37.577104 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04c71974-04d0-4998-ab3a-e29b4403e920-config-data\") pod \"ceilometer-0\" (UID: \"04c71974-04d0-4998-ab3a-e29b4403e920\") " pod="openstack/ceilometer-0" Jan 22 07:32:37 crc kubenswrapper[4933]: I0122 07:32:37.579971 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04c71974-04d0-4998-ab3a-e29b4403e920-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"04c71974-04d0-4998-ab3a-e29b4403e920\") " pod="openstack/ceilometer-0" Jan 22 07:32:37 crc kubenswrapper[4933]: I0122 07:32:37.588970 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jfsf9\" (UniqueName: \"kubernetes.io/projected/04c71974-04d0-4998-ab3a-e29b4403e920-kube-api-access-jfsf9\") pod \"ceilometer-0\" (UID: \"04c71974-04d0-4998-ab3a-e29b4403e920\") " pod="openstack/ceilometer-0" Jan 22 07:32:37 crc kubenswrapper[4933]: I0122 07:32:37.754592 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 07:32:38 crc kubenswrapper[4933]: W0122 07:32:38.261189 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod04c71974_04d0_4998_ab3a_e29b4403e920.slice/crio-a9f2872978ee5e7bacb81c44b2f8d5e00a43af1108916de2196684d7d96aecc0 WatchSource:0}: Error finding container a9f2872978ee5e7bacb81c44b2f8d5e00a43af1108916de2196684d7d96aecc0: Status 404 returned error can't find the container with id a9f2872978ee5e7bacb81c44b2f8d5e00a43af1108916de2196684d7d96aecc0 Jan 22 07:32:38 crc kubenswrapper[4933]: I0122 07:32:38.262587 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 07:32:39 crc kubenswrapper[4933]: I0122 07:32:39.192735 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"04c71974-04d0-4998-ab3a-e29b4403e920","Type":"ContainerStarted","Data":"6ae33b661916a63b4466827d4bfb5f4af8b0def334fef69f24ac2dbef7e8e070"} Jan 22 07:32:39 crc kubenswrapper[4933]: I0122 07:32:39.193295 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"04c71974-04d0-4998-ab3a-e29b4403e920","Type":"ContainerStarted","Data":"a9f2872978ee5e7bacb81c44b2f8d5e00a43af1108916de2196684d7d96aecc0"} Jan 22 07:32:39 crc kubenswrapper[4933]: I0122 07:32:39.526922 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:39 crc kubenswrapper[4933]: I0122 07:32:39.529818 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:40 crc kubenswrapper[4933]: I0122 07:32:40.203375 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"04c71974-04d0-4998-ab3a-e29b4403e920","Type":"ContainerStarted","Data":"f7e0fd252647186a4e65620f666d4889940a3399a87d26b24ddb992fb692b855"} Jan 22 07:32:40 crc kubenswrapper[4933]: I0122 07:32:40.205937 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:41 crc kubenswrapper[4933]: I0122 07:32:41.785059 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Jan 22 07:32:41 crc kubenswrapper[4933]: I0122 07:32:41.811326 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Jan 22 07:32:41 crc kubenswrapper[4933]: I0122 07:32:41.812684 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstackclient" podUID="21b201de-5a21-4492-ae8c-7cef39a89972" containerName="openstackclient" containerID="cri-o://3fda0c6df071902d2252a8767a0a8984e245c44a9fa8472a37c0c54a0c09cbb4" gracePeriod=2 Jan 22 07:32:41 crc kubenswrapper[4933]: I0122 07:32:41.826411 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Jan 22 07:32:41 crc kubenswrapper[4933]: E0122 07:32:41.827401 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="21b201de-5a21-4492-ae8c-7cef39a89972" containerName="openstackclient" Jan 22 07:32:41 crc kubenswrapper[4933]: I0122 07:32:41.827416 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="21b201de-5a21-4492-ae8c-7cef39a89972" containerName="openstackclient" Jan 22 07:32:41 crc kubenswrapper[4933]: I0122 07:32:41.827604 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="21b201de-5a21-4492-ae8c-7cef39a89972" 
containerName="openstackclient" Jan 22 07:32:41 crc kubenswrapper[4933]: I0122 07:32:41.828674 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 22 07:32:41 crc kubenswrapper[4933]: I0122 07:32:41.829765 4933 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="21b201de-5a21-4492-ae8c-7cef39a89972" podUID="2b45d5fe-96da-4cf8-bdc7-986fc63c2071" Jan 22 07:32:41 crc kubenswrapper[4933]: I0122 07:32:41.853390 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 22 07:32:41 crc kubenswrapper[4933]: I0122 07:32:41.969867 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b45d5fe-96da-4cf8-bdc7-986fc63c2071-combined-ca-bundle\") pod \"openstackclient\" (UID: \"2b45d5fe-96da-4cf8-bdc7-986fc63c2071\") " pod="openstack/openstackclient" Jan 22 07:32:41 crc kubenswrapper[4933]: I0122 07:32:41.970167 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/2b45d5fe-96da-4cf8-bdc7-986fc63c2071-openstack-config\") pod \"openstackclient\" (UID: \"2b45d5fe-96da-4cf8-bdc7-986fc63c2071\") " pod="openstack/openstackclient" Jan 22 07:32:41 crc kubenswrapper[4933]: I0122 07:32:41.970233 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cmdwl\" (UniqueName: \"kubernetes.io/projected/2b45d5fe-96da-4cf8-bdc7-986fc63c2071-kube-api-access-cmdwl\") pod \"openstackclient\" (UID: \"2b45d5fe-96da-4cf8-bdc7-986fc63c2071\") " pod="openstack/openstackclient" Jan 22 07:32:41 crc kubenswrapper[4933]: I0122 07:32:41.970328 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/2b45d5fe-96da-4cf8-bdc7-986fc63c2071-openstack-config-secret\") pod \"openstackclient\" (UID: \"2b45d5fe-96da-4cf8-bdc7-986fc63c2071\") " pod="openstack/openstackclient" Jan 22 07:32:42 crc kubenswrapper[4933]: I0122 07:32:42.072289 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b45d5fe-96da-4cf8-bdc7-986fc63c2071-combined-ca-bundle\") pod \"openstackclient\" (UID: \"2b45d5fe-96da-4cf8-bdc7-986fc63c2071\") " pod="openstack/openstackclient" Jan 22 07:32:42 crc kubenswrapper[4933]: I0122 07:32:42.072817 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/2b45d5fe-96da-4cf8-bdc7-986fc63c2071-openstack-config\") pod \"openstackclient\" (UID: \"2b45d5fe-96da-4cf8-bdc7-986fc63c2071\") " pod="openstack/openstackclient" Jan 22 07:32:42 crc kubenswrapper[4933]: I0122 07:32:42.072998 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cmdwl\" (UniqueName: \"kubernetes.io/projected/2b45d5fe-96da-4cf8-bdc7-986fc63c2071-kube-api-access-cmdwl\") pod \"openstackclient\" (UID: \"2b45d5fe-96da-4cf8-bdc7-986fc63c2071\") " pod="openstack/openstackclient" Jan 22 07:32:42 crc kubenswrapper[4933]: I0122 07:32:42.073210 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: 
\"kubernetes.io/secret/2b45d5fe-96da-4cf8-bdc7-986fc63c2071-openstack-config-secret\") pod \"openstackclient\" (UID: \"2b45d5fe-96da-4cf8-bdc7-986fc63c2071\") " pod="openstack/openstackclient" Jan 22 07:32:42 crc kubenswrapper[4933]: I0122 07:32:42.074683 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/2b45d5fe-96da-4cf8-bdc7-986fc63c2071-openstack-config\") pod \"openstackclient\" (UID: \"2b45d5fe-96da-4cf8-bdc7-986fc63c2071\") " pod="openstack/openstackclient" Jan 22 07:32:42 crc kubenswrapper[4933]: I0122 07:32:42.080824 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b45d5fe-96da-4cf8-bdc7-986fc63c2071-combined-ca-bundle\") pod \"openstackclient\" (UID: \"2b45d5fe-96da-4cf8-bdc7-986fc63c2071\") " pod="openstack/openstackclient" Jan 22 07:32:42 crc kubenswrapper[4933]: I0122 07:32:42.083406 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/2b45d5fe-96da-4cf8-bdc7-986fc63c2071-openstack-config-secret\") pod \"openstackclient\" (UID: \"2b45d5fe-96da-4cf8-bdc7-986fc63c2071\") " pod="openstack/openstackclient" Jan 22 07:32:42 crc kubenswrapper[4933]: I0122 07:32:42.113781 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cmdwl\" (UniqueName: \"kubernetes.io/projected/2b45d5fe-96da-4cf8-bdc7-986fc63c2071-kube-api-access-cmdwl\") pod \"openstackclient\" (UID: \"2b45d5fe-96da-4cf8-bdc7-986fc63c2071\") " pod="openstack/openstackclient" Jan 22 07:32:42 crc kubenswrapper[4933]: I0122 07:32:42.152762 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 22 07:32:42 crc kubenswrapper[4933]: I0122 07:32:42.233297 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"04c71974-04d0-4998-ab3a-e29b4403e920","Type":"ContainerStarted","Data":"1a0193627556b6ca7d297a2aad4ed4961febab7ce97109691f24ec12c3663d2a"} Jan 22 07:32:42 crc kubenswrapper[4933]: I0122 07:32:42.875879 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 22 07:32:43 crc kubenswrapper[4933]: I0122 07:32:43.250744 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"2b45d5fe-96da-4cf8-bdc7-986fc63c2071","Type":"ContainerStarted","Data":"d5b8ebb571888cf4e8c7fb078fe22276ea347a1210aecc08f946e72dab9708b5"} Jan 22 07:32:43 crc kubenswrapper[4933]: I0122 07:32:43.250977 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"2b45d5fe-96da-4cf8-bdc7-986fc63c2071","Type":"ContainerStarted","Data":"0de1c2c177060ccef5875714681dc4d0994aeacdf4db73dcfb254fe0ee7c2410"} Jan 22 07:32:43 crc kubenswrapper[4933]: I0122 07:32:43.269798 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.26978113 podStartE2EDuration="2.26978113s" podCreationTimestamp="2026-01-22 07:32:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:32:43.262937693 +0000 UTC m=+6411.100063046" watchObservedRunningTime="2026-01-22 07:32:43.26978113 +0000 UTC m=+6411.106906473" Jan 22 07:32:43 crc kubenswrapper[4933]: I0122 07:32:43.453700 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/prometheus-metric-storage-0"] Jan 22 07:32:43 crc kubenswrapper[4933]: I0122 07:32:43.454280 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="7856abfd-5a56-4dd4-9be1-21f176ffe4b6" containerName="prometheus" containerID="cri-o://73d7bbc62427eddff6c3304f7853b72aae4a1adbadb05536d46a20a64eab5dd1" gracePeriod=600 Jan 22 07:32:43 crc kubenswrapper[4933]: I0122 07:32:43.454781 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="7856abfd-5a56-4dd4-9be1-21f176ffe4b6" containerName="thanos-sidecar" containerID="cri-o://a6236b947f9aeb5b29f8f41c2ae54b3bfae2777e7bfa0a4ee30b4fb292cee1db" gracePeriod=600 Jan 22 07:32:43 crc kubenswrapper[4933]: I0122 07:32:43.454867 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="7856abfd-5a56-4dd4-9be1-21f176ffe4b6" containerName="config-reloader" containerID="cri-o://7a8f2c60856a03c115e036817a37d4e6d0c0efe44270ce1cae8841a995f06405" gracePeriod=600 Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.110199 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-vzrp7" Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.244893 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-vzrp7" Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.282434 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.289989 4933 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="21b201de-5a21-4492-ae8c-7cef39a89972" podUID="2b45d5fe-96da-4cf8-bdc7-986fc63c2071" Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.293917 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"04c71974-04d0-4998-ab3a-e29b4403e920","Type":"ContainerStarted","Data":"905508f051eb8c115eba2f83c8b301db32d755449e23898dce1ab35cc325766a"} Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.294303 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.306701 4933 generic.go:334] "Generic (PLEG): container finished" podID="7856abfd-5a56-4dd4-9be1-21f176ffe4b6" containerID="a6236b947f9aeb5b29f8f41c2ae54b3bfae2777e7bfa0a4ee30b4fb292cee1db" exitCode=0 Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.306737 4933 generic.go:334] "Generic (PLEG): container finished" podID="7856abfd-5a56-4dd4-9be1-21f176ffe4b6" containerID="7a8f2c60856a03c115e036817a37d4e6d0c0efe44270ce1cae8841a995f06405" exitCode=0 Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.306746 4933 generic.go:334] "Generic (PLEG): container finished" podID="7856abfd-5a56-4dd4-9be1-21f176ffe4b6" containerID="73d7bbc62427eddff6c3304f7853b72aae4a1adbadb05536d46a20a64eab5dd1" exitCode=0 Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.306786 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"7856abfd-5a56-4dd4-9be1-21f176ffe4b6","Type":"ContainerDied","Data":"a6236b947f9aeb5b29f8f41c2ae54b3bfae2777e7bfa0a4ee30b4fb292cee1db"} Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 
Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.306827 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"7856abfd-5a56-4dd4-9be1-21f176ffe4b6","Type":"ContainerDied","Data":"73d7bbc62427eddff6c3304f7853b72aae4a1adbadb05536d46a20a64eab5dd1"}
Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.311246 4933 generic.go:334] "Generic (PLEG): container finished" podID="21b201de-5a21-4492-ae8c-7cef39a89972" containerID="3fda0c6df071902d2252a8767a0a8984e245c44a9fa8472a37c0c54a0c09cbb4" exitCode=137
Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.312268 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.312375 4933 scope.go:117] "RemoveContainer" containerID="3fda0c6df071902d2252a8767a0a8984e245c44a9fa8472a37c0c54a0c09cbb4"
Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.324947 4933 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="21b201de-5a21-4492-ae8c-7cef39a89972" podUID="2b45d5fe-96da-4cf8-bdc7-986fc63c2071"
Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.357118 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.432903981 podStartE2EDuration="7.357098976s" podCreationTimestamp="2026-01-22 07:32:37 +0000 UTC" firstStartedPulling="2026-01-22 07:32:38.263394923 +0000 UTC m=+6406.100520276" lastFinishedPulling="2026-01-22 07:32:43.187589918 +0000 UTC m=+6411.024715271" observedRunningTime="2026-01-22 07:32:44.322343319 +0000 UTC m=+6412.159468692" watchObservedRunningTime="2026-01-22 07:32:44.357098976 +0000 UTC m=+6412.194224339"
Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.378782 4933 scope.go:117] "RemoveContainer" containerID="3fda0c6df071902d2252a8767a0a8984e245c44a9fa8472a37c0c54a0c09cbb4"
Jan 22 07:32:44 crc kubenswrapper[4933]: E0122 07:32:44.379328 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3fda0c6df071902d2252a8767a0a8984e245c44a9fa8472a37c0c54a0c09cbb4\": container with ID starting with 3fda0c6df071902d2252a8767a0a8984e245c44a9fa8472a37c0c54a0c09cbb4 not found: ID does not exist" containerID="3fda0c6df071902d2252a8767a0a8984e245c44a9fa8472a37c0c54a0c09cbb4"
Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.379356 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3fda0c6df071902d2252a8767a0a8984e245c44a9fa8472a37c0c54a0c09cbb4"} err="failed to get container status \"3fda0c6df071902d2252a8767a0a8984e245c44a9fa8472a37c0c54a0c09cbb4\": rpc error: code = NotFound desc = could not find container \"3fda0c6df071902d2252a8767a0a8984e245c44a9fa8472a37c0c54a0c09cbb4\": container with ID starting with 3fda0c6df071902d2252a8767a0a8984e245c44a9fa8472a37c0c54a0c09cbb4 not found: ID does not exist"
Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.431253 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/21b201de-5a21-4492-ae8c-7cef39a89972-combined-ca-bundle\") pod \"21b201de-5a21-4492-ae8c-7cef39a89972\" (UID: \"21b201de-5a21-4492-ae8c-7cef39a89972\") " Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.432391 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/21b201de-5a21-4492-ae8c-7cef39a89972-openstack-config\") pod \"21b201de-5a21-4492-ae8c-7cef39a89972\" (UID: \"21b201de-5a21-4492-ae8c-7cef39a89972\") " Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.432515 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/21b201de-5a21-4492-ae8c-7cef39a89972-openstack-config-secret\") pod \"21b201de-5a21-4492-ae8c-7cef39a89972\" (UID: \"21b201de-5a21-4492-ae8c-7cef39a89972\") " Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.432675 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sq8gx\" (UniqueName: \"kubernetes.io/projected/21b201de-5a21-4492-ae8c-7cef39a89972-kube-api-access-sq8gx\") pod \"21b201de-5a21-4492-ae8c-7cef39a89972\" (UID: \"21b201de-5a21-4492-ae8c-7cef39a89972\") " Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.441755 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/21b201de-5a21-4492-ae8c-7cef39a89972-kube-api-access-sq8gx" (OuterVolumeSpecName: "kube-api-access-sq8gx") pod "21b201de-5a21-4492-ae8c-7cef39a89972" (UID: "21b201de-5a21-4492-ae8c-7cef39a89972"). InnerVolumeSpecName "kube-api-access-sq8gx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.483126 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/21b201de-5a21-4492-ae8c-7cef39a89972-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "21b201de-5a21-4492-ae8c-7cef39a89972" (UID: "21b201de-5a21-4492-ae8c-7cef39a89972"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.490551 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/21b201de-5a21-4492-ae8c-7cef39a89972-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "21b201de-5a21-4492-ae8c-7cef39a89972" (UID: "21b201de-5a21-4492-ae8c-7cef39a89972"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.535869 4933 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/21b201de-5a21-4492-ae8c-7cef39a89972-openstack-config\") on node \"crc\" DevicePath \"\"" Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.536097 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sq8gx\" (UniqueName: \"kubernetes.io/projected/21b201de-5a21-4492-ae8c-7cef39a89972-kube-api-access-sq8gx\") on node \"crc\" DevicePath \"\"" Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.536275 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21b201de-5a21-4492-ae8c-7cef39a89972-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.559525 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/21b201de-5a21-4492-ae8c-7cef39a89972-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "21b201de-5a21-4492-ae8c-7cef39a89972" (UID: "21b201de-5a21-4492-ae8c-7cef39a89972"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.639655 4933 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/21b201de-5a21-4492-ae8c-7cef39a89972-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.693135 4933 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="21b201de-5a21-4492-ae8c-7cef39a89972" podUID="2b45d5fe-96da-4cf8-bdc7-986fc63c2071" Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.693316 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.742141 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-thanos-prometheus-http-client-file\") pod \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\" (UID: \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\") " Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.742199 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-config-out\") pod \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\" (UID: \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\") " Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.742235 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-web-config\") pod \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\" (UID: \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\") " Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.742280 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-prometheus-metric-storage-rulefiles-1\") pod \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\" (UID: \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\") " Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.742349 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-tls-assets\") pod \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\" (UID: \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\") " Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.742579 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-66fa1f70-96e8-49d8-b7b8-810dd338947b\") pod \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\" (UID: \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\") " Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.742686 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-prometheus-metric-storage-rulefiles-2\") pod \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\" (UID: \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\") " Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.742741 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-config\") pod \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\" (UID: \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\") " Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.742852 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbr2m\" (UniqueName: \"kubernetes.io/projected/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-kube-api-access-dbr2m\") pod \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\" (UID: \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\") " Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.742957 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-prometheus-metric-storage-rulefiles-0\") pod \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\" (UID: \"7856abfd-5a56-4dd4-9be1-21f176ffe4b6\") " Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.743997 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "7856abfd-5a56-4dd4-9be1-21f176ffe4b6" (UID: "7856abfd-5a56-4dd4-9be1-21f176ffe4b6"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.746628 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-prometheus-metric-storage-rulefiles-2" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-2") pod "7856abfd-5a56-4dd4-9be1-21f176ffe4b6" (UID: "7856abfd-5a56-4dd4-9be1-21f176ffe4b6"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-2". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.747991 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-prometheus-metric-storage-rulefiles-1" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-1") pod "7856abfd-5a56-4dd4-9be1-21f176ffe4b6" (UID: "7856abfd-5a56-4dd4-9be1-21f176ffe4b6"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-1". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.751138 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "7856abfd-5a56-4dd4-9be1-21f176ffe4b6" (UID: "7856abfd-5a56-4dd4-9be1-21f176ffe4b6"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.751159 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "7856abfd-5a56-4dd4-9be1-21f176ffe4b6" (UID: "7856abfd-5a56-4dd4-9be1-21f176ffe4b6"). InnerVolumeSpecName "tls-assets". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.757232 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-kube-api-access-dbr2m" (OuterVolumeSpecName: "kube-api-access-dbr2m") pod "7856abfd-5a56-4dd4-9be1-21f176ffe4b6" (UID: "7856abfd-5a56-4dd4-9be1-21f176ffe4b6"). InnerVolumeSpecName "kube-api-access-dbr2m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.775263 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-config-out" (OuterVolumeSpecName: "config-out") pod "7856abfd-5a56-4dd4-9be1-21f176ffe4b6" (UID: "7856abfd-5a56-4dd4-9be1-21f176ffe4b6"). InnerVolumeSpecName "config-out". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.778313 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-config" (OuterVolumeSpecName: "config") pod "7856abfd-5a56-4dd4-9be1-21f176ffe4b6" (UID: "7856abfd-5a56-4dd4-9be1-21f176ffe4b6"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.816711 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-web-config" (OuterVolumeSpecName: "web-config") pod "7856abfd-5a56-4dd4-9be1-21f176ffe4b6" (UID: "7856abfd-5a56-4dd4-9be1-21f176ffe4b6"). InnerVolumeSpecName "web-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.846003 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-config\") on node \"crc\" DevicePath \"\"" Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.846035 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbr2m\" (UniqueName: \"kubernetes.io/projected/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-kube-api-access-dbr2m\") on node \"crc\" DevicePath \"\"" Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.846046 4933 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\"" Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.846059 4933 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\"" Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.846069 4933 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-config-out\") on node \"crc\" DevicePath \"\"" Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.846094 4933 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-web-config\") on node \"crc\" DevicePath \"\"" Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.846105 4933 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-prometheus-metric-storage-rulefiles-1\") on node \"crc\" DevicePath \"\"" Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.846125 4933 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-tls-assets\") on node \"crc\" DevicePath \"\"" Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.846138 4933 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/7856abfd-5a56-4dd4-9be1-21f176ffe4b6-prometheus-metric-storage-rulefiles-2\") on node \"crc\" DevicePath \"\"" Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.855888 4933 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-66fa1f70-96e8-49d8-b7b8-810dd338947b" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "7856abfd-5a56-4dd4-9be1-21f176ffe4b6" (UID: "7856abfd-5a56-4dd4-9be1-21f176ffe4b6"). InnerVolumeSpecName "pvc-66fa1f70-96e8-49d8-b7b8-810dd338947b". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.908914 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vzrp7"] Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.947880 4933 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-66fa1f70-96e8-49d8-b7b8-810dd338947b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-66fa1f70-96e8-49d8-b7b8-810dd338947b\") on node \"crc\" " Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.979854 4933 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Jan 22 07:32:44 crc kubenswrapper[4933]: I0122 07:32:44.980032 4933 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-66fa1f70-96e8-49d8-b7b8-810dd338947b" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-66fa1f70-96e8-49d8-b7b8-810dd338947b") on node "crc" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.049393 4933 reconciler_common.go:293] "Volume detached for volume \"pvc-66fa1f70-96e8-49d8-b7b8-810dd338947b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-66fa1f70-96e8-49d8-b7b8-810dd338947b\") on node \"crc\" DevicePath \"\"" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.321909 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"7856abfd-5a56-4dd4-9be1-21f176ffe4b6","Type":"ContainerDied","Data":"9533febaeefb505052cb85a9e676d4c88f4c5af8da74a516ff1982361113fcde"} Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.321971 4933 scope.go:117] "RemoveContainer" containerID="a6236b947f9aeb5b29f8f41c2ae54b3bfae2777e7bfa0a4ee30b4fb292cee1db" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.322149 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.327412 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-vzrp7" podUID="40716a4c-9462-42af-9aea-1e01e79ceeb2" containerName="registry-server" containerID="cri-o://0e4643db2046516b294768cd2db4fc2fd773b5d18025ec88dbb912bf614dd3a9" gracePeriod=2 Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.351158 4933 scope.go:117] "RemoveContainer" containerID="7a8f2c60856a03c115e036817a37d4e6d0c0efe44270ce1cae8841a995f06405" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.372138 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.386730 4933 scope.go:117] "RemoveContainer" containerID="73d7bbc62427eddff6c3304f7853b72aae4a1adbadb05536d46a20a64eab5dd1" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.391740 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.424589 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 22 07:32:45 crc kubenswrapper[4933]: E0122 07:32:45.429184 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7856abfd-5a56-4dd4-9be1-21f176ffe4b6" containerName="init-config-reloader" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.429227 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="7856abfd-5a56-4dd4-9be1-21f176ffe4b6" containerName="init-config-reloader" Jan 22 07:32:45 crc kubenswrapper[4933]: E0122 07:32:45.429248 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7856abfd-5a56-4dd4-9be1-21f176ffe4b6" containerName="prometheus" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.429257 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="7856abfd-5a56-4dd4-9be1-21f176ffe4b6" containerName="prometheus" Jan 22 07:32:45 crc kubenswrapper[4933]: E0122 07:32:45.429338 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7856abfd-5a56-4dd4-9be1-21f176ffe4b6" containerName="thanos-sidecar" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.429348 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="7856abfd-5a56-4dd4-9be1-21f176ffe4b6" containerName="thanos-sidecar" Jan 22 07:32:45 crc kubenswrapper[4933]: E0122 07:32:45.429367 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7856abfd-5a56-4dd4-9be1-21f176ffe4b6" containerName="config-reloader" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.429375 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="7856abfd-5a56-4dd4-9be1-21f176ffe4b6" containerName="config-reloader" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.429755 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="7856abfd-5a56-4dd4-9be1-21f176ffe4b6" containerName="config-reloader" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.429781 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="7856abfd-5a56-4dd4-9be1-21f176ffe4b6" containerName="prometheus" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.429795 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="7856abfd-5a56-4dd4-9be1-21f176ffe4b6" containerName="thanos-sidecar" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.434028 4933 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.436855 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.437488 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.437655 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.437772 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-8xvpr" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.437869 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.437978 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-1" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.438121 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-2" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.443949 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-metric-storage-prometheus-svc" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.447283 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.460500 4933 scope.go:117] "RemoveContainer" containerID="3b904712a2c4325e777a7ddca16a1f4aa3aec58706a8508077b526246b3cef87" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.466348 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/92749fdb-fe24-4dcd-ba3a-bf8a89509f23-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"92749fdb-fe24-4dcd-ba3a-bf8a89509f23\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.466390 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/92749fdb-fe24-4dcd-ba3a-bf8a89509f23-config\") pod \"prometheus-metric-storage-0\" (UID: \"92749fdb-fe24-4dcd-ba3a-bf8a89509f23\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.466445 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/92749fdb-fe24-4dcd-ba3a-bf8a89509f23-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"92749fdb-fe24-4dcd-ba3a-bf8a89509f23\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.466482 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/92749fdb-fe24-4dcd-ba3a-bf8a89509f23-config-out\") pod 
\"prometheus-metric-storage-0\" (UID: \"92749fdb-fe24-4dcd-ba3a-bf8a89509f23\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.466532 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/92749fdb-fe24-4dcd-ba3a-bf8a89509f23-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"92749fdb-fe24-4dcd-ba3a-bf8a89509f23\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.466552 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/92749fdb-fe24-4dcd-ba3a-bf8a89509f23-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"92749fdb-fe24-4dcd-ba3a-bf8a89509f23\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.466569 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/92749fdb-fe24-4dcd-ba3a-bf8a89509f23-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"92749fdb-fe24-4dcd-ba3a-bf8a89509f23\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.466589 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/92749fdb-fe24-4dcd-ba3a-bf8a89509f23-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"92749fdb-fe24-4dcd-ba3a-bf8a89509f23\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.466615 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-66fa1f70-96e8-49d8-b7b8-810dd338947b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-66fa1f70-96e8-49d8-b7b8-810dd338947b\") pod \"prometheus-metric-storage-0\" (UID: \"92749fdb-fe24-4dcd-ba3a-bf8a89509f23\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.466636 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/92749fdb-fe24-4dcd-ba3a-bf8a89509f23-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"92749fdb-fe24-4dcd-ba3a-bf8a89509f23\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.466658 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/92749fdb-fe24-4dcd-ba3a-bf8a89509f23-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"92749fdb-fe24-4dcd-ba3a-bf8a89509f23\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.466686 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/92749fdb-fe24-4dcd-ba3a-bf8a89509f23-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: 
\"92749fdb-fe24-4dcd-ba3a-bf8a89509f23\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.466720 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l6wxm\" (UniqueName: \"kubernetes.io/projected/92749fdb-fe24-4dcd-ba3a-bf8a89509f23-kube-api-access-l6wxm\") pod \"prometheus-metric-storage-0\" (UID: \"92749fdb-fe24-4dcd-ba3a-bf8a89509f23\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.489634 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.569677 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-66fa1f70-96e8-49d8-b7b8-810dd338947b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-66fa1f70-96e8-49d8-b7b8-810dd338947b\") pod \"prometheus-metric-storage-0\" (UID: \"92749fdb-fe24-4dcd-ba3a-bf8a89509f23\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.569717 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/92749fdb-fe24-4dcd-ba3a-bf8a89509f23-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"92749fdb-fe24-4dcd-ba3a-bf8a89509f23\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.569745 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/92749fdb-fe24-4dcd-ba3a-bf8a89509f23-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"92749fdb-fe24-4dcd-ba3a-bf8a89509f23\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.569765 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/92749fdb-fe24-4dcd-ba3a-bf8a89509f23-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"92749fdb-fe24-4dcd-ba3a-bf8a89509f23\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.569806 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l6wxm\" (UniqueName: \"kubernetes.io/projected/92749fdb-fe24-4dcd-ba3a-bf8a89509f23-kube-api-access-l6wxm\") pod \"prometheus-metric-storage-0\" (UID: \"92749fdb-fe24-4dcd-ba3a-bf8a89509f23\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.569859 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/92749fdb-fe24-4dcd-ba3a-bf8a89509f23-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"92749fdb-fe24-4dcd-ba3a-bf8a89509f23\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.569887 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/92749fdb-fe24-4dcd-ba3a-bf8a89509f23-config\") pod \"prometheus-metric-storage-0\" (UID: \"92749fdb-fe24-4dcd-ba3a-bf8a89509f23\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.569942 
4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/92749fdb-fe24-4dcd-ba3a-bf8a89509f23-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"92749fdb-fe24-4dcd-ba3a-bf8a89509f23\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.569988 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/92749fdb-fe24-4dcd-ba3a-bf8a89509f23-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"92749fdb-fe24-4dcd-ba3a-bf8a89509f23\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.570042 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/92749fdb-fe24-4dcd-ba3a-bf8a89509f23-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"92749fdb-fe24-4dcd-ba3a-bf8a89509f23\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.570065 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/92749fdb-fe24-4dcd-ba3a-bf8a89509f23-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"92749fdb-fe24-4dcd-ba3a-bf8a89509f23\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.570101 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/92749fdb-fe24-4dcd-ba3a-bf8a89509f23-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"92749fdb-fe24-4dcd-ba3a-bf8a89509f23\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.570129 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/92749fdb-fe24-4dcd-ba3a-bf8a89509f23-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"92749fdb-fe24-4dcd-ba3a-bf8a89509f23\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.575438 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/92749fdb-fe24-4dcd-ba3a-bf8a89509f23-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"92749fdb-fe24-4dcd-ba3a-bf8a89509f23\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.578504 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/92749fdb-fe24-4dcd-ba3a-bf8a89509f23-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"92749fdb-fe24-4dcd-ba3a-bf8a89509f23\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.578873 
4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/92749fdb-fe24-4dcd-ba3a-bf8a89509f23-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"92749fdb-fe24-4dcd-ba3a-bf8a89509f23\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.579996 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/92749fdb-fe24-4dcd-ba3a-bf8a89509f23-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"92749fdb-fe24-4dcd-ba3a-bf8a89509f23\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.580213 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/92749fdb-fe24-4dcd-ba3a-bf8a89509f23-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"92749fdb-fe24-4dcd-ba3a-bf8a89509f23\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.580278 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/92749fdb-fe24-4dcd-ba3a-bf8a89509f23-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"92749fdb-fe24-4dcd-ba3a-bf8a89509f23\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.580862 4933 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.580892 4933 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-66fa1f70-96e8-49d8-b7b8-810dd338947b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-66fa1f70-96e8-49d8-b7b8-810dd338947b\") pod \"prometheus-metric-storage-0\" (UID: \"92749fdb-fe24-4dcd-ba3a-bf8a89509f23\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/48f41e36b639b6ad7130d50b4930f9463389b7725003e1b861dcccda0a87916e/globalmount\"" pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.581512 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/92749fdb-fe24-4dcd-ba3a-bf8a89509f23-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"92749fdb-fe24-4dcd-ba3a-bf8a89509f23\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.590547 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/92749fdb-fe24-4dcd-ba3a-bf8a89509f23-config\") pod \"prometheus-metric-storage-0\" (UID: \"92749fdb-fe24-4dcd-ba3a-bf8a89509f23\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.599987 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/92749fdb-fe24-4dcd-ba3a-bf8a89509f23-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"92749fdb-fe24-4dcd-ba3a-bf8a89509f23\") " pod="openstack/prometheus-metric-storage-0" Jan 22 07:32:45 crc 
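
The csi_attacher.go:380 line explains why MountDevice "succeeds" with no staging work: the kubevirt.io.hostpath-provisioner driver does not advertise the STAGE_UNSTAGE_VOLUME node capability, so the kubelet skips NodeStageVolume and goes straight to NodePublishVolume for the PVC. A caller can probe that capability over the driver's node socket roughly like this (a sketch; the socket path is an assumption, since the kubelet discovers it via plugin registration):

package main

import (
	"context"
	"fmt"

	csi "github.com/container-storage-interface/spec/lib/go/csi"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Hypothetical socket path; real drivers register theirs under /var/lib/kubelet/plugins.
	conn, err := grpc.Dial("unix:///var/lib/kubelet/plugins/csi-hostpath/csi.sock",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	node := csi.NewNodeClient(conn)
	resp, err := node.NodeGetCapabilities(context.Background(), &csi.NodeGetCapabilitiesRequest{})
	if err != nil {
		panic(err)
	}

	staged := false
	for _, c := range resp.GetCapabilities() {
		if c.GetRpc().GetType() == csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME {
			staged = true
		}
	}
	// false here is what produces "STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."
	fmt.Println("STAGE_UNSTAGE_VOLUME:", staged)
}
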
Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.600363 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/92749fdb-fe24-4dcd-ba3a-bf8a89509f23-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"92749fdb-fe24-4dcd-ba3a-bf8a89509f23\") " pod="openstack/prometheus-metric-storage-0"
Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.604037 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l6wxm\" (UniqueName: \"kubernetes.io/projected/92749fdb-fe24-4dcd-ba3a-bf8a89509f23-kube-api-access-l6wxm\") pod \"prometheus-metric-storage-0\" (UID: \"92749fdb-fe24-4dcd-ba3a-bf8a89509f23\") " pod="openstack/prometheus-metric-storage-0"
Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.604452 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/92749fdb-fe24-4dcd-ba3a-bf8a89509f23-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"92749fdb-fe24-4dcd-ba3a-bf8a89509f23\") " pod="openstack/prometheus-metric-storage-0"
Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.677979 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-66fa1f70-96e8-49d8-b7b8-810dd338947b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-66fa1f70-96e8-49d8-b7b8-810dd338947b\") pod \"prometheus-metric-storage-0\" (UID: \"92749fdb-fe24-4dcd-ba3a-bf8a89509f23\") " pod="openstack/prometheus-metric-storage-0"
Jan 22 07:32:45 crc kubenswrapper[4933]: I0122 07:32:45.817965 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Jan 22 07:32:46 crc kubenswrapper[4933]: I0122 07:32:46.361494 4933 generic.go:334] "Generic (PLEG): container finished" podID="40716a4c-9462-42af-9aea-1e01e79ceeb2" containerID="0e4643db2046516b294768cd2db4fc2fd773b5d18025ec88dbb912bf614dd3a9" exitCode=0
Jan 22 07:32:46 crc kubenswrapper[4933]: I0122 07:32:46.362020 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vzrp7" event={"ID":"40716a4c-9462-42af-9aea-1e01e79ceeb2","Type":"ContainerDied","Data":"0e4643db2046516b294768cd2db4fc2fd773b5d18025ec88dbb912bf614dd3a9"}
Jan 22 07:32:46 crc kubenswrapper[4933]: I0122 07:32:46.377815 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 22 07:32:46 crc kubenswrapper[4933]: I0122 07:32:46.385865 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vzrp7"
Jan 22 07:32:46 crc kubenswrapper[4933]: W0122 07:32:46.429758 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod92749fdb_fe24_4dcd_ba3a_bf8a89509f23.slice/crio-c4b0dd122766268758d809c80ae48015c1f6aad9055cf8630141683d9ec543b0 WatchSource:0}: Error finding container c4b0dd122766268758d809c80ae48015c1f6aad9055cf8630141683d9ec543b0: Status 404 returned error can't find the container with id c4b0dd122766268758d809c80ae48015c1f6aad9055cf8630141683d9ec543b0
Jan 22 07:32:46 crc kubenswrapper[4933]: I0122 07:32:46.534599 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40716a4c-9462-42af-9aea-1e01e79ceeb2-catalog-content\") pod \"40716a4c-9462-42af-9aea-1e01e79ceeb2\" (UID: \"40716a4c-9462-42af-9aea-1e01e79ceeb2\") "
Jan 22 07:32:46 crc kubenswrapper[4933]: I0122 07:32:46.534712 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40716a4c-9462-42af-9aea-1e01e79ceeb2-utilities\") pod \"40716a4c-9462-42af-9aea-1e01e79ceeb2\" (UID: \"40716a4c-9462-42af-9aea-1e01e79ceeb2\") "
Jan 22 07:32:46 crc kubenswrapper[4933]: I0122 07:32:46.534820 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-77wxt\" (UniqueName: \"kubernetes.io/projected/40716a4c-9462-42af-9aea-1e01e79ceeb2-kube-api-access-77wxt\") pod \"40716a4c-9462-42af-9aea-1e01e79ceeb2\" (UID: \"40716a4c-9462-42af-9aea-1e01e79ceeb2\") "
Jan 22 07:32:46 crc kubenswrapper[4933]: I0122 07:32:46.536259 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/40716a4c-9462-42af-9aea-1e01e79ceeb2-utilities" (OuterVolumeSpecName: "utilities") pod "40716a4c-9462-42af-9aea-1e01e79ceeb2" (UID: "40716a4c-9462-42af-9aea-1e01e79ceeb2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 07:32:46 crc kubenswrapper[4933]: I0122 07:32:46.543559 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40716a4c-9462-42af-9aea-1e01e79ceeb2-kube-api-access-77wxt" (OuterVolumeSpecName: "kube-api-access-77wxt") pod "40716a4c-9462-42af-9aea-1e01e79ceeb2" (UID: "40716a4c-9462-42af-9aea-1e01e79ceeb2"). InnerVolumeSpecName "kube-api-access-77wxt". PluginName "kubernetes.io/projected", VolumeGidValue ""
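
The W-level manager.go:1169 record is cAdvisor (compiled into the kubelet) handling a cgroup watch event for the new crio-c4b0dd... sandbox before the runtime can report that container, so the lookup returns "Status 404"; the same sandbox shows up normally in a ContainerStarted event shortly after, so the warning is a benign create/report race. A quick way to check whether the cgroup from such an event exists on the node (a sketch; assumes cgroup v2 mounted at /sys/fs/cgroup, which is an assumption about this host):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Cgroup path taken from the watch event above.
	cg := "/kubepods.slice/kubepods-burstable.slice/" +
		"kubepods-burstable-pod92749fdb_fe24_4dcd_ba3a_bf8a89509f23.slice/" +
		"crio-c4b0dd122766268758d809c80ae48015c1f6aad9055cf8630141683d9ec543b0"

	if _, err := os.Stat(filepath.Join("/sys/fs/cgroup", cg)); os.IsNotExist(err) {
		fmt.Println("cgroup gone: container was removed before the event was processed")
	} else {
		fmt.Println("cgroup present: runtime metadata lagged behind the cgroup event")
	}
}
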
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:32:46 crc kubenswrapper[4933]: I0122 07:32:46.544947 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40716a4c-9462-42af-9aea-1e01e79ceeb2-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 07:32:46 crc kubenswrapper[4933]: I0122 07:32:46.576290 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="21b201de-5a21-4492-ae8c-7cef39a89972" path="/var/lib/kubelet/pods/21b201de-5a21-4492-ae8c-7cef39a89972/volumes" Jan 22 07:32:46 crc kubenswrapper[4933]: I0122 07:32:46.578177 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7856abfd-5a56-4dd4-9be1-21f176ffe4b6" path="/var/lib/kubelet/pods/7856abfd-5a56-4dd4-9be1-21f176ffe4b6/volumes" Jan 22 07:32:46 crc kubenswrapper[4933]: I0122 07:32:46.646540 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-77wxt\" (UniqueName: \"kubernetes.io/projected/40716a4c-9462-42af-9aea-1e01e79ceeb2-kube-api-access-77wxt\") on node \"crc\" DevicePath \"\"" Jan 22 07:32:46 crc kubenswrapper[4933]: I0122 07:32:46.693557 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/40716a4c-9462-42af-9aea-1e01e79ceeb2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "40716a4c-9462-42af-9aea-1e01e79ceeb2" (UID: "40716a4c-9462-42af-9aea-1e01e79ceeb2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:32:46 crc kubenswrapper[4933]: I0122 07:32:46.749195 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40716a4c-9462-42af-9aea-1e01e79ceeb2-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 07:32:47 crc kubenswrapper[4933]: I0122 07:32:47.373408 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"92749fdb-fe24-4dcd-ba3a-bf8a89509f23","Type":"ContainerStarted","Data":"c4b0dd122766268758d809c80ae48015c1f6aad9055cf8630141683d9ec543b0"} Jan 22 07:32:47 crc kubenswrapper[4933]: I0122 07:32:47.376185 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vzrp7" event={"ID":"40716a4c-9462-42af-9aea-1e01e79ceeb2","Type":"ContainerDied","Data":"3aef5953e71b53be4e5679f5d55fd90a56d45b14cf829120df957e971f3520fa"} Jan 22 07:32:47 crc kubenswrapper[4933]: I0122 07:32:47.376221 4933 scope.go:117] "RemoveContainer" containerID="0e4643db2046516b294768cd2db4fc2fd773b5d18025ec88dbb912bf614dd3a9" Jan 22 07:32:47 crc kubenswrapper[4933]: I0122 07:32:47.376289 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-vzrp7" Jan 22 07:32:47 crc kubenswrapper[4933]: I0122 07:32:47.411279 4933 scope.go:117] "RemoveContainer" containerID="f1305a14d669dd6ed9d8fe7d706166f6296fb138ccd592de6c235c2eeba46bef" Jan 22 07:32:47 crc kubenswrapper[4933]: I0122 07:32:47.419149 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vzrp7"] Jan 22 07:32:47 crc kubenswrapper[4933]: I0122 07:32:47.428703 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-vzrp7"] Jan 22 07:32:47 crc kubenswrapper[4933]: I0122 07:32:47.446522 4933 scope.go:117] "RemoveContainer" containerID="9ef81824d74ab4c9689f3150a27d369dbb3a3e60248e80ecb2bef2507796ed06" Jan 22 07:32:47 crc kubenswrapper[4933]: I0122 07:32:47.529778 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/prometheus-metric-storage-0" podUID="7856abfd-5a56-4dd4-9be1-21f176ffe4b6" containerName="prometheus" probeResult="failure" output="Get \"http://10.217.1.145:9090/-/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 22 07:32:48 crc kubenswrapper[4933]: I0122 07:32:48.505189 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="40716a4c-9462-42af-9aea-1e01e79ceeb2" path="/var/lib/kubelet/pods/40716a4c-9462-42af-9aea-1e01e79ceeb2/volumes" Jan 22 07:32:48 crc kubenswrapper[4933]: I0122 07:32:48.863054 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-create-kz4qv"] Jan 22 07:32:48 crc kubenswrapper[4933]: E0122 07:32:48.863805 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40716a4c-9462-42af-9aea-1e01e79ceeb2" containerName="registry-server" Jan 22 07:32:48 crc kubenswrapper[4933]: I0122 07:32:48.863823 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="40716a4c-9462-42af-9aea-1e01e79ceeb2" containerName="registry-server" Jan 22 07:32:48 crc kubenswrapper[4933]: E0122 07:32:48.863847 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40716a4c-9462-42af-9aea-1e01e79ceeb2" containerName="extract-utilities" Jan 22 07:32:48 crc kubenswrapper[4933]: I0122 07:32:48.863854 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="40716a4c-9462-42af-9aea-1e01e79ceeb2" containerName="extract-utilities" Jan 22 07:32:48 crc kubenswrapper[4933]: E0122 07:32:48.863893 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40716a4c-9462-42af-9aea-1e01e79ceeb2" containerName="extract-content" Jan 22 07:32:48 crc kubenswrapper[4933]: I0122 07:32:48.863899 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="40716a4c-9462-42af-9aea-1e01e79ceeb2" containerName="extract-content" Jan 22 07:32:48 crc kubenswrapper[4933]: I0122 07:32:48.864089 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="40716a4c-9462-42af-9aea-1e01e79ceeb2" containerName="registry-server" Jan 22 07:32:48 crc kubenswrapper[4933]: I0122 07:32:48.864821 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-kz4qv" Jan 22 07:32:48 crc kubenswrapper[4933]: I0122 07:32:48.894977 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-7181-account-create-update-st567"] Jan 22 07:32:48 crc kubenswrapper[4933]: I0122 07:32:48.899540 4933 util.go:30] "No sandbox for pod can be found. 
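
The prober.go:107 failure above is the kubelet's HTTP readiness probe against Prometheus's /-/ready endpoint timing out; the error text ("Client.Timeout exceeded while awaiting headers") is Go's standard net/http client timeout. What the prober does can be reproduced in a few lines (a sketch; the 1-second timeout stands in for the probe's timeoutSeconds, which this log does not show):

package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{Timeout: 1 * time.Second} // probe timeoutSeconds is an assumption
	resp, err := client.Get("http://10.217.1.145:9090/-/ready")
	if err != nil {
		// Matches the log output: Get "...": context deadline exceeded
		// (Client.Timeout exceeded while awaiting headers)
		fmt.Println("probe failure:", err)
		return
	}
	defer resp.Body.Close()
	// The kubelet treats any 2xx/3xx status as readiness success for HTTP probes.
	fmt.Println("probe result:", resp.Status)
}
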
Jan 22 07:32:48 crc kubenswrapper[4933]: I0122 07:32:48.904304 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-db-secret"
Jan 22 07:32:48 crc kubenswrapper[4933]: I0122 07:32:48.920972 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ae2234cf-74af-4918-b1c3-8f646fdcc109-operator-scripts\") pod \"aodh-7181-account-create-update-st567\" (UID: \"ae2234cf-74af-4918-b1c3-8f646fdcc109\") " pod="openstack/aodh-7181-account-create-update-st567"
Jan 22 07:32:48 crc kubenswrapper[4933]: I0122 07:32:48.921018 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wt6sm\" (UniqueName: \"kubernetes.io/projected/ae2234cf-74af-4918-b1c3-8f646fdcc109-kube-api-access-wt6sm\") pod \"aodh-7181-account-create-update-st567\" (UID: \"ae2234cf-74af-4918-b1c3-8f646fdcc109\") " pod="openstack/aodh-7181-account-create-update-st567"
Jan 22 07:32:48 crc kubenswrapper[4933]: I0122 07:32:48.921057 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a4cdab7-6e8c-4777-b870-e1cae08c72de-operator-scripts\") pod \"aodh-db-create-kz4qv\" (UID: \"8a4cdab7-6e8c-4777-b870-e1cae08c72de\") " pod="openstack/aodh-db-create-kz4qv"
Jan 22 07:32:48 crc kubenswrapper[4933]: I0122 07:32:48.921115 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bpd6z\" (UniqueName: \"kubernetes.io/projected/8a4cdab7-6e8c-4777-b870-e1cae08c72de-kube-api-access-bpd6z\") pod \"aodh-db-create-kz4qv\" (UID: \"8a4cdab7-6e8c-4777-b870-e1cae08c72de\") " pod="openstack/aodh-db-create-kz4qv"
Jan 22 07:32:48 crc kubenswrapper[4933]: I0122 07:32:48.929534 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-kz4qv"]
Jan 22 07:32:48 crc kubenswrapper[4933]: I0122 07:32:48.948970 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-7181-account-create-update-st567"]
Jan 22 07:32:49 crc kubenswrapper[4933]: I0122 07:32:49.023654 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a4cdab7-6e8c-4777-b870-e1cae08c72de-operator-scripts\") pod \"aodh-db-create-kz4qv\" (UID: \"8a4cdab7-6e8c-4777-b870-e1cae08c72de\") " pod="openstack/aodh-db-create-kz4qv"
Jan 22 07:32:49 crc kubenswrapper[4933]: I0122 07:32:49.023749 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bpd6z\" (UniqueName: \"kubernetes.io/projected/8a4cdab7-6e8c-4777-b870-e1cae08c72de-kube-api-access-bpd6z\") pod \"aodh-db-create-kz4qv\" (UID: \"8a4cdab7-6e8c-4777-b870-e1cae08c72de\") " pod="openstack/aodh-db-create-kz4qv"
Jan 22 07:32:49 crc kubenswrapper[4933]: I0122 07:32:49.023979 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ae2234cf-74af-4918-b1c3-8f646fdcc109-operator-scripts\") pod \"aodh-7181-account-create-update-st567\" (UID: \"ae2234cf-74af-4918-b1c3-8f646fdcc109\") " pod="openstack/aodh-7181-account-create-update-st567"
Jan 22 07:32:49 crc kubenswrapper[4933]: I0122 07:32:49.024026 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wt6sm\" (UniqueName: \"kubernetes.io/projected/ae2234cf-74af-4918-b1c3-8f646fdcc109-kube-api-access-wt6sm\") pod \"aodh-7181-account-create-update-st567\" (UID: \"ae2234cf-74af-4918-b1c3-8f646fdcc109\") " pod="openstack/aodh-7181-account-create-update-st567"
Jan 22 07:32:49 crc kubenswrapper[4933]: I0122 07:32:49.024783 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ae2234cf-74af-4918-b1c3-8f646fdcc109-operator-scripts\") pod \"aodh-7181-account-create-update-st567\" (UID: \"ae2234cf-74af-4918-b1c3-8f646fdcc109\") " pod="openstack/aodh-7181-account-create-update-st567"
Jan 22 07:32:49 crc kubenswrapper[4933]: I0122 07:32:49.024834 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a4cdab7-6e8c-4777-b870-e1cae08c72de-operator-scripts\") pod \"aodh-db-create-kz4qv\" (UID: \"8a4cdab7-6e8c-4777-b870-e1cae08c72de\") " pod="openstack/aodh-db-create-kz4qv"
Jan 22 07:32:49 crc kubenswrapper[4933]: I0122 07:32:49.043210 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-88n4d"]
Jan 22 07:32:49 crc kubenswrapper[4933]: I0122 07:32:49.050479 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bpd6z\" (UniqueName: \"kubernetes.io/projected/8a4cdab7-6e8c-4777-b870-e1cae08c72de-kube-api-access-bpd6z\") pod \"aodh-db-create-kz4qv\" (UID: \"8a4cdab7-6e8c-4777-b870-e1cae08c72de\") " pod="openstack/aodh-db-create-kz4qv"
Jan 22 07:32:49 crc kubenswrapper[4933]: I0122 07:32:49.055702 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-88n4d"]
Jan 22 07:32:49 crc kubenswrapper[4933]: I0122 07:32:49.059121 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wt6sm\" (UniqueName: \"kubernetes.io/projected/ae2234cf-74af-4918-b1c3-8f646fdcc109-kube-api-access-wt6sm\") pod \"aodh-7181-account-create-update-st567\" (UID: \"ae2234cf-74af-4918-b1c3-8f646fdcc109\") " pod="openstack/aodh-7181-account-create-update-st567"
Jan 22 07:32:49 crc kubenswrapper[4933]: I0122 07:32:49.186278 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-kz4qv"
Jan 22 07:32:49 crc kubenswrapper[4933]: I0122 07:32:49.224848 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-7181-account-create-update-st567"
Jan 22 07:32:49 crc kubenswrapper[4933]: I0122 07:32:49.988365 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-7181-account-create-update-st567"]
Jan 22 07:32:50 crc kubenswrapper[4933]: I0122 07:32:50.038536 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-kz4qv"]
Jan 22 07:32:50 crc kubenswrapper[4933]: I0122 07:32:50.052985 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-51cb-account-create-update-4r7vn"]
Jan 22 07:32:50 crc kubenswrapper[4933]: I0122 07:32:50.103496 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-a924-account-create-update-z6hhh"]
Jan 22 07:32:50 crc kubenswrapper[4933]: I0122 07:32:50.116535 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-51cb-account-create-update-4r7vn"]
Jan 22 07:32:50 crc kubenswrapper[4933]: I0122 07:32:50.125836 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-a924-account-create-update-z6hhh"]
Jan 22 07:32:50 crc kubenswrapper[4933]: I0122 07:32:50.137815 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-pzr6g"]
Jan 22 07:32:50 crc kubenswrapper[4933]: I0122 07:32:50.150212 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-pzr6g"]
Jan 22 07:32:50 crc kubenswrapper[4933]: I0122 07:32:50.410873 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-kz4qv" event={"ID":"8a4cdab7-6e8c-4777-b870-e1cae08c72de","Type":"ContainerStarted","Data":"869188a58887e3b5bf7334224bff7628bc1e1935d3e3ee1a0503451bf3f13a96"}
Jan 22 07:32:50 crc kubenswrapper[4933]: I0122 07:32:50.411224 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-kz4qv" event={"ID":"8a4cdab7-6e8c-4777-b870-e1cae08c72de","Type":"ContainerStarted","Data":"2891a20375057cb5e51c31b92a086acc8a871fcdaa9ebb95442190d5f4a81ad1"}
Jan 22 07:32:50 crc kubenswrapper[4933]: I0122 07:32:50.412935 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"92749fdb-fe24-4dcd-ba3a-bf8a89509f23","Type":"ContainerStarted","Data":"82a93e9d6dc8510651669a7fa32cc4a91b8c446fc3abfe6aad3537f86a3fa486"}
Jan 22 07:32:50 crc kubenswrapper[4933]: I0122 07:32:50.415180 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-7181-account-create-update-st567" event={"ID":"ae2234cf-74af-4918-b1c3-8f646fdcc109","Type":"ContainerStarted","Data":"8384f88093bf041973e2da4e7bbe48d650b4cf84598e6a10423080f995e95909"}
Jan 22 07:32:50 crc kubenswrapper[4933]: I0122 07:32:50.415213 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-7181-account-create-update-st567" event={"ID":"ae2234cf-74af-4918-b1c3-8f646fdcc109","Type":"ContainerStarted","Data":"09c615c7aaa03999e247d9f6032e92828f81edc3d464f73c45b8ef669c762c4d"}
Jan 22 07:32:50 crc kubenswrapper[4933]: I0122 07:32:50.431196 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-create-kz4qv" podStartSLOduration=2.4311809220000002 podStartE2EDuration="2.431180922s" podCreationTimestamp="2026-01-22 07:32:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:32:50.426412756 +0000 UTC m=+6418.263538119" watchObservedRunningTime="2026-01-22 07:32:50.431180922 +0000 UTC m=+6418.268306265"
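
The pod_startup_latency_tracker.go:104 record computes podStartSLOduration as the watch-observed running time minus podCreationTimestamp, less any image-pull window when the pull timestamps are non-zero (they are zero here, and non-zero in the aodh-db-sync-wv6b2 record further down). For aodh-db-create-kz4qv the arithmetic checks out: 07:32:50.431180922 - 07:32:48 = 2.431180922s. The same computation in plain Go (a sketch; the layout string is ordinary time parsing, not kubelet code):

package main

import (
	"fmt"
	"time"
)

func main() {
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"

	// Values copied from the log record above (the " m=+..." monotonic suffix stripped).
	created, _ := time.Parse(layout, "2026-01-22 07:32:48 +0000 UTC")
	observed, _ := time.Parse(layout, "2026-01-22 07:32:50.431180922 +0000 UTC")

	// Pull timestamps are the zero time here, so nothing is subtracted for image pulls.
	fmt.Println(observed.Sub(created)) // prints 2.431180922s
}
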
Jan 22 07:32:50 crc kubenswrapper[4933]: I0122 07:32:50.444670 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-7181-account-create-update-st567" podStartSLOduration=2.44465057 podStartE2EDuration="2.44465057s" podCreationTimestamp="2026-01-22 07:32:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:32:50.440092659 +0000 UTC m=+6418.277218012" watchObservedRunningTime="2026-01-22 07:32:50.44465057 +0000 UTC m=+6418.281775923"
Jan 22 07:32:50 crc kubenswrapper[4933]: I0122 07:32:50.501856 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="76c29d0a-c2e2-466b-a3e4-6e26ab04d57c" path="/var/lib/kubelet/pods/76c29d0a-c2e2-466b-a3e4-6e26ab04d57c/volumes"
Jan 22 07:32:50 crc kubenswrapper[4933]: I0122 07:32:50.502560 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b02e2932-3602-40b2-b58d-f4c0f3384ad2" path="/var/lib/kubelet/pods/b02e2932-3602-40b2-b58d-f4c0f3384ad2/volumes"
Jan 22 07:32:50 crc kubenswrapper[4933]: I0122 07:32:50.503342 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b5263403-5544-4431-a713-f43156c25601" path="/var/lib/kubelet/pods/b5263403-5544-4431-a713-f43156c25601/volumes"
Jan 22 07:32:50 crc kubenswrapper[4933]: I0122 07:32:50.503937 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ec4d3e32-6483-4056-aa0c-fe34326a6c07" path="/var/lib/kubelet/pods/ec4d3e32-6483-4056-aa0c-fe34326a6c07/volumes"
Jan 22 07:32:51 crc kubenswrapper[4933]: I0122 07:32:51.029213 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-pmf24"]
Jan 22 07:32:51 crc kubenswrapper[4933]: I0122 07:32:51.039055 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-a894-account-create-update-kmtkp"]
Jan 22 07:32:51 crc kubenswrapper[4933]: I0122 07:32:51.050008 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-pmf24"]
Jan 22 07:32:51 crc kubenswrapper[4933]: I0122 07:32:51.059350 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-a894-account-create-update-kmtkp"]
Jan 22 07:32:51 crc kubenswrapper[4933]: I0122 07:32:51.430303 4933 generic.go:334] "Generic (PLEG): container finished" podID="8a4cdab7-6e8c-4777-b870-e1cae08c72de" containerID="869188a58887e3b5bf7334224bff7628bc1e1935d3e3ee1a0503451bf3f13a96" exitCode=0
Jan 22 07:32:51 crc kubenswrapper[4933]: I0122 07:32:51.430660 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-kz4qv" event={"ID":"8a4cdab7-6e8c-4777-b870-e1cae08c72de","Type":"ContainerDied","Data":"869188a58887e3b5bf7334224bff7628bc1e1935d3e3ee1a0503451bf3f13a96"}
Jan 22 07:32:51 crc kubenswrapper[4933]: I0122 07:32:51.434245 4933 generic.go:334] "Generic (PLEG): container finished" podID="ae2234cf-74af-4918-b1c3-8f646fdcc109" containerID="8384f88093bf041973e2da4e7bbe48d650b4cf84598e6a10423080f995e95909" exitCode=0
Jan 22 07:32:51 crc kubenswrapper[4933]: I0122 07:32:51.434319 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-7181-account-create-update-st567" event={"ID":"ae2234cf-74af-4918-b1c3-8f646fdcc109","Type":"ContainerDied","Data":"8384f88093bf041973e2da4e7bbe48d650b4cf84598e6a10423080f995e95909"}
Jan 22 07:32:52 crc kubenswrapper[4933]: I0122 07:32:52.520754 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0f5c88e1-9441-4f5b-a7d1-a0b90ad0b928" path="/var/lib/kubelet/pods/0f5c88e1-9441-4f5b-a7d1-a0b90ad0b928/volumes"
Jan 22 07:32:52 crc kubenswrapper[4933]: I0122 07:32:52.524994 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="12a6bc5b-33ea-4358-ab8b-17ca45d4e6e5" path="/var/lib/kubelet/pods/12a6bc5b-33ea-4358-ab8b-17ca45d4e6e5/volumes"
Jan 22 07:32:52 crc kubenswrapper[4933]: I0122 07:32:52.963419 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-kz4qv"
Jan 22 07:32:52 crc kubenswrapper[4933]: I0122 07:32:52.975873 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-7181-account-create-update-st567"
Jan 22 07:32:53 crc kubenswrapper[4933]: I0122 07:32:53.118149 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wt6sm\" (UniqueName: \"kubernetes.io/projected/ae2234cf-74af-4918-b1c3-8f646fdcc109-kube-api-access-wt6sm\") pod \"ae2234cf-74af-4918-b1c3-8f646fdcc109\" (UID: \"ae2234cf-74af-4918-b1c3-8f646fdcc109\") "
Jan 22 07:32:53 crc kubenswrapper[4933]: I0122 07:32:53.118412 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ae2234cf-74af-4918-b1c3-8f646fdcc109-operator-scripts\") pod \"ae2234cf-74af-4918-b1c3-8f646fdcc109\" (UID: \"ae2234cf-74af-4918-b1c3-8f646fdcc109\") "
Jan 22 07:32:53 crc kubenswrapper[4933]: I0122 07:32:53.118459 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a4cdab7-6e8c-4777-b870-e1cae08c72de-operator-scripts\") pod \"8a4cdab7-6e8c-4777-b870-e1cae08c72de\" (UID: \"8a4cdab7-6e8c-4777-b870-e1cae08c72de\") "
Jan 22 07:32:53 crc kubenswrapper[4933]: I0122 07:32:53.118485 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bpd6z\" (UniqueName: \"kubernetes.io/projected/8a4cdab7-6e8c-4777-b870-e1cae08c72de-kube-api-access-bpd6z\") pod \"8a4cdab7-6e8c-4777-b870-e1cae08c72de\" (UID: \"8a4cdab7-6e8c-4777-b870-e1cae08c72de\") "
Jan 22 07:32:53 crc kubenswrapper[4933]: I0122 07:32:53.118940 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ae2234cf-74af-4918-b1c3-8f646fdcc109-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ae2234cf-74af-4918-b1c3-8f646fdcc109" (UID: "ae2234cf-74af-4918-b1c3-8f646fdcc109"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 07:32:53 crc kubenswrapper[4933]: I0122 07:32:53.119016 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8a4cdab7-6e8c-4777-b870-e1cae08c72de-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8a4cdab7-6e8c-4777-b870-e1cae08c72de" (UID: "8a4cdab7-6e8c-4777-b870-e1cae08c72de"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 07:32:53 crc kubenswrapper[4933]: I0122 07:32:53.123654 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae2234cf-74af-4918-b1c3-8f646fdcc109-kube-api-access-wt6sm" (OuterVolumeSpecName: "kube-api-access-wt6sm") pod "ae2234cf-74af-4918-b1c3-8f646fdcc109" (UID: "ae2234cf-74af-4918-b1c3-8f646fdcc109"). InnerVolumeSpecName "kube-api-access-wt6sm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 07:32:53 crc kubenswrapper[4933]: I0122 07:32:53.124230 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a4cdab7-6e8c-4777-b870-e1cae08c72de-kube-api-access-bpd6z" (OuterVolumeSpecName: "kube-api-access-bpd6z") pod "8a4cdab7-6e8c-4777-b870-e1cae08c72de" (UID: "8a4cdab7-6e8c-4777-b870-e1cae08c72de"). InnerVolumeSpecName "kube-api-access-bpd6z". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 07:32:53 crc kubenswrapper[4933]: I0122 07:32:53.221331 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ae2234cf-74af-4918-b1c3-8f646fdcc109-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 22 07:32:53 crc kubenswrapper[4933]: I0122 07:32:53.221381 4933 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a4cdab7-6e8c-4777-b870-e1cae08c72de-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 22 07:32:53 crc kubenswrapper[4933]: I0122 07:32:53.221396 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bpd6z\" (UniqueName: \"kubernetes.io/projected/8a4cdab7-6e8c-4777-b870-e1cae08c72de-kube-api-access-bpd6z\") on node \"crc\" DevicePath \"\""
Jan 22 07:32:53 crc kubenswrapper[4933]: I0122 07:32:53.221409 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wt6sm\" (UniqueName: \"kubernetes.io/projected/ae2234cf-74af-4918-b1c3-8f646fdcc109-kube-api-access-wt6sm\") on node \"crc\" DevicePath \"\""
Jan 22 07:32:53 crc kubenswrapper[4933]: I0122 07:32:53.455065 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-kz4qv" event={"ID":"8a4cdab7-6e8c-4777-b870-e1cae08c72de","Type":"ContainerDied","Data":"2891a20375057cb5e51c31b92a086acc8a871fcdaa9ebb95442190d5f4a81ad1"}
Jan 22 07:32:53 crc kubenswrapper[4933]: I0122 07:32:53.455135 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2891a20375057cb5e51c31b92a086acc8a871fcdaa9ebb95442190d5f4a81ad1"
Jan 22 07:32:53 crc kubenswrapper[4933]: I0122 07:32:53.455093 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-kz4qv"
Jan 22 07:32:53 crc kubenswrapper[4933]: I0122 07:32:53.456467 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-7181-account-create-update-st567" event={"ID":"ae2234cf-74af-4918-b1c3-8f646fdcc109","Type":"ContainerDied","Data":"09c615c7aaa03999e247d9f6032e92828f81edc3d464f73c45b8ef669c762c4d"}
Jan 22 07:32:53 crc kubenswrapper[4933]: I0122 07:32:53.456503 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="09c615c7aaa03999e247d9f6032e92828f81edc3d464f73c45b8ef669c762c4d"
Jan 22 07:32:53 crc kubenswrapper[4933]: I0122 07:32:53.456511 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-7181-account-create-update-st567"
Jan 22 07:32:54 crc kubenswrapper[4933]: I0122 07:32:54.381840 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-sync-wv6b2"]
Jan 22 07:32:54 crc kubenswrapper[4933]: E0122 07:32:54.382677 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a4cdab7-6e8c-4777-b870-e1cae08c72de" containerName="mariadb-database-create"
Jan 22 07:32:54 crc kubenswrapper[4933]: I0122 07:32:54.382692 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a4cdab7-6e8c-4777-b870-e1cae08c72de" containerName="mariadb-database-create"
Jan 22 07:32:54 crc kubenswrapper[4933]: E0122 07:32:54.382725 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae2234cf-74af-4918-b1c3-8f646fdcc109" containerName="mariadb-account-create-update"
Jan 22 07:32:54 crc kubenswrapper[4933]: I0122 07:32:54.382733 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae2234cf-74af-4918-b1c3-8f646fdcc109" containerName="mariadb-account-create-update"
Jan 22 07:32:54 crc kubenswrapper[4933]: I0122 07:32:54.382969 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae2234cf-74af-4918-b1c3-8f646fdcc109" containerName="mariadb-account-create-update"
Jan 22 07:32:54 crc kubenswrapper[4933]: I0122 07:32:54.382991 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a4cdab7-6e8c-4777-b870-e1cae08c72de" containerName="mariadb-database-create"
Jan 22 07:32:54 crc kubenswrapper[4933]: I0122 07:32:54.383910 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-wv6b2"
Jan 22 07:32:54 crc kubenswrapper[4933]: I0122 07:32:54.386810 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data"
Jan 22 07:32:54 crc kubenswrapper[4933]: I0122 07:32:54.386920 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-6cqxc"
Jan 22 07:32:54 crc kubenswrapper[4933]: I0122 07:32:54.387504 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret"
Jan 22 07:32:54 crc kubenswrapper[4933]: I0122 07:32:54.387599 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts"
Jan 22 07:32:54 crc kubenswrapper[4933]: I0122 07:32:54.391316 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-wv6b2"]
Jan 22 07:32:54 crc kubenswrapper[4933]: I0122 07:32:54.546988 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5-scripts\") pod \"aodh-db-sync-wv6b2\" (UID: \"c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5\") " pod="openstack/aodh-db-sync-wv6b2"
Jan 22 07:32:54 crc kubenswrapper[4933]: I0122 07:32:54.547095 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5-combined-ca-bundle\") pod \"aodh-db-sync-wv6b2\" (UID: \"c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5\") " pod="openstack/aodh-db-sync-wv6b2"
Jan 22 07:32:54 crc kubenswrapper[4933]: I0122 07:32:54.547145 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7pm7t\" (UniqueName: \"kubernetes.io/projected/c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5-kube-api-access-7pm7t\") pod \"aodh-db-sync-wv6b2\" (UID: \"c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5\") " pod="openstack/aodh-db-sync-wv6b2"
Jan 22 07:32:54 crc kubenswrapper[4933]: I0122 07:32:54.547186 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5-config-data\") pod \"aodh-db-sync-wv6b2\" (UID: \"c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5\") " pod="openstack/aodh-db-sync-wv6b2"
Jan 22 07:32:54 crc kubenswrapper[4933]: I0122 07:32:54.649649 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5-scripts\") pod \"aodh-db-sync-wv6b2\" (UID: \"c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5\") " pod="openstack/aodh-db-sync-wv6b2"
Jan 22 07:32:54 crc kubenswrapper[4933]: I0122 07:32:54.649767 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5-combined-ca-bundle\") pod \"aodh-db-sync-wv6b2\" (UID: \"c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5\") " pod="openstack/aodh-db-sync-wv6b2"
Jan 22 07:32:54 crc kubenswrapper[4933]: I0122 07:32:54.649805 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7pm7t\" (UniqueName: \"kubernetes.io/projected/c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5-kube-api-access-7pm7t\") pod \"aodh-db-sync-wv6b2\" (UID: \"c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5\") " pod="openstack/aodh-db-sync-wv6b2"
Jan 22 07:32:54 crc kubenswrapper[4933]: I0122 07:32:54.649852 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5-config-data\") pod \"aodh-db-sync-wv6b2\" (UID: \"c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5\") " pod="openstack/aodh-db-sync-wv6b2"
Jan 22 07:32:54 crc kubenswrapper[4933]: I0122 07:32:54.655283 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5-combined-ca-bundle\") pod \"aodh-db-sync-wv6b2\" (UID: \"c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5\") " pod="openstack/aodh-db-sync-wv6b2"
Jan 22 07:32:54 crc kubenswrapper[4933]: I0122 07:32:54.655797 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5-scripts\") pod \"aodh-db-sync-wv6b2\" (UID: \"c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5\") " pod="openstack/aodh-db-sync-wv6b2"
Jan 22 07:32:54 crc kubenswrapper[4933]: I0122 07:32:54.655836 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5-config-data\") pod \"aodh-db-sync-wv6b2\" (UID: \"c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5\") " pod="openstack/aodh-db-sync-wv6b2"
Jan 22 07:32:54 crc kubenswrapper[4933]: I0122 07:32:54.672885 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7pm7t\" (UniqueName: \"kubernetes.io/projected/c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5-kube-api-access-7pm7t\") pod \"aodh-db-sync-wv6b2\" (UID: \"c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5\") " pod="openstack/aodh-db-sync-wv6b2"
Jan 22 07:32:54 crc kubenswrapper[4933]: I0122 07:32:54.715215 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-wv6b2"
Jan 22 07:32:55 crc kubenswrapper[4933]: I0122 07:32:55.262967 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-wv6b2"]
Jan 22 07:32:55 crc kubenswrapper[4933]: W0122 07:32:55.269288 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc0c5e8a9_ecca_4ebd_91d9_b547bbf620b5.slice/crio-a98b415d21518f311e89947d35032cdf04076a1a8a4b53c199c3ae7e4437380f WatchSource:0}: Error finding container a98b415d21518f311e89947d35032cdf04076a1a8a4b53c199c3ae7e4437380f: Status 404 returned error can't find the container with id a98b415d21518f311e89947d35032cdf04076a1a8a4b53c199c3ae7e4437380f
Jan 22 07:32:55 crc kubenswrapper[4933]: I0122 07:32:55.475578 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-wv6b2" event={"ID":"c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5","Type":"ContainerStarted","Data":"a98b415d21518f311e89947d35032cdf04076a1a8a4b53c199c3ae7e4437380f"}
Jan 22 07:32:56 crc kubenswrapper[4933]: I0122 07:32:56.489058 4933 generic.go:334] "Generic (PLEG): container finished" podID="92749fdb-fe24-4dcd-ba3a-bf8a89509f23" containerID="82a93e9d6dc8510651669a7fa32cc4a91b8c446fc3abfe6aad3537f86a3fa486" exitCode=0
Jan 22 07:32:56 crc kubenswrapper[4933]: I0122 07:32:56.489118 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"92749fdb-fe24-4dcd-ba3a-bf8a89509f23","Type":"ContainerDied","Data":"82a93e9d6dc8510651669a7fa32cc4a91b8c446fc3abfe6aad3537f86a3fa486"}
Jan 22 07:32:57 crc kubenswrapper[4933]: I0122 07:32:57.502525 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"92749fdb-fe24-4dcd-ba3a-bf8a89509f23","Type":"ContainerStarted","Data":"d785f338e1a0494e65b1c0cf3ab994674a67f9890fe650c176c258817a9629f6"}
Jan 22 07:32:59 crc kubenswrapper[4933]: I0122 07:32:59.523739 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-wv6b2" event={"ID":"c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5","Type":"ContainerStarted","Data":"cd6acb4414ddd3c0defddf57390121c969fda38a6aa81a01024cebf1966c416d"}
Jan 22 07:32:59 crc kubenswrapper[4933]: I0122 07:32:59.552640 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-sync-wv6b2" podStartSLOduration=1.528069455 podStartE2EDuration="5.552619077s" podCreationTimestamp="2026-01-22 07:32:54 +0000 UTC" firstStartedPulling="2026-01-22 07:32:55.27098245 +0000 UTC m=+6423.108107803" lastFinishedPulling="2026-01-22 07:32:59.295532072 +0000 UTC m=+6427.132657425" observedRunningTime="2026-01-22 07:32:59.544898768 +0000 UTC m=+6427.382024121" watchObservedRunningTime="2026-01-22 07:32:59.552619077 +0000 UTC m=+6427.389744440"
Jan 22 07:33:01 crc kubenswrapper[4933]: I0122 07:33:01.031646 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-7nzw9"]
Jan 22 07:33:01 crc kubenswrapper[4933]: I0122 07:33:01.046422 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-7nzw9"]
Jan 22 07:33:02 crc kubenswrapper[4933]: I0122 07:33:02.503433 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c3d1484-a1be-4965-96f9-3b0b1f7f83f0" path="/var/lib/kubelet/pods/1c3d1484-a1be-4965-96f9-3b0b1f7f83f0/volumes"
Jan 22 07:33:02 crc kubenswrapper[4933]: I0122 07:33:02.554730 4933 generic.go:334] "Generic (PLEG): container finished" podID="c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5" containerID="cd6acb4414ddd3c0defddf57390121c969fda38a6aa81a01024cebf1966c416d" exitCode=0
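
The cpu_manager.go:410 / state_mem.go:107 / memory_manager.go:354 trio at the top of this stretch fires on pod admission: before admitting aodh-db-sync-wv6b2, the resource managers drop checkpointed assignments for containers whose pods no longer exist on the node. The CPU manager checkpoint is a small JSON file at /var/lib/kubelet/cpu_manager_state; a best-effort reader (the field names follow the upstream checkpoint format, but treat them as an assumption):

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// Field names follow the kubelet CPU manager checkpoint; best-effort, not a stable API.
type cpuManagerState struct {
	PolicyName    string                       `json:"policyName"`
	DefaultCPUSet string                       `json:"defaultCpuSet"`
	Entries       map[string]map[string]string `json:"entries"` // podUID -> container -> cpuset
}

func main() {
	raw, err := os.ReadFile("/var/lib/kubelet/cpu_manager_state")
	if err != nil {
		panic(err)
	}
	var st cpuManagerState
	if err := json.Unmarshal(raw, &st); err != nil {
		panic(err)
	}
	// RemoveStaleState drops entries whose pod UID is no longer active on the node.
	fmt.Printf("policy=%s default=%q tracked pods=%d\n",
		st.PolicyName, st.DefaultCPUSet, len(st.Entries))
}
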
container finished" podID="c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5" containerID="cd6acb4414ddd3c0defddf57390121c969fda38a6aa81a01024cebf1966c416d" exitCode=0 Jan 22 07:33:02 crc kubenswrapper[4933]: I0122 07:33:02.554795 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-wv6b2" event={"ID":"c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5","Type":"ContainerDied","Data":"cd6acb4414ddd3c0defddf57390121c969fda38a6aa81a01024cebf1966c416d"} Jan 22 07:33:03 crc kubenswrapper[4933]: I0122 07:33:03.567453 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"92749fdb-fe24-4dcd-ba3a-bf8a89509f23","Type":"ContainerStarted","Data":"4e32992fff47c55f3afa818351951d8c0f1ff5e5a30660ca0b06143b278dfea5"} Jan 22 07:33:03 crc kubenswrapper[4933]: I0122 07:33:03.567515 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"92749fdb-fe24-4dcd-ba3a-bf8a89509f23","Type":"ContainerStarted","Data":"8086ec035745f12c163dfadfae35889ff65df3cc9c1cf724b0d0c86234fe6d4a"} Jan 22 07:33:03 crc kubenswrapper[4933]: I0122 07:33:03.607864 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=18.607848866 podStartE2EDuration="18.607848866s" podCreationTimestamp="2026-01-22 07:32:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:33:03.600577799 +0000 UTC m=+6431.437703172" watchObservedRunningTime="2026-01-22 07:33:03.607848866 +0000 UTC m=+6431.444974219" Jan 22 07:33:04 crc kubenswrapper[4933]: I0122 07:33:04.003825 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-wv6b2" Jan 22 07:33:04 crc kubenswrapper[4933]: I0122 07:33:04.109471 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5-scripts\") pod \"c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5\" (UID: \"c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5\") " Jan 22 07:33:04 crc kubenswrapper[4933]: I0122 07:33:04.109817 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5-combined-ca-bundle\") pod \"c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5\" (UID: \"c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5\") " Jan 22 07:33:04 crc kubenswrapper[4933]: I0122 07:33:04.109905 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5-config-data\") pod \"c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5\" (UID: \"c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5\") " Jan 22 07:33:04 crc kubenswrapper[4933]: I0122 07:33:04.109968 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7pm7t\" (UniqueName: \"kubernetes.io/projected/c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5-kube-api-access-7pm7t\") pod \"c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5\" (UID: \"c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5\") " Jan 22 07:33:04 crc kubenswrapper[4933]: I0122 07:33:04.118381 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5-scripts" (OuterVolumeSpecName: "scripts") pod "c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5" (UID: 
"c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:33:04 crc kubenswrapper[4933]: I0122 07:33:04.118489 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5-kube-api-access-7pm7t" (OuterVolumeSpecName: "kube-api-access-7pm7t") pod "c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5" (UID: "c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5"). InnerVolumeSpecName "kube-api-access-7pm7t". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:33:04 crc kubenswrapper[4933]: I0122 07:33:04.141702 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5-config-data" (OuterVolumeSpecName: "config-data") pod "c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5" (UID: "c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:33:04 crc kubenswrapper[4933]: I0122 07:33:04.149669 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5" (UID: "c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:33:04 crc kubenswrapper[4933]: I0122 07:33:04.212821 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:33:04 crc kubenswrapper[4933]: I0122 07:33:04.212862 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:33:04 crc kubenswrapper[4933]: I0122 07:33:04.212877 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 07:33:04 crc kubenswrapper[4933]: I0122 07:33:04.212890 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7pm7t\" (UniqueName: \"kubernetes.io/projected/c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5-kube-api-access-7pm7t\") on node \"crc\" DevicePath \"\"" Jan 22 07:33:04 crc kubenswrapper[4933]: I0122 07:33:04.581834 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-wv6b2" event={"ID":"c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5","Type":"ContainerDied","Data":"a98b415d21518f311e89947d35032cdf04076a1a8a4b53c199c3ae7e4437380f"} Jan 22 07:33:04 crc kubenswrapper[4933]: I0122 07:33:04.581878 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a98b415d21518f311e89947d35032cdf04076a1a8a4b53c199c3ae7e4437380f" Jan 22 07:33:04 crc kubenswrapper[4933]: I0122 07:33:04.581915 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-wv6b2" Jan 22 07:33:05 crc kubenswrapper[4933]: I0122 07:33:05.819555 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Jan 22 07:33:07 crc kubenswrapper[4933]: I0122 07:33:07.041485 4933 scope.go:117] "RemoveContainer" containerID="38566bba7fe60f7004b85a512051ce0973fccf829f1c9e4c250b0c97c6064992" Jan 22 07:33:07 crc kubenswrapper[4933]: I0122 07:33:07.068419 4933 scope.go:117] "RemoveContainer" containerID="430cacf2b01c899fd6ac8fd6eb06a620fcdd91a01354deb4f8fad4294ad902b7" Jan 22 07:33:07 crc kubenswrapper[4933]: I0122 07:33:07.127987 4933 scope.go:117] "RemoveContainer" containerID="186fbf45413b6b9348b00354eb02c2ecbd9b71e08a6475e01f31b9ef86833b8e" Jan 22 07:33:07 crc kubenswrapper[4933]: I0122 07:33:07.178351 4933 scope.go:117] "RemoveContainer" containerID="fa241839c3a6bf08562dd861f23bd9436c5f15afbcda480f4631b959d2f1137f" Jan 22 07:33:07 crc kubenswrapper[4933]: I0122 07:33:07.218143 4933 scope.go:117] "RemoveContainer" containerID="ce8bb8126c7fa2d71339243af024c9e8ef779d059f1eb80f76566aca8417f6d2" Jan 22 07:33:07 crc kubenswrapper[4933]: I0122 07:33:07.263294 4933 scope.go:117] "RemoveContainer" containerID="14cfc24e318f731f9c159ebb3a4806af9209b2c87aac48f0257087412e0c29bf" Jan 22 07:33:07 crc kubenswrapper[4933]: I0122 07:33:07.322713 4933 scope.go:117] "RemoveContainer" containerID="f83c4f0ee0da98516b8642d22b8acef00175376dbe27f61fdabc4350ed34d7dd" Jan 22 07:33:07 crc kubenswrapper[4933]: I0122 07:33:07.834525 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 22 07:33:08 crc kubenswrapper[4933]: I0122 07:33:08.805431 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Jan 22 07:33:08 crc kubenswrapper[4933]: E0122 07:33:08.806386 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5" containerName="aodh-db-sync" Jan 22 07:33:08 crc kubenswrapper[4933]: I0122 07:33:08.806405 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5" containerName="aodh-db-sync" Jan 22 07:33:08 crc kubenswrapper[4933]: I0122 07:33:08.806665 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5" containerName="aodh-db-sync" Jan 22 07:33:08 crc kubenswrapper[4933]: I0122 07:33:08.809120 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Jan 22 07:33:08 crc kubenswrapper[4933]: I0122 07:33:08.812094 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Jan 22 07:33:08 crc kubenswrapper[4933]: I0122 07:33:08.812354 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Jan 22 07:33:08 crc kubenswrapper[4933]: I0122 07:33:08.812374 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-6cqxc" Jan 22 07:33:08 crc kubenswrapper[4933]: I0122 07:33:08.821955 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Jan 22 07:33:08 crc kubenswrapper[4933]: I0122 07:33:08.919370 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fn5wv\" (UniqueName: \"kubernetes.io/projected/843005da-37de-4407-9798-876c6634ca54-kube-api-access-fn5wv\") pod \"aodh-0\" (UID: \"843005da-37de-4407-9798-876c6634ca54\") " pod="openstack/aodh-0" Jan 22 07:33:08 crc kubenswrapper[4933]: I0122 07:33:08.919486 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/843005da-37de-4407-9798-876c6634ca54-config-data\") pod \"aodh-0\" (UID: \"843005da-37de-4407-9798-876c6634ca54\") " pod="openstack/aodh-0" Jan 22 07:33:08 crc kubenswrapper[4933]: I0122 07:33:08.919566 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/843005da-37de-4407-9798-876c6634ca54-combined-ca-bundle\") pod \"aodh-0\" (UID: \"843005da-37de-4407-9798-876c6634ca54\") " pod="openstack/aodh-0" Jan 22 07:33:08 crc kubenswrapper[4933]: I0122 07:33:08.919695 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/843005da-37de-4407-9798-876c6634ca54-scripts\") pod \"aodh-0\" (UID: \"843005da-37de-4407-9798-876c6634ca54\") " pod="openstack/aodh-0" Jan 22 07:33:09 crc kubenswrapper[4933]: I0122 07:33:09.023154 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/843005da-37de-4407-9798-876c6634ca54-scripts\") pod \"aodh-0\" (UID: \"843005da-37de-4407-9798-876c6634ca54\") " pod="openstack/aodh-0" Jan 22 07:33:09 crc kubenswrapper[4933]: I0122 07:33:09.023292 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fn5wv\" (UniqueName: \"kubernetes.io/projected/843005da-37de-4407-9798-876c6634ca54-kube-api-access-fn5wv\") pod \"aodh-0\" (UID: \"843005da-37de-4407-9798-876c6634ca54\") " pod="openstack/aodh-0" Jan 22 07:33:09 crc kubenswrapper[4933]: I0122 07:33:09.023329 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/843005da-37de-4407-9798-876c6634ca54-config-data\") pod \"aodh-0\" (UID: \"843005da-37de-4407-9798-876c6634ca54\") " pod="openstack/aodh-0" Jan 22 07:33:09 crc kubenswrapper[4933]: I0122 07:33:09.023368 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/843005da-37de-4407-9798-876c6634ca54-combined-ca-bundle\") pod \"aodh-0\" (UID: \"843005da-37de-4407-9798-876c6634ca54\") " pod="openstack/aodh-0" Jan 22 07:33:09 crc kubenswrapper[4933]: 
I0122 07:33:09.030162 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/843005da-37de-4407-9798-876c6634ca54-config-data\") pod \"aodh-0\" (UID: \"843005da-37de-4407-9798-876c6634ca54\") " pod="openstack/aodh-0" Jan 22 07:33:09 crc kubenswrapper[4933]: I0122 07:33:09.030818 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/843005da-37de-4407-9798-876c6634ca54-combined-ca-bundle\") pod \"aodh-0\" (UID: \"843005da-37de-4407-9798-876c6634ca54\") " pod="openstack/aodh-0" Jan 22 07:33:09 crc kubenswrapper[4933]: I0122 07:33:09.039511 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/843005da-37de-4407-9798-876c6634ca54-scripts\") pod \"aodh-0\" (UID: \"843005da-37de-4407-9798-876c6634ca54\") " pod="openstack/aodh-0" Jan 22 07:33:09 crc kubenswrapper[4933]: I0122 07:33:09.045569 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fn5wv\" (UniqueName: \"kubernetes.io/projected/843005da-37de-4407-9798-876c6634ca54-kube-api-access-fn5wv\") pod \"aodh-0\" (UID: \"843005da-37de-4407-9798-876c6634ca54\") " pod="openstack/aodh-0" Jan 22 07:33:09 crc kubenswrapper[4933]: I0122 07:33:09.136681 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Jan 22 07:33:09 crc kubenswrapper[4933]: I0122 07:33:09.647363 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Jan 22 07:33:10 crc kubenswrapper[4933]: I0122 07:33:10.647942 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"843005da-37de-4407-9798-876c6634ca54","Type":"ContainerStarted","Data":"d86086483037664981aa85b8136e6244a609d32d494740a309a72583bfc8f941"} Jan 22 07:33:11 crc kubenswrapper[4933]: I0122 07:33:11.659031 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"843005da-37de-4407-9798-876c6634ca54","Type":"ContainerStarted","Data":"0c966166dc3517dd598a9f63c7c5829a8a5f047d3766df06966baa3b162c9f65"} Jan 22 07:33:11 crc kubenswrapper[4933]: I0122 07:33:11.769897 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 07:33:11 crc kubenswrapper[4933]: I0122 07:33:11.772191 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="04c71974-04d0-4998-ab3a-e29b4403e920" containerName="ceilometer-central-agent" containerID="cri-o://6ae33b661916a63b4466827d4bfb5f4af8b0def334fef69f24ac2dbef7e8e070" gracePeriod=30 Jan 22 07:33:11 crc kubenswrapper[4933]: I0122 07:33:11.772244 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="04c71974-04d0-4998-ab3a-e29b4403e920" containerName="proxy-httpd" containerID="cri-o://905508f051eb8c115eba2f83c8b301db32d755449e23898dce1ab35cc325766a" gracePeriod=30 Jan 22 07:33:11 crc kubenswrapper[4933]: I0122 07:33:11.772278 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="04c71974-04d0-4998-ab3a-e29b4403e920" containerName="sg-core" containerID="cri-o://1a0193627556b6ca7d297a2aad4ed4961febab7ce97109691f24ec12c3663d2a" gracePeriod=30 Jan 22 07:33:11 crc kubenswrapper[4933]: I0122 07:33:11.772278 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" 
podUID="04c71974-04d0-4998-ab3a-e29b4403e920" containerName="ceilometer-notification-agent" containerID="cri-o://f7e0fd252647186a4e65620f666d4889940a3399a87d26b24ddb992fb692b855" gracePeriod=30 Jan 22 07:33:12 crc kubenswrapper[4933]: I0122 07:33:12.668683 4933 generic.go:334] "Generic (PLEG): container finished" podID="04c71974-04d0-4998-ab3a-e29b4403e920" containerID="905508f051eb8c115eba2f83c8b301db32d755449e23898dce1ab35cc325766a" exitCode=0 Jan 22 07:33:12 crc kubenswrapper[4933]: I0122 07:33:12.668974 4933 generic.go:334] "Generic (PLEG): container finished" podID="04c71974-04d0-4998-ab3a-e29b4403e920" containerID="1a0193627556b6ca7d297a2aad4ed4961febab7ce97109691f24ec12c3663d2a" exitCode=2 Jan 22 07:33:12 crc kubenswrapper[4933]: I0122 07:33:12.668984 4933 generic.go:334] "Generic (PLEG): container finished" podID="04c71974-04d0-4998-ab3a-e29b4403e920" containerID="6ae33b661916a63b4466827d4bfb5f4af8b0def334fef69f24ac2dbef7e8e070" exitCode=0 Jan 22 07:33:12 crc kubenswrapper[4933]: I0122 07:33:12.668764 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"04c71974-04d0-4998-ab3a-e29b4403e920","Type":"ContainerDied","Data":"905508f051eb8c115eba2f83c8b301db32d755449e23898dce1ab35cc325766a"} Jan 22 07:33:12 crc kubenswrapper[4933]: I0122 07:33:12.669020 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"04c71974-04d0-4998-ab3a-e29b4403e920","Type":"ContainerDied","Data":"1a0193627556b6ca7d297a2aad4ed4961febab7ce97109691f24ec12c3663d2a"} Jan 22 07:33:12 crc kubenswrapper[4933]: I0122 07:33:12.669035 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"04c71974-04d0-4998-ab3a-e29b4403e920","Type":"ContainerDied","Data":"6ae33b661916a63b4466827d4bfb5f4af8b0def334fef69f24ac2dbef7e8e070"} Jan 22 07:33:13 crc kubenswrapper[4933]: I0122 07:33:13.680037 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"843005da-37de-4407-9798-876c6634ca54","Type":"ContainerStarted","Data":"10a3b51405c06ee30799a5d9965bf5ff8dca15e003363ace0e041c65f27c4c65"} Jan 22 07:33:14 crc kubenswrapper[4933]: I0122 07:33:14.695542 4933 generic.go:334] "Generic (PLEG): container finished" podID="04c71974-04d0-4998-ab3a-e29b4403e920" containerID="f7e0fd252647186a4e65620f666d4889940a3399a87d26b24ddb992fb692b855" exitCode=0 Jan 22 07:33:14 crc kubenswrapper[4933]: I0122 07:33:14.695591 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"04c71974-04d0-4998-ab3a-e29b4403e920","Type":"ContainerDied","Data":"f7e0fd252647186a4e65620f666d4889940a3399a87d26b24ddb992fb692b855"} Jan 22 07:33:15 crc kubenswrapper[4933]: I0122 07:33:15.124108 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Jan 22 07:33:15 crc kubenswrapper[4933]: I0122 07:33:15.687164 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 07:33:15 crc kubenswrapper[4933]: I0122 07:33:15.710377 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"04c71974-04d0-4998-ab3a-e29b4403e920","Type":"ContainerDied","Data":"a9f2872978ee5e7bacb81c44b2f8d5e00a43af1108916de2196684d7d96aecc0"} Jan 22 07:33:15 crc kubenswrapper[4933]: I0122 07:33:15.710433 4933 scope.go:117] "RemoveContainer" containerID="905508f051eb8c115eba2f83c8b301db32d755449e23898dce1ab35cc325766a" Jan 22 07:33:15 crc kubenswrapper[4933]: I0122 07:33:15.710619 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 07:33:15 crc kubenswrapper[4933]: I0122 07:33:15.750428 4933 scope.go:117] "RemoveContainer" containerID="1a0193627556b6ca7d297a2aad4ed4961febab7ce97109691f24ec12c3663d2a" Jan 22 07:33:15 crc kubenswrapper[4933]: I0122 07:33:15.777933 4933 scope.go:117] "RemoveContainer" containerID="f7e0fd252647186a4e65620f666d4889940a3399a87d26b24ddb992fb692b855" Jan 22 07:33:15 crc kubenswrapper[4933]: I0122 07:33:15.785914 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jfsf9\" (UniqueName: \"kubernetes.io/projected/04c71974-04d0-4998-ab3a-e29b4403e920-kube-api-access-jfsf9\") pod \"04c71974-04d0-4998-ab3a-e29b4403e920\" (UID: \"04c71974-04d0-4998-ab3a-e29b4403e920\") " Jan 22 07:33:15 crc kubenswrapper[4933]: I0122 07:33:15.786038 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04c71974-04d0-4998-ab3a-e29b4403e920-scripts\") pod \"04c71974-04d0-4998-ab3a-e29b4403e920\" (UID: \"04c71974-04d0-4998-ab3a-e29b4403e920\") " Jan 22 07:33:15 crc kubenswrapper[4933]: I0122 07:33:15.786067 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04c71974-04d0-4998-ab3a-e29b4403e920-config-data\") pod \"04c71974-04d0-4998-ab3a-e29b4403e920\" (UID: \"04c71974-04d0-4998-ab3a-e29b4403e920\") " Jan 22 07:33:15 crc kubenswrapper[4933]: I0122 07:33:15.786149 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/04c71974-04d0-4998-ab3a-e29b4403e920-log-httpd\") pod \"04c71974-04d0-4998-ab3a-e29b4403e920\" (UID: \"04c71974-04d0-4998-ab3a-e29b4403e920\") " Jan 22 07:33:15 crc kubenswrapper[4933]: I0122 07:33:15.786246 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/04c71974-04d0-4998-ab3a-e29b4403e920-sg-core-conf-yaml\") pod \"04c71974-04d0-4998-ab3a-e29b4403e920\" (UID: \"04c71974-04d0-4998-ab3a-e29b4403e920\") " Jan 22 07:33:15 crc kubenswrapper[4933]: I0122 07:33:15.786351 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04c71974-04d0-4998-ab3a-e29b4403e920-combined-ca-bundle\") pod \"04c71974-04d0-4998-ab3a-e29b4403e920\" (UID: \"04c71974-04d0-4998-ab3a-e29b4403e920\") " Jan 22 07:33:15 crc kubenswrapper[4933]: I0122 07:33:15.786404 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/04c71974-04d0-4998-ab3a-e29b4403e920-run-httpd\") pod \"04c71974-04d0-4998-ab3a-e29b4403e920\" (UID: \"04c71974-04d0-4998-ab3a-e29b4403e920\") " Jan 22 07:33:15 crc 
kubenswrapper[4933]: I0122 07:33:15.787494 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/04c71974-04d0-4998-ab3a-e29b4403e920-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "04c71974-04d0-4998-ab3a-e29b4403e920" (UID: "04c71974-04d0-4998-ab3a-e29b4403e920"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:33:15 crc kubenswrapper[4933]: I0122 07:33:15.787728 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/04c71974-04d0-4998-ab3a-e29b4403e920-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "04c71974-04d0-4998-ab3a-e29b4403e920" (UID: "04c71974-04d0-4998-ab3a-e29b4403e920"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:33:15 crc kubenswrapper[4933]: I0122 07:33:15.795279 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04c71974-04d0-4998-ab3a-e29b4403e920-scripts" (OuterVolumeSpecName: "scripts") pod "04c71974-04d0-4998-ab3a-e29b4403e920" (UID: "04c71974-04d0-4998-ab3a-e29b4403e920"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:33:15 crc kubenswrapper[4933]: I0122 07:33:15.805182 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/04c71974-04d0-4998-ab3a-e29b4403e920-kube-api-access-jfsf9" (OuterVolumeSpecName: "kube-api-access-jfsf9") pod "04c71974-04d0-4998-ab3a-e29b4403e920" (UID: "04c71974-04d0-4998-ab3a-e29b4403e920"). InnerVolumeSpecName "kube-api-access-jfsf9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:33:15 crc kubenswrapper[4933]: I0122 07:33:15.806198 4933 scope.go:117] "RemoveContainer" containerID="6ae33b661916a63b4466827d4bfb5f4af8b0def334fef69f24ac2dbef7e8e070" Jan 22 07:33:15 crc kubenswrapper[4933]: I0122 07:33:15.819351 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Jan 22 07:33:15 crc kubenswrapper[4933]: I0122 07:33:15.830412 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Jan 22 07:33:15 crc kubenswrapper[4933]: I0122 07:33:15.852185 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04c71974-04d0-4998-ab3a-e29b4403e920-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "04c71974-04d0-4998-ab3a-e29b4403e920" (UID: "04c71974-04d0-4998-ab3a-e29b4403e920"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:33:15 crc kubenswrapper[4933]: I0122 07:33:15.888776 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jfsf9\" (UniqueName: \"kubernetes.io/projected/04c71974-04d0-4998-ab3a-e29b4403e920-kube-api-access-jfsf9\") on node \"crc\" DevicePath \"\"" Jan 22 07:33:15 crc kubenswrapper[4933]: I0122 07:33:15.888819 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04c71974-04d0-4998-ab3a-e29b4403e920-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:33:15 crc kubenswrapper[4933]: I0122 07:33:15.888832 4933 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/04c71974-04d0-4998-ab3a-e29b4403e920-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 07:33:15 crc kubenswrapper[4933]: I0122 07:33:15.888845 4933 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/04c71974-04d0-4998-ab3a-e29b4403e920-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 22 07:33:15 crc kubenswrapper[4933]: I0122 07:33:15.888856 4933 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/04c71974-04d0-4998-ab3a-e29b4403e920-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 07:33:15 crc kubenswrapper[4933]: I0122 07:33:15.940381 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04c71974-04d0-4998-ab3a-e29b4403e920-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "04c71974-04d0-4998-ab3a-e29b4403e920" (UID: "04c71974-04d0-4998-ab3a-e29b4403e920"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:33:15 crc kubenswrapper[4933]: I0122 07:33:15.961428 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04c71974-04d0-4998-ab3a-e29b4403e920-config-data" (OuterVolumeSpecName: "config-data") pod "04c71974-04d0-4998-ab3a-e29b4403e920" (UID: "04c71974-04d0-4998-ab3a-e29b4403e920"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:33:15 crc kubenswrapper[4933]: I0122 07:33:15.991329 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04c71974-04d0-4998-ab3a-e29b4403e920-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:33:15 crc kubenswrapper[4933]: I0122 07:33:15.991378 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04c71974-04d0-4998-ab3a-e29b4403e920-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.057588 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.077668 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.089155 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 22 07:33:16 crc kubenswrapper[4933]: E0122 07:33:16.089576 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04c71974-04d0-4998-ab3a-e29b4403e920" containerName="ceilometer-central-agent" Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.089652 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="04c71974-04d0-4998-ab3a-e29b4403e920" containerName="ceilometer-central-agent" Jan 22 07:33:16 crc kubenswrapper[4933]: E0122 07:33:16.089690 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04c71974-04d0-4998-ab3a-e29b4403e920" containerName="proxy-httpd" Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.089698 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="04c71974-04d0-4998-ab3a-e29b4403e920" containerName="proxy-httpd" Jan 22 07:33:16 crc kubenswrapper[4933]: E0122 07:33:16.089712 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04c71974-04d0-4998-ab3a-e29b4403e920" containerName="sg-core" Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.089718 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="04c71974-04d0-4998-ab3a-e29b4403e920" containerName="sg-core" Jan 22 07:33:16 crc kubenswrapper[4933]: E0122 07:33:16.089733 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04c71974-04d0-4998-ab3a-e29b4403e920" containerName="ceilometer-notification-agent" Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.089739 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="04c71974-04d0-4998-ab3a-e29b4403e920" containerName="ceilometer-notification-agent" Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.089921 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="04c71974-04d0-4998-ab3a-e29b4403e920" containerName="ceilometer-notification-agent" Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.089936 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="04c71974-04d0-4998-ab3a-e29b4403e920" containerName="proxy-httpd" Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.089951 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="04c71974-04d0-4998-ab3a-e29b4403e920" containerName="sg-core" Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.089965 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="04c71974-04d0-4998-ab3a-e29b4403e920" containerName="ceilometer-central-agent" Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.095033 4933 util.go:30] "No sandbox for 
Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.098153 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.098190 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.106040 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.196695 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/78913c8c-01b7-46e7-90ae-2e0b61a9a233-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"78913c8c-01b7-46e7-90ae-2e0b61a9a233\") " pod="openstack/ceilometer-0"
Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.196975 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78913c8c-01b7-46e7-90ae-2e0b61a9a233-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"78913c8c-01b7-46e7-90ae-2e0b61a9a233\") " pod="openstack/ceilometer-0"
Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.197041 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/78913c8c-01b7-46e7-90ae-2e0b61a9a233-run-httpd\") pod \"ceilometer-0\" (UID: \"78913c8c-01b7-46e7-90ae-2e0b61a9a233\") " pod="openstack/ceilometer-0"
Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.197107 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/78913c8c-01b7-46e7-90ae-2e0b61a9a233-scripts\") pod \"ceilometer-0\" (UID: \"78913c8c-01b7-46e7-90ae-2e0b61a9a233\") " pod="openstack/ceilometer-0"
Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.197142 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/78913c8c-01b7-46e7-90ae-2e0b61a9a233-log-httpd\") pod \"ceilometer-0\" (UID: \"78913c8c-01b7-46e7-90ae-2e0b61a9a233\") " pod="openstack/ceilometer-0"
Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.197193 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/78913c8c-01b7-46e7-90ae-2e0b61a9a233-config-data\") pod \"ceilometer-0\" (UID: \"78913c8c-01b7-46e7-90ae-2e0b61a9a233\") " pod="openstack/ceilometer-0"
Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.197221 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-msn4d\" (UniqueName: \"kubernetes.io/projected/78913c8c-01b7-46e7-90ae-2e0b61a9a233-kube-api-access-msn4d\") pod \"ceilometer-0\" (UID: \"78913c8c-01b7-46e7-90ae-2e0b61a9a233\") " pod="openstack/ceilometer-0"
Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.290936 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"]
Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.291427 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="0143bbca-b0b4-47c0-8c6a-4088fdc688a8" containerName="kube-state-metrics" containerID="cri-o://8d8b92233ede147874ef163b3aaee4086a656d61d8851c2a5ed341e3a35ff7de" gracePeriod=30
Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.299499 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/78913c8c-01b7-46e7-90ae-2e0b61a9a233-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"78913c8c-01b7-46e7-90ae-2e0b61a9a233\") " pod="openstack/ceilometer-0"
Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.299762 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78913c8c-01b7-46e7-90ae-2e0b61a9a233-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"78913c8c-01b7-46e7-90ae-2e0b61a9a233\") " pod="openstack/ceilometer-0"
Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.299953 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/78913c8c-01b7-46e7-90ae-2e0b61a9a233-run-httpd\") pod \"ceilometer-0\" (UID: \"78913c8c-01b7-46e7-90ae-2e0b61a9a233\") " pod="openstack/ceilometer-0"
Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.300130 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/78913c8c-01b7-46e7-90ae-2e0b61a9a233-scripts\") pod \"ceilometer-0\" (UID: \"78913c8c-01b7-46e7-90ae-2e0b61a9a233\") " pod="openstack/ceilometer-0"
Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.300276 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/78913c8c-01b7-46e7-90ae-2e0b61a9a233-log-httpd\") pod \"ceilometer-0\" (UID: \"78913c8c-01b7-46e7-90ae-2e0b61a9a233\") " pod="openstack/ceilometer-0"
Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.300421 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/78913c8c-01b7-46e7-90ae-2e0b61a9a233-config-data\") pod \"ceilometer-0\" (UID: \"78913c8c-01b7-46e7-90ae-2e0b61a9a233\") " pod="openstack/ceilometer-0"
Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.300533 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-msn4d\" (UniqueName: \"kubernetes.io/projected/78913c8c-01b7-46e7-90ae-2e0b61a9a233-kube-api-access-msn4d\") pod \"ceilometer-0\" (UID: \"78913c8c-01b7-46e7-90ae-2e0b61a9a233\") " pod="openstack/ceilometer-0"
Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.300461 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/78913c8c-01b7-46e7-90ae-2e0b61a9a233-run-httpd\") pod \"ceilometer-0\" (UID: \"78913c8c-01b7-46e7-90ae-2e0b61a9a233\") " pod="openstack/ceilometer-0"
Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.300727 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/78913c8c-01b7-46e7-90ae-2e0b61a9a233-log-httpd\") pod \"ceilometer-0\" (UID: \"78913c8c-01b7-46e7-90ae-2e0b61a9a233\") " pod="openstack/ceilometer-0"
Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.303287 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/78913c8c-01b7-46e7-90ae-2e0b61a9a233-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"78913c8c-01b7-46e7-90ae-2e0b61a9a233\") " pod="openstack/ceilometer-0"
Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.303600 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78913c8c-01b7-46e7-90ae-2e0b61a9a233-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"78913c8c-01b7-46e7-90ae-2e0b61a9a233\") " pod="openstack/ceilometer-0"
Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.304252 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/78913c8c-01b7-46e7-90ae-2e0b61a9a233-config-data\") pod \"ceilometer-0\" (UID: \"78913c8c-01b7-46e7-90ae-2e0b61a9a233\") " pod="openstack/ceilometer-0"
Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.307032 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/78913c8c-01b7-46e7-90ae-2e0b61a9a233-scripts\") pod \"ceilometer-0\" (UID: \"78913c8c-01b7-46e7-90ae-2e0b61a9a233\") " pod="openstack/ceilometer-0"
Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.318297 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-msn4d\" (UniqueName: \"kubernetes.io/projected/78913c8c-01b7-46e7-90ae-2e0b61a9a233-kube-api-access-msn4d\") pod \"ceilometer-0\" (UID: \"78913c8c-01b7-46e7-90ae-2e0b61a9a233\") " pod="openstack/ceilometer-0"
Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.480231 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.509465 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="04c71974-04d0-4998-ab3a-e29b4403e920" path="/var/lib/kubelet/pods/04c71974-04d0-4998-ab3a-e29b4403e920/volumes"
Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.749586 4933 generic.go:334] "Generic (PLEG): container finished" podID="0143bbca-b0b4-47c0-8c6a-4088fdc688a8" containerID="8d8b92233ede147874ef163b3aaee4086a656d61d8851c2a5ed341e3a35ff7de" exitCode=2
Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.749704 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"0143bbca-b0b4-47c0-8c6a-4088fdc688a8","Type":"ContainerDied","Data":"8d8b92233ede147874ef163b3aaee4086a656d61d8851c2a5ed341e3a35ff7de"}
Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.764411 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"843005da-37de-4407-9798-876c6634ca54","Type":"ContainerStarted","Data":"d584ff6112a4ce4b8166a0b8b4ec4a893604ba30922df270f3c9897daaa45269"}
Jan 22 07:33:16 crc kubenswrapper[4933]: I0122 07:33:16.769051 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0"
Jan 22 07:33:17 crc kubenswrapper[4933]: I0122 07:33:17.094012 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 22 07:33:17 crc kubenswrapper[4933]: W0122 07:33:17.301204 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod78913c8c_01b7_46e7_90ae_2e0b61a9a233.slice/crio-a120eeb6ae13e2977de8a6e99e8d0f88291db1c3172a725671ade1b23818bc67 WatchSource:0}: Error finding container a120eeb6ae13e2977de8a6e99e8d0f88291db1c3172a725671ade1b23818bc67: Status 404 returned error can't find the container with id a120eeb6ae13e2977de8a6e99e8d0f88291db1c3172a725671ade1b23818bc67
Jan 22 07:33:17 crc kubenswrapper[4933]: I0122 07:33:17.410588 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Jan 22 07:33:17 crc kubenswrapper[4933]: I0122 07:33:17.557187 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jr29t\" (UniqueName: \"kubernetes.io/projected/0143bbca-b0b4-47c0-8c6a-4088fdc688a8-kube-api-access-jr29t\") pod \"0143bbca-b0b4-47c0-8c6a-4088fdc688a8\" (UID: \"0143bbca-b0b4-47c0-8c6a-4088fdc688a8\") "
Jan 22 07:33:17 crc kubenswrapper[4933]: I0122 07:33:17.562312 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0143bbca-b0b4-47c0-8c6a-4088fdc688a8-kube-api-access-jr29t" (OuterVolumeSpecName: "kube-api-access-jr29t") pod "0143bbca-b0b4-47c0-8c6a-4088fdc688a8" (UID: "0143bbca-b0b4-47c0-8c6a-4088fdc688a8"). InnerVolumeSpecName "kube-api-access-jr29t". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 07:33:17 crc kubenswrapper[4933]: I0122 07:33:17.660979 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jr29t\" (UniqueName: \"kubernetes.io/projected/0143bbca-b0b4-47c0-8c6a-4088fdc688a8-kube-api-access-jr29t\") on node \"crc\" DevicePath \"\""
Jan 22 07:33:17 crc kubenswrapper[4933]: I0122 07:33:17.822257 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"78913c8c-01b7-46e7-90ae-2e0b61a9a233","Type":"ContainerStarted","Data":"a120eeb6ae13e2977de8a6e99e8d0f88291db1c3172a725671ade1b23818bc67"}
Jan 22 07:33:17 crc kubenswrapper[4933]: I0122 07:33:17.841402 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"843005da-37de-4407-9798-876c6634ca54","Type":"ContainerStarted","Data":"e054e4fe54d92652dc145035f229aabbca8c18e7be1161e717e5a611a9dfd072"}
Jan 22 07:33:17 crc kubenswrapper[4933]: I0122 07:33:17.841595 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="843005da-37de-4407-9798-876c6634ca54" containerName="aodh-api" containerID="cri-o://0c966166dc3517dd598a9f63c7c5829a8a5f047d3766df06966baa3b162c9f65" gracePeriod=30
Jan 22 07:33:17 crc kubenswrapper[4933]: I0122 07:33:17.842262 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="843005da-37de-4407-9798-876c6634ca54" containerName="aodh-listener" containerID="cri-o://e054e4fe54d92652dc145035f229aabbca8c18e7be1161e717e5a611a9dfd072" gracePeriod=30
Jan 22 07:33:17 crc kubenswrapper[4933]: I0122 07:33:17.842342 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="843005da-37de-4407-9798-876c6634ca54" containerName="aodh-notifier" containerID="cri-o://d584ff6112a4ce4b8166a0b8b4ec4a893604ba30922df270f3c9897daaa45269" gracePeriod=30
Jan 22 07:33:17 crc kubenswrapper[4933]: I0122 07:33:17.842385 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="843005da-37de-4407-9798-876c6634ca54" containerName="aodh-evaluator" containerID="cri-o://10a3b51405c06ee30799a5d9965bf5ff8dca15e003363ace0e041c65f27c4c65" gracePeriod=30
Jan 22 07:33:17 crc kubenswrapper[4933]: I0122 07:33:17.853933 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Jan 22 07:33:17 crc kubenswrapper[4933]: I0122 07:33:17.857504 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"0143bbca-b0b4-47c0-8c6a-4088fdc688a8","Type":"ContainerDied","Data":"8b05025130a8183f4899481489d5d8545ac4d7bf9b618cb4443f45f6365270ff"}
Jan 22 07:33:17 crc kubenswrapper[4933]: I0122 07:33:17.858952 4933 scope.go:117] "RemoveContainer" containerID="8d8b92233ede147874ef163b3aaee4086a656d61d8851c2a5ed341e3a35ff7de"
Jan 22 07:33:17 crc kubenswrapper[4933]: I0122 07:33:17.876316 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=2.125351658 podStartE2EDuration="9.876292566s" podCreationTimestamp="2026-01-22 07:33:08 +0000 UTC" firstStartedPulling="2026-01-22 07:33:09.650484796 +0000 UTC m=+6437.487610169" lastFinishedPulling="2026-01-22 07:33:17.401425724 +0000 UTC m=+6445.238551077" observedRunningTime="2026-01-22 07:33:17.872252027 +0000 UTC m=+6445.709377390" watchObservedRunningTime="2026-01-22 07:33:17.876292566 +0000 UTC m=+6445.713417919"
Jan 22 07:33:17 crc kubenswrapper[4933]: I0122 07:33:17.988469 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"]
Jan 22 07:33:18 crc kubenswrapper[4933]: I0122 07:33:18.010526 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"]
Jan 22 07:33:18 crc kubenswrapper[4933]: I0122 07:33:18.027717 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"]
Jan 22 07:33:18 crc kubenswrapper[4933]: E0122 07:33:18.028212 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0143bbca-b0b4-47c0-8c6a-4088fdc688a8" containerName="kube-state-metrics"
Jan 22 07:33:18 crc kubenswrapper[4933]: I0122 07:33:18.028231 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="0143bbca-b0b4-47c0-8c6a-4088fdc688a8" containerName="kube-state-metrics"
Jan 22 07:33:18 crc kubenswrapper[4933]: I0122 07:33:18.028430 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="0143bbca-b0b4-47c0-8c6a-4088fdc688a8" containerName="kube-state-metrics"
Jan 22 07:33:18 crc kubenswrapper[4933]: I0122 07:33:18.029184 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Jan 22 07:33:18 crc kubenswrapper[4933]: I0122 07:33:18.030723 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Jan 22 07:33:18 crc kubenswrapper[4933]: I0122 07:33:18.035552 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config"
Jan 22 07:33:18 crc kubenswrapper[4933]: I0122 07:33:18.035723 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc"
Jan 22 07:33:18 crc kubenswrapper[4933]: I0122 07:33:18.106380 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/7328c52a-a9e0-4042-9f6c-007d4480f97f-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"7328c52a-a9e0-4042-9f6c-007d4480f97f\") " pod="openstack/kube-state-metrics-0"
Jan 22 07:33:18 crc kubenswrapper[4933]: I0122 07:33:18.106726 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/7328c52a-a9e0-4042-9f6c-007d4480f97f-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"7328c52a-a9e0-4042-9f6c-007d4480f97f\") " pod="openstack/kube-state-metrics-0"
Jan 22 07:33:18 crc kubenswrapper[4933]: I0122 07:33:18.106748 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hhm6f\" (UniqueName: \"kubernetes.io/projected/7328c52a-a9e0-4042-9f6c-007d4480f97f-kube-api-access-hhm6f\") pod \"kube-state-metrics-0\" (UID: \"7328c52a-a9e0-4042-9f6c-007d4480f97f\") " pod="openstack/kube-state-metrics-0"
Jan 22 07:33:18 crc kubenswrapper[4933]: I0122 07:33:18.106808 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7328c52a-a9e0-4042-9f6c-007d4480f97f-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"7328c52a-a9e0-4042-9f6c-007d4480f97f\") " pod="openstack/kube-state-metrics-0"
Jan 22 07:33:18 crc kubenswrapper[4933]: I0122 07:33:18.209223 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/7328c52a-a9e0-4042-9f6c-007d4480f97f-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"7328c52a-a9e0-4042-9f6c-007d4480f97f\") " pod="openstack/kube-state-metrics-0"
Jan 22 07:33:18 crc kubenswrapper[4933]: I0122 07:33:18.209267 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hhm6f\" (UniqueName: \"kubernetes.io/projected/7328c52a-a9e0-4042-9f6c-007d4480f97f-kube-api-access-hhm6f\") pod \"kube-state-metrics-0\" (UID: \"7328c52a-a9e0-4042-9f6c-007d4480f97f\") " pod="openstack/kube-state-metrics-0"
Jan 22 07:33:18 crc kubenswrapper[4933]: I0122 07:33:18.209329 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7328c52a-a9e0-4042-9f6c-007d4480f97f-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"7328c52a-a9e0-4042-9f6c-007d4480f97f\") " pod="openstack/kube-state-metrics-0"
Jan 22 07:33:18 crc kubenswrapper[4933]: I0122 07:33:18.209490 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/7328c52a-a9e0-4042-9f6c-007d4480f97f-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"7328c52a-a9e0-4042-9f6c-007d4480f97f\") " pod="openstack/kube-state-metrics-0"
Jan 22 07:33:18 crc kubenswrapper[4933]: I0122 07:33:18.213265 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/7328c52a-a9e0-4042-9f6c-007d4480f97f-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"7328c52a-a9e0-4042-9f6c-007d4480f97f\") " pod="openstack/kube-state-metrics-0"
Jan 22 07:33:18 crc kubenswrapper[4933]: I0122 07:33:18.213355 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/7328c52a-a9e0-4042-9f6c-007d4480f97f-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"7328c52a-a9e0-4042-9f6c-007d4480f97f\") " pod="openstack/kube-state-metrics-0"
Jan 22 07:33:18 crc kubenswrapper[4933]: I0122 07:33:18.215802 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7328c52a-a9e0-4042-9f6c-007d4480f97f-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"7328c52a-a9e0-4042-9f6c-007d4480f97f\") " pod="openstack/kube-state-metrics-0"
Jan 22 07:33:18 crc kubenswrapper[4933]: I0122 07:33:18.227008 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hhm6f\" (UniqueName: \"kubernetes.io/projected/7328c52a-a9e0-4042-9f6c-007d4480f97f-kube-api-access-hhm6f\") pod \"kube-state-metrics-0\" (UID: \"7328c52a-a9e0-4042-9f6c-007d4480f97f\") " pod="openstack/kube-state-metrics-0"
Jan 22 07:33:18 crc kubenswrapper[4933]: I0122 07:33:18.382865 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Jan 22 07:33:18 crc kubenswrapper[4933]: I0122 07:33:18.554371 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0143bbca-b0b4-47c0-8c6a-4088fdc688a8" path="/var/lib/kubelet/pods/0143bbca-b0b4-47c0-8c6a-4088fdc688a8/volumes"
Jan 22 07:33:18 crc kubenswrapper[4933]: I0122 07:33:18.888761 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"78913c8c-01b7-46e7-90ae-2e0b61a9a233","Type":"ContainerStarted","Data":"8e098a8fde559b2fb9303f4def2d27c23fe5ab783412916e35ede0c49b0152cc"}
Jan 22 07:33:18 crc kubenswrapper[4933]: I0122 07:33:18.888810 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"78913c8c-01b7-46e7-90ae-2e0b61a9a233","Type":"ContainerStarted","Data":"7c1b23d40a9cef7ff4ed6eb31a8a418946befcadbb1b22d9482b4740e952c236"}
Jan 22 07:33:18 crc kubenswrapper[4933]: I0122 07:33:18.892579 4933 generic.go:334] "Generic (PLEG): container finished" podID="843005da-37de-4407-9798-876c6634ca54" containerID="10a3b51405c06ee30799a5d9965bf5ff8dca15e003363ace0e041c65f27c4c65" exitCode=0
Jan 22 07:33:18 crc kubenswrapper[4933]: I0122 07:33:18.892614 4933 generic.go:334] "Generic (PLEG): container finished" podID="843005da-37de-4407-9798-876c6634ca54" containerID="0c966166dc3517dd598a9f63c7c5829a8a5f047d3766df06966baa3b162c9f65" exitCode=0
Jan 22 07:33:18 crc kubenswrapper[4933]: I0122 07:33:18.892638 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"843005da-37de-4407-9798-876c6634ca54","Type":"ContainerDied","Data":"10a3b51405c06ee30799a5d9965bf5ff8dca15e003363ace0e041c65f27c4c65"}
Jan 22 07:33:18 crc kubenswrapper[4933]: I0122 07:33:18.892667 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"843005da-37de-4407-9798-876c6634ca54","Type":"ContainerDied","Data":"0c966166dc3517dd598a9f63c7c5829a8a5f047d3766df06966baa3b162c9f65"}
Jan 22 07:33:18 crc kubenswrapper[4933]: I0122 07:33:18.913911 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Jan 22 07:33:18 crc kubenswrapper[4933]: I0122 07:33:18.984571 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 22 07:33:19 crc kubenswrapper[4933]: I0122 07:33:19.905497 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"78913c8c-01b7-46e7-90ae-2e0b61a9a233","Type":"ContainerStarted","Data":"32c90cdd25f7dc620f09bbf3b3d9705d342899648d3883a4a4d1e70ea468b9f8"}
Jan 22 07:33:19 crc kubenswrapper[4933]: I0122 07:33:19.907511 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"7328c52a-a9e0-4042-9f6c-007d4480f97f","Type":"ContainerStarted","Data":"85a77b9b5ffdde0f92067c2b0b8651ffabf94ca89543e2ee424bf2102dfaa7d0"}
Jan 22 07:33:19 crc kubenswrapper[4933]: I0122 07:33:19.907536 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"7328c52a-a9e0-4042-9f6c-007d4480f97f","Type":"ContainerStarted","Data":"0199f034df01b056da4436de797cca67f62def6db772aa2200996408ecceb6b9"}
Jan 22 07:33:19 crc kubenswrapper[4933]: I0122 07:33:19.908769 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0"
Jan 22 07:33:19 crc kubenswrapper[4933]: I0122 07:33:19.934295 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.563806108 podStartE2EDuration="2.934273276s" podCreationTimestamp="2026-01-22 07:33:17 +0000 UTC" firstStartedPulling="2026-01-22 07:33:18.939295239 +0000 UTC m=+6446.776420582" lastFinishedPulling="2026-01-22 07:33:19.309762397 +0000 UTC m=+6447.146887750" observedRunningTime="2026-01-22 07:33:19.921598367 +0000 UTC m=+6447.758723720" watchObservedRunningTime="2026-01-22 07:33:19.934273276 +0000 UTC m=+6447.771398629"
Jan 22 07:33:20 crc kubenswrapper[4933]: I0122 07:33:20.050598 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-4vth5"]
Jan 22 07:33:20 crc kubenswrapper[4933]: I0122 07:33:20.064546 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-4vth5"]
Jan 22 07:33:20 crc kubenswrapper[4933]: I0122 07:33:20.508818 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c406803-95c9-46e7-953f-0c56e8daa84e" path="/var/lib/kubelet/pods/6c406803-95c9-46e7-953f-0c56e8daa84e/volumes"
Jan 22 07:33:20 crc kubenswrapper[4933]: I0122 07:33:20.918255 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"78913c8c-01b7-46e7-90ae-2e0b61a9a233","Type":"ContainerStarted","Data":"fd46f3a17172030688b626c0cfc56cd405832a105f033af5f810834172c662e0"}
Jan 22 07:33:20 crc kubenswrapper[4933]: I0122 07:33:20.918417 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="78913c8c-01b7-46e7-90ae-2e0b61a9a233" containerName="ceilometer-central-agent" containerID="cri-o://7c1b23d40a9cef7ff4ed6eb31a8a418946befcadbb1b22d9482b4740e952c236" gracePeriod=30
Jan 22 07:33:20 crc kubenswrapper[4933]: I0122 07:33:20.918656 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Jan 22 07:33:20 crc kubenswrapper[4933]: I0122 07:33:20.918691 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="78913c8c-01b7-46e7-90ae-2e0b61a9a233" containerName="proxy-httpd" containerID="cri-o://fd46f3a17172030688b626c0cfc56cd405832a105f033af5f810834172c662e0" gracePeriod=30
Jan 22 07:33:20 crc kubenswrapper[4933]: I0122 07:33:20.918787 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="78913c8c-01b7-46e7-90ae-2e0b61a9a233" containerName="sg-core" containerID="cri-o://32c90cdd25f7dc620f09bbf3b3d9705d342899648d3883a4a4d1e70ea468b9f8" gracePeriod=30
Jan 22 07:33:20 crc kubenswrapper[4933]: I0122 07:33:20.918837 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="78913c8c-01b7-46e7-90ae-2e0b61a9a233" containerName="ceilometer-notification-agent" containerID="cri-o://8e098a8fde559b2fb9303f4def2d27c23fe5ab783412916e35ede0c49b0152cc" gracePeriod=30
Jan 22 07:33:20 crc kubenswrapper[4933]: I0122 07:33:20.943807 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.836116417 podStartE2EDuration="4.943782046s" podCreationTimestamp="2026-01-22 07:33:16 +0000 UTC" firstStartedPulling="2026-01-22 07:33:17.306398438 +0000 UTC m=+6445.143523791" lastFinishedPulling="2026-01-22 07:33:20.414064047 +0000 UTC m=+6448.251189420" observedRunningTime="2026-01-22 07:33:20.939176103 +0000 UTC m=+6448.776301466" watchObservedRunningTime="2026-01-22 07:33:20.943782046 +0000 UTC m=+6448.780907399"
Jan 22 07:33:21 crc kubenswrapper[4933]: I0122 07:33:21.029708 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-d5cfk"]
Jan 22 07:33:21 crc kubenswrapper[4933]: I0122 07:33:21.039212 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-d5cfk"]
Jan 22 07:33:21 crc kubenswrapper[4933]: I0122 07:33:21.937861 4933 generic.go:334] "Generic (PLEG): container finished" podID="78913c8c-01b7-46e7-90ae-2e0b61a9a233" containerID="fd46f3a17172030688b626c0cfc56cd405832a105f033af5f810834172c662e0" exitCode=0
Jan 22 07:33:21 crc kubenswrapper[4933]: I0122 07:33:21.938216 4933 generic.go:334] "Generic (PLEG): container finished" podID="78913c8c-01b7-46e7-90ae-2e0b61a9a233" containerID="32c90cdd25f7dc620f09bbf3b3d9705d342899648d3883a4a4d1e70ea468b9f8" exitCode=2
Jan 22 07:33:21 crc kubenswrapper[4933]: I0122 07:33:21.938229 4933 generic.go:334] "Generic (PLEG): container finished" podID="78913c8c-01b7-46e7-90ae-2e0b61a9a233" containerID="8e098a8fde559b2fb9303f4def2d27c23fe5ab783412916e35ede0c49b0152cc" exitCode=0
Jan 22 07:33:21 crc kubenswrapper[4933]: I0122 07:33:21.937932 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"78913c8c-01b7-46e7-90ae-2e0b61a9a233","Type":"ContainerDied","Data":"fd46f3a17172030688b626c0cfc56cd405832a105f033af5f810834172c662e0"}
Jan 22 07:33:21 crc kubenswrapper[4933]: I0122 07:33:21.938330 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"78913c8c-01b7-46e7-90ae-2e0b61a9a233","Type":"ContainerDied","Data":"32c90cdd25f7dc620f09bbf3b3d9705d342899648d3883a4a4d1e70ea468b9f8"}
Jan 22 07:33:21 crc kubenswrapper[4933]: I0122 07:33:21.938346 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"78913c8c-01b7-46e7-90ae-2e0b61a9a233","Type":"ContainerDied","Data":"8e098a8fde559b2fb9303f4def2d27c23fe5ab783412916e35ede0c49b0152cc"}
Jan 22 07:33:22 crc kubenswrapper[4933]: I0122 07:33:22.517692 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e955489f-8aff-49bd-848f-ac5bc3cd398d" path="/var/lib/kubelet/pods/e955489f-8aff-49bd-848f-ac5bc3cd398d/volumes"
Jan 22 07:33:24 crc kubenswrapper[4933]: I0122 07:33:24.976563 4933 generic.go:334] "Generic (PLEG): container finished" podID="78913c8c-01b7-46e7-90ae-2e0b61a9a233" containerID="7c1b23d40a9cef7ff4ed6eb31a8a418946befcadbb1b22d9482b4740e952c236" exitCode=0
Jan 22 07:33:24 crc kubenswrapper[4933]: I0122 07:33:24.976668 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"78913c8c-01b7-46e7-90ae-2e0b61a9a233","Type":"ContainerDied","Data":"7c1b23d40a9cef7ff4ed6eb31a8a418946befcadbb1b22d9482b4740e952c236"}
Jan 22 07:33:24 crc kubenswrapper[4933]: I0122 07:33:24.977191 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"78913c8c-01b7-46e7-90ae-2e0b61a9a233","Type":"ContainerDied","Data":"a120eeb6ae13e2977de8a6e99e8d0f88291db1c3172a725671ade1b23818bc67"}
Jan 22 07:33:24 crc kubenswrapper[4933]: I0122 07:33:24.977216 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a120eeb6ae13e2977de8a6e99e8d0f88291db1c3172a725671ade1b23818bc67"
Jan 22 07:33:24 crc kubenswrapper[4933]: I0122 07:33:24.988278 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 22 07:33:25 crc kubenswrapper[4933]: I0122 07:33:25.100017 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/78913c8c-01b7-46e7-90ae-2e0b61a9a233-log-httpd\") pod \"78913c8c-01b7-46e7-90ae-2e0b61a9a233\" (UID: \"78913c8c-01b7-46e7-90ae-2e0b61a9a233\") "
Jan 22 07:33:25 crc kubenswrapper[4933]: I0122 07:33:25.100063 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78913c8c-01b7-46e7-90ae-2e0b61a9a233-combined-ca-bundle\") pod \"78913c8c-01b7-46e7-90ae-2e0b61a9a233\" (UID: \"78913c8c-01b7-46e7-90ae-2e0b61a9a233\") "
Jan 22 07:33:25 crc kubenswrapper[4933]: I0122 07:33:25.100127 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/78913c8c-01b7-46e7-90ae-2e0b61a9a233-config-data\") pod \"78913c8c-01b7-46e7-90ae-2e0b61a9a233\" (UID: \"78913c8c-01b7-46e7-90ae-2e0b61a9a233\") "
Jan 22 07:33:25 crc kubenswrapper[4933]: I0122 07:33:25.100253 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/78913c8c-01b7-46e7-90ae-2e0b61a9a233-sg-core-conf-yaml\") pod \"78913c8c-01b7-46e7-90ae-2e0b61a9a233\" (UID: \"78913c8c-01b7-46e7-90ae-2e0b61a9a233\") "
Jan 22 07:33:25 crc kubenswrapper[4933]: I0122 07:33:25.100418 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/78913c8c-01b7-46e7-90ae-2e0b61a9a233-scripts\") pod \"78913c8c-01b7-46e7-90ae-2e0b61a9a233\" (UID: \"78913c8c-01b7-46e7-90ae-2e0b61a9a233\") "
Jan 22 07:33:25 crc kubenswrapper[4933]: I0122 07:33:25.100451 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-msn4d\" (UniqueName: \"kubernetes.io/projected/78913c8c-01b7-46e7-90ae-2e0b61a9a233-kube-api-access-msn4d\") pod \"78913c8c-01b7-46e7-90ae-2e0b61a9a233\" (UID: \"78913c8c-01b7-46e7-90ae-2e0b61a9a233\") "
Jan 22 07:33:25 crc kubenswrapper[4933]: I0122 07:33:25.100488 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/78913c8c-01b7-46e7-90ae-2e0b61a9a233-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "78913c8c-01b7-46e7-90ae-2e0b61a9a233" (UID: "78913c8c-01b7-46e7-90ae-2e0b61a9a233"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 07:33:25 crc kubenswrapper[4933]: I0122 07:33:25.100512 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/78913c8c-01b7-46e7-90ae-2e0b61a9a233-run-httpd\") pod \"78913c8c-01b7-46e7-90ae-2e0b61a9a233\" (UID: \"78913c8c-01b7-46e7-90ae-2e0b61a9a233\") "
Jan 22 07:33:25 crc kubenswrapper[4933]: I0122 07:33:25.100987 4933 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/78913c8c-01b7-46e7-90ae-2e0b61a9a233-log-httpd\") on node \"crc\" DevicePath \"\""
Jan 22 07:33:25 crc kubenswrapper[4933]: I0122 07:33:25.101251 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/78913c8c-01b7-46e7-90ae-2e0b61a9a233-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "78913c8c-01b7-46e7-90ae-2e0b61a9a233" (UID: "78913c8c-01b7-46e7-90ae-2e0b61a9a233"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 07:33:25 crc kubenswrapper[4933]: I0122 07:33:25.108369 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78913c8c-01b7-46e7-90ae-2e0b61a9a233-scripts" (OuterVolumeSpecName: "scripts") pod "78913c8c-01b7-46e7-90ae-2e0b61a9a233" (UID: "78913c8c-01b7-46e7-90ae-2e0b61a9a233"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 07:33:25 crc kubenswrapper[4933]: I0122 07:33:25.109127 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/78913c8c-01b7-46e7-90ae-2e0b61a9a233-kube-api-access-msn4d" (OuterVolumeSpecName: "kube-api-access-msn4d") pod "78913c8c-01b7-46e7-90ae-2e0b61a9a233" (UID: "78913c8c-01b7-46e7-90ae-2e0b61a9a233"). InnerVolumeSpecName "kube-api-access-msn4d". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 07:33:25 crc kubenswrapper[4933]: I0122 07:33:25.141451 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78913c8c-01b7-46e7-90ae-2e0b61a9a233-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "78913c8c-01b7-46e7-90ae-2e0b61a9a233" (UID: "78913c8c-01b7-46e7-90ae-2e0b61a9a233"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 07:33:25 crc kubenswrapper[4933]: I0122 07:33:25.197249 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78913c8c-01b7-46e7-90ae-2e0b61a9a233-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "78913c8c-01b7-46e7-90ae-2e0b61a9a233" (UID: "78913c8c-01b7-46e7-90ae-2e0b61a9a233"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 07:33:25 crc kubenswrapper[4933]: I0122 07:33:25.204862 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/78913c8c-01b7-46e7-90ae-2e0b61a9a233-scripts\") on node \"crc\" DevicePath \"\""
Jan 22 07:33:25 crc kubenswrapper[4933]: I0122 07:33:25.204892 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-msn4d\" (UniqueName: \"kubernetes.io/projected/78913c8c-01b7-46e7-90ae-2e0b61a9a233-kube-api-access-msn4d\") on node \"crc\" DevicePath \"\""
Jan 22 07:33:25 crc kubenswrapper[4933]: I0122 07:33:25.204907 4933 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/78913c8c-01b7-46e7-90ae-2e0b61a9a233-run-httpd\") on node \"crc\" DevicePath \"\""
Jan 22 07:33:25 crc kubenswrapper[4933]: I0122 07:33:25.204923 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78913c8c-01b7-46e7-90ae-2e0b61a9a233-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 22 07:33:25 crc kubenswrapper[4933]: I0122 07:33:25.204940 4933 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/78913c8c-01b7-46e7-90ae-2e0b61a9a233-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Jan 22 07:33:25 crc kubenswrapper[4933]: I0122 07:33:25.231723 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78913c8c-01b7-46e7-90ae-2e0b61a9a233-config-data" (OuterVolumeSpecName: "config-data") pod "78913c8c-01b7-46e7-90ae-2e0b61a9a233" (UID: "78913c8c-01b7-46e7-90ae-2e0b61a9a233"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 07:33:25 crc kubenswrapper[4933]: I0122 07:33:25.306698 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/78913c8c-01b7-46e7-90ae-2e0b61a9a233-config-data\") on node \"crc\" DevicePath \"\""
Jan 22 07:33:25 crc kubenswrapper[4933]: I0122 07:33:25.985480 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.019583 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.032459 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.046840 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Jan 22 07:33:26 crc kubenswrapper[4933]: E0122 07:33:26.047311 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78913c8c-01b7-46e7-90ae-2e0b61a9a233" containerName="ceilometer-notification-agent"
Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.047324 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="78913c8c-01b7-46e7-90ae-2e0b61a9a233" containerName="ceilometer-notification-agent"
Jan 22 07:33:26 crc kubenswrapper[4933]: E0122 07:33:26.047333 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78913c8c-01b7-46e7-90ae-2e0b61a9a233" containerName="proxy-httpd"
Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.047339 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="78913c8c-01b7-46e7-90ae-2e0b61a9a233" containerName="proxy-httpd"
Jan 22 07:33:26 crc kubenswrapper[4933]: E0122 07:33:26.047355 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78913c8c-01b7-46e7-90ae-2e0b61a9a233" containerName="ceilometer-central-agent"
Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.047361 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="78913c8c-01b7-46e7-90ae-2e0b61a9a233" containerName="ceilometer-central-agent"
Jan 22 07:33:26 crc kubenswrapper[4933]: E0122 07:33:26.047403 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78913c8c-01b7-46e7-90ae-2e0b61a9a233" containerName="sg-core"
Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.047410 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="78913c8c-01b7-46e7-90ae-2e0b61a9a233" containerName="sg-core"
Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.047783 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="78913c8c-01b7-46e7-90ae-2e0b61a9a233" containerName="ceilometer-central-agent"
Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.047808 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="78913c8c-01b7-46e7-90ae-2e0b61a9a233" containerName="proxy-httpd"
Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.047818 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="78913c8c-01b7-46e7-90ae-2e0b61a9a233" containerName="sg-core"
Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.047832 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="78913c8c-01b7-46e7-90ae-2e0b61a9a233" containerName="ceilometer-notification-agent"
Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.049560 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.052194 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.052391 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.052427 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.068487 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.224970 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8c4ae160-7c59-4237-8476-9d3da141fa96-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8c4ae160-7c59-4237-8476-9d3da141fa96\") " pod="openstack/ceilometer-0" Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.225044 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c4ae160-7c59-4237-8476-9d3da141fa96-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"8c4ae160-7c59-4237-8476-9d3da141fa96\") " pod="openstack/ceilometer-0" Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.225218 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c4ae160-7c59-4237-8476-9d3da141fa96-config-data\") pod \"ceilometer-0\" (UID: \"8c4ae160-7c59-4237-8476-9d3da141fa96\") " pod="openstack/ceilometer-0" Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.225315 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8c4ae160-7c59-4237-8476-9d3da141fa96-run-httpd\") pod \"ceilometer-0\" (UID: \"8c4ae160-7c59-4237-8476-9d3da141fa96\") " pod="openstack/ceilometer-0" Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.225356 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8t4ww\" (UniqueName: \"kubernetes.io/projected/8c4ae160-7c59-4237-8476-9d3da141fa96-kube-api-access-8t4ww\") pod \"ceilometer-0\" (UID: \"8c4ae160-7c59-4237-8476-9d3da141fa96\") " pod="openstack/ceilometer-0" Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.225502 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8c4ae160-7c59-4237-8476-9d3da141fa96-log-httpd\") pod \"ceilometer-0\" (UID: \"8c4ae160-7c59-4237-8476-9d3da141fa96\") " pod="openstack/ceilometer-0" Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.225552 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8c4ae160-7c59-4237-8476-9d3da141fa96-scripts\") pod \"ceilometer-0\" (UID: \"8c4ae160-7c59-4237-8476-9d3da141fa96\") " pod="openstack/ceilometer-0" Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.225707 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/8c4ae160-7c59-4237-8476-9d3da141fa96-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8c4ae160-7c59-4237-8476-9d3da141fa96\") " pod="openstack/ceilometer-0" Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.327455 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8c4ae160-7c59-4237-8476-9d3da141fa96-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8c4ae160-7c59-4237-8476-9d3da141fa96\") " pod="openstack/ceilometer-0" Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.327725 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c4ae160-7c59-4237-8476-9d3da141fa96-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"8c4ae160-7c59-4237-8476-9d3da141fa96\") " pod="openstack/ceilometer-0" Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.327815 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c4ae160-7c59-4237-8476-9d3da141fa96-config-data\") pod \"ceilometer-0\" (UID: \"8c4ae160-7c59-4237-8476-9d3da141fa96\") " pod="openstack/ceilometer-0" Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.327900 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8c4ae160-7c59-4237-8476-9d3da141fa96-run-httpd\") pod \"ceilometer-0\" (UID: \"8c4ae160-7c59-4237-8476-9d3da141fa96\") " pod="openstack/ceilometer-0" Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.328010 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8t4ww\" (UniqueName: \"kubernetes.io/projected/8c4ae160-7c59-4237-8476-9d3da141fa96-kube-api-access-8t4ww\") pod \"ceilometer-0\" (UID: \"8c4ae160-7c59-4237-8476-9d3da141fa96\") " pod="openstack/ceilometer-0" Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.328435 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8c4ae160-7c59-4237-8476-9d3da141fa96-log-httpd\") pod \"ceilometer-0\" (UID: \"8c4ae160-7c59-4237-8476-9d3da141fa96\") " pod="openstack/ceilometer-0" Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.328818 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8c4ae160-7c59-4237-8476-9d3da141fa96-scripts\") pod \"ceilometer-0\" (UID: \"8c4ae160-7c59-4237-8476-9d3da141fa96\") " pod="openstack/ceilometer-0" Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.329260 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c4ae160-7c59-4237-8476-9d3da141fa96-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8c4ae160-7c59-4237-8476-9d3da141fa96\") " pod="openstack/ceilometer-0" Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.328298 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8c4ae160-7c59-4237-8476-9d3da141fa96-run-httpd\") pod \"ceilometer-0\" (UID: \"8c4ae160-7c59-4237-8476-9d3da141fa96\") " pod="openstack/ceilometer-0" Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.328772 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/8c4ae160-7c59-4237-8476-9d3da141fa96-log-httpd\") pod \"ceilometer-0\" (UID: \"8c4ae160-7c59-4237-8476-9d3da141fa96\") " pod="openstack/ceilometer-0" Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.332944 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8c4ae160-7c59-4237-8476-9d3da141fa96-scripts\") pod \"ceilometer-0\" (UID: \"8c4ae160-7c59-4237-8476-9d3da141fa96\") " pod="openstack/ceilometer-0" Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.333161 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8c4ae160-7c59-4237-8476-9d3da141fa96-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8c4ae160-7c59-4237-8476-9d3da141fa96\") " pod="openstack/ceilometer-0" Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.333440 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c4ae160-7c59-4237-8476-9d3da141fa96-config-data\") pod \"ceilometer-0\" (UID: \"8c4ae160-7c59-4237-8476-9d3da141fa96\") " pod="openstack/ceilometer-0" Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.333548 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c4ae160-7c59-4237-8476-9d3da141fa96-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"8c4ae160-7c59-4237-8476-9d3da141fa96\") " pod="openstack/ceilometer-0" Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.337409 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c4ae160-7c59-4237-8476-9d3da141fa96-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8c4ae160-7c59-4237-8476-9d3da141fa96\") " pod="openstack/ceilometer-0" Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.345188 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8t4ww\" (UniqueName: \"kubernetes.io/projected/8c4ae160-7c59-4237-8476-9d3da141fa96-kube-api-access-8t4ww\") pod \"ceilometer-0\" (UID: \"8c4ae160-7c59-4237-8476-9d3da141fa96\") " pod="openstack/ceilometer-0" Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.370368 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.503347 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="78913c8c-01b7-46e7-90ae-2e0b61a9a233" path="/var/lib/kubelet/pods/78913c8c-01b7-46e7-90ae-2e0b61a9a233/volumes" Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.850586 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 07:33:26 crc kubenswrapper[4933]: I0122 07:33:26.994501 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8c4ae160-7c59-4237-8476-9d3da141fa96","Type":"ContainerStarted","Data":"6b0748826c23c6851db5b0023219c954c5c6fbbd2ad5c4d94dd35586e7ce26f8"} Jan 22 07:33:28 crc kubenswrapper[4933]: I0122 07:33:28.021522 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8c4ae160-7c59-4237-8476-9d3da141fa96","Type":"ContainerStarted","Data":"1dacc7f215f422f960b8b4acb7e21e35bbb0f8c25c3550b6bc169e553a360bc8"} Jan 22 07:33:28 crc kubenswrapper[4933]: I0122 07:33:28.395743 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 22 07:33:29 crc kubenswrapper[4933]: I0122 07:33:29.033413 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8c4ae160-7c59-4237-8476-9d3da141fa96","Type":"ContainerStarted","Data":"69582822c6f5f24cd4b7bd2f3de0adf7b0210f76a95d8a1c8cfdcca81ded0ede"} Jan 22 07:33:29 crc kubenswrapper[4933]: I0122 07:33:29.033464 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8c4ae160-7c59-4237-8476-9d3da141fa96","Type":"ContainerStarted","Data":"2522457b70231cf74ef3e0a8ddc730ae3ae502b3f2433acf1b1aaa6edf9f15a1"} Jan 22 07:33:31 crc kubenswrapper[4933]: I0122 07:33:31.054162 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8c4ae160-7c59-4237-8476-9d3da141fa96","Type":"ContainerStarted","Data":"eac7fb0b0f46701248c5bf5af6fbdaec19221cb70ab4e000be0dd03842994e4b"} Jan 22 07:33:31 crc kubenswrapper[4933]: I0122 07:33:31.054676 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 22 07:33:31 crc kubenswrapper[4933]: I0122 07:33:31.079953 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.006715427 podStartE2EDuration="5.079929967s" podCreationTimestamp="2026-01-22 07:33:26 +0000 UTC" firstStartedPulling="2026-01-22 07:33:26.859378649 +0000 UTC m=+6454.696504002" lastFinishedPulling="2026-01-22 07:33:29.932593179 +0000 UTC m=+6457.769718542" observedRunningTime="2026-01-22 07:33:31.071511122 +0000 UTC m=+6458.908636465" watchObservedRunningTime="2026-01-22 07:33:31.079929967 +0000 UTC m=+6458.917055320" Jan 22 07:33:40 crc kubenswrapper[4933]: I0122 07:33:40.048120 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-m8wzk"] Jan 22 07:33:40 crc kubenswrapper[4933]: I0122 07:33:40.058382 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-m8wzk"] Jan 22 07:33:40 crc kubenswrapper[4933]: I0122 07:33:40.506485 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6759c870-c931-4a37-9062-fde2dfada3f1" path="/var/lib/kubelet/pods/6759c870-c931-4a37-9062-fde2dfada3f1/volumes" Jan 22 07:33:40 crc kubenswrapper[4933]: I0122 07:33:40.943031 4933 
patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 07:33:40 crc kubenswrapper[4933]: I0122 07:33:40.943422 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 07:33:48 crc kubenswrapper[4933]: I0122 07:33:48.234728 4933 generic.go:334] "Generic (PLEG): container finished" podID="843005da-37de-4407-9798-876c6634ca54" containerID="e054e4fe54d92652dc145035f229aabbca8c18e7be1161e717e5a611a9dfd072" exitCode=137 Jan 22 07:33:48 crc kubenswrapper[4933]: I0122 07:33:48.235369 4933 generic.go:334] "Generic (PLEG): container finished" podID="843005da-37de-4407-9798-876c6634ca54" containerID="d584ff6112a4ce4b8166a0b8b4ec4a893604ba30922df270f3c9897daaa45269" exitCode=137 Jan 22 07:33:48 crc kubenswrapper[4933]: I0122 07:33:48.235308 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"843005da-37de-4407-9798-876c6634ca54","Type":"ContainerDied","Data":"e054e4fe54d92652dc145035f229aabbca8c18e7be1161e717e5a611a9dfd072"} Jan 22 07:33:48 crc kubenswrapper[4933]: I0122 07:33:48.235446 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"843005da-37de-4407-9798-876c6634ca54","Type":"ContainerDied","Data":"d584ff6112a4ce4b8166a0b8b4ec4a893604ba30922df270f3c9897daaa45269"} Jan 22 07:33:48 crc kubenswrapper[4933]: I0122 07:33:48.357742 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Jan 22 07:33:48 crc kubenswrapper[4933]: I0122 07:33:48.452730 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/843005da-37de-4407-9798-876c6634ca54-combined-ca-bundle\") pod \"843005da-37de-4407-9798-876c6634ca54\" (UID: \"843005da-37de-4407-9798-876c6634ca54\") " Jan 22 07:33:48 crc kubenswrapper[4933]: I0122 07:33:48.452827 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fn5wv\" (UniqueName: \"kubernetes.io/projected/843005da-37de-4407-9798-876c6634ca54-kube-api-access-fn5wv\") pod \"843005da-37de-4407-9798-876c6634ca54\" (UID: \"843005da-37de-4407-9798-876c6634ca54\") " Jan 22 07:33:48 crc kubenswrapper[4933]: I0122 07:33:48.452932 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/843005da-37de-4407-9798-876c6634ca54-scripts\") pod \"843005da-37de-4407-9798-876c6634ca54\" (UID: \"843005da-37de-4407-9798-876c6634ca54\") " Jan 22 07:33:48 crc kubenswrapper[4933]: I0122 07:33:48.453110 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/843005da-37de-4407-9798-876c6634ca54-config-data\") pod \"843005da-37de-4407-9798-876c6634ca54\" (UID: \"843005da-37de-4407-9798-876c6634ca54\") " Jan 22 07:33:48 crc kubenswrapper[4933]: I0122 07:33:48.473993 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/843005da-37de-4407-9798-876c6634ca54-kube-api-access-fn5wv" (OuterVolumeSpecName: "kube-api-access-fn5wv") pod "843005da-37de-4407-9798-876c6634ca54" (UID: "843005da-37de-4407-9798-876c6634ca54"). InnerVolumeSpecName "kube-api-access-fn5wv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:33:48 crc kubenswrapper[4933]: I0122 07:33:48.474316 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/843005da-37de-4407-9798-876c6634ca54-scripts" (OuterVolumeSpecName: "scripts") pod "843005da-37de-4407-9798-876c6634ca54" (UID: "843005da-37de-4407-9798-876c6634ca54"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:33:48 crc kubenswrapper[4933]: I0122 07:33:48.555881 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fn5wv\" (UniqueName: \"kubernetes.io/projected/843005da-37de-4407-9798-876c6634ca54-kube-api-access-fn5wv\") on node \"crc\" DevicePath \"\"" Jan 22 07:33:48 crc kubenswrapper[4933]: I0122 07:33:48.555923 4933 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/843005da-37de-4407-9798-876c6634ca54-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 07:33:48 crc kubenswrapper[4933]: I0122 07:33:48.577181 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/843005da-37de-4407-9798-876c6634ca54-config-data" (OuterVolumeSpecName: "config-data") pod "843005da-37de-4407-9798-876c6634ca54" (UID: "843005da-37de-4407-9798-876c6634ca54"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:33:48 crc kubenswrapper[4933]: I0122 07:33:48.592917 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/843005da-37de-4407-9798-876c6634ca54-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "843005da-37de-4407-9798-876c6634ca54" (UID: "843005da-37de-4407-9798-876c6634ca54"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:33:48 crc kubenswrapper[4933]: I0122 07:33:48.657732 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/843005da-37de-4407-9798-876c6634ca54-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 07:33:48 crc kubenswrapper[4933]: I0122 07:33:48.657767 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/843005da-37de-4407-9798-876c6634ca54-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.246824 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"843005da-37de-4407-9798-876c6634ca54","Type":"ContainerDied","Data":"d86086483037664981aa85b8136e6244a609d32d494740a309a72583bfc8f941"} Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.246882 4933 scope.go:117] "RemoveContainer" containerID="e054e4fe54d92652dc145035f229aabbca8c18e7be1161e717e5a611a9dfd072" Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.246887 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.279162 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.284789 4933 scope.go:117] "RemoveContainer" containerID="d584ff6112a4ce4b8166a0b8b4ec4a893604ba30922df270f3c9897daaa45269" Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.287489 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-0"] Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.309120 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Jan 22 07:33:49 crc kubenswrapper[4933]: E0122 07:33:49.309647 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="843005da-37de-4407-9798-876c6634ca54" containerName="aodh-notifier" Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.309672 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="843005da-37de-4407-9798-876c6634ca54" containerName="aodh-notifier" Jan 22 07:33:49 crc kubenswrapper[4933]: E0122 07:33:49.309715 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="843005da-37de-4407-9798-876c6634ca54" containerName="aodh-api" Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.309725 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="843005da-37de-4407-9798-876c6634ca54" containerName="aodh-api" Jan 22 07:33:49 crc kubenswrapper[4933]: E0122 07:33:49.309739 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="843005da-37de-4407-9798-876c6634ca54" containerName="aodh-evaluator" Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.309748 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="843005da-37de-4407-9798-876c6634ca54" containerName="aodh-evaluator" Jan 22 07:33:49 crc kubenswrapper[4933]: E0122 07:33:49.309768 4933 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="843005da-37de-4407-9798-876c6634ca54" containerName="aodh-listener" Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.309778 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="843005da-37de-4407-9798-876c6634ca54" containerName="aodh-listener" Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.310011 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="843005da-37de-4407-9798-876c6634ca54" containerName="aodh-evaluator" Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.310031 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="843005da-37de-4407-9798-876c6634ca54" containerName="aodh-notifier" Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.310047 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="843005da-37de-4407-9798-876c6634ca54" containerName="aodh-api" Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.310087 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="843005da-37de-4407-9798-876c6634ca54" containerName="aodh-listener" Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.312809 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.316836 4933 scope.go:117] "RemoveContainer" containerID="10a3b51405c06ee30799a5d9965bf5ff8dca15e003363ace0e041c65f27c4c65" Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.317206 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.320922 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.320999 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-6cqxc" Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.321234 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-public-svc" Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.321808 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-internal-svc" Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.327616 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.358969 4933 scope.go:117] "RemoveContainer" containerID="0c966166dc3517dd598a9f63c7c5829a8a5f047d3766df06966baa3b162c9f65" Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.375933 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/efa9140a-0903-4fe7-8bc6-6f82d7e27e56-config-data\") pod \"aodh-0\" (UID: \"efa9140a-0903-4fe7-8bc6-6f82d7e27e56\") " pod="openstack/aodh-0" Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.375994 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d2mxx\" (UniqueName: \"kubernetes.io/projected/efa9140a-0903-4fe7-8bc6-6f82d7e27e56-kube-api-access-d2mxx\") pod \"aodh-0\" (UID: \"efa9140a-0903-4fe7-8bc6-6f82d7e27e56\") " pod="openstack/aodh-0" Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.376058 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/efa9140a-0903-4fe7-8bc6-6f82d7e27e56-internal-tls-certs\") pod \"aodh-0\" (UID: \"efa9140a-0903-4fe7-8bc6-6f82d7e27e56\") " pod="openstack/aodh-0" Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.376160 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/efa9140a-0903-4fe7-8bc6-6f82d7e27e56-public-tls-certs\") pod \"aodh-0\" (UID: \"efa9140a-0903-4fe7-8bc6-6f82d7e27e56\") " pod="openstack/aodh-0" Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.376237 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/efa9140a-0903-4fe7-8bc6-6f82d7e27e56-combined-ca-bundle\") pod \"aodh-0\" (UID: \"efa9140a-0903-4fe7-8bc6-6f82d7e27e56\") " pod="openstack/aodh-0" Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.376344 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/efa9140a-0903-4fe7-8bc6-6f82d7e27e56-scripts\") pod \"aodh-0\" (UID: \"efa9140a-0903-4fe7-8bc6-6f82d7e27e56\") " pod="openstack/aodh-0" Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.478033 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/efa9140a-0903-4fe7-8bc6-6f82d7e27e56-combined-ca-bundle\") pod \"aodh-0\" (UID: \"efa9140a-0903-4fe7-8bc6-6f82d7e27e56\") " pod="openstack/aodh-0" Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.478408 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/efa9140a-0903-4fe7-8bc6-6f82d7e27e56-scripts\") pod \"aodh-0\" (UID: \"efa9140a-0903-4fe7-8bc6-6f82d7e27e56\") " pod="openstack/aodh-0" Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.478445 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/efa9140a-0903-4fe7-8bc6-6f82d7e27e56-config-data\") pod \"aodh-0\" (UID: \"efa9140a-0903-4fe7-8bc6-6f82d7e27e56\") " pod="openstack/aodh-0" Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.478475 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d2mxx\" (UniqueName: \"kubernetes.io/projected/efa9140a-0903-4fe7-8bc6-6f82d7e27e56-kube-api-access-d2mxx\") pod \"aodh-0\" (UID: \"efa9140a-0903-4fe7-8bc6-6f82d7e27e56\") " pod="openstack/aodh-0" Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.478512 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/efa9140a-0903-4fe7-8bc6-6f82d7e27e56-internal-tls-certs\") pod \"aodh-0\" (UID: \"efa9140a-0903-4fe7-8bc6-6f82d7e27e56\") " pod="openstack/aodh-0" Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.478534 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/efa9140a-0903-4fe7-8bc6-6f82d7e27e56-public-tls-certs\") pod \"aodh-0\" (UID: \"efa9140a-0903-4fe7-8bc6-6f82d7e27e56\") " pod="openstack/aodh-0" Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.486040 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/efa9140a-0903-4fe7-8bc6-6f82d7e27e56-combined-ca-bundle\") pod \"aodh-0\" (UID: \"efa9140a-0903-4fe7-8bc6-6f82d7e27e56\") " pod="openstack/aodh-0" Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.486652 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/efa9140a-0903-4fe7-8bc6-6f82d7e27e56-internal-tls-certs\") pod \"aodh-0\" (UID: \"efa9140a-0903-4fe7-8bc6-6f82d7e27e56\") " pod="openstack/aodh-0" Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.492092 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/efa9140a-0903-4fe7-8bc6-6f82d7e27e56-config-data\") pod \"aodh-0\" (UID: \"efa9140a-0903-4fe7-8bc6-6f82d7e27e56\") " pod="openstack/aodh-0" Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.493563 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/efa9140a-0903-4fe7-8bc6-6f82d7e27e56-public-tls-certs\") pod \"aodh-0\" (UID: \"efa9140a-0903-4fe7-8bc6-6f82d7e27e56\") " pod="openstack/aodh-0" Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.495397 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/efa9140a-0903-4fe7-8bc6-6f82d7e27e56-scripts\") pod \"aodh-0\" (UID: \"efa9140a-0903-4fe7-8bc6-6f82d7e27e56\") " pod="openstack/aodh-0" Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.497682 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d2mxx\" (UniqueName: \"kubernetes.io/projected/efa9140a-0903-4fe7-8bc6-6f82d7e27e56-kube-api-access-d2mxx\") pod \"aodh-0\" (UID: \"efa9140a-0903-4fe7-8bc6-6f82d7e27e56\") " pod="openstack/aodh-0" Jan 22 07:33:49 crc kubenswrapper[4933]: I0122 07:33:49.633824 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Jan 22 07:33:50 crc kubenswrapper[4933]: I0122 07:33:50.111798 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Jan 22 07:33:50 crc kubenswrapper[4933]: I0122 07:33:50.258733 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"efa9140a-0903-4fe7-8bc6-6f82d7e27e56","Type":"ContainerStarted","Data":"7f0fca6a44553a47fc8a3f64d3eef9a02bf481f4eff3977cb8a9377771ed3d50"} Jan 22 07:33:50 crc kubenswrapper[4933]: I0122 07:33:50.505055 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="843005da-37de-4407-9798-876c6634ca54" path="/var/lib/kubelet/pods/843005da-37de-4407-9798-876c6634ca54/volumes" Jan 22 07:33:51 crc kubenswrapper[4933]: I0122 07:33:51.273459 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"efa9140a-0903-4fe7-8bc6-6f82d7e27e56","Type":"ContainerStarted","Data":"72c236a8a423c390c7874e56ca66126d7f09c6ccdf198a80a5d49d3182761149"} Jan 22 07:33:52 crc kubenswrapper[4933]: I0122 07:33:52.284846 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"efa9140a-0903-4fe7-8bc6-6f82d7e27e56","Type":"ContainerStarted","Data":"18994e9df99e11cf66d45276eb0cfece4caa5acc6fc4f1b6251201bcc5ca027b"} Jan 22 07:33:53 crc kubenswrapper[4933]: I0122 07:33:53.300288 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"efa9140a-0903-4fe7-8bc6-6f82d7e27e56","Type":"ContainerStarted","Data":"6a4deef2a9896e3abdd4ba6282ab235e2586fddc45048d99633aaf1e1826c34f"} Jan 22 07:33:54 crc kubenswrapper[4933]: I0122 07:33:54.970117 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-55b464b9d9-s8fz9"] Jan 22 07:33:54 crc kubenswrapper[4933]: I0122 07:33:54.972525 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-55b464b9d9-s8fz9" Jan 22 07:33:54 crc kubenswrapper[4933]: I0122 07:33:54.974266 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1" Jan 22 07:33:54 crc kubenswrapper[4933]: I0122 07:33:54.983916 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55b464b9d9-s8fz9"] Jan 22 07:33:55 crc kubenswrapper[4933]: I0122 07:33:55.099473 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bfe0045e-46ce-4302-8d36-a55b5eff0184-ovsdbserver-nb\") pod \"dnsmasq-dns-55b464b9d9-s8fz9\" (UID: \"bfe0045e-46ce-4302-8d36-a55b5eff0184\") " pod="openstack/dnsmasq-dns-55b464b9d9-s8fz9" Jan 22 07:33:55 crc kubenswrapper[4933]: I0122 07:33:55.099607 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/bfe0045e-46ce-4302-8d36-a55b5eff0184-openstack-cell1\") pod \"dnsmasq-dns-55b464b9d9-s8fz9\" (UID: \"bfe0045e-46ce-4302-8d36-a55b5eff0184\") " pod="openstack/dnsmasq-dns-55b464b9d9-s8fz9" Jan 22 07:33:55 crc kubenswrapper[4933]: I0122 07:33:55.099662 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bfe0045e-46ce-4302-8d36-a55b5eff0184-config\") pod \"dnsmasq-dns-55b464b9d9-s8fz9\" (UID: \"bfe0045e-46ce-4302-8d36-a55b5eff0184\") " pod="openstack/dnsmasq-dns-55b464b9d9-s8fz9" Jan 22 07:33:55 crc kubenswrapper[4933]: I0122 07:33:55.099700 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s5rsv\" (UniqueName: \"kubernetes.io/projected/bfe0045e-46ce-4302-8d36-a55b5eff0184-kube-api-access-s5rsv\") pod \"dnsmasq-dns-55b464b9d9-s8fz9\" (UID: \"bfe0045e-46ce-4302-8d36-a55b5eff0184\") " pod="openstack/dnsmasq-dns-55b464b9d9-s8fz9" Jan 22 07:33:55 crc kubenswrapper[4933]: I0122 07:33:55.099820 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bfe0045e-46ce-4302-8d36-a55b5eff0184-dns-svc\") pod \"dnsmasq-dns-55b464b9d9-s8fz9\" (UID: \"bfe0045e-46ce-4302-8d36-a55b5eff0184\") " pod="openstack/dnsmasq-dns-55b464b9d9-s8fz9" Jan 22 07:33:55 crc kubenswrapper[4933]: I0122 07:33:55.099855 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bfe0045e-46ce-4302-8d36-a55b5eff0184-ovsdbserver-sb\") pod \"dnsmasq-dns-55b464b9d9-s8fz9\" (UID: \"bfe0045e-46ce-4302-8d36-a55b5eff0184\") " pod="openstack/dnsmasq-dns-55b464b9d9-s8fz9" Jan 22 07:33:55 crc kubenswrapper[4933]: I0122 07:33:55.202065 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/bfe0045e-46ce-4302-8d36-a55b5eff0184-openstack-cell1\") pod \"dnsmasq-dns-55b464b9d9-s8fz9\" (UID: \"bfe0045e-46ce-4302-8d36-a55b5eff0184\") " pod="openstack/dnsmasq-dns-55b464b9d9-s8fz9" Jan 22 07:33:55 crc kubenswrapper[4933]: I0122 07:33:55.202213 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bfe0045e-46ce-4302-8d36-a55b5eff0184-config\") pod \"dnsmasq-dns-55b464b9d9-s8fz9\" (UID: \"bfe0045e-46ce-4302-8d36-a55b5eff0184\") " 
pod="openstack/dnsmasq-dns-55b464b9d9-s8fz9" Jan 22 07:33:55 crc kubenswrapper[4933]: I0122 07:33:55.202268 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s5rsv\" (UniqueName: \"kubernetes.io/projected/bfe0045e-46ce-4302-8d36-a55b5eff0184-kube-api-access-s5rsv\") pod \"dnsmasq-dns-55b464b9d9-s8fz9\" (UID: \"bfe0045e-46ce-4302-8d36-a55b5eff0184\") " pod="openstack/dnsmasq-dns-55b464b9d9-s8fz9" Jan 22 07:33:55 crc kubenswrapper[4933]: I0122 07:33:55.202402 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bfe0045e-46ce-4302-8d36-a55b5eff0184-dns-svc\") pod \"dnsmasq-dns-55b464b9d9-s8fz9\" (UID: \"bfe0045e-46ce-4302-8d36-a55b5eff0184\") " pod="openstack/dnsmasq-dns-55b464b9d9-s8fz9" Jan 22 07:33:55 crc kubenswrapper[4933]: I0122 07:33:55.202448 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bfe0045e-46ce-4302-8d36-a55b5eff0184-ovsdbserver-sb\") pod \"dnsmasq-dns-55b464b9d9-s8fz9\" (UID: \"bfe0045e-46ce-4302-8d36-a55b5eff0184\") " pod="openstack/dnsmasq-dns-55b464b9d9-s8fz9" Jan 22 07:33:55 crc kubenswrapper[4933]: I0122 07:33:55.202657 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bfe0045e-46ce-4302-8d36-a55b5eff0184-ovsdbserver-nb\") pod \"dnsmasq-dns-55b464b9d9-s8fz9\" (UID: \"bfe0045e-46ce-4302-8d36-a55b5eff0184\") " pod="openstack/dnsmasq-dns-55b464b9d9-s8fz9" Jan 22 07:33:55 crc kubenswrapper[4933]: I0122 07:33:55.203423 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bfe0045e-46ce-4302-8d36-a55b5eff0184-ovsdbserver-sb\") pod \"dnsmasq-dns-55b464b9d9-s8fz9\" (UID: \"bfe0045e-46ce-4302-8d36-a55b5eff0184\") " pod="openstack/dnsmasq-dns-55b464b9d9-s8fz9" Jan 22 07:33:55 crc kubenswrapper[4933]: I0122 07:33:55.203455 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bfe0045e-46ce-4302-8d36-a55b5eff0184-dns-svc\") pod \"dnsmasq-dns-55b464b9d9-s8fz9\" (UID: \"bfe0045e-46ce-4302-8d36-a55b5eff0184\") " pod="openstack/dnsmasq-dns-55b464b9d9-s8fz9" Jan 22 07:33:55 crc kubenswrapper[4933]: I0122 07:33:55.203619 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bfe0045e-46ce-4302-8d36-a55b5eff0184-ovsdbserver-nb\") pod \"dnsmasq-dns-55b464b9d9-s8fz9\" (UID: \"bfe0045e-46ce-4302-8d36-a55b5eff0184\") " pod="openstack/dnsmasq-dns-55b464b9d9-s8fz9" Jan 22 07:33:55 crc kubenswrapper[4933]: I0122 07:33:55.204239 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/bfe0045e-46ce-4302-8d36-a55b5eff0184-openstack-cell1\") pod \"dnsmasq-dns-55b464b9d9-s8fz9\" (UID: \"bfe0045e-46ce-4302-8d36-a55b5eff0184\") " pod="openstack/dnsmasq-dns-55b464b9d9-s8fz9" Jan 22 07:33:55 crc kubenswrapper[4933]: I0122 07:33:55.204246 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bfe0045e-46ce-4302-8d36-a55b5eff0184-config\") pod \"dnsmasq-dns-55b464b9d9-s8fz9\" (UID: \"bfe0045e-46ce-4302-8d36-a55b5eff0184\") " pod="openstack/dnsmasq-dns-55b464b9d9-s8fz9" Jan 22 07:33:55 crc kubenswrapper[4933]: I0122 07:33:55.221116 4933 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s5rsv\" (UniqueName: \"kubernetes.io/projected/bfe0045e-46ce-4302-8d36-a55b5eff0184-kube-api-access-s5rsv\") pod \"dnsmasq-dns-55b464b9d9-s8fz9\" (UID: \"bfe0045e-46ce-4302-8d36-a55b5eff0184\") " pod="openstack/dnsmasq-dns-55b464b9d9-s8fz9" Jan 22 07:33:55 crc kubenswrapper[4933]: I0122 07:33:55.288713 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55b464b9d9-s8fz9" Jan 22 07:33:55 crc kubenswrapper[4933]: I0122 07:33:55.334315 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"efa9140a-0903-4fe7-8bc6-6f82d7e27e56","Type":"ContainerStarted","Data":"abaed454d5c3cca0e6b4de78585a4d3b0ee2cc2ed0b4ea1bc234454c6015548e"} Jan 22 07:33:55 crc kubenswrapper[4933]: I0122 07:33:55.890804 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=2.963102089 podStartE2EDuration="6.89078289s" podCreationTimestamp="2026-01-22 07:33:49 +0000 UTC" firstStartedPulling="2026-01-22 07:33:50.122748082 +0000 UTC m=+6477.959873435" lastFinishedPulling="2026-01-22 07:33:54.050428883 +0000 UTC m=+6481.887554236" observedRunningTime="2026-01-22 07:33:55.366622617 +0000 UTC m=+6483.203747970" watchObservedRunningTime="2026-01-22 07:33:55.89078289 +0000 UTC m=+6483.727908243" Jan 22 07:33:55 crc kubenswrapper[4933]: I0122 07:33:55.901123 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55b464b9d9-s8fz9"] Jan 22 07:33:56 crc kubenswrapper[4933]: I0122 07:33:56.355502 4933 generic.go:334] "Generic (PLEG): container finished" podID="bfe0045e-46ce-4302-8d36-a55b5eff0184" containerID="5b51e355749b30de8a876e0235d73537675adcb118b560e67045b16896c710e5" exitCode=0 Jan 22 07:33:56 crc kubenswrapper[4933]: I0122 07:33:56.355836 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55b464b9d9-s8fz9" event={"ID":"bfe0045e-46ce-4302-8d36-a55b5eff0184","Type":"ContainerDied","Data":"5b51e355749b30de8a876e0235d73537675adcb118b560e67045b16896c710e5"} Jan 22 07:33:56 crc kubenswrapper[4933]: I0122 07:33:56.356281 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55b464b9d9-s8fz9" event={"ID":"bfe0045e-46ce-4302-8d36-a55b5eff0184","Type":"ContainerStarted","Data":"45c523e630ccc1c4cb186a484f938200e65676ccbbdfa80d331e2bc1824c5dbd"} Jan 22 07:33:56 crc kubenswrapper[4933]: I0122 07:33:56.396644 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 22 07:33:57 crc kubenswrapper[4933]: I0122 07:33:57.366706 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55b464b9d9-s8fz9" event={"ID":"bfe0045e-46ce-4302-8d36-a55b5eff0184","Type":"ContainerStarted","Data":"955005e0b0c176b5dbbb30932c6460dd305769d2af7b76a6a4d4b5e16d39e333"} Jan 22 07:33:57 crc kubenswrapper[4933]: I0122 07:33:57.366960 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-55b464b9d9-s8fz9" Jan 22 07:33:57 crc kubenswrapper[4933]: I0122 07:33:57.421773 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-55b464b9d9-s8fz9" podStartSLOduration=3.421751277 podStartE2EDuration="3.421751277s" podCreationTimestamp="2026-01-22 07:33:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 
07:33:57.38533334 +0000 UTC m=+6485.222458703" watchObservedRunningTime="2026-01-22 07:33:57.421751277 +0000 UTC m=+6485.258876640"
Jan 22 07:34:05 crc kubenswrapper[4933]: I0122 07:34:05.291279 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-55b464b9d9-s8fz9"
Jan 22 07:34:05 crc kubenswrapper[4933]: I0122 07:34:05.346689 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675d97df87-x6hlh"]
Jan 22 07:34:05 crc kubenswrapper[4933]: I0122 07:34:05.347838 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-675d97df87-x6hlh" podUID="59d07063-6db8-4225-9c77-f916c2747adf" containerName="dnsmasq-dns" containerID="cri-o://13bc2861ae1bca76ca67d1612ed13b3d344d97ecd683d5b1268bf93ffba78592" gracePeriod=10
Jan 22 07:34:05 crc kubenswrapper[4933]: I0122 07:34:05.558689 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-9c79574d7-xmjkq"]
Jan 22 07:34:05 crc kubenswrapper[4933]: I0122 07:34:05.560819 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-9c79574d7-xmjkq"
Jan 22 07:34:05 crc kubenswrapper[4933]: I0122 07:34:05.591890 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-9c79574d7-xmjkq"]
Jan 22 07:34:05 crc kubenswrapper[4933]: I0122 07:34:05.648765 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/84a13c36-50da-4671-ac3a-fc75c150adfc-openstack-cell1\") pod \"dnsmasq-dns-9c79574d7-xmjkq\" (UID: \"84a13c36-50da-4671-ac3a-fc75c150adfc\") " pod="openstack/dnsmasq-dns-9c79574d7-xmjkq"
Jan 22 07:34:05 crc kubenswrapper[4933]: I0122 07:34:05.648869 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mw6rl\" (UniqueName: \"kubernetes.io/projected/84a13c36-50da-4671-ac3a-fc75c150adfc-kube-api-access-mw6rl\") pod \"dnsmasq-dns-9c79574d7-xmjkq\" (UID: \"84a13c36-50da-4671-ac3a-fc75c150adfc\") " pod="openstack/dnsmasq-dns-9c79574d7-xmjkq"
Jan 22 07:34:05 crc kubenswrapper[4933]: I0122 07:34:05.649834 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/84a13c36-50da-4671-ac3a-fc75c150adfc-ovsdbserver-nb\") pod \"dnsmasq-dns-9c79574d7-xmjkq\" (UID: \"84a13c36-50da-4671-ac3a-fc75c150adfc\") " pod="openstack/dnsmasq-dns-9c79574d7-xmjkq"
Jan 22 07:34:05 crc kubenswrapper[4933]: I0122 07:34:05.649869 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/84a13c36-50da-4671-ac3a-fc75c150adfc-ovsdbserver-sb\") pod \"dnsmasq-dns-9c79574d7-xmjkq\" (UID: \"84a13c36-50da-4671-ac3a-fc75c150adfc\") " pod="openstack/dnsmasq-dns-9c79574d7-xmjkq"
Jan 22 07:34:05 crc kubenswrapper[4933]: I0122 07:34:05.650143 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84a13c36-50da-4671-ac3a-fc75c150adfc-config\") pod \"dnsmasq-dns-9c79574d7-xmjkq\" (UID: \"84a13c36-50da-4671-ac3a-fc75c150adfc\") " pod="openstack/dnsmasq-dns-9c79574d7-xmjkq"
Jan 22 07:34:05 crc kubenswrapper[4933]: I0122 07:34:05.650712 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/84a13c36-50da-4671-ac3a-fc75c150adfc-dns-svc\") pod \"dnsmasq-dns-9c79574d7-xmjkq\" (UID: \"84a13c36-50da-4671-ac3a-fc75c150adfc\") " pod="openstack/dnsmasq-dns-9c79574d7-xmjkq"
Jan 22 07:34:05 crc kubenswrapper[4933]: I0122 07:34:05.712981 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-675d97df87-x6hlh" podUID="59d07063-6db8-4225-9c77-f916c2747adf" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.1.96:5353: connect: connection refused"
Jan 22 07:34:05 crc kubenswrapper[4933]: I0122 07:34:05.754214 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84a13c36-50da-4671-ac3a-fc75c150adfc-config\") pod \"dnsmasq-dns-9c79574d7-xmjkq\" (UID: \"84a13c36-50da-4671-ac3a-fc75c150adfc\") " pod="openstack/dnsmasq-dns-9c79574d7-xmjkq"
Jan 22 07:34:05 crc kubenswrapper[4933]: I0122 07:34:05.754285 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/84a13c36-50da-4671-ac3a-fc75c150adfc-dns-svc\") pod \"dnsmasq-dns-9c79574d7-xmjkq\" (UID: \"84a13c36-50da-4671-ac3a-fc75c150adfc\") " pod="openstack/dnsmasq-dns-9c79574d7-xmjkq"
Jan 22 07:34:05 crc kubenswrapper[4933]: I0122 07:34:05.754329 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/84a13c36-50da-4671-ac3a-fc75c150adfc-openstack-cell1\") pod \"dnsmasq-dns-9c79574d7-xmjkq\" (UID: \"84a13c36-50da-4671-ac3a-fc75c150adfc\") " pod="openstack/dnsmasq-dns-9c79574d7-xmjkq"
Jan 22 07:34:05 crc kubenswrapper[4933]: I0122 07:34:05.754361 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mw6rl\" (UniqueName: \"kubernetes.io/projected/84a13c36-50da-4671-ac3a-fc75c150adfc-kube-api-access-mw6rl\") pod \"dnsmasq-dns-9c79574d7-xmjkq\" (UID: \"84a13c36-50da-4671-ac3a-fc75c150adfc\") " pod="openstack/dnsmasq-dns-9c79574d7-xmjkq"
Jan 22 07:34:05 crc kubenswrapper[4933]: I0122 07:34:05.754416 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/84a13c36-50da-4671-ac3a-fc75c150adfc-ovsdbserver-nb\") pod \"dnsmasq-dns-9c79574d7-xmjkq\" (UID: \"84a13c36-50da-4671-ac3a-fc75c150adfc\") " pod="openstack/dnsmasq-dns-9c79574d7-xmjkq"
Jan 22 07:34:05 crc kubenswrapper[4933]: I0122 07:34:05.754436 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/84a13c36-50da-4671-ac3a-fc75c150adfc-ovsdbserver-sb\") pod \"dnsmasq-dns-9c79574d7-xmjkq\" (UID: \"84a13c36-50da-4671-ac3a-fc75c150adfc\") " pod="openstack/dnsmasq-dns-9c79574d7-xmjkq"
Jan 22 07:34:05 crc kubenswrapper[4933]: I0122 07:34:05.755336 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/84a13c36-50da-4671-ac3a-fc75c150adfc-ovsdbserver-sb\") pod \"dnsmasq-dns-9c79574d7-xmjkq\" (UID: \"84a13c36-50da-4671-ac3a-fc75c150adfc\") " pod="openstack/dnsmasq-dns-9c79574d7-xmjkq"
Jan 22 07:34:05 crc kubenswrapper[4933]: I0122 07:34:05.755601 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/84a13c36-50da-4671-ac3a-fc75c150adfc-dns-svc\") pod \"dnsmasq-dns-9c79574d7-xmjkq\" (UID: \"84a13c36-50da-4671-ac3a-fc75c150adfc\") " pod="openstack/dnsmasq-dns-9c79574d7-xmjkq"
Jan 22 07:34:05 crc kubenswrapper[4933]: I0122 07:34:05.755656 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/84a13c36-50da-4671-ac3a-fc75c150adfc-openstack-cell1\") pod \"dnsmasq-dns-9c79574d7-xmjkq\" (UID: \"84a13c36-50da-4671-ac3a-fc75c150adfc\") " pod="openstack/dnsmasq-dns-9c79574d7-xmjkq"
Jan 22 07:34:05 crc kubenswrapper[4933]: I0122 07:34:05.755747 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/84a13c36-50da-4671-ac3a-fc75c150adfc-ovsdbserver-nb\") pod \"dnsmasq-dns-9c79574d7-xmjkq\" (UID: \"84a13c36-50da-4671-ac3a-fc75c150adfc\") " pod="openstack/dnsmasq-dns-9c79574d7-xmjkq"
Jan 22 07:34:05 crc kubenswrapper[4933]: I0122 07:34:05.756376 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84a13c36-50da-4671-ac3a-fc75c150adfc-config\") pod \"dnsmasq-dns-9c79574d7-xmjkq\" (UID: \"84a13c36-50da-4671-ac3a-fc75c150adfc\") " pod="openstack/dnsmasq-dns-9c79574d7-xmjkq"
Jan 22 07:34:05 crc kubenswrapper[4933]: I0122 07:34:05.776131 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mw6rl\" (UniqueName: \"kubernetes.io/projected/84a13c36-50da-4671-ac3a-fc75c150adfc-kube-api-access-mw6rl\") pod \"dnsmasq-dns-9c79574d7-xmjkq\" (UID: \"84a13c36-50da-4671-ac3a-fc75c150adfc\") " pod="openstack/dnsmasq-dns-9c79574d7-xmjkq"
Jan 22 07:34:05 crc kubenswrapper[4933]: I0122 07:34:05.881969 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-9c79574d7-xmjkq"
Jan 22 07:34:06 crc kubenswrapper[4933]: I0122 07:34:06.375873 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-9c79574d7-xmjkq"]
Jan 22 07:34:06 crc kubenswrapper[4933]: W0122 07:34:06.385379 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod84a13c36_50da_4671_ac3a_fc75c150adfc.slice/crio-7877bd5fdba56a79288dd17e98eabce807be185b2ef92a0e629616e11a1e5567 WatchSource:0}: Error finding container 7877bd5fdba56a79288dd17e98eabce807be185b2ef92a0e629616e11a1e5567: Status 404 returned error can't find the container with id 7877bd5fdba56a79288dd17e98eabce807be185b2ef92a0e629616e11a1e5567
Jan 22 07:34:06 crc kubenswrapper[4933]: I0122 07:34:06.456117 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-9c79574d7-xmjkq" event={"ID":"84a13c36-50da-4671-ac3a-fc75c150adfc","Type":"ContainerStarted","Data":"7877bd5fdba56a79288dd17e98eabce807be185b2ef92a0e629616e11a1e5567"}
Jan 22 07:34:06 crc kubenswrapper[4933]: I0122 07:34:06.458212 4933 generic.go:334] "Generic (PLEG): container finished" podID="59d07063-6db8-4225-9c77-f916c2747adf" containerID="13bc2861ae1bca76ca67d1612ed13b3d344d97ecd683d5b1268bf93ffba78592" exitCode=0
Jan 22 07:34:06 crc kubenswrapper[4933]: I0122 07:34:06.458263 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675d97df87-x6hlh" event={"ID":"59d07063-6db8-4225-9c77-f916c2747adf","Type":"ContainerDied","Data":"13bc2861ae1bca76ca67d1612ed13b3d344d97ecd683d5b1268bf93ffba78592"}
Need to start a new one" pod="openstack/dnsmasq-dns-675d97df87-x6hlh" Jan 22 07:34:06 crc kubenswrapper[4933]: I0122 07:34:06.885440 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rth74\" (UniqueName: \"kubernetes.io/projected/59d07063-6db8-4225-9c77-f916c2747adf-kube-api-access-rth74\") pod \"59d07063-6db8-4225-9c77-f916c2747adf\" (UID: \"59d07063-6db8-4225-9c77-f916c2747adf\") " Jan 22 07:34:06 crc kubenswrapper[4933]: I0122 07:34:06.885582 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59d07063-6db8-4225-9c77-f916c2747adf-config\") pod \"59d07063-6db8-4225-9c77-f916c2747adf\" (UID: \"59d07063-6db8-4225-9c77-f916c2747adf\") " Jan 22 07:34:06 crc kubenswrapper[4933]: I0122 07:34:06.885605 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/59d07063-6db8-4225-9c77-f916c2747adf-ovsdbserver-sb\") pod \"59d07063-6db8-4225-9c77-f916c2747adf\" (UID: \"59d07063-6db8-4225-9c77-f916c2747adf\") " Jan 22 07:34:06 crc kubenswrapper[4933]: I0122 07:34:06.885710 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/59d07063-6db8-4225-9c77-f916c2747adf-dns-svc\") pod \"59d07063-6db8-4225-9c77-f916c2747adf\" (UID: \"59d07063-6db8-4225-9c77-f916c2747adf\") " Jan 22 07:34:06 crc kubenswrapper[4933]: I0122 07:34:06.885768 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/59d07063-6db8-4225-9c77-f916c2747adf-ovsdbserver-nb\") pod \"59d07063-6db8-4225-9c77-f916c2747adf\" (UID: \"59d07063-6db8-4225-9c77-f916c2747adf\") " Jan 22 07:34:06 crc kubenswrapper[4933]: I0122 07:34:06.911062 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/59d07063-6db8-4225-9c77-f916c2747adf-kube-api-access-rth74" (OuterVolumeSpecName: "kube-api-access-rth74") pod "59d07063-6db8-4225-9c77-f916c2747adf" (UID: "59d07063-6db8-4225-9c77-f916c2747adf"). InnerVolumeSpecName "kube-api-access-rth74". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:34:06 crc kubenswrapper[4933]: I0122 07:34:06.961019 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/59d07063-6db8-4225-9c77-f916c2747adf-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "59d07063-6db8-4225-9c77-f916c2747adf" (UID: "59d07063-6db8-4225-9c77-f916c2747adf"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:34:06 crc kubenswrapper[4933]: I0122 07:34:06.963262 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/59d07063-6db8-4225-9c77-f916c2747adf-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "59d07063-6db8-4225-9c77-f916c2747adf" (UID: "59d07063-6db8-4225-9c77-f916c2747adf"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:34:06 crc kubenswrapper[4933]: I0122 07:34:06.971202 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/59d07063-6db8-4225-9c77-f916c2747adf-config" (OuterVolumeSpecName: "config") pod "59d07063-6db8-4225-9c77-f916c2747adf" (UID: "59d07063-6db8-4225-9c77-f916c2747adf"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:34:06 crc kubenswrapper[4933]: I0122 07:34:06.976262 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/59d07063-6db8-4225-9c77-f916c2747adf-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "59d07063-6db8-4225-9c77-f916c2747adf" (UID: "59d07063-6db8-4225-9c77-f916c2747adf"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:34:06 crc kubenswrapper[4933]: I0122 07:34:06.987730 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59d07063-6db8-4225-9c77-f916c2747adf-config\") on node \"crc\" DevicePath \"\"" Jan 22 07:34:06 crc kubenswrapper[4933]: I0122 07:34:06.987765 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/59d07063-6db8-4225-9c77-f916c2747adf-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 07:34:06 crc kubenswrapper[4933]: I0122 07:34:06.987775 4933 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/59d07063-6db8-4225-9c77-f916c2747adf-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 07:34:06 crc kubenswrapper[4933]: I0122 07:34:06.987784 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/59d07063-6db8-4225-9c77-f916c2747adf-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 07:34:06 crc kubenswrapper[4933]: I0122 07:34:06.987793 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rth74\" (UniqueName: \"kubernetes.io/projected/59d07063-6db8-4225-9c77-f916c2747adf-kube-api-access-rth74\") on node \"crc\" DevicePath \"\"" Jan 22 07:34:07 crc kubenswrapper[4933]: I0122 07:34:07.471308 4933 generic.go:334] "Generic (PLEG): container finished" podID="84a13c36-50da-4671-ac3a-fc75c150adfc" containerID="ce08e31b7954aeb74b6585c3d43c44bfb01aa75f566d127d868c816c2eba4118" exitCode=0 Jan 22 07:34:07 crc kubenswrapper[4933]: I0122 07:34:07.471347 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-9c79574d7-xmjkq" event={"ID":"84a13c36-50da-4671-ac3a-fc75c150adfc","Type":"ContainerDied","Data":"ce08e31b7954aeb74b6585c3d43c44bfb01aa75f566d127d868c816c2eba4118"} Jan 22 07:34:07 crc kubenswrapper[4933]: I0122 07:34:07.475561 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675d97df87-x6hlh" event={"ID":"59d07063-6db8-4225-9c77-f916c2747adf","Type":"ContainerDied","Data":"db8cd78add3ea741607023cd99314fe139db28c64861f2b82b621dc3e692dfb3"} Jan 22 07:34:07 crc kubenswrapper[4933]: I0122 07:34:07.475615 4933 scope.go:117] "RemoveContainer" containerID="13bc2861ae1bca76ca67d1612ed13b3d344d97ecd683d5b1268bf93ffba78592" Jan 22 07:34:07 crc kubenswrapper[4933]: I0122 07:34:07.475633 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675d97df87-x6hlh" Jan 22 07:34:07 crc kubenswrapper[4933]: I0122 07:34:07.564841 4933 scope.go:117] "RemoveContainer" containerID="0fde45fd0dc6d082b5a8f3593101c80382325a03801bdb649cc19a1f0f0b1ae8" Jan 22 07:34:07 crc kubenswrapper[4933]: I0122 07:34:07.656706 4933 scope.go:117] "RemoveContainer" containerID="0fde45fd0dc6d082b5a8f3593101c80382325a03801bdb649cc19a1f0f0b1ae8" Jan 22 07:34:07 crc kubenswrapper[4933]: I0122 07:34:07.666779 4933 scope.go:117] "RemoveContainer" containerID="13bc2861ae1bca76ca67d1612ed13b3d344d97ecd683d5b1268bf93ffba78592" Jan 22 07:34:07 crc kubenswrapper[4933]: E0122 07:34:07.666784 4933 log.go:32] "RemoveContainer from runtime service failed" err="rpc error: code = Unknown desc = failed to delete container k8s_init_dnsmasq-dns-675d97df87-x6hlh_openstack_59d07063-6db8-4225-9c77-f916c2747adf_0 in pod sandbox db8cd78add3ea741607023cd99314fe139db28c64861f2b82b621dc3e692dfb3 from index: no such id: '0fde45fd0dc6d082b5a8f3593101c80382325a03801bdb649cc19a1f0f0b1ae8'" containerID="0fde45fd0dc6d082b5a8f3593101c80382325a03801bdb649cc19a1f0f0b1ae8" Jan 22 07:34:07 crc kubenswrapper[4933]: I0122 07:34:07.666875 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0fde45fd0dc6d082b5a8f3593101c80382325a03801bdb649cc19a1f0f0b1ae8"} err="rpc error: code = Unknown desc = failed to delete container k8s_init_dnsmasq-dns-675d97df87-x6hlh_openstack_59d07063-6db8-4225-9c77-f916c2747adf_0 in pod sandbox db8cd78add3ea741607023cd99314fe139db28c64861f2b82b621dc3e692dfb3 from index: no such id: '0fde45fd0dc6d082b5a8f3593101c80382325a03801bdb649cc19a1f0f0b1ae8'" Jan 22 07:34:07 crc kubenswrapper[4933]: E0122 07:34:07.667242 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"13bc2861ae1bca76ca67d1612ed13b3d344d97ecd683d5b1268bf93ffba78592\": container with ID starting with 13bc2861ae1bca76ca67d1612ed13b3d344d97ecd683d5b1268bf93ffba78592 not found: ID does not exist" containerID="13bc2861ae1bca76ca67d1612ed13b3d344d97ecd683d5b1268bf93ffba78592" Jan 22 07:34:07 crc kubenswrapper[4933]: E0122 07:34:07.667317 4933 kuberuntime_gc.go:150] "Failed to remove container" err="failed to get container status \"13bc2861ae1bca76ca67d1612ed13b3d344d97ecd683d5b1268bf93ffba78592\": rpc error: code = NotFound desc = could not find container \"13bc2861ae1bca76ca67d1612ed13b3d344d97ecd683d5b1268bf93ffba78592\": container with ID starting with 13bc2861ae1bca76ca67d1612ed13b3d344d97ecd683d5b1268bf93ffba78592 not found: ID does not exist" containerID="13bc2861ae1bca76ca67d1612ed13b3d344d97ecd683d5b1268bf93ffba78592" Jan 22 07:34:07 crc kubenswrapper[4933]: I0122 07:34:07.667356 4933 scope.go:117] "RemoveContainer" containerID="3c0938bfe5d57f0b82fe52c68f493cdafaf0b09fd545d01e19e95ea0c36345ed" Jan 22 07:34:07 crc kubenswrapper[4933]: I0122 07:34:07.740960 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675d97df87-x6hlh"] Jan 22 07:34:07 crc kubenswrapper[4933]: I0122 07:34:07.752319 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675d97df87-x6hlh"] Jan 22 07:34:07 crc kubenswrapper[4933]: I0122 07:34:07.823766 4933 scope.go:117] "RemoveContainer" containerID="14867d1e7cf0f45df7187c3d96dcfee03d2eedefd52f33223a1434fc1b64aff7" Jan 22 07:34:07 crc kubenswrapper[4933]: I0122 07:34:07.848946 4933 scope.go:117] "RemoveContainer" 
containerID="047c85e476622cfa484c6fa7dcff291d3f9e45a5a861763c8dd2d93f2612f665" Jan 22 07:34:08 crc kubenswrapper[4933]: I0122 07:34:08.488055 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-9c79574d7-xmjkq" event={"ID":"84a13c36-50da-4671-ac3a-fc75c150adfc","Type":"ContainerStarted","Data":"cb07941e627af58fb43e27b4bca801ecdeb0396d90d4d90708923d9841579fcf"} Jan 22 07:34:08 crc kubenswrapper[4933]: I0122 07:34:08.488251 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-9c79574d7-xmjkq" Jan 22 07:34:08 crc kubenswrapper[4933]: I0122 07:34:08.505689 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="59d07063-6db8-4225-9c77-f916c2747adf" path="/var/lib/kubelet/pods/59d07063-6db8-4225-9c77-f916c2747adf/volumes" Jan 22 07:34:08 crc kubenswrapper[4933]: I0122 07:34:08.508599 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-9c79574d7-xmjkq" podStartSLOduration=3.508580485 podStartE2EDuration="3.508580485s" podCreationTimestamp="2026-01-22 07:34:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:34:08.507899739 +0000 UTC m=+6496.345025102" watchObservedRunningTime="2026-01-22 07:34:08.508580485 +0000 UTC m=+6496.345705838" Jan 22 07:34:10 crc kubenswrapper[4933]: I0122 07:34:10.943803 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 07:34:10 crc kubenswrapper[4933]: I0122 07:34:10.944130 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 07:34:15 crc kubenswrapper[4933]: I0122 07:34:15.884316 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-9c79574d7-xmjkq" Jan 22 07:34:15 crc kubenswrapper[4933]: I0122 07:34:15.978418 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55b464b9d9-s8fz9"] Jan 22 07:34:15 crc kubenswrapper[4933]: I0122 07:34:15.978691 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-55b464b9d9-s8fz9" podUID="bfe0045e-46ce-4302-8d36-a55b5eff0184" containerName="dnsmasq-dns" containerID="cri-o://955005e0b0c176b5dbbb30932c6460dd305769d2af7b76a6a4d4b5e16d39e333" gracePeriod=10 Jan 22 07:34:16 crc kubenswrapper[4933]: I0122 07:34:16.573347 4933 generic.go:334] "Generic (PLEG): container finished" podID="bfe0045e-46ce-4302-8d36-a55b5eff0184" containerID="955005e0b0c176b5dbbb30932c6460dd305769d2af7b76a6a4d4b5e16d39e333" exitCode=0 Jan 22 07:34:16 crc kubenswrapper[4933]: I0122 07:34:16.573396 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55b464b9d9-s8fz9" event={"ID":"bfe0045e-46ce-4302-8d36-a55b5eff0184","Type":"ContainerDied","Data":"955005e0b0c176b5dbbb30932c6460dd305769d2af7b76a6a4d4b5e16d39e333"} Jan 22 07:34:16 crc kubenswrapper[4933]: I0122 07:34:16.573693 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/dnsmasq-dns-55b464b9d9-s8fz9" event={"ID":"bfe0045e-46ce-4302-8d36-a55b5eff0184","Type":"ContainerDied","Data":"45c523e630ccc1c4cb186a484f938200e65676ccbbdfa80d331e2bc1824c5dbd"} Jan 22 07:34:16 crc kubenswrapper[4933]: I0122 07:34:16.573712 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="45c523e630ccc1c4cb186a484f938200e65676ccbbdfa80d331e2bc1824c5dbd" Jan 22 07:34:16 crc kubenswrapper[4933]: I0122 07:34:16.609157 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55b464b9d9-s8fz9" Jan 22 07:34:16 crc kubenswrapper[4933]: I0122 07:34:16.720900 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bfe0045e-46ce-4302-8d36-a55b5eff0184-ovsdbserver-sb\") pod \"bfe0045e-46ce-4302-8d36-a55b5eff0184\" (UID: \"bfe0045e-46ce-4302-8d36-a55b5eff0184\") " Jan 22 07:34:16 crc kubenswrapper[4933]: I0122 07:34:16.720970 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bfe0045e-46ce-4302-8d36-a55b5eff0184-ovsdbserver-nb\") pod \"bfe0045e-46ce-4302-8d36-a55b5eff0184\" (UID: \"bfe0045e-46ce-4302-8d36-a55b5eff0184\") " Jan 22 07:34:16 crc kubenswrapper[4933]: I0122 07:34:16.721029 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bfe0045e-46ce-4302-8d36-a55b5eff0184-dns-svc\") pod \"bfe0045e-46ce-4302-8d36-a55b5eff0184\" (UID: \"bfe0045e-46ce-4302-8d36-a55b5eff0184\") " Jan 22 07:34:16 crc kubenswrapper[4933]: I0122 07:34:16.721138 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s5rsv\" (UniqueName: \"kubernetes.io/projected/bfe0045e-46ce-4302-8d36-a55b5eff0184-kube-api-access-s5rsv\") pod \"bfe0045e-46ce-4302-8d36-a55b5eff0184\" (UID: \"bfe0045e-46ce-4302-8d36-a55b5eff0184\") " Jan 22 07:34:16 crc kubenswrapper[4933]: I0122 07:34:16.721202 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bfe0045e-46ce-4302-8d36-a55b5eff0184-config\") pod \"bfe0045e-46ce-4302-8d36-a55b5eff0184\" (UID: \"bfe0045e-46ce-4302-8d36-a55b5eff0184\") " Jan 22 07:34:16 crc kubenswrapper[4933]: I0122 07:34:16.721239 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/bfe0045e-46ce-4302-8d36-a55b5eff0184-openstack-cell1\") pod \"bfe0045e-46ce-4302-8d36-a55b5eff0184\" (UID: \"bfe0045e-46ce-4302-8d36-a55b5eff0184\") " Jan 22 07:34:16 crc kubenswrapper[4933]: I0122 07:34:16.726578 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bfe0045e-46ce-4302-8d36-a55b5eff0184-kube-api-access-s5rsv" (OuterVolumeSpecName: "kube-api-access-s5rsv") pod "bfe0045e-46ce-4302-8d36-a55b5eff0184" (UID: "bfe0045e-46ce-4302-8d36-a55b5eff0184"). InnerVolumeSpecName "kube-api-access-s5rsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:34:16 crc kubenswrapper[4933]: I0122 07:34:16.774345 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bfe0045e-46ce-4302-8d36-a55b5eff0184-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "bfe0045e-46ce-4302-8d36-a55b5eff0184" (UID: "bfe0045e-46ce-4302-8d36-a55b5eff0184"). 
InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:34:16 crc kubenswrapper[4933]: I0122 07:34:16.778144 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bfe0045e-46ce-4302-8d36-a55b5eff0184-config" (OuterVolumeSpecName: "config") pod "bfe0045e-46ce-4302-8d36-a55b5eff0184" (UID: "bfe0045e-46ce-4302-8d36-a55b5eff0184"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:34:16 crc kubenswrapper[4933]: I0122 07:34:16.785809 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bfe0045e-46ce-4302-8d36-a55b5eff0184-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "bfe0045e-46ce-4302-8d36-a55b5eff0184" (UID: "bfe0045e-46ce-4302-8d36-a55b5eff0184"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:34:16 crc kubenswrapper[4933]: I0122 07:34:16.787543 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bfe0045e-46ce-4302-8d36-a55b5eff0184-openstack-cell1" (OuterVolumeSpecName: "openstack-cell1") pod "bfe0045e-46ce-4302-8d36-a55b5eff0184" (UID: "bfe0045e-46ce-4302-8d36-a55b5eff0184"). InnerVolumeSpecName "openstack-cell1". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:34:16 crc kubenswrapper[4933]: I0122 07:34:16.789965 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bfe0045e-46ce-4302-8d36-a55b5eff0184-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "bfe0045e-46ce-4302-8d36-a55b5eff0184" (UID: "bfe0045e-46ce-4302-8d36-a55b5eff0184"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:34:16 crc kubenswrapper[4933]: I0122 07:34:16.824914 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bfe0045e-46ce-4302-8d36-a55b5eff0184-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 07:34:16 crc kubenswrapper[4933]: I0122 07:34:16.825093 4933 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bfe0045e-46ce-4302-8d36-a55b5eff0184-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 07:34:16 crc kubenswrapper[4933]: I0122 07:34:16.825195 4933 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bfe0045e-46ce-4302-8d36-a55b5eff0184-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 07:34:16 crc kubenswrapper[4933]: I0122 07:34:16.825255 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s5rsv\" (UniqueName: \"kubernetes.io/projected/bfe0045e-46ce-4302-8d36-a55b5eff0184-kube-api-access-s5rsv\") on node \"crc\" DevicePath \"\"" Jan 22 07:34:16 crc kubenswrapper[4933]: I0122 07:34:16.825370 4933 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bfe0045e-46ce-4302-8d36-a55b5eff0184-config\") on node \"crc\" DevicePath \"\"" Jan 22 07:34:16 crc kubenswrapper[4933]: I0122 07:34:16.825427 4933 reconciler_common.go:293] "Volume detached for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/bfe0045e-46ce-4302-8d36-a55b5eff0184-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 22 07:34:17 crc kubenswrapper[4933]: I0122 07:34:17.586041 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-55b464b9d9-s8fz9" Jan 22 07:34:17 crc kubenswrapper[4933]: I0122 07:34:17.627248 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55b464b9d9-s8fz9"] Jan 22 07:34:17 crc kubenswrapper[4933]: I0122 07:34:17.634919 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-55b464b9d9-s8fz9"] Jan 22 07:34:18 crc kubenswrapper[4933]: I0122 07:34:18.507096 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bfe0045e-46ce-4302-8d36-a55b5eff0184" path="/var/lib/kubelet/pods/bfe0045e-46ce-4302-8d36-a55b5eff0184/volumes" Jan 22 07:34:25 crc kubenswrapper[4933]: I0122 07:34:25.897348 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ctnkcf"] Jan 22 07:34:25 crc kubenswrapper[4933]: E0122 07:34:25.898549 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59d07063-6db8-4225-9c77-f916c2747adf" containerName="init" Jan 22 07:34:25 crc kubenswrapper[4933]: I0122 07:34:25.898573 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="59d07063-6db8-4225-9c77-f916c2747adf" containerName="init" Jan 22 07:34:25 crc kubenswrapper[4933]: E0122 07:34:25.898593 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bfe0045e-46ce-4302-8d36-a55b5eff0184" containerName="init" Jan 22 07:34:25 crc kubenswrapper[4933]: I0122 07:34:25.898601 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="bfe0045e-46ce-4302-8d36-a55b5eff0184" containerName="init" Jan 22 07:34:25 crc kubenswrapper[4933]: E0122 07:34:25.898626 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bfe0045e-46ce-4302-8d36-a55b5eff0184" containerName="dnsmasq-dns" Jan 22 07:34:25 crc kubenswrapper[4933]: I0122 07:34:25.898636 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="bfe0045e-46ce-4302-8d36-a55b5eff0184" containerName="dnsmasq-dns" Jan 22 07:34:25 crc kubenswrapper[4933]: E0122 07:34:25.898649 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59d07063-6db8-4225-9c77-f916c2747adf" containerName="dnsmasq-dns" Jan 22 07:34:25 crc kubenswrapper[4933]: I0122 07:34:25.898657 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="59d07063-6db8-4225-9c77-f916c2747adf" containerName="dnsmasq-dns" Jan 22 07:34:25 crc kubenswrapper[4933]: I0122 07:34:25.898925 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="59d07063-6db8-4225-9c77-f916c2747adf" containerName="dnsmasq-dns" Jan 22 07:34:25 crc kubenswrapper[4933]: I0122 07:34:25.898944 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="bfe0045e-46ce-4302-8d36-a55b5eff0184" containerName="dnsmasq-dns" Jan 22 07:34:25 crc kubenswrapper[4933]: I0122 07:34:25.900004 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ctnkcf" Jan 22 07:34:25 crc kubenswrapper[4933]: I0122 07:34:25.903484 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 07:34:25 crc kubenswrapper[4933]: I0122 07:34:25.903808 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 22 07:34:25 crc kubenswrapper[4933]: I0122 07:34:25.903882 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 22 07:34:25 crc kubenswrapper[4933]: I0122 07:34:25.904009 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-d9x6d" Jan 22 07:34:25 crc kubenswrapper[4933]: I0122 07:34:25.916463 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ctnkcf"] Jan 22 07:34:25 crc kubenswrapper[4933]: I0122 07:34:25.998405 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2e0dfb8c-dfb3-487d-8391-78d4a3bee130-inventory\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-ctnkcf\" (UID: \"2e0dfb8c-dfb3-487d-8391-78d4a3bee130\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ctnkcf" Jan 22 07:34:25 crc kubenswrapper[4933]: I0122 07:34:25.998452 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/2e0dfb8c-dfb3-487d-8391-78d4a3bee130-ssh-key-openstack-cell1\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-ctnkcf\" (UID: \"2e0dfb8c-dfb3-487d-8391-78d4a3bee130\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ctnkcf" Jan 22 07:34:25 crc kubenswrapper[4933]: I0122 07:34:25.998500 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e0dfb8c-dfb3-487d-8391-78d4a3bee130-pre-adoption-validation-combined-ca-bundle\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-ctnkcf\" (UID: \"2e0dfb8c-dfb3-487d-8391-78d4a3bee130\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ctnkcf" Jan 22 07:34:25 crc kubenswrapper[4933]: I0122 07:34:25.998557 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hhzc6\" (UniqueName: \"kubernetes.io/projected/2e0dfb8c-dfb3-487d-8391-78d4a3bee130-kube-api-access-hhzc6\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-ctnkcf\" (UID: \"2e0dfb8c-dfb3-487d-8391-78d4a3bee130\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ctnkcf" Jan 22 07:34:26 crc kubenswrapper[4933]: I0122 07:34:26.100812 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hhzc6\" (UniqueName: \"kubernetes.io/projected/2e0dfb8c-dfb3-487d-8391-78d4a3bee130-kube-api-access-hhzc6\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-ctnkcf\" (UID: \"2e0dfb8c-dfb3-487d-8391-78d4a3bee130\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ctnkcf" Jan 22 07:34:26 crc kubenswrapper[4933]: I0122 07:34:26.101006 4933 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2e0dfb8c-dfb3-487d-8391-78d4a3bee130-inventory\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-ctnkcf\" (UID: \"2e0dfb8c-dfb3-487d-8391-78d4a3bee130\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ctnkcf" Jan 22 07:34:26 crc kubenswrapper[4933]: I0122 07:34:26.101043 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/2e0dfb8c-dfb3-487d-8391-78d4a3bee130-ssh-key-openstack-cell1\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-ctnkcf\" (UID: \"2e0dfb8c-dfb3-487d-8391-78d4a3bee130\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ctnkcf" Jan 22 07:34:26 crc kubenswrapper[4933]: I0122 07:34:26.101125 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e0dfb8c-dfb3-487d-8391-78d4a3bee130-pre-adoption-validation-combined-ca-bundle\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-ctnkcf\" (UID: \"2e0dfb8c-dfb3-487d-8391-78d4a3bee130\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ctnkcf" Jan 22 07:34:26 crc kubenswrapper[4933]: I0122 07:34:26.108804 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2e0dfb8c-dfb3-487d-8391-78d4a3bee130-inventory\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-ctnkcf\" (UID: \"2e0dfb8c-dfb3-487d-8391-78d4a3bee130\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ctnkcf" Jan 22 07:34:26 crc kubenswrapper[4933]: I0122 07:34:26.108998 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e0dfb8c-dfb3-487d-8391-78d4a3bee130-pre-adoption-validation-combined-ca-bundle\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-ctnkcf\" (UID: \"2e0dfb8c-dfb3-487d-8391-78d4a3bee130\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ctnkcf" Jan 22 07:34:26 crc kubenswrapper[4933]: I0122 07:34:26.109290 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/2e0dfb8c-dfb3-487d-8391-78d4a3bee130-ssh-key-openstack-cell1\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-ctnkcf\" (UID: \"2e0dfb8c-dfb3-487d-8391-78d4a3bee130\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ctnkcf" Jan 22 07:34:26 crc kubenswrapper[4933]: I0122 07:34:26.118104 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hhzc6\" (UniqueName: \"kubernetes.io/projected/2e0dfb8c-dfb3-487d-8391-78d4a3bee130-kube-api-access-hhzc6\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-ctnkcf\" (UID: \"2e0dfb8c-dfb3-487d-8391-78d4a3bee130\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ctnkcf" Jan 22 07:34:26 crc kubenswrapper[4933]: I0122 07:34:26.225635 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ctnkcf" Jan 22 07:34:27 crc kubenswrapper[4933]: I0122 07:34:27.010185 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ctnkcf"] Jan 22 07:34:27 crc kubenswrapper[4933]: I0122 07:34:27.685012 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ctnkcf" event={"ID":"2e0dfb8c-dfb3-487d-8391-78d4a3bee130","Type":"ContainerStarted","Data":"a59cd02bcbea6bca5ac0e15bc98378c5caacd52d6eccbfeb47268afcfcaf6b5c"} Jan 22 07:34:37 crc kubenswrapper[4933]: I0122 07:34:37.800132 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ctnkcf" event={"ID":"2e0dfb8c-dfb3-487d-8391-78d4a3bee130","Type":"ContainerStarted","Data":"6113513eff39ef3a5241b1519cc472d582a5723dc521fad9fc2eb2a964c1c1e6"} Jan 22 07:34:40 crc kubenswrapper[4933]: I0122 07:34:40.943345 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 07:34:40 crc kubenswrapper[4933]: I0122 07:34:40.943822 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 07:34:40 crc kubenswrapper[4933]: I0122 07:34:40.943872 4933 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" Jan 22 07:34:40 crc kubenswrapper[4933]: I0122 07:34:40.944817 4933 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1b32e660a15a3b4635250c60c37311d6f65b1e17832e99f37c6d9d25bd8e5087"} pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 07:34:40 crc kubenswrapper[4933]: I0122 07:34:40.944889 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" containerID="cri-o://1b32e660a15a3b4635250c60c37311d6f65b1e17832e99f37c6d9d25bd8e5087" gracePeriod=600 Jan 22 07:34:41 crc kubenswrapper[4933]: I0122 07:34:41.855118 4933 generic.go:334] "Generic (PLEG): container finished" podID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerID="1b32e660a15a3b4635250c60c37311d6f65b1e17832e99f37c6d9d25bd8e5087" exitCode=0 Jan 22 07:34:41 crc kubenswrapper[4933]: I0122 07:34:41.855184 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerDied","Data":"1b32e660a15a3b4635250c60c37311d6f65b1e17832e99f37c6d9d25bd8e5087"} Jan 22 07:34:41 crc kubenswrapper[4933]: I0122 07:34:41.855760 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerStarted","Data":"7c01cab701a41f6523c6eaebcbbfcdad454c8642cad1bb9ca7d0db276972a804"} Jan 22 07:34:41 crc kubenswrapper[4933]: I0122 07:34:41.855827 4933 scope.go:117] "RemoveContainer" containerID="16fd824342f037f0104eaf6d48b751a02eeec5a84e0c2a32f1a295811edc0336" Jan 22 07:34:41 crc kubenswrapper[4933]: I0122 07:34:41.872088 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ctnkcf" podStartSLOduration=6.665928435 podStartE2EDuration="16.872059952s" podCreationTimestamp="2026-01-22 07:34:25 +0000 UTC" firstStartedPulling="2026-01-22 07:34:27.010812977 +0000 UTC m=+6514.847938330" lastFinishedPulling="2026-01-22 07:34:37.216944494 +0000 UTC m=+6525.054069847" observedRunningTime="2026-01-22 07:34:37.827579324 +0000 UTC m=+6525.664704697" watchObservedRunningTime="2026-01-22 07:34:41.872059952 +0000 UTC m=+6529.709185305" Jan 22 07:34:50 crc kubenswrapper[4933]: I0122 07:34:50.937811 4933 generic.go:334] "Generic (PLEG): container finished" podID="2e0dfb8c-dfb3-487d-8391-78d4a3bee130" containerID="6113513eff39ef3a5241b1519cc472d582a5723dc521fad9fc2eb2a964c1c1e6" exitCode=0 Jan 22 07:34:50 crc kubenswrapper[4933]: I0122 07:34:50.937932 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ctnkcf" event={"ID":"2e0dfb8c-dfb3-487d-8391-78d4a3bee130","Type":"ContainerDied","Data":"6113513eff39ef3a5241b1519cc472d582a5723dc521fad9fc2eb2a964c1c1e6"} Jan 22 07:34:52 crc kubenswrapper[4933]: I0122 07:34:52.390924 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ctnkcf" Jan 22 07:34:52 crc kubenswrapper[4933]: I0122 07:34:52.440745 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hhzc6\" (UniqueName: \"kubernetes.io/projected/2e0dfb8c-dfb3-487d-8391-78d4a3bee130-kube-api-access-hhzc6\") pod \"2e0dfb8c-dfb3-487d-8391-78d4a3bee130\" (UID: \"2e0dfb8c-dfb3-487d-8391-78d4a3bee130\") " Jan 22 07:34:52 crc kubenswrapper[4933]: I0122 07:34:52.440887 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/2e0dfb8c-dfb3-487d-8391-78d4a3bee130-ssh-key-openstack-cell1\") pod \"2e0dfb8c-dfb3-487d-8391-78d4a3bee130\" (UID: \"2e0dfb8c-dfb3-487d-8391-78d4a3bee130\") " Jan 22 07:34:52 crc kubenswrapper[4933]: I0122 07:34:52.440996 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e0dfb8c-dfb3-487d-8391-78d4a3bee130-pre-adoption-validation-combined-ca-bundle\") pod \"2e0dfb8c-dfb3-487d-8391-78d4a3bee130\" (UID: \"2e0dfb8c-dfb3-487d-8391-78d4a3bee130\") " Jan 22 07:34:52 crc kubenswrapper[4933]: I0122 07:34:52.441268 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2e0dfb8c-dfb3-487d-8391-78d4a3bee130-inventory\") pod \"2e0dfb8c-dfb3-487d-8391-78d4a3bee130\" (UID: \"2e0dfb8c-dfb3-487d-8391-78d4a3bee130\") " Jan 22 07:34:52 crc kubenswrapper[4933]: I0122 07:34:52.448385 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e0dfb8c-dfb3-487d-8391-78d4a3bee130-pre-adoption-validation-combined-ca-bundle" (OuterVolumeSpecName: "pre-adoption-validation-combined-ca-bundle") pod "2e0dfb8c-dfb3-487d-8391-78d4a3bee130" (UID: "2e0dfb8c-dfb3-487d-8391-78d4a3bee130"). InnerVolumeSpecName "pre-adoption-validation-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:34:52 crc kubenswrapper[4933]: I0122 07:34:52.456106 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e0dfb8c-dfb3-487d-8391-78d4a3bee130-kube-api-access-hhzc6" (OuterVolumeSpecName: "kube-api-access-hhzc6") pod "2e0dfb8c-dfb3-487d-8391-78d4a3bee130" (UID: "2e0dfb8c-dfb3-487d-8391-78d4a3bee130"). InnerVolumeSpecName "kube-api-access-hhzc6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:34:52 crc kubenswrapper[4933]: I0122 07:34:52.479682 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e0dfb8c-dfb3-487d-8391-78d4a3bee130-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "2e0dfb8c-dfb3-487d-8391-78d4a3bee130" (UID: "2e0dfb8c-dfb3-487d-8391-78d4a3bee130"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:34:52 crc kubenswrapper[4933]: I0122 07:34:52.494863 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e0dfb8c-dfb3-487d-8391-78d4a3bee130-inventory" (OuterVolumeSpecName: "inventory") pod "2e0dfb8c-dfb3-487d-8391-78d4a3bee130" (UID: "2e0dfb8c-dfb3-487d-8391-78d4a3bee130"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:34:52 crc kubenswrapper[4933]: I0122 07:34:52.543724 4933 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2e0dfb8c-dfb3-487d-8391-78d4a3bee130-inventory\") on node \"crc\" DevicePath \"\"" Jan 22 07:34:52 crc kubenswrapper[4933]: I0122 07:34:52.543754 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hhzc6\" (UniqueName: \"kubernetes.io/projected/2e0dfb8c-dfb3-487d-8391-78d4a3bee130-kube-api-access-hhzc6\") on node \"crc\" DevicePath \"\"" Jan 22 07:34:52 crc kubenswrapper[4933]: I0122 07:34:52.543767 4933 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/2e0dfb8c-dfb3-487d-8391-78d4a3bee130-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 22 07:34:52 crc kubenswrapper[4933]: I0122 07:34:52.543781 4933 reconciler_common.go:293] "Volume detached for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e0dfb8c-dfb3-487d-8391-78d4a3bee130-pre-adoption-validation-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:34:52 crc kubenswrapper[4933]: I0122 07:34:52.966568 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ctnkcf" event={"ID":"2e0dfb8c-dfb3-487d-8391-78d4a3bee130","Type":"ContainerDied","Data":"a59cd02bcbea6bca5ac0e15bc98378c5caacd52d6eccbfeb47268afcfcaf6b5c"} Jan 22 07:34:52 crc kubenswrapper[4933]: I0122 07:34:52.966886 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a59cd02bcbea6bca5ac0e15bc98378c5caacd52d6eccbfeb47268afcfcaf6b5c" Jan 22 07:34:52 crc kubenswrapper[4933]: I0122 07:34:52.966640 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ctnkcf" Jan 22 07:34:58 crc kubenswrapper[4933]: I0122 07:34:58.546986 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-mlrmq"] Jan 22 07:34:58 crc kubenswrapper[4933]: E0122 07:34:58.548147 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e0dfb8c-dfb3-487d-8391-78d4a3bee130" containerName="pre-adoption-validation-openstack-pre-adoption-openstack-cell1" Jan 22 07:34:58 crc kubenswrapper[4933]: I0122 07:34:58.548170 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e0dfb8c-dfb3-487d-8391-78d4a3bee130" containerName="pre-adoption-validation-openstack-pre-adoption-openstack-cell1" Jan 22 07:34:58 crc kubenswrapper[4933]: I0122 07:34:58.548481 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e0dfb8c-dfb3-487d-8391-78d4a3bee130" containerName="pre-adoption-validation-openstack-pre-adoption-openstack-cell1" Jan 22 07:34:58 crc kubenswrapper[4933]: I0122 07:34:58.549517 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-mlrmq" Jan 22 07:34:58 crc kubenswrapper[4933]: I0122 07:34:58.552763 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 07:34:58 crc kubenswrapper[4933]: I0122 07:34:58.552880 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-d9x6d" Jan 22 07:34:58 crc kubenswrapper[4933]: I0122 07:34:58.552960 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 22 07:34:58 crc kubenswrapper[4933]: I0122 07:34:58.553289 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 22 07:34:58 crc kubenswrapper[4933]: I0122 07:34:58.564127 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-mlrmq"] Jan 22 07:34:58 crc kubenswrapper[4933]: I0122 07:34:58.702545 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mc45l\" (UniqueName: \"kubernetes.io/projected/c722274d-78ae-420e-8487-f52eac7984d7-kube-api-access-mc45l\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-mlrmq\" (UID: \"c722274d-78ae-420e-8487-f52eac7984d7\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-mlrmq" Jan 22 07:34:58 crc kubenswrapper[4933]: I0122 07:34:58.702614 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c722274d-78ae-420e-8487-f52eac7984d7-tripleo-cleanup-combined-ca-bundle\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-mlrmq\" (UID: \"c722274d-78ae-420e-8487-f52eac7984d7\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-mlrmq" Jan 22 07:34:58 crc kubenswrapper[4933]: I0122 07:34:58.702646 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c722274d-78ae-420e-8487-f52eac7984d7-inventory\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-mlrmq\" (UID: \"c722274d-78ae-420e-8487-f52eac7984d7\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-mlrmq" Jan 22 07:34:58 crc kubenswrapper[4933]: I0122 07:34:58.702805 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/c722274d-78ae-420e-8487-f52eac7984d7-ssh-key-openstack-cell1\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-mlrmq\" (UID: \"c722274d-78ae-420e-8487-f52eac7984d7\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-mlrmq" Jan 22 07:34:58 crc kubenswrapper[4933]: I0122 07:34:58.805196 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mc45l\" (UniqueName: \"kubernetes.io/projected/c722274d-78ae-420e-8487-f52eac7984d7-kube-api-access-mc45l\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-mlrmq\" (UID: \"c722274d-78ae-420e-8487-f52eac7984d7\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-mlrmq" Jan 22 07:34:58 crc kubenswrapper[4933]: I0122 07:34:58.805641 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/c722274d-78ae-420e-8487-f52eac7984d7-tripleo-cleanup-combined-ca-bundle\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-mlrmq\" (UID: \"c722274d-78ae-420e-8487-f52eac7984d7\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-mlrmq" Jan 22 07:34:58 crc kubenswrapper[4933]: I0122 07:34:58.806589 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c722274d-78ae-420e-8487-f52eac7984d7-inventory\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-mlrmq\" (UID: \"c722274d-78ae-420e-8487-f52eac7984d7\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-mlrmq" Jan 22 07:34:58 crc kubenswrapper[4933]: I0122 07:34:58.806989 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/c722274d-78ae-420e-8487-f52eac7984d7-ssh-key-openstack-cell1\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-mlrmq\" (UID: \"c722274d-78ae-420e-8487-f52eac7984d7\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-mlrmq" Jan 22 07:34:58 crc kubenswrapper[4933]: I0122 07:34:58.811978 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c722274d-78ae-420e-8487-f52eac7984d7-tripleo-cleanup-combined-ca-bundle\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-mlrmq\" (UID: \"c722274d-78ae-420e-8487-f52eac7984d7\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-mlrmq" Jan 22 07:34:58 crc kubenswrapper[4933]: I0122 07:34:58.812421 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/c722274d-78ae-420e-8487-f52eac7984d7-ssh-key-openstack-cell1\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-mlrmq\" (UID: \"c722274d-78ae-420e-8487-f52eac7984d7\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-mlrmq" Jan 22 07:34:58 crc kubenswrapper[4933]: I0122 07:34:58.820659 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c722274d-78ae-420e-8487-f52eac7984d7-inventory\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-mlrmq\" (UID: \"c722274d-78ae-420e-8487-f52eac7984d7\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-mlrmq" Jan 22 07:34:58 crc kubenswrapper[4933]: I0122 07:34:58.823731 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mc45l\" (UniqueName: \"kubernetes.io/projected/c722274d-78ae-420e-8487-f52eac7984d7-kube-api-access-mc45l\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-mlrmq\" (UID: \"c722274d-78ae-420e-8487-f52eac7984d7\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-mlrmq" Jan 22 07:34:58 crc kubenswrapper[4933]: I0122 07:34:58.866757 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-mlrmq" Jan 22 07:34:59 crc kubenswrapper[4933]: I0122 07:34:59.447356 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-mlrmq"] Jan 22 07:35:00 crc kubenswrapper[4933]: I0122 07:35:00.037669 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-mlrmq" event={"ID":"c722274d-78ae-420e-8487-f52eac7984d7","Type":"ContainerStarted","Data":"9a3f741fdf01b12465a75d10e59014f169788d14ee72c41eee7ea87ef3e27844"} Jan 22 07:35:01 crc kubenswrapper[4933]: I0122 07:35:01.049716 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-mlrmq" event={"ID":"c722274d-78ae-420e-8487-f52eac7984d7","Type":"ContainerStarted","Data":"a115b66ae22ad5334408def3487df63503be5826a15a2b37c93c3ace15ec2783"} Jan 22 07:35:01 crc kubenswrapper[4933]: I0122 07:35:01.067120 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-mlrmq" podStartSLOduration=2.554466882 podStartE2EDuration="3.067096504s" podCreationTimestamp="2026-01-22 07:34:58 +0000 UTC" firstStartedPulling="2026-01-22 07:34:59.454086868 +0000 UTC m=+6547.291212221" lastFinishedPulling="2026-01-22 07:34:59.96671648 +0000 UTC m=+6547.803841843" observedRunningTime="2026-01-22 07:35:01.065411103 +0000 UTC m=+6548.902536486" watchObservedRunningTime="2026-01-22 07:35:01.067096504 +0000 UTC m=+6548.904221857" Jan 22 07:35:08 crc kubenswrapper[4933]: I0122 07:35:08.005088 4933 scope.go:117] "RemoveContainer" containerID="e755fb15a1df5046702ecfc8397f2bfcef3891d8d8a141bb7a0ae217f94ea160" Jan 22 07:35:08 crc kubenswrapper[4933]: I0122 07:35:08.037222 4933 scope.go:117] "RemoveContainer" containerID="2090202e08f4df1495dbf60793a8990961529f01c45d63a443c7b1afb1191cea" Jan 22 07:35:13 crc kubenswrapper[4933]: I0122 07:35:13.036908 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-db-create-97zkc"] Jan 22 07:35:13 crc kubenswrapper[4933]: I0122 07:35:13.047557 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-db-create-97zkc"] Jan 22 07:35:14 crc kubenswrapper[4933]: I0122 07:35:14.502710 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6eb108c3-9c44-480e-ac8f-b0c0c74db54f" path="/var/lib/kubelet/pods/6eb108c3-9c44-480e-ac8f-b0c0c74db54f/volumes" Jan 22 07:35:15 crc kubenswrapper[4933]: I0122 07:35:15.029835 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-40d7-account-create-update-t4rsr"] Jan 22 07:35:15 crc kubenswrapper[4933]: I0122 07:35:15.038689 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-40d7-account-create-update-t4rsr"] Jan 22 07:35:16 crc kubenswrapper[4933]: I0122 07:35:16.521223 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="58379571-6fee-4de8-8919-293ea15e8c07" path="/var/lib/kubelet/pods/58379571-6fee-4de8-8919-293ea15e8c07/volumes" Jan 22 07:35:20 crc kubenswrapper[4933]: I0122 07:35:20.246436 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-persistence-db-create-tcfnc"] Jan 22 07:35:20 crc kubenswrapper[4933]: I0122 07:35:20.259194 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-persistence-db-create-tcfnc"] Jan 22 07:35:20 crc kubenswrapper[4933]: I0122 07:35:20.506545 4933 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="04576677-4607-48ab-b786-a763b254ad3f" path="/var/lib/kubelet/pods/04576677-4607-48ab-b786-a763b254ad3f/volumes" Jan 22 07:35:21 crc kubenswrapper[4933]: I0122 07:35:21.027316 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-c826-account-create-update-2pjwm"] Jan 22 07:35:21 crc kubenswrapper[4933]: I0122 07:35:21.037401 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-c826-account-create-update-2pjwm"] Jan 22 07:35:22 crc kubenswrapper[4933]: I0122 07:35:22.503861 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a8c8cb03-9525-41d1-b307-badc66ef535f" path="/var/lib/kubelet/pods/a8c8cb03-9525-41d1-b307-badc66ef535f/volumes" Jan 22 07:36:08 crc kubenswrapper[4933]: I0122 07:36:08.263940 4933 scope.go:117] "RemoveContainer" containerID="c7afe3ef93cd30c177f607ca171a697672305c1ef9790c6d497ae3758424e87c" Jan 22 07:36:08 crc kubenswrapper[4933]: I0122 07:36:08.308168 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-hwq8c"] Jan 22 07:36:08 crc kubenswrapper[4933]: I0122 07:36:08.311945 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hwq8c" Jan 22 07:36:08 crc kubenswrapper[4933]: I0122 07:36:08.318721 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-hwq8c"] Jan 22 07:36:08 crc kubenswrapper[4933]: I0122 07:36:08.320515 4933 scope.go:117] "RemoveContainer" containerID="2136f47d770777e539728e6e53388ee952d2762f387317a7e77f3d0c5f424f60" Jan 22 07:36:08 crc kubenswrapper[4933]: I0122 07:36:08.394120 4933 scope.go:117] "RemoveContainer" containerID="a2454353ef91e4c783dbe25c6ad27150573e5b6da3e01fca7e60831c997ea4f2" Jan 22 07:36:08 crc kubenswrapper[4933]: I0122 07:36:08.442503 4933 scope.go:117] "RemoveContainer" containerID="3dc4adca8bbf2e3374f739695c2a6d83deddf74227851407e568e2f124b0ee35" Jan 22 07:36:08 crc kubenswrapper[4933]: I0122 07:36:08.483831 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ltjxk\" (UniqueName: \"kubernetes.io/projected/c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6-kube-api-access-ltjxk\") pod \"community-operators-hwq8c\" (UID: \"c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6\") " pod="openshift-marketplace/community-operators-hwq8c" Jan 22 07:36:08 crc kubenswrapper[4933]: I0122 07:36:08.484058 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6-catalog-content\") pod \"community-operators-hwq8c\" (UID: \"c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6\") " pod="openshift-marketplace/community-operators-hwq8c" Jan 22 07:36:08 crc kubenswrapper[4933]: I0122 07:36:08.484532 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6-utilities\") pod \"community-operators-hwq8c\" (UID: \"c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6\") " pod="openshift-marketplace/community-operators-hwq8c" Jan 22 07:36:08 crc kubenswrapper[4933]: I0122 07:36:08.586488 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6-utilities\") pod 
\"community-operators-hwq8c\" (UID: \"c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6\") " pod="openshift-marketplace/community-operators-hwq8c" Jan 22 07:36:08 crc kubenswrapper[4933]: I0122 07:36:08.586592 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ltjxk\" (UniqueName: \"kubernetes.io/projected/c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6-kube-api-access-ltjxk\") pod \"community-operators-hwq8c\" (UID: \"c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6\") " pod="openshift-marketplace/community-operators-hwq8c" Jan 22 07:36:08 crc kubenswrapper[4933]: I0122 07:36:08.586704 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6-catalog-content\") pod \"community-operators-hwq8c\" (UID: \"c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6\") " pod="openshift-marketplace/community-operators-hwq8c" Jan 22 07:36:08 crc kubenswrapper[4933]: I0122 07:36:08.587046 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6-utilities\") pod \"community-operators-hwq8c\" (UID: \"c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6\") " pod="openshift-marketplace/community-operators-hwq8c" Jan 22 07:36:08 crc kubenswrapper[4933]: I0122 07:36:08.587148 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6-catalog-content\") pod \"community-operators-hwq8c\" (UID: \"c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6\") " pod="openshift-marketplace/community-operators-hwq8c" Jan 22 07:36:08 crc kubenswrapper[4933]: I0122 07:36:08.611278 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ltjxk\" (UniqueName: \"kubernetes.io/projected/c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6-kube-api-access-ltjxk\") pod \"community-operators-hwq8c\" (UID: \"c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6\") " pod="openshift-marketplace/community-operators-hwq8c" Jan 22 07:36:08 crc kubenswrapper[4933]: I0122 07:36:08.770292 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hwq8c"
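
Each of the c6a0aa58 pod's volumes above passes through the three stages the volume reconciler logs before the sandbox is started: operationExecutor.VerifyControllerAttachedVolume started, operationExecutor.MountVolume started, and MountVolume.SetUp succeeded. A hypothetical helper for tracing that sequence per volume when reading a log like this one (the regular expression is inferred from the lines above, not taken from kubelet source; feed the log on stdin, e.g. gunzip -c kubelet.log.gz | go run trace.go):

    package main

    import (
    	"bufio"
    	"fmt"
    	"os"
    	"regexp"
    )

    func main() {
    	// Capture the stage keyword and the quoted UniqueName that follows it.
    	re := regexp.MustCompile(`(VerifyControllerAttachedVolume started|MountVolume started|MountVolume\.SetUp succeeded) for volume \\"[^"\\]+\\" \(UniqueName: \\"([^"\\]+)\\"`)
    	stages := map[string][]string{} // UniqueName -> stages in the order seen
    	sc := bufio.NewScanner(os.Stdin)
    	sc.Buffer(make([]byte, 1<<20), 1<<20) // entries in this log are very long lines
    	for sc.Scan() {
    		for _, m := range re.FindAllStringSubmatch(sc.Text(), -1) {
    			stages[m[2]] = append(stages[m[2]], m[1])
    		}
    	}
    	for uniq, seen := range stages {
    		fmt.Printf("%s: %v\n", uniq, seen)
    	}
    }
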
Jan 22 07:36:09 crc kubenswrapper[4933]: W0122 07:36:09.242522 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc6a0aa58_ab8f_43ef_95f7_9f3e97e0ccb6.slice/crio-a606242a772b3be3cb38baaa7c681186af4835ce0a4a1dc53973ecef64cfb036 WatchSource:0}: Error finding container a606242a772b3be3cb38baaa7c681186af4835ce0a4a1dc53973ecef64cfb036: Status 404 returned error can't find the container with id a606242a772b3be3cb38baaa7c681186af4835ce0a4a1dc53973ecef64cfb036 Jan 22 07:36:09 crc kubenswrapper[4933]: I0122 07:36:09.245012 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-hwq8c"] Jan 22 07:36:09 crc kubenswrapper[4933]: I0122 07:36:09.712876 4933 generic.go:334] "Generic (PLEG): container finished" podID="c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6" containerID="d5ae926fdc6e7c481e6fcfd9c35105d89144d5f47f4362eb800aba90ff258b49" exitCode=0 Jan 22 07:36:09 crc kubenswrapper[4933]: I0122 07:36:09.712979 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hwq8c" event={"ID":"c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6","Type":"ContainerDied","Data":"d5ae926fdc6e7c481e6fcfd9c35105d89144d5f47f4362eb800aba90ff258b49"} Jan 22 07:36:09 crc kubenswrapper[4933]: I0122 07:36:09.713229 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hwq8c" event={"ID":"c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6","Type":"ContainerStarted","Data":"a606242a772b3be3cb38baaa7c681186af4835ce0a4a1dc53973ecef64cfb036"} Jan 22 07:36:10 crc kubenswrapper[4933]: I0122 07:36:10.687742 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-d2j4f"] Jan 22 07:36:10 crc kubenswrapper[4933]: I0122 07:36:10.692357 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d2j4f" Jan 22 07:36:10 crc kubenswrapper[4933]: I0122 07:36:10.706805 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-d2j4f"] Jan 22 07:36:10 crc kubenswrapper[4933]: I0122 07:36:10.753223 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fvjkw\" (UniqueName: \"kubernetes.io/projected/ad33d192-6e0f-4ff4-8799-8ac191364ae5-kube-api-access-fvjkw\") pod \"redhat-marketplace-d2j4f\" (UID: \"ad33d192-6e0f-4ff4-8799-8ac191364ae5\") " pod="openshift-marketplace/redhat-marketplace-d2j4f" Jan 22 07:36:10 crc kubenswrapper[4933]: I0122 07:36:10.756171 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad33d192-6e0f-4ff4-8799-8ac191364ae5-catalog-content\") pod \"redhat-marketplace-d2j4f\" (UID: \"ad33d192-6e0f-4ff4-8799-8ac191364ae5\") " pod="openshift-marketplace/redhat-marketplace-d2j4f" Jan 22 07:36:10 crc kubenswrapper[4933]: I0122 07:36:10.757684 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad33d192-6e0f-4ff4-8799-8ac191364ae5-utilities\") pod \"redhat-marketplace-d2j4f\" (UID: \"ad33d192-6e0f-4ff4-8799-8ac191364ae5\") " pod="openshift-marketplace/redhat-marketplace-d2j4f" Jan 22 07:36:10 crc kubenswrapper[4933]: I0122 07:36:10.860124 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fvjkw\" (UniqueName: \"kubernetes.io/projected/ad33d192-6e0f-4ff4-8799-8ac191364ae5-kube-api-access-fvjkw\") pod \"redhat-marketplace-d2j4f\" (UID: \"ad33d192-6e0f-4ff4-8799-8ac191364ae5\") " pod="openshift-marketplace/redhat-marketplace-d2j4f" Jan 22 07:36:10 crc kubenswrapper[4933]: I0122 07:36:10.860305 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad33d192-6e0f-4ff4-8799-8ac191364ae5-catalog-content\") pod \"redhat-marketplace-d2j4f\" (UID: \"ad33d192-6e0f-4ff4-8799-8ac191364ae5\") " pod="openshift-marketplace/redhat-marketplace-d2j4f" Jan 22 07:36:10 crc kubenswrapper[4933]: I0122 07:36:10.860377 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad33d192-6e0f-4ff4-8799-8ac191364ae5-utilities\") pod \"redhat-marketplace-d2j4f\" (UID: \"ad33d192-6e0f-4ff4-8799-8ac191364ae5\") " pod="openshift-marketplace/redhat-marketplace-d2j4f" Jan 22 07:36:10 crc kubenswrapper[4933]: I0122 07:36:10.860859 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad33d192-6e0f-4ff4-8799-8ac191364ae5-catalog-content\") pod \"redhat-marketplace-d2j4f\" (UID: \"ad33d192-6e0f-4ff4-8799-8ac191364ae5\") " pod="openshift-marketplace/redhat-marketplace-d2j4f" Jan 22 07:36:10 crc kubenswrapper[4933]: I0122 07:36:10.860899 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad33d192-6e0f-4ff4-8799-8ac191364ae5-utilities\") pod \"redhat-marketplace-d2j4f\" (UID: \"ad33d192-6e0f-4ff4-8799-8ac191364ae5\") " pod="openshift-marketplace/redhat-marketplace-d2j4f" Jan 22 07:36:10 crc kubenswrapper[4933]: I0122 07:36:10.891213 4933 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-fvjkw\" (UniqueName: \"kubernetes.io/projected/ad33d192-6e0f-4ff4-8799-8ac191364ae5-kube-api-access-fvjkw\") pod \"redhat-marketplace-d2j4f\" (UID: \"ad33d192-6e0f-4ff4-8799-8ac191364ae5\") " pod="openshift-marketplace/redhat-marketplace-d2j4f" Jan 22 07:36:11 crc kubenswrapper[4933]: I0122 07:36:11.029191 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d2j4f" Jan 22 07:36:11 crc kubenswrapper[4933]: I0122 07:36:11.042846 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-db-sync-kws6f"] Jan 22 07:36:11 crc kubenswrapper[4933]: I0122 07:36:11.050012 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-db-sync-kws6f"] Jan 22 07:36:11 crc kubenswrapper[4933]: W0122 07:36:11.547833 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podad33d192_6e0f_4ff4_8799_8ac191364ae5.slice/crio-f0979787775fe970c739439c12564ef77e343347cda35bd56776541cc6afd497 WatchSource:0}: Error finding container f0979787775fe970c739439c12564ef77e343347cda35bd56776541cc6afd497: Status 404 returned error can't find the container with id f0979787775fe970c739439c12564ef77e343347cda35bd56776541cc6afd497 Jan 22 07:36:11 crc kubenswrapper[4933]: I0122 07:36:11.552619 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-d2j4f"] Jan 22 07:36:11 crc kubenswrapper[4933]: I0122 07:36:11.762473 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d2j4f" event={"ID":"ad33d192-6e0f-4ff4-8799-8ac191364ae5","Type":"ContainerStarted","Data":"f0979787775fe970c739439c12564ef77e343347cda35bd56776541cc6afd497"} Jan 22 07:36:12 crc kubenswrapper[4933]: I0122 07:36:12.517128 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6a01cbd8-9d77-42b9-9848-bc2329258052" path="/var/lib/kubelet/pods/6a01cbd8-9d77-42b9-9848-bc2329258052/volumes" Jan 22 07:36:12 crc kubenswrapper[4933]: I0122 07:36:12.773368 4933 generic.go:334] "Generic (PLEG): container finished" podID="ad33d192-6e0f-4ff4-8799-8ac191364ae5" containerID="e214b78d37201a8c258d43446803733ba19d6eddc98b88ea29dbff321693cb5e" exitCode=0 Jan 22 07:36:12 crc kubenswrapper[4933]: I0122 07:36:12.773424 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d2j4f" event={"ID":"ad33d192-6e0f-4ff4-8799-8ac191364ae5","Type":"ContainerDied","Data":"e214b78d37201a8c258d43446803733ba19d6eddc98b88ea29dbff321693cb5e"} Jan 22 07:36:13 crc kubenswrapper[4933]: I0122 07:36:13.922886 4933 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 07:36:14 crc kubenswrapper[4933]: I0122 07:36:14.799472 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hwq8c" event={"ID":"c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6","Type":"ContainerStarted","Data":"e5c76025167706f815770296ecabc27ca0da568f0b82e69c99287f22472da44b"} Jan 22 07:36:14 crc kubenswrapper[4933]: I0122 07:36:14.801637 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d2j4f" event={"ID":"ad33d192-6e0f-4ff4-8799-8ac191364ae5","Type":"ContainerStarted","Data":"8d523b284a0c44592502275ccee58685e8cb94b1fcf447d82a22879e2bd72012"} Jan 22 07:36:16 crc kubenswrapper[4933]: I0122 07:36:16.824329 4933 generic.go:334] "Generic 
(PLEG): container finished" podID="ad33d192-6e0f-4ff4-8799-8ac191364ae5" containerID="8d523b284a0c44592502275ccee58685e8cb94b1fcf447d82a22879e2bd72012" exitCode=0 Jan 22 07:36:16 crc kubenswrapper[4933]: I0122 07:36:16.824438 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d2j4f" event={"ID":"ad33d192-6e0f-4ff4-8799-8ac191364ae5","Type":"ContainerDied","Data":"8d523b284a0c44592502275ccee58685e8cb94b1fcf447d82a22879e2bd72012"} Jan 22 07:36:16 crc kubenswrapper[4933]: I0122 07:36:16.827735 4933 generic.go:334] "Generic (PLEG): container finished" podID="c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6" containerID="e5c76025167706f815770296ecabc27ca0da568f0b82e69c99287f22472da44b" exitCode=0 Jan 22 07:36:16 crc kubenswrapper[4933]: I0122 07:36:16.827767 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hwq8c" event={"ID":"c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6","Type":"ContainerDied","Data":"e5c76025167706f815770296ecabc27ca0da568f0b82e69c99287f22472da44b"} Jan 22 07:36:17 crc kubenswrapper[4933]: I0122 07:36:17.840809 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d2j4f" event={"ID":"ad33d192-6e0f-4ff4-8799-8ac191364ae5","Type":"ContainerStarted","Data":"723a6f6e3a03ee2bf00b11fbc254623f83a79c8875b60cf5d464d3eb70a93f20"} Jan 22 07:36:17 crc kubenswrapper[4933]: I0122 07:36:17.847350 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hwq8c" event={"ID":"c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6","Type":"ContainerStarted","Data":"0507487f48dbe3ccbc5db2bf8ae30e1b4624acad03ae9620437b34e24f28bf39"} Jan 22 07:36:17 crc kubenswrapper[4933]: I0122 07:36:17.861500 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-d2j4f" podStartSLOduration=4.585751946 podStartE2EDuration="7.86148258s" podCreationTimestamp="2026-01-22 07:36:10 +0000 UTC" firstStartedPulling="2026-01-22 07:36:13.922625697 +0000 UTC m=+6621.759751050" lastFinishedPulling="2026-01-22 07:36:17.198356331 +0000 UTC m=+6625.035481684" observedRunningTime="2026-01-22 07:36:17.86110032 +0000 UTC m=+6625.698225683" watchObservedRunningTime="2026-01-22 07:36:17.86148258 +0000 UTC m=+6625.698607943" Jan 22 07:36:17 crc kubenswrapper[4933]: I0122 07:36:17.878110 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-hwq8c" podStartSLOduration=2.3018795340000002 podStartE2EDuration="9.878091915s" podCreationTimestamp="2026-01-22 07:36:08 +0000 UTC" firstStartedPulling="2026-01-22 07:36:09.71570905 +0000 UTC m=+6617.552834403" lastFinishedPulling="2026-01-22 07:36:17.291921431 +0000 UTC m=+6625.129046784" observedRunningTime="2026-01-22 07:36:17.875560043 +0000 UTC m=+6625.712685406" watchObservedRunningTime="2026-01-22 07:36:17.878091915 +0000 UTC m=+6625.715217268" Jan 22 07:36:18 crc kubenswrapper[4933]: I0122 07:36:18.771527 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-hwq8c" Jan 22 07:36:18 crc kubenswrapper[4933]: I0122 07:36:18.771942 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-hwq8c" Jan 22 07:36:19 crc kubenswrapper[4933]: I0122 07:36:19.818518 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-hwq8c" podUID="c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6" containerName="registry-server" probeResult="failure" output=< Jan 22 07:36:19 crc kubenswrapper[4933]: timeout: failed to connect service ":50051" within 1s Jan 22 07:36:19 crc kubenswrapper[4933]: >
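
The startup-probe failure above simply means nothing was accepting connections on the registry-server's gRPC port yet; once the catalog finishes loading, the same probe flips to status="started" a few entries later. Reduced to its essence, the check looks like the sketch below (a plain TCP dial standing in for the actual gRPC health check these catalog pods run):

    package main

    import (
    	"fmt"
    	"net"
    	"time"
    )

    func main() {
    	conn, err := net.DialTimeout("tcp", "localhost:50051", time.Second)
    	if err != nil {
    		// The failure mode in the log: the server is still unpacking its catalog.
    		fmt.Printf("timeout: failed to connect service %q within 1s\n", ":50051")
    		return
    	}
    	conn.Close()
    	fmt.Println("service is accepting connections")
    }
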
Jan 22 07:36:21 crc kubenswrapper[4933]: I0122 07:36:21.030152 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-d2j4f" Jan 22 07:36:21 crc kubenswrapper[4933]: I0122 07:36:21.030590 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-d2j4f" Jan 22 07:36:21 crc kubenswrapper[4933]: I0122 07:36:21.082966 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-d2j4f" Jan 22 07:36:28 crc kubenswrapper[4933]: I0122 07:36:28.819980 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-hwq8c" Jan 22 07:36:28 crc kubenswrapper[4933]: I0122 07:36:28.878658 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-hwq8c" Jan 22 07:36:29 crc kubenswrapper[4933]: I0122 07:36:29.054529 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-hwq8c"] Jan 22 07:36:29 crc kubenswrapper[4933]: I0122 07:36:29.965624 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-hwq8c" podUID="c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6" containerName="registry-server" containerID="cri-o://0507487f48dbe3ccbc5db2bf8ae30e1b4624acad03ae9620437b34e24f28bf39" gracePeriod=2 Jan 22 07:36:30 crc kubenswrapper[4933]: I0122 07:36:30.526223 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hwq8c" Jan 22 07:36:30 crc kubenswrapper[4933]: I0122 07:36:30.623488 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ltjxk\" (UniqueName: \"kubernetes.io/projected/c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6-kube-api-access-ltjxk\") pod \"c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6\" (UID: \"c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6\") " Jan 22 07:36:30 crc kubenswrapper[4933]: I0122 07:36:30.623776 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6-catalog-content\") pod \"c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6\" (UID: \"c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6\") " Jan 22 07:36:30 crc kubenswrapper[4933]: I0122 07:36:30.623819 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6-utilities\") pod \"c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6\" (UID: \"c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6\") " Jan 22 07:36:30 crc kubenswrapper[4933]: I0122 07:36:30.624896 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6-utilities" (OuterVolumeSpecName: "utilities") pod "c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6" (UID: "c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:36:30 crc kubenswrapper[4933]: I0122 07:36:30.625288 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 07:36:30 crc kubenswrapper[4933]: I0122 07:36:30.629387 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6-kube-api-access-ltjxk" (OuterVolumeSpecName: "kube-api-access-ltjxk") pod "c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6" (UID: "c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6"). InnerVolumeSpecName "kube-api-access-ltjxk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:36:30 crc kubenswrapper[4933]: I0122 07:36:30.676505 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6" (UID: "c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:36:30 crc kubenswrapper[4933]: I0122 07:36:30.727153 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ltjxk\" (UniqueName: \"kubernetes.io/projected/c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6-kube-api-access-ltjxk\") on node \"crc\" DevicePath \"\"" Jan 22 07:36:30 crc kubenswrapper[4933]: I0122 07:36:30.727211 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 07:36:30 crc kubenswrapper[4933]: I0122 07:36:30.977137 4933 generic.go:334] "Generic (PLEG): container finished" podID="c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6" containerID="0507487f48dbe3ccbc5db2bf8ae30e1b4624acad03ae9620437b34e24f28bf39" exitCode=0 Jan 22 07:36:30 crc kubenswrapper[4933]: I0122 07:36:30.977178 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hwq8c" event={"ID":"c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6","Type":"ContainerDied","Data":"0507487f48dbe3ccbc5db2bf8ae30e1b4624acad03ae9620437b34e24f28bf39"} Jan 22 07:36:30 crc kubenswrapper[4933]: I0122 07:36:30.977219 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-hwq8c" Jan 22 07:36:30 crc kubenswrapper[4933]: I0122 07:36:30.977236 4933 scope.go:117] "RemoveContainer" containerID="0507487f48dbe3ccbc5db2bf8ae30e1b4624acad03ae9620437b34e24f28bf39" Jan 22 07:36:30 crc kubenswrapper[4933]: I0122 07:36:30.977224 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hwq8c" event={"ID":"c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6","Type":"ContainerDied","Data":"a606242a772b3be3cb38baaa7c681186af4835ce0a4a1dc53973ecef64cfb036"} Jan 22 07:36:31 crc kubenswrapper[4933]: I0122 07:36:31.006453 4933 scope.go:117] "RemoveContainer" containerID="e5c76025167706f815770296ecabc27ca0da568f0b82e69c99287f22472da44b" Jan 22 07:36:31 crc kubenswrapper[4933]: I0122 07:36:31.013666 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-hwq8c"] Jan 22 07:36:31 crc kubenswrapper[4933]: I0122 07:36:31.022962 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-hwq8c"] Jan 22 07:36:31 crc kubenswrapper[4933]: I0122 07:36:31.064529 4933 scope.go:117] "RemoveContainer" containerID="d5ae926fdc6e7c481e6fcfd9c35105d89144d5f47f4362eb800aba90ff258b49" Jan 22 07:36:31 crc kubenswrapper[4933]: I0122 07:36:31.082448 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-d2j4f" Jan 22 07:36:31 crc kubenswrapper[4933]: I0122 07:36:31.086874 4933 scope.go:117] "RemoveContainer" containerID="0507487f48dbe3ccbc5db2bf8ae30e1b4624acad03ae9620437b34e24f28bf39" Jan 22 07:36:31 crc kubenswrapper[4933]: E0122 07:36:31.087264 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0507487f48dbe3ccbc5db2bf8ae30e1b4624acad03ae9620437b34e24f28bf39\": container with ID starting with 0507487f48dbe3ccbc5db2bf8ae30e1b4624acad03ae9620437b34e24f28bf39 not found: ID does not exist" containerID="0507487f48dbe3ccbc5db2bf8ae30e1b4624acad03ae9620437b34e24f28bf39" Jan 22 07:36:31 crc kubenswrapper[4933]: I0122 07:36:31.087305 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0507487f48dbe3ccbc5db2bf8ae30e1b4624acad03ae9620437b34e24f28bf39"} err="failed to get container status \"0507487f48dbe3ccbc5db2bf8ae30e1b4624acad03ae9620437b34e24f28bf39\": rpc error: code = NotFound desc = could not find container \"0507487f48dbe3ccbc5db2bf8ae30e1b4624acad03ae9620437b34e24f28bf39\": container with ID starting with 0507487f48dbe3ccbc5db2bf8ae30e1b4624acad03ae9620437b34e24f28bf39 not found: ID does not exist" Jan 22 07:36:31 crc kubenswrapper[4933]: I0122 07:36:31.087332 4933 scope.go:117] "RemoveContainer" containerID="e5c76025167706f815770296ecabc27ca0da568f0b82e69c99287f22472da44b" Jan 22 07:36:31 crc kubenswrapper[4933]: E0122 07:36:31.087948 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e5c76025167706f815770296ecabc27ca0da568f0b82e69c99287f22472da44b\": container with ID starting with e5c76025167706f815770296ecabc27ca0da568f0b82e69c99287f22472da44b not found: ID does not exist" containerID="e5c76025167706f815770296ecabc27ca0da568f0b82e69c99287f22472da44b" Jan 22 07:36:31 crc kubenswrapper[4933]: I0122 07:36:31.087997 4933 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"e5c76025167706f815770296ecabc27ca0da568f0b82e69c99287f22472da44b"} err="failed to get container status \"e5c76025167706f815770296ecabc27ca0da568f0b82e69c99287f22472da44b\": rpc error: code = NotFound desc = could not find container \"e5c76025167706f815770296ecabc27ca0da568f0b82e69c99287f22472da44b\": container with ID starting with e5c76025167706f815770296ecabc27ca0da568f0b82e69c99287f22472da44b not found: ID does not exist" Jan 22 07:36:31 crc kubenswrapper[4933]: I0122 07:36:31.088028 4933 scope.go:117] "RemoveContainer" containerID="d5ae926fdc6e7c481e6fcfd9c35105d89144d5f47f4362eb800aba90ff258b49" Jan 22 07:36:31 crc kubenswrapper[4933]: E0122 07:36:31.088450 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d5ae926fdc6e7c481e6fcfd9c35105d89144d5f47f4362eb800aba90ff258b49\": container with ID starting with d5ae926fdc6e7c481e6fcfd9c35105d89144d5f47f4362eb800aba90ff258b49 not found: ID does not exist" containerID="d5ae926fdc6e7c481e6fcfd9c35105d89144d5f47f4362eb800aba90ff258b49" Jan 22 07:36:31 crc kubenswrapper[4933]: I0122 07:36:31.088478 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d5ae926fdc6e7c481e6fcfd9c35105d89144d5f47f4362eb800aba90ff258b49"} err="failed to get container status \"d5ae926fdc6e7c481e6fcfd9c35105d89144d5f47f4362eb800aba90ff258b49\": rpc error: code = NotFound desc = could not find container \"d5ae926fdc6e7c481e6fcfd9c35105d89144d5f47f4362eb800aba90ff258b49\": container with ID starting with d5ae926fdc6e7c481e6fcfd9c35105d89144d5f47f4362eb800aba90ff258b49 not found: ID does not exist" Jan 22 07:36:32 crc kubenswrapper[4933]: I0122 07:36:32.503239 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6" path="/var/lib/kubelet/pods/c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6/volumes" Jan 22 07:36:33 crc kubenswrapper[4933]: I0122 07:36:33.461566 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-d2j4f"] Jan 22 07:36:33 crc kubenswrapper[4933]: I0122 07:36:33.462477 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-d2j4f" podUID="ad33d192-6e0f-4ff4-8799-8ac191364ae5" containerName="registry-server" containerID="cri-o://723a6f6e3a03ee2bf00b11fbc254623f83a79c8875b60cf5d464d3eb70a93f20" gracePeriod=2 Jan 22 07:36:33 crc kubenswrapper[4933]: I0122 07:36:33.966979 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d2j4f"
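
The E-level "ContainerStatus from runtime service failed ... NotFound" entries above are expected noise: after removing a container, the kubelet re-queries CRI-O for its status, and the runtime correctly answers that the ID no longer exists. The usual Go pattern for treating that answer as success in idempotent cleanup code, sketched against a gRPC-style error (removeContainer is a hypothetical stand-in for the CRI call):

    package main

    import (
    	"fmt"

    	"google.golang.org/grpc/codes"
    	"google.golang.org/grpc/status"
    )

    // removeContainer stands in for a CRI RemoveContainer round-trip.
    func removeContainer(id string) error {
    	return status.Errorf(codes.NotFound, "could not find container %q", id)
    }

    func cleanup(id string) error {
    	if err := removeContainer(id); err != nil && status.Code(err) != codes.NotFound {
    		return err // a real failure
    	}
    	return nil // removed now, or already gone: either way the goal state holds
    }

    func main() {
    	fmt.Println(cleanup("0507487f48db")) // <nil>
    }
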
Jan 22 07:36:34 crc kubenswrapper[4933]: I0122 07:36:34.011387 4933 generic.go:334] "Generic (PLEG): container finished" podID="ad33d192-6e0f-4ff4-8799-8ac191364ae5" containerID="723a6f6e3a03ee2bf00b11fbc254623f83a79c8875b60cf5d464d3eb70a93f20" exitCode=0 Jan 22 07:36:34 crc kubenswrapper[4933]: I0122 07:36:34.011455 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d2j4f" event={"ID":"ad33d192-6e0f-4ff4-8799-8ac191364ae5","Type":"ContainerDied","Data":"723a6f6e3a03ee2bf00b11fbc254623f83a79c8875b60cf5d464d3eb70a93f20"} Jan 22 07:36:34 crc kubenswrapper[4933]: I0122 07:36:34.011504 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d2j4f" event={"ID":"ad33d192-6e0f-4ff4-8799-8ac191364ae5","Type":"ContainerDied","Data":"f0979787775fe970c739439c12564ef77e343347cda35bd56776541cc6afd497"} Jan 22 07:36:34 crc kubenswrapper[4933]: I0122 07:36:34.011528 4933 scope.go:117] "RemoveContainer" containerID="723a6f6e3a03ee2bf00b11fbc254623f83a79c8875b60cf5d464d3eb70a93f20" Jan 22 07:36:34 crc kubenswrapper[4933]: I0122 07:36:34.011753 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d2j4f" Jan 22 07:36:34 crc kubenswrapper[4933]: I0122 07:36:34.034659 4933 scope.go:117] "RemoveContainer" containerID="8d523b284a0c44592502275ccee58685e8cb94b1fcf447d82a22879e2bd72012" Jan 22 07:36:34 crc kubenswrapper[4933]: I0122 07:36:34.053505 4933 scope.go:117] "RemoveContainer" containerID="e214b78d37201a8c258d43446803733ba19d6eddc98b88ea29dbff321693cb5e" Jan 22 07:36:34 crc kubenswrapper[4933]: I0122 07:36:34.104815 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad33d192-6e0f-4ff4-8799-8ac191364ae5-utilities\") pod \"ad33d192-6e0f-4ff4-8799-8ac191364ae5\" (UID: \"ad33d192-6e0f-4ff4-8799-8ac191364ae5\") " Jan 22 07:36:34 crc kubenswrapper[4933]: I0122 07:36:34.105323 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fvjkw\" (UniqueName: \"kubernetes.io/projected/ad33d192-6e0f-4ff4-8799-8ac191364ae5-kube-api-access-fvjkw\") pod \"ad33d192-6e0f-4ff4-8799-8ac191364ae5\" (UID: \"ad33d192-6e0f-4ff4-8799-8ac191364ae5\") " Jan 22 07:36:34 crc kubenswrapper[4933]: I0122 07:36:34.105449 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad33d192-6e0f-4ff4-8799-8ac191364ae5-catalog-content\") pod \"ad33d192-6e0f-4ff4-8799-8ac191364ae5\" (UID: \"ad33d192-6e0f-4ff4-8799-8ac191364ae5\") " Jan 22 07:36:34 crc kubenswrapper[4933]: I0122 07:36:34.105833 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ad33d192-6e0f-4ff4-8799-8ac191364ae5-utilities" (OuterVolumeSpecName: "utilities") pod "ad33d192-6e0f-4ff4-8799-8ac191364ae5" (UID: "ad33d192-6e0f-4ff4-8799-8ac191364ae5"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:36:34 crc kubenswrapper[4933]: I0122 07:36:34.106202 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad33d192-6e0f-4ff4-8799-8ac191364ae5-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 07:36:34 crc kubenswrapper[4933]: I0122 07:36:34.111042 4933 scope.go:117] "RemoveContainer" containerID="723a6f6e3a03ee2bf00b11fbc254623f83a79c8875b60cf5d464d3eb70a93f20" Jan 22 07:36:34 crc kubenswrapper[4933]: I0122 07:36:34.111768 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad33d192-6e0f-4ff4-8799-8ac191364ae5-kube-api-access-fvjkw" (OuterVolumeSpecName: "kube-api-access-fvjkw") pod "ad33d192-6e0f-4ff4-8799-8ac191364ae5" (UID: "ad33d192-6e0f-4ff4-8799-8ac191364ae5"). InnerVolumeSpecName "kube-api-access-fvjkw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:36:34 crc kubenswrapper[4933]: E0122 07:36:34.111938 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"723a6f6e3a03ee2bf00b11fbc254623f83a79c8875b60cf5d464d3eb70a93f20\": container with ID starting with 723a6f6e3a03ee2bf00b11fbc254623f83a79c8875b60cf5d464d3eb70a93f20 not found: ID does not exist" containerID="723a6f6e3a03ee2bf00b11fbc254623f83a79c8875b60cf5d464d3eb70a93f20" Jan 22 07:36:34 crc kubenswrapper[4933]: I0122 07:36:34.111988 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"723a6f6e3a03ee2bf00b11fbc254623f83a79c8875b60cf5d464d3eb70a93f20"} err="failed to get container status \"723a6f6e3a03ee2bf00b11fbc254623f83a79c8875b60cf5d464d3eb70a93f20\": rpc error: code = NotFound desc = could not find container \"723a6f6e3a03ee2bf00b11fbc254623f83a79c8875b60cf5d464d3eb70a93f20\": container with ID starting with 723a6f6e3a03ee2bf00b11fbc254623f83a79c8875b60cf5d464d3eb70a93f20 not found: ID does not exist" Jan 22 07:36:34 crc kubenswrapper[4933]: I0122 07:36:34.112019 4933 scope.go:117] "RemoveContainer" containerID="8d523b284a0c44592502275ccee58685e8cb94b1fcf447d82a22879e2bd72012" Jan 22 07:36:34 crc kubenswrapper[4933]: E0122 07:36:34.112374 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8d523b284a0c44592502275ccee58685e8cb94b1fcf447d82a22879e2bd72012\": container with ID starting with 8d523b284a0c44592502275ccee58685e8cb94b1fcf447d82a22879e2bd72012 not found: ID does not exist" containerID="8d523b284a0c44592502275ccee58685e8cb94b1fcf447d82a22879e2bd72012" Jan 22 07:36:34 crc kubenswrapper[4933]: I0122 07:36:34.112421 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d523b284a0c44592502275ccee58685e8cb94b1fcf447d82a22879e2bd72012"} err="failed to get container status \"8d523b284a0c44592502275ccee58685e8cb94b1fcf447d82a22879e2bd72012\": rpc error: code = NotFound desc = could not find container \"8d523b284a0c44592502275ccee58685e8cb94b1fcf447d82a22879e2bd72012\": container with ID starting with 8d523b284a0c44592502275ccee58685e8cb94b1fcf447d82a22879e2bd72012 not found: ID does not exist" Jan 22 07:36:34 crc kubenswrapper[4933]: I0122 07:36:34.112457 4933 scope.go:117] "RemoveContainer" containerID="e214b78d37201a8c258d43446803733ba19d6eddc98b88ea29dbff321693cb5e" Jan 22 07:36:34 crc kubenswrapper[4933]: E0122 07:36:34.112835 4933 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"e214b78d37201a8c258d43446803733ba19d6eddc98b88ea29dbff321693cb5e\": container with ID starting with e214b78d37201a8c258d43446803733ba19d6eddc98b88ea29dbff321693cb5e not found: ID does not exist" containerID="e214b78d37201a8c258d43446803733ba19d6eddc98b88ea29dbff321693cb5e" Jan 22 07:36:34 crc kubenswrapper[4933]: I0122 07:36:34.112883 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e214b78d37201a8c258d43446803733ba19d6eddc98b88ea29dbff321693cb5e"} err="failed to get container status \"e214b78d37201a8c258d43446803733ba19d6eddc98b88ea29dbff321693cb5e\": rpc error: code = NotFound desc = could not find container \"e214b78d37201a8c258d43446803733ba19d6eddc98b88ea29dbff321693cb5e\": container with ID starting with e214b78d37201a8c258d43446803733ba19d6eddc98b88ea29dbff321693cb5e not found: ID does not exist" Jan 22 07:36:34 crc kubenswrapper[4933]: I0122 07:36:34.128733 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ad33d192-6e0f-4ff4-8799-8ac191364ae5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ad33d192-6e0f-4ff4-8799-8ac191364ae5" (UID: "ad33d192-6e0f-4ff4-8799-8ac191364ae5"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:36:34 crc kubenswrapper[4933]: I0122 07:36:34.209203 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad33d192-6e0f-4ff4-8799-8ac191364ae5-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 07:36:34 crc kubenswrapper[4933]: I0122 07:36:34.209255 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fvjkw\" (UniqueName: \"kubernetes.io/projected/ad33d192-6e0f-4ff4-8799-8ac191364ae5-kube-api-access-fvjkw\") on node \"crc\" DevicePath \"\"" Jan 22 07:36:34 crc kubenswrapper[4933]: I0122 07:36:34.351145 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-d2j4f"] Jan 22 07:36:34 crc kubenswrapper[4933]: I0122 07:36:34.360265 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-d2j4f"] Jan 22 07:36:34 crc kubenswrapper[4933]: I0122 07:36:34.505394 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ad33d192-6e0f-4ff4-8799-8ac191364ae5" path="/var/lib/kubelet/pods/ad33d192-6e0f-4ff4-8799-8ac191364ae5/volumes" Jan 22 07:37:08 crc kubenswrapper[4933]: I0122 07:37:08.564376 4933 scope.go:117] "RemoveContainer" containerID="ec4363925d8bf174e46cffd00cb8c7eec6934d637ce74c1bb8c9bbd7da023f30" Jan 22 07:37:08 crc kubenswrapper[4933]: I0122 07:37:08.601732 4933 scope.go:117] "RemoveContainer" containerID="65ae41d1f038e952a5889a5077ee28582fa20712ea60f085952cdfebc0d4ae9d" Jan 22 07:37:10 crc kubenswrapper[4933]: I0122 07:37:10.943633 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 07:37:10 crc kubenswrapper[4933]: I0122 07:37:10.944884 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
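
The machine-config-daemon liveness probe above is a plain HTTP GET against 127.0.0.1:8798/health; "connection refused" means nothing is listening on the port, and repeated failures are what later drive the kubelet to restart the container. Stripped of the kubelet's bookkeeping, the check amounts to the following (a sketch; the kubelet's prober adds headers, redirect handling, and result accounting on top, and treats any 2xx/3xx response as success):

    package main

    import (
    	"fmt"
    	"net/http"
    	"time"
    )

    func main() {
    	client := &http.Client{Timeout: time.Second}
    	resp, err := client.Get("http://127.0.0.1:8798/health")
    	if err != nil {
    		// The log's failure mode: dial tcp 127.0.0.1:8798: connect: connection refused.
    		fmt.Println("Liveness probe failed:", err)
    		return
    	}
    	defer resp.Body.Close()
    	if resp.StatusCode >= 200 && resp.StatusCode < 400 {
    		fmt.Println("healthy:", resp.Status)
    	} else {
    		fmt.Println("unhealthy:", resp.Status)
    	}
    }
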
Jan 22 07:37:40 crc kubenswrapper[4933]: I0122 07:37:40.942812 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 07:37:40 crc kubenswrapper[4933]: I0122 07:37:40.943425 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 07:37:55 crc kubenswrapper[4933]: I0122 07:37:55.306587 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-fgwtn"] Jan 22 07:37:55 crc kubenswrapper[4933]: E0122 07:37:55.307747 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6" containerName="extract-utilities" Jan 22 07:37:55 crc kubenswrapper[4933]: I0122 07:37:55.307766 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6" containerName="extract-utilities" Jan 22 07:37:55 crc kubenswrapper[4933]: E0122 07:37:55.307776 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad33d192-6e0f-4ff4-8799-8ac191364ae5" containerName="extract-utilities" Jan 22 07:37:55 crc kubenswrapper[4933]: I0122 07:37:55.307784 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad33d192-6e0f-4ff4-8799-8ac191364ae5" containerName="extract-utilities" Jan 22 07:37:55 crc kubenswrapper[4933]: E0122 07:37:55.307799 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6" containerName="registry-server" Jan 22 07:37:55 crc kubenswrapper[4933]: I0122 07:37:55.307809 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6" containerName="registry-server" Jan 22 07:37:55 crc kubenswrapper[4933]: E0122 07:37:55.307823 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6" containerName="extract-content" Jan 22 07:37:55 crc kubenswrapper[4933]: I0122 07:37:55.307831 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6" containerName="extract-content" Jan 22 07:37:55 crc kubenswrapper[4933]: E0122 07:37:55.307862 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad33d192-6e0f-4ff4-8799-8ac191364ae5" containerName="extract-content" Jan 22 07:37:55 crc kubenswrapper[4933]: I0122 07:37:55.307869 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad33d192-6e0f-4ff4-8799-8ac191364ae5" containerName="extract-content" Jan 22 07:37:55 crc kubenswrapper[4933]: E0122 07:37:55.307883 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad33d192-6e0f-4ff4-8799-8ac191364ae5" containerName="registry-server" Jan 22 07:37:55 crc kubenswrapper[4933]: I0122 07:37:55.307890 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad33d192-6e0f-4ff4-8799-8ac191364ae5" containerName="registry-server" Jan 22 07:37:55 crc kubenswrapper[4933]: I0122 07:37:55.308131 4933 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="ad33d192-6e0f-4ff4-8799-8ac191364ae5" containerName="registry-server" Jan 22 07:37:55 crc kubenswrapper[4933]: I0122 07:37:55.308155 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6a0aa58-ab8f-43ef-95f7-9f3e97e0ccb6" containerName="registry-server" Jan 22 07:37:55 crc kubenswrapper[4933]: I0122 07:37:55.309903 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fgwtn" Jan 22 07:37:55 crc kubenswrapper[4933]: I0122 07:37:55.320465 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fgwtn"] Jan 22 07:37:55 crc kubenswrapper[4933]: I0122 07:37:55.332193 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7896986b-1462-4b45-a6ad-3a2636e6c76c-utilities\") pod \"certified-operators-fgwtn\" (UID: \"7896986b-1462-4b45-a6ad-3a2636e6c76c\") " pod="openshift-marketplace/certified-operators-fgwtn" Jan 22 07:37:55 crc kubenswrapper[4933]: I0122 07:37:55.332586 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-msk8w\" (UniqueName: \"kubernetes.io/projected/7896986b-1462-4b45-a6ad-3a2636e6c76c-kube-api-access-msk8w\") pod \"certified-operators-fgwtn\" (UID: \"7896986b-1462-4b45-a6ad-3a2636e6c76c\") " pod="openshift-marketplace/certified-operators-fgwtn" Jan 22 07:37:55 crc kubenswrapper[4933]: I0122 07:37:55.332620 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7896986b-1462-4b45-a6ad-3a2636e6c76c-catalog-content\") pod \"certified-operators-fgwtn\" (UID: \"7896986b-1462-4b45-a6ad-3a2636e6c76c\") " pod="openshift-marketplace/certified-operators-fgwtn" Jan 22 07:37:55 crc kubenswrapper[4933]: I0122 07:37:55.434315 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-msk8w\" (UniqueName: \"kubernetes.io/projected/7896986b-1462-4b45-a6ad-3a2636e6c76c-kube-api-access-msk8w\") pod \"certified-operators-fgwtn\" (UID: \"7896986b-1462-4b45-a6ad-3a2636e6c76c\") " pod="openshift-marketplace/certified-operators-fgwtn" Jan 22 07:37:55 crc kubenswrapper[4933]: I0122 07:37:55.434370 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7896986b-1462-4b45-a6ad-3a2636e6c76c-catalog-content\") pod \"certified-operators-fgwtn\" (UID: \"7896986b-1462-4b45-a6ad-3a2636e6c76c\") " pod="openshift-marketplace/certified-operators-fgwtn" Jan 22 07:37:55 crc kubenswrapper[4933]: I0122 07:37:55.434531 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7896986b-1462-4b45-a6ad-3a2636e6c76c-utilities\") pod \"certified-operators-fgwtn\" (UID: \"7896986b-1462-4b45-a6ad-3a2636e6c76c\") " pod="openshift-marketplace/certified-operators-fgwtn" Jan 22 07:37:55 crc kubenswrapper[4933]: I0122 07:37:55.435003 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7896986b-1462-4b45-a6ad-3a2636e6c76c-catalog-content\") pod \"certified-operators-fgwtn\" (UID: \"7896986b-1462-4b45-a6ad-3a2636e6c76c\") " pod="openshift-marketplace/certified-operators-fgwtn" Jan 22 07:37:55 crc kubenswrapper[4933]: I0122 07:37:55.435107 4933 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7896986b-1462-4b45-a6ad-3a2636e6c76c-utilities\") pod \"certified-operators-fgwtn\" (UID: \"7896986b-1462-4b45-a6ad-3a2636e6c76c\") " pod="openshift-marketplace/certified-operators-fgwtn" Jan 22 07:37:55 crc kubenswrapper[4933]: I0122 07:37:55.453845 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-msk8w\" (UniqueName: \"kubernetes.io/projected/7896986b-1462-4b45-a6ad-3a2636e6c76c-kube-api-access-msk8w\") pod \"certified-operators-fgwtn\" (UID: \"7896986b-1462-4b45-a6ad-3a2636e6c76c\") " pod="openshift-marketplace/certified-operators-fgwtn" Jan 22 07:37:55 crc kubenswrapper[4933]: I0122 07:37:55.634705 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fgwtn" Jan 22 07:37:56 crc kubenswrapper[4933]: I0122 07:37:56.022776 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fgwtn"] Jan 22 07:37:56 crc kubenswrapper[4933]: I0122 07:37:56.791888 4933 generic.go:334] "Generic (PLEG): container finished" podID="7896986b-1462-4b45-a6ad-3a2636e6c76c" containerID="095c9c9f13cb47b5348bd434bfc017a3c04ec465220feebe52d7e781a4f86d64" exitCode=0 Jan 22 07:37:56 crc kubenswrapper[4933]: I0122 07:37:56.792044 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fgwtn" event={"ID":"7896986b-1462-4b45-a6ad-3a2636e6c76c","Type":"ContainerDied","Data":"095c9c9f13cb47b5348bd434bfc017a3c04ec465220feebe52d7e781a4f86d64"} Jan 22 07:37:56 crc kubenswrapper[4933]: I0122 07:37:56.792345 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fgwtn" event={"ID":"7896986b-1462-4b45-a6ad-3a2636e6c76c","Type":"ContainerStarted","Data":"ac780fb66b255f4a2a6c0b91110c8838fa5154ea776a4a90660087bf85ec725c"} Jan 22 07:37:58 crc kubenswrapper[4933]: I0122 07:37:58.817705 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fgwtn" event={"ID":"7896986b-1462-4b45-a6ad-3a2636e6c76c","Type":"ContainerStarted","Data":"3ac60a2264f5153716c4d3ceba720420e5afe4130553cc218d5c56666b0b30ee"} Jan 22 07:38:00 crc kubenswrapper[4933]: I0122 07:38:00.838220 4933 generic.go:334] "Generic (PLEG): container finished" podID="7896986b-1462-4b45-a6ad-3a2636e6c76c" containerID="3ac60a2264f5153716c4d3ceba720420e5afe4130553cc218d5c56666b0b30ee" exitCode=0 Jan 22 07:38:00 crc kubenswrapper[4933]: I0122 07:38:00.838293 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fgwtn" event={"ID":"7896986b-1462-4b45-a6ad-3a2636e6c76c","Type":"ContainerDied","Data":"3ac60a2264f5153716c4d3ceba720420e5afe4130553cc218d5c56666b0b30ee"} Jan 22 07:38:01 crc kubenswrapper[4933]: I0122 07:38:01.849493 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fgwtn" event={"ID":"7896986b-1462-4b45-a6ad-3a2636e6c76c","Type":"ContainerStarted","Data":"8749b7376658880a4ff27f7c06d83b2123d396d816ae1d0889e92e89044ab241"} Jan 22 07:38:01 crc kubenswrapper[4933]: I0122 07:38:01.874285 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-fgwtn" podStartSLOduration=2.3739442410000002 podStartE2EDuration="6.874265957s" podCreationTimestamp="2026-01-22 07:37:55 +0000 UTC" firstStartedPulling="2026-01-22 
07:37:56.794487921 +0000 UTC m=+6724.631613274" lastFinishedPulling="2026-01-22 07:38:01.294809647 +0000 UTC m=+6729.131934990" observedRunningTime="2026-01-22 07:38:01.870243649 +0000 UTC m=+6729.707369002" watchObservedRunningTime="2026-01-22 07:38:01.874265957 +0000 UTC m=+6729.711391310" Jan 22 07:38:05 crc kubenswrapper[4933]: I0122 07:38:05.635292 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-fgwtn" Jan 22 07:38:05 crc kubenswrapper[4933]: I0122 07:38:05.635860 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-fgwtn" Jan 22 07:38:05 crc kubenswrapper[4933]: I0122 07:38:05.693485 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-fgwtn" Jan 22 07:38:10 crc kubenswrapper[4933]: I0122 07:38:10.943591 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 07:38:10 crc kubenswrapper[4933]: I0122 07:38:10.945476 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 07:38:10 crc kubenswrapper[4933]: I0122 07:38:10.945616 4933 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" Jan 22 07:38:10 crc kubenswrapper[4933]: I0122 07:38:10.947060 4933 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7c01cab701a41f6523c6eaebcbbfcdad454c8642cad1bb9ca7d0db276972a804"} pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 07:38:10 crc kubenswrapper[4933]: I0122 07:38:10.947226 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" containerID="cri-o://7c01cab701a41f6523c6eaebcbbfcdad454c8642cad1bb9ca7d0db276972a804" gracePeriod=600 Jan 22 07:38:11 crc kubenswrapper[4933]: E0122 07:38:11.084955 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:38:11 crc kubenswrapper[4933]: I0122 07:38:11.943062 4933 generic.go:334] "Generic (PLEG): container finished" podID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerID="7c01cab701a41f6523c6eaebcbbfcdad454c8642cad1bb9ca7d0db276972a804" exitCode=0 Jan 22 07:38:11 crc kubenswrapper[4933]: I0122 07:38:11.943155 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerDied","Data":"7c01cab701a41f6523c6eaebcbbfcdad454c8642cad1bb9ca7d0db276972a804"}
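
After the liveness-triggered kill, restarts are rate-limited: CrashLoopBackOff roughly doubles the delay after each failed start, up to the "back-off 5m0s" cap quoted in the error, which is why the retries below keep being skipped rather than restarted immediately. A back-of-the-envelope schedule under that policy (the 5m cap comes from the log; the 10s initial delay and the doubling are assumed kubelet defaults, not shown here):

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	maxDelay := 5 * time.Minute // the "back-off 5m0s" in the log
    	delay := 10 * time.Second   // assumed initial backoff
    	for i := 1; i <= 7; i++ {
    		if delay > maxDelay {
    			delay = maxDelay
    		}
    		fmt.Printf("failed start #%d: next attempt in %v\n", i, delay)
    		delay *= 2
    	}
    }
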
Jan 22 07:38:11 crc kubenswrapper[4933]: I0122 07:38:11.943637 4933 scope.go:117] "RemoveContainer" containerID="1b32e660a15a3b4635250c60c37311d6f65b1e17832e99f37c6d9d25bd8e5087" Jan 22 07:38:11 crc kubenswrapper[4933]: I0122 07:38:11.944524 4933 scope.go:117] "RemoveContainer" containerID="7c01cab701a41f6523c6eaebcbbfcdad454c8642cad1bb9ca7d0db276972a804" Jan 22 07:38:11 crc kubenswrapper[4933]: E0122 07:38:11.944954 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:38:15 crc kubenswrapper[4933]: I0122 07:38:15.689741 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-fgwtn" Jan 22 07:38:15 crc kubenswrapper[4933]: I0122 07:38:15.746707 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-fgwtn"] Jan 22 07:38:15 crc kubenswrapper[4933]: I0122 07:38:15.988843 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-fgwtn" podUID="7896986b-1462-4b45-a6ad-3a2636e6c76c" containerName="registry-server" containerID="cri-o://8749b7376658880a4ff27f7c06d83b2123d396d816ae1d0889e92e89044ab241" gracePeriod=2 Jan 22 07:38:16 crc kubenswrapper[4933]: I0122 07:38:16.597204 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fgwtn" Jan 22 07:38:16 crc kubenswrapper[4933]: I0122 07:38:16.698472 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-msk8w\" (UniqueName: \"kubernetes.io/projected/7896986b-1462-4b45-a6ad-3a2636e6c76c-kube-api-access-msk8w\") pod \"7896986b-1462-4b45-a6ad-3a2636e6c76c\" (UID: \"7896986b-1462-4b45-a6ad-3a2636e6c76c\") " Jan 22 07:38:16 crc kubenswrapper[4933]: I0122 07:38:16.699881 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7896986b-1462-4b45-a6ad-3a2636e6c76c-catalog-content\") pod \"7896986b-1462-4b45-a6ad-3a2636e6c76c\" (UID: \"7896986b-1462-4b45-a6ad-3a2636e6c76c\") " Jan 22 07:38:16 crc kubenswrapper[4933]: I0122 07:38:16.700071 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7896986b-1462-4b45-a6ad-3a2636e6c76c-utilities\") pod \"7896986b-1462-4b45-a6ad-3a2636e6c76c\" (UID: \"7896986b-1462-4b45-a6ad-3a2636e6c76c\") " Jan 22 07:38:16 crc kubenswrapper[4933]: I0122 07:38:16.701257 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7896986b-1462-4b45-a6ad-3a2636e6c76c-utilities" (OuterVolumeSpecName: "utilities") pod "7896986b-1462-4b45-a6ad-3a2636e6c76c" (UID: "7896986b-1462-4b45-a6ad-3a2636e6c76c"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:38:16 crc kubenswrapper[4933]: I0122 07:38:16.706247 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7896986b-1462-4b45-a6ad-3a2636e6c76c-kube-api-access-msk8w" (OuterVolumeSpecName: "kube-api-access-msk8w") pod "7896986b-1462-4b45-a6ad-3a2636e6c76c" (UID: "7896986b-1462-4b45-a6ad-3a2636e6c76c"). InnerVolumeSpecName "kube-api-access-msk8w". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:38:16 crc kubenswrapper[4933]: I0122 07:38:16.757582 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7896986b-1462-4b45-a6ad-3a2636e6c76c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7896986b-1462-4b45-a6ad-3a2636e6c76c" (UID: "7896986b-1462-4b45-a6ad-3a2636e6c76c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:38:16 crc kubenswrapper[4933]: I0122 07:38:16.802655 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7896986b-1462-4b45-a6ad-3a2636e6c76c-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 07:38:16 crc kubenswrapper[4933]: I0122 07:38:16.802697 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7896986b-1462-4b45-a6ad-3a2636e6c76c-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 07:38:16 crc kubenswrapper[4933]: I0122 07:38:16.802709 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-msk8w\" (UniqueName: \"kubernetes.io/projected/7896986b-1462-4b45-a6ad-3a2636e6c76c-kube-api-access-msk8w\") on node \"crc\" DevicePath \"\"" Jan 22 07:38:17 crc kubenswrapper[4933]: I0122 07:38:17.000240 4933 generic.go:334] "Generic (PLEG): container finished" podID="7896986b-1462-4b45-a6ad-3a2636e6c76c" containerID="8749b7376658880a4ff27f7c06d83b2123d396d816ae1d0889e92e89044ab241" exitCode=0 Jan 22 07:38:17 crc kubenswrapper[4933]: I0122 07:38:17.000307 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fgwtn" event={"ID":"7896986b-1462-4b45-a6ad-3a2636e6c76c","Type":"ContainerDied","Data":"8749b7376658880a4ff27f7c06d83b2123d396d816ae1d0889e92e89044ab241"} Jan 22 07:38:17 crc kubenswrapper[4933]: I0122 07:38:17.000353 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-fgwtn" Jan 22 07:38:17 crc kubenswrapper[4933]: I0122 07:38:17.000386 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fgwtn" event={"ID":"7896986b-1462-4b45-a6ad-3a2636e6c76c","Type":"ContainerDied","Data":"ac780fb66b255f4a2a6c0b91110c8838fa5154ea776a4a90660087bf85ec725c"} Jan 22 07:38:17 crc kubenswrapper[4933]: I0122 07:38:17.000414 4933 scope.go:117] "RemoveContainer" containerID="8749b7376658880a4ff27f7c06d83b2123d396d816ae1d0889e92e89044ab241" Jan 22 07:38:17 crc kubenswrapper[4933]: I0122 07:38:17.022937 4933 scope.go:117] "RemoveContainer" containerID="3ac60a2264f5153716c4d3ceba720420e5afe4130553cc218d5c56666b0b30ee" Jan 22 07:38:17 crc kubenswrapper[4933]: I0122 07:38:17.044717 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-fgwtn"] Jan 22 07:38:17 crc kubenswrapper[4933]: I0122 07:38:17.060197 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-fgwtn"] Jan 22 07:38:17 crc kubenswrapper[4933]: I0122 07:38:17.066981 4933 scope.go:117] "RemoveContainer" containerID="095c9c9f13cb47b5348bd434bfc017a3c04ec465220feebe52d7e781a4f86d64" Jan 22 07:38:17 crc kubenswrapper[4933]: I0122 07:38:17.101042 4933 scope.go:117] "RemoveContainer" containerID="8749b7376658880a4ff27f7c06d83b2123d396d816ae1d0889e92e89044ab241" Jan 22 07:38:17 crc kubenswrapper[4933]: E0122 07:38:17.103209 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8749b7376658880a4ff27f7c06d83b2123d396d816ae1d0889e92e89044ab241\": container with ID starting with 8749b7376658880a4ff27f7c06d83b2123d396d816ae1d0889e92e89044ab241 not found: ID does not exist" containerID="8749b7376658880a4ff27f7c06d83b2123d396d816ae1d0889e92e89044ab241" Jan 22 07:38:17 crc kubenswrapper[4933]: I0122 07:38:17.103245 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8749b7376658880a4ff27f7c06d83b2123d396d816ae1d0889e92e89044ab241"} err="failed to get container status \"8749b7376658880a4ff27f7c06d83b2123d396d816ae1d0889e92e89044ab241\": rpc error: code = NotFound desc = could not find container \"8749b7376658880a4ff27f7c06d83b2123d396d816ae1d0889e92e89044ab241\": container with ID starting with 8749b7376658880a4ff27f7c06d83b2123d396d816ae1d0889e92e89044ab241 not found: ID does not exist" Jan 22 07:38:17 crc kubenswrapper[4933]: I0122 07:38:17.103275 4933 scope.go:117] "RemoveContainer" containerID="3ac60a2264f5153716c4d3ceba720420e5afe4130553cc218d5c56666b0b30ee" Jan 22 07:38:17 crc kubenswrapper[4933]: E0122 07:38:17.103544 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3ac60a2264f5153716c4d3ceba720420e5afe4130553cc218d5c56666b0b30ee\": container with ID starting with 3ac60a2264f5153716c4d3ceba720420e5afe4130553cc218d5c56666b0b30ee not found: ID does not exist" containerID="3ac60a2264f5153716c4d3ceba720420e5afe4130553cc218d5c56666b0b30ee" Jan 22 07:38:17 crc kubenswrapper[4933]: I0122 07:38:17.103594 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3ac60a2264f5153716c4d3ceba720420e5afe4130553cc218d5c56666b0b30ee"} err="failed to get container status \"3ac60a2264f5153716c4d3ceba720420e5afe4130553cc218d5c56666b0b30ee\": rpc error: code = NotFound desc = could not find 
container \"3ac60a2264f5153716c4d3ceba720420e5afe4130553cc218d5c56666b0b30ee\": container with ID starting with 3ac60a2264f5153716c4d3ceba720420e5afe4130553cc218d5c56666b0b30ee not found: ID does not exist" Jan 22 07:38:17 crc kubenswrapper[4933]: I0122 07:38:17.103626 4933 scope.go:117] "RemoveContainer" containerID="095c9c9f13cb47b5348bd434bfc017a3c04ec465220feebe52d7e781a4f86d64" Jan 22 07:38:17 crc kubenswrapper[4933]: E0122 07:38:17.103989 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"095c9c9f13cb47b5348bd434bfc017a3c04ec465220feebe52d7e781a4f86d64\": container with ID starting with 095c9c9f13cb47b5348bd434bfc017a3c04ec465220feebe52d7e781a4f86d64 not found: ID does not exist" containerID="095c9c9f13cb47b5348bd434bfc017a3c04ec465220feebe52d7e781a4f86d64" Jan 22 07:38:17 crc kubenswrapper[4933]: I0122 07:38:17.104030 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"095c9c9f13cb47b5348bd434bfc017a3c04ec465220feebe52d7e781a4f86d64"} err="failed to get container status \"095c9c9f13cb47b5348bd434bfc017a3c04ec465220feebe52d7e781a4f86d64\": rpc error: code = NotFound desc = could not find container \"095c9c9f13cb47b5348bd434bfc017a3c04ec465220feebe52d7e781a4f86d64\": container with ID starting with 095c9c9f13cb47b5348bd434bfc017a3c04ec465220feebe52d7e781a4f86d64 not found: ID does not exist" Jan 22 07:38:18 crc kubenswrapper[4933]: I0122 07:38:18.502914 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7896986b-1462-4b45-a6ad-3a2636e6c76c" path="/var/lib/kubelet/pods/7896986b-1462-4b45-a6ad-3a2636e6c76c/volumes" Jan 22 07:38:23 crc kubenswrapper[4933]: I0122 07:38:23.490788 4933 scope.go:117] "RemoveContainer" containerID="7c01cab701a41f6523c6eaebcbbfcdad454c8642cad1bb9ca7d0db276972a804" Jan 22 07:38:23 crc kubenswrapper[4933]: E0122 07:38:23.491638 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:38:34 crc kubenswrapper[4933]: I0122 07:38:34.491875 4933 scope.go:117] "RemoveContainer" containerID="7c01cab701a41f6523c6eaebcbbfcdad454c8642cad1bb9ca7d0db276972a804" Jan 22 07:38:34 crc kubenswrapper[4933]: E0122 07:38:34.493021 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:38:47 crc kubenswrapper[4933]: I0122 07:38:47.490661 4933 scope.go:117] "RemoveContainer" containerID="7c01cab701a41f6523c6eaebcbbfcdad454c8642cad1bb9ca7d0db276972a804" Jan 22 07:38:47 crc kubenswrapper[4933]: E0122 07:38:47.491446 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
Jan 22 07:39:00 crc kubenswrapper[4933]: I0122 07:39:00.491897 4933 scope.go:117] "RemoveContainer" containerID="7c01cab701a41f6523c6eaebcbbfcdad454c8642cad1bb9ca7d0db276972a804"
Jan 22 07:39:00 crc kubenswrapper[4933]: E0122 07:39:00.492809 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 07:39:12 crc kubenswrapper[4933]: I0122 07:39:12.504283 4933 scope.go:117] "RemoveContainer" containerID="7c01cab701a41f6523c6eaebcbbfcdad454c8642cad1bb9ca7d0db276972a804"
Jan 22 07:39:12 crc kubenswrapper[4933]: E0122 07:39:12.505673 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 07:39:26 crc kubenswrapper[4933]: I0122 07:39:26.492250 4933 scope.go:117] "RemoveContainer" containerID="7c01cab701a41f6523c6eaebcbbfcdad454c8642cad1bb9ca7d0db276972a804"
Jan 22 07:39:26 crc kubenswrapper[4933]: E0122 07:39:26.492876 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 07:39:40 crc kubenswrapper[4933]: I0122 07:39:40.491532 4933 scope.go:117] "RemoveContainer" containerID="7c01cab701a41f6523c6eaebcbbfcdad454c8642cad1bb9ca7d0db276972a804"
Jan 22 07:39:40 crc kubenswrapper[4933]: E0122 07:39:40.492469 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 07:39:51 crc kubenswrapper[4933]: I0122 07:39:51.037492 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-create-4b4lx"]
Jan 22 07:39:51 crc kubenswrapper[4933]: I0122 07:39:51.048404 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-5e82-account-create-update-vs4tx"]
Jan 22 07:39:51 crc kubenswrapper[4933]: I0122 07:39:51.056841 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-create-4b4lx"]
Jan 22 07:39:51 crc kubenswrapper[4933]: I0122 07:39:51.064549 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-5e82-account-create-update-vs4tx"]
Jan 22 07:39:51 crc kubenswrapper[4933]: I0122 07:39:51.491320 4933 scope.go:117] "RemoveContainer" containerID="7c01cab701a41f6523c6eaebcbbfcdad454c8642cad1bb9ca7d0db276972a804"
Jan 22 07:39:51 crc kubenswrapper[4933]: E0122 07:39:51.491702 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 07:39:52 crc kubenswrapper[4933]: I0122 07:39:52.525183 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9cde1ab7-883c-49c7-9d99-970ac204daf1" path="/var/lib/kubelet/pods/9cde1ab7-883c-49c7-9d99-970ac204daf1/volumes"
Jan 22 07:39:52 crc kubenswrapper[4933]: I0122 07:39:52.526680 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cbf7bf50-e1b7-4fb5-af73-b949ae25b1e4" path="/var/lib/kubelet/pods/cbf7bf50-e1b7-4fb5-af73-b949ae25b1e4/volumes"
Jan 22 07:40:06 crc kubenswrapper[4933]: I0122 07:40:06.055990 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-sync-2vflz"]
Jan 22 07:40:06 crc kubenswrapper[4933]: I0122 07:40:06.065657 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-sync-2vflz"]
Jan 22 07:40:06 crc kubenswrapper[4933]: I0122 07:40:06.491063 4933 scope.go:117] "RemoveContainer" containerID="7c01cab701a41f6523c6eaebcbbfcdad454c8642cad1bb9ca7d0db276972a804"
Jan 22 07:40:06 crc kubenswrapper[4933]: E0122 07:40:06.491619 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 07:40:06 crc kubenswrapper[4933]: I0122 07:40:06.503846 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a9a2014a-9cf1-447f-ba46-0bd389003bf7" path="/var/lib/kubelet/pods/a9a2014a-9cf1-447f-ba46-0bd389003bf7/volumes"
Jan 22 07:40:08 crc kubenswrapper[4933]: I0122 07:40:08.794810 4933 scope.go:117] "RemoveContainer" containerID="7c1b23d40a9cef7ff4ed6eb31a8a418946befcadbb1b22d9482b4740e952c236"
Jan 22 07:40:08 crc kubenswrapper[4933]: I0122 07:40:08.823639 4933 scope.go:117] "RemoveContainer" containerID="955005e0b0c176b5dbbb30932c6460dd305769d2af7b76a6a4d4b5e16d39e333"
Jan 22 07:40:08 crc kubenswrapper[4933]: I0122 07:40:08.846221 4933 scope.go:117] "RemoveContainer" containerID="7ef209d0698bfe81ba7c846ab6c3525756cd905f0c2ad77c2a1e66f05cd87dfb"
Jan 22 07:40:08 crc kubenswrapper[4933]: I0122 07:40:08.864829 4933 scope.go:117] "RemoveContainer" containerID="32c90cdd25f7dc620f09bbf3b3d9705d342899648d3883a4a4d1e70ea468b9f8"
Jan 22 07:40:08 crc kubenswrapper[4933]: I0122 07:40:08.937058 4933 scope.go:117] "RemoveContainer" containerID="f34b904e1b86a2167f4c6c0644d56c72e7f6b7cdc82d89991ec84ae1f04aa5c6"
Jan 22 07:40:08 crc kubenswrapper[4933]: I0122 07:40:08.958486 4933 scope.go:117] "RemoveContainer" containerID="a07ca5909ec1cef50965ab17eeb78ddcb34df7fea965528dbe7874b10123d663"
Jan 22 07:40:09 crc kubenswrapper[4933]: I0122 07:40:09.003787 4933 scope.go:117] "RemoveContainer" containerID="8e098a8fde559b2fb9303f4def2d27c23fe5ab783412916e35ede0c49b0152cc"
Jan 22 07:40:09 crc kubenswrapper[4933]: I0122 07:40:09.067328 4933 scope.go:117] "RemoveContainer" containerID="fd46f3a17172030688b626c0cfc56cd405832a105f033af5f810834172c662e0"
Jan 22 07:40:09 crc kubenswrapper[4933]: I0122 07:40:09.086170 4933 scope.go:117] "RemoveContainer" containerID="5b51e355749b30de8a876e0235d73537675adcb118b560e67045b16896c710e5"
Jan 22 07:40:17 crc kubenswrapper[4933]: I0122 07:40:17.491736 4933 scope.go:117] "RemoveContainer" containerID="7c01cab701a41f6523c6eaebcbbfcdad454c8642cad1bb9ca7d0db276972a804"
Jan 22 07:40:17 crc kubenswrapper[4933]: E0122 07:40:17.492801 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 07:40:28 crc kubenswrapper[4933]: I0122 07:40:28.490692 4933 scope.go:117] "RemoveContainer" containerID="7c01cab701a41f6523c6eaebcbbfcdad454c8642cad1bb9ca7d0db276972a804"
Jan 22 07:40:28 crc kubenswrapper[4933]: E0122 07:40:28.491595 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 07:40:43 crc kubenswrapper[4933]: I0122 07:40:43.490722 4933 scope.go:117] "RemoveContainer" containerID="7c01cab701a41f6523c6eaebcbbfcdad454c8642cad1bb9ca7d0db276972a804"
Jan 22 07:40:43 crc kubenswrapper[4933]: E0122 07:40:43.491447 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 07:40:57 crc kubenswrapper[4933]: I0122 07:40:57.490699 4933 scope.go:117] "RemoveContainer" containerID="7c01cab701a41f6523c6eaebcbbfcdad454c8642cad1bb9ca7d0db276972a804"
Jan 22 07:40:57 crc kubenswrapper[4933]: E0122 07:40:57.491618 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 07:41:11 crc kubenswrapper[4933]: I0122 07:41:11.491456 4933 scope.go:117] "RemoveContainer" containerID="7c01cab701a41f6523c6eaebcbbfcdad454c8642cad1bb9ca7d0db276972a804"
Jan 22 07:41:11 crc kubenswrapper[4933]: E0122 07:41:11.492286 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 07:41:24 crc kubenswrapper[4933]: I0122 07:41:24.491659 4933 scope.go:117] "RemoveContainer" containerID="7c01cab701a41f6523c6eaebcbbfcdad454c8642cad1bb9ca7d0db276972a804"
Jan 22 07:41:24 crc kubenswrapper[4933]: E0122 07:41:24.493467 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 07:41:39 crc kubenswrapper[4933]: I0122 07:41:39.490898 4933 scope.go:117] "RemoveContainer" containerID="7c01cab701a41f6523c6eaebcbbfcdad454c8642cad1bb9ca7d0db276972a804"
Jan 22 07:41:39 crc kubenswrapper[4933]: E0122 07:41:39.491834 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 07:41:53 crc kubenswrapper[4933]: I0122 07:41:53.491436 4933 scope.go:117] "RemoveContainer" containerID="7c01cab701a41f6523c6eaebcbbfcdad454c8642cad1bb9ca7d0db276972a804"
Jan 22 07:41:53 crc kubenswrapper[4933]: E0122 07:41:53.492207 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 07:42:08 crc kubenswrapper[4933]: I0122 07:42:08.491322 4933 scope.go:117] "RemoveContainer" containerID="7c01cab701a41f6523c6eaebcbbfcdad454c8642cad1bb9ca7d0db276972a804"
Jan 22 07:42:08 crc kubenswrapper[4933]: E0122 07:42:08.492089 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 07:42:21 crc kubenswrapper[4933]: I0122 07:42:21.490793 4933 scope.go:117] "RemoveContainer" containerID="7c01cab701a41f6523c6eaebcbbfcdad454c8642cad1bb9ca7d0db276972a804"
Jan 22 07:42:21 crc kubenswrapper[4933]: E0122 07:42:21.491520 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 07:42:33 crc kubenswrapper[4933]: I0122 07:42:33.490991 4933 scope.go:117] "RemoveContainer" containerID="7c01cab701a41f6523c6eaebcbbfcdad454c8642cad1bb9ca7d0db276972a804"
Jan 22 07:42:33 crc kubenswrapper[4933]: E0122 07:42:33.492128 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 07:42:48 crc kubenswrapper[4933]: I0122 07:42:48.490890 4933 scope.go:117] "RemoveContainer" containerID="7c01cab701a41f6523c6eaebcbbfcdad454c8642cad1bb9ca7d0db276972a804"
Jan 22 07:42:48 crc kubenswrapper[4933]: E0122 07:42:48.491703 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 07:42:53 crc kubenswrapper[4933]: I0122 07:42:53.038925 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-create-kz4qv"]
Jan 22 07:42:53 crc kubenswrapper[4933]: I0122 07:42:53.048427 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-create-kz4qv"]
Jan 22 07:42:54 crc kubenswrapper[4933]: I0122 07:42:54.030840 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-7181-account-create-update-st567"]
Jan 22 07:42:54 crc kubenswrapper[4933]: I0122 07:42:54.039969 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-7181-account-create-update-st567"]
Jan 22 07:42:54 crc kubenswrapper[4933]: I0122 07:42:54.507016 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8a4cdab7-6e8c-4777-b870-e1cae08c72de" path="/var/lib/kubelet/pods/8a4cdab7-6e8c-4777-b870-e1cae08c72de/volumes"
Jan 22 07:42:54 crc kubenswrapper[4933]: I0122 07:42:54.507985 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ae2234cf-74af-4918-b1c3-8f646fdcc109" path="/var/lib/kubelet/pods/ae2234cf-74af-4918-b1c3-8f646fdcc109/volumes"
Jan 22 07:43:02 crc kubenswrapper[4933]: I0122 07:43:02.500212 4933 scope.go:117] "RemoveContainer" containerID="7c01cab701a41f6523c6eaebcbbfcdad454c8642cad1bb9ca7d0db276972a804"
Jan 22 07:43:02 crc kubenswrapper[4933]: E0122 07:43:02.501137 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 07:43:04 crc kubenswrapper[4933]: I0122 07:43:04.043375 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-sync-wv6b2"]
Jan 22 07:43:04 crc kubenswrapper[4933]: I0122 07:43:04.052666 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-sync-wv6b2"]
Jan 22 07:43:04 crc kubenswrapper[4933]: I0122 07:43:04.503045 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5" path="/var/lib/kubelet/pods/c0c5e8a9-ecca-4ebd-91d9-b547bbf620b5/volumes"
Jan 22 07:43:09 crc kubenswrapper[4933]: I0122 07:43:09.221236 4933 scope.go:117] "RemoveContainer" containerID="8384f88093bf041973e2da4e7bbe48d650b4cf84598e6a10423080f995e95909"
Jan 22 07:43:09 crc kubenswrapper[4933]: I0122 07:43:09.245444 4933 scope.go:117] "RemoveContainer" containerID="869188a58887e3b5bf7334224bff7628bc1e1935d3e3ee1a0503451bf3f13a96"
Jan 22 07:43:09 crc kubenswrapper[4933]: I0122 07:43:09.306506 4933 scope.go:117] "RemoveContainer" containerID="cd6acb4414ddd3c0defddf57390121c969fda38a6aa81a01024cebf1966c416d"
Jan 22 07:43:15 crc kubenswrapper[4933]: I0122 07:43:15.491913 4933 scope.go:117] "RemoveContainer" containerID="7c01cab701a41f6523c6eaebcbbfcdad454c8642cad1bb9ca7d0db276972a804"
Jan 22 07:43:15 crc kubenswrapper[4933]: I0122 07:43:15.900321 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerStarted","Data":"00cefbf7cdddf57aeeac4ad5923b7d70d1f5988480bcc20b468032624e763855"}
Jan 22 07:43:58 crc kubenswrapper[4933]: I0122 07:43:58.166854 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-znpfg"]
Jan 22 07:43:58 crc kubenswrapper[4933]: E0122 07:43:58.171111 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7896986b-1462-4b45-a6ad-3a2636e6c76c" containerName="registry-server"
Jan 22 07:43:58 crc kubenswrapper[4933]: I0122 07:43:58.171152 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="7896986b-1462-4b45-a6ad-3a2636e6c76c" containerName="registry-server"
Jan 22 07:43:58 crc kubenswrapper[4933]: E0122 07:43:58.171174 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7896986b-1462-4b45-a6ad-3a2636e6c76c" containerName="extract-utilities"
Jan 22 07:43:58 crc kubenswrapper[4933]: I0122 07:43:58.171186 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="7896986b-1462-4b45-a6ad-3a2636e6c76c" containerName="extract-utilities"
Jan 22 07:43:58 crc kubenswrapper[4933]: E0122 07:43:58.171226 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7896986b-1462-4b45-a6ad-3a2636e6c76c" containerName="extract-content"
Jan 22 07:43:58 crc kubenswrapper[4933]: I0122 07:43:58.171242 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="7896986b-1462-4b45-a6ad-3a2636e6c76c" containerName="extract-content"
Jan 22 07:43:58 crc kubenswrapper[4933]: I0122 07:43:58.171602 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="7896986b-1462-4b45-a6ad-3a2636e6c76c" containerName="registry-server"
Jan 22 07:43:58 crc kubenswrapper[4933]: I0122 07:43:58.174008 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-znpfg"
Jan 22 07:43:58 crc kubenswrapper[4933]: I0122 07:43:58.205785 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-znpfg"]
Jan 22 07:43:58 crc kubenswrapper[4933]: I0122 07:43:58.343809 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/26f13a82-5383-4c0f-9bfb-3d8281a8e959-catalog-content\") pod \"redhat-operators-znpfg\" (UID: \"26f13a82-5383-4c0f-9bfb-3d8281a8e959\") " pod="openshift-marketplace/redhat-operators-znpfg"
Jan 22 07:43:58 crc kubenswrapper[4933]: I0122 07:43:58.343973 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2nmvg\" (UniqueName: \"kubernetes.io/projected/26f13a82-5383-4c0f-9bfb-3d8281a8e959-kube-api-access-2nmvg\") pod \"redhat-operators-znpfg\" (UID: \"26f13a82-5383-4c0f-9bfb-3d8281a8e959\") " pod="openshift-marketplace/redhat-operators-znpfg"
Jan 22 07:43:58 crc kubenswrapper[4933]: I0122 07:43:58.344051 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/26f13a82-5383-4c0f-9bfb-3d8281a8e959-utilities\") pod \"redhat-operators-znpfg\" (UID: \"26f13a82-5383-4c0f-9bfb-3d8281a8e959\") " pod="openshift-marketplace/redhat-operators-znpfg"
Jan 22 07:43:58 crc kubenswrapper[4933]: I0122 07:43:58.445435 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/26f13a82-5383-4c0f-9bfb-3d8281a8e959-utilities\") pod \"redhat-operators-znpfg\" (UID: \"26f13a82-5383-4c0f-9bfb-3d8281a8e959\") " pod="openshift-marketplace/redhat-operators-znpfg"
Jan 22 07:43:58 crc kubenswrapper[4933]: I0122 07:43:58.445604 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/26f13a82-5383-4c0f-9bfb-3d8281a8e959-catalog-content\") pod \"redhat-operators-znpfg\" (UID: \"26f13a82-5383-4c0f-9bfb-3d8281a8e959\") " pod="openshift-marketplace/redhat-operators-znpfg"
Jan 22 07:43:58 crc kubenswrapper[4933]: I0122 07:43:58.445713 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2nmvg\" (UniqueName: \"kubernetes.io/projected/26f13a82-5383-4c0f-9bfb-3d8281a8e959-kube-api-access-2nmvg\") pod \"redhat-operators-znpfg\" (UID: \"26f13a82-5383-4c0f-9bfb-3d8281a8e959\") " pod="openshift-marketplace/redhat-operators-znpfg"
Jan 22 07:43:58 crc kubenswrapper[4933]: I0122 07:43:58.446137 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/26f13a82-5383-4c0f-9bfb-3d8281a8e959-utilities\") pod \"redhat-operators-znpfg\" (UID: \"26f13a82-5383-4c0f-9bfb-3d8281a8e959\") " pod="openshift-marketplace/redhat-operators-znpfg"
Jan 22 07:43:58 crc kubenswrapper[4933]: I0122 07:43:58.446137 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/26f13a82-5383-4c0f-9bfb-3d8281a8e959-catalog-content\") pod \"redhat-operators-znpfg\" (UID: \"26f13a82-5383-4c0f-9bfb-3d8281a8e959\") " pod="openshift-marketplace/redhat-operators-znpfg"
Jan 22 07:43:58 crc kubenswrapper[4933]: I0122 07:43:58.465555 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2nmvg\" (UniqueName: \"kubernetes.io/projected/26f13a82-5383-4c0f-9bfb-3d8281a8e959-kube-api-access-2nmvg\") pod \"redhat-operators-znpfg\" (UID: \"26f13a82-5383-4c0f-9bfb-3d8281a8e959\") " pod="openshift-marketplace/redhat-operators-znpfg"
Jan 22 07:43:58 crc kubenswrapper[4933]: I0122 07:43:58.521674 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-znpfg"
Jan 22 07:43:58 crc kubenswrapper[4933]: I0122 07:43:58.971593 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-znpfg"]
Jan 22 07:43:58 crc kubenswrapper[4933]: W0122 07:43:58.981191 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod26f13a82_5383_4c0f_9bfb_3d8281a8e959.slice/crio-0b54a1bf5abca9b68350794c46906aaa456af551b786b17d1c3541dcd8ceb08e WatchSource:0}: Error finding container 0b54a1bf5abca9b68350794c46906aaa456af551b786b17d1c3541dcd8ceb08e: Status 404 returned error can't find the container with id 0b54a1bf5abca9b68350794c46906aaa456af551b786b17d1c3541dcd8ceb08e
Jan 22 07:43:59 crc kubenswrapper[4933]: I0122 07:43:59.339530 4933 generic.go:334] "Generic (PLEG): container finished" podID="26f13a82-5383-4c0f-9bfb-3d8281a8e959" containerID="8eef2971858302bbad8bfa12d25def9b074f2b1a65a64bb26feca5e5bd9aefe8" exitCode=0
Jan 22 07:43:59 crc kubenswrapper[4933]: I0122 07:43:59.339617 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-znpfg" event={"ID":"26f13a82-5383-4c0f-9bfb-3d8281a8e959","Type":"ContainerDied","Data":"8eef2971858302bbad8bfa12d25def9b074f2b1a65a64bb26feca5e5bd9aefe8"}
Jan 22 07:43:59 crc kubenswrapper[4933]: I0122 07:43:59.339899 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-znpfg" event={"ID":"26f13a82-5383-4c0f-9bfb-3d8281a8e959","Type":"ContainerStarted","Data":"0b54a1bf5abca9b68350794c46906aaa456af551b786b17d1c3541dcd8ceb08e"}
Jan 22 07:43:59 crc kubenswrapper[4933]: I0122 07:43:59.342117 4933 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 22 07:44:00 crc kubenswrapper[4933]: I0122 07:44:00.353947 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-znpfg" event={"ID":"26f13a82-5383-4c0f-9bfb-3d8281a8e959","Type":"ContainerStarted","Data":"d3363ce42ccfdf769f19c678b361ce9070f2d8407ba99ea90162640739094a5c"}
Jan 22 07:44:04 crc kubenswrapper[4933]: I0122 07:44:04.408091 4933 generic.go:334] "Generic (PLEG): container finished" podID="26f13a82-5383-4c0f-9bfb-3d8281a8e959" containerID="d3363ce42ccfdf769f19c678b361ce9070f2d8407ba99ea90162640739094a5c" exitCode=0
Jan 22 07:44:04 crc kubenswrapper[4933]: I0122 07:44:04.408144 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-znpfg" event={"ID":"26f13a82-5383-4c0f-9bfb-3d8281a8e959","Type":"ContainerDied","Data":"d3363ce42ccfdf769f19c678b361ce9070f2d8407ba99ea90162640739094a5c"}
Jan 22 07:44:06 crc kubenswrapper[4933]: I0122 07:44:06.433656 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-znpfg" event={"ID":"26f13a82-5383-4c0f-9bfb-3d8281a8e959","Type":"ContainerStarted","Data":"e4714d23879ac83b109e45d2f1c36b57a6de9df1e70dd5eb10ef9c4d2dd8276f"}
Jan 22 07:44:06 crc kubenswrapper[4933]: I0122 07:44:06.467871 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-znpfg" podStartSLOduration=2.380047432 podStartE2EDuration="8.467852542s" podCreationTimestamp="2026-01-22 07:43:58 +0000 UTC" firstStartedPulling="2026-01-22 07:43:59.341858522 +0000 UTC m=+7087.178983875" lastFinishedPulling="2026-01-22 07:44:05.429663592 +0000 UTC m=+7093.266788985" observedRunningTime="2026-01-22 07:44:06.453620675 +0000 UTC m=+7094.290746038" watchObservedRunningTime="2026-01-22 07:44:06.467852542 +0000 UTC m=+7094.304977895"
Jan 22 07:44:08 crc kubenswrapper[4933]: I0122 07:44:08.522447 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-znpfg"
Jan 22 07:44:08 crc kubenswrapper[4933]: I0122 07:44:08.523203 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-znpfg"
Jan 22 07:44:09 crc kubenswrapper[4933]: I0122 07:44:09.569159 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-znpfg" podUID="26f13a82-5383-4c0f-9bfb-3d8281a8e959" containerName="registry-server" probeResult="failure" output=<
Jan 22 07:44:09 crc kubenswrapper[4933]: timeout: failed to connect service ":50051" within 1s
Jan 22 07:44:09 crc kubenswrapper[4933]: >
Jan 22 07:44:18 crc kubenswrapper[4933]: I0122 07:44:18.574605 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-znpfg"
Jan 22 07:44:18 crc kubenswrapper[4933]: I0122 07:44:18.639169 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-znpfg"
Jan 22 07:44:18 crc kubenswrapper[4933]: I0122 07:44:18.806503 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-znpfg"]
Jan 22 07:44:20 crc kubenswrapper[4933]: I0122 07:44:20.564178 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-znpfg" podUID="26f13a82-5383-4c0f-9bfb-3d8281a8e959" containerName="registry-server" containerID="cri-o://e4714d23879ac83b109e45d2f1c36b57a6de9df1e70dd5eb10ef9c4d2dd8276f" gracePeriod=2
Jan 22 07:44:21 crc kubenswrapper[4933]: I0122 07:44:21.070874 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-znpfg"
Jan 22 07:44:21 crc kubenswrapper[4933]: I0122 07:44:21.195235 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/26f13a82-5383-4c0f-9bfb-3d8281a8e959-utilities\") pod \"26f13a82-5383-4c0f-9bfb-3d8281a8e959\" (UID: \"26f13a82-5383-4c0f-9bfb-3d8281a8e959\") "
Jan 22 07:44:21 crc kubenswrapper[4933]: I0122 07:44:21.195746 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2nmvg\" (UniqueName: \"kubernetes.io/projected/26f13a82-5383-4c0f-9bfb-3d8281a8e959-kube-api-access-2nmvg\") pod \"26f13a82-5383-4c0f-9bfb-3d8281a8e959\" (UID: \"26f13a82-5383-4c0f-9bfb-3d8281a8e959\") "
Jan 22 07:44:21 crc kubenswrapper[4933]: I0122 07:44:21.195910 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/26f13a82-5383-4c0f-9bfb-3d8281a8e959-catalog-content\") pod \"26f13a82-5383-4c0f-9bfb-3d8281a8e959\" (UID: \"26f13a82-5383-4c0f-9bfb-3d8281a8e959\") "
Jan 22 07:44:21 crc kubenswrapper[4933]: I0122 07:44:21.196154 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/26f13a82-5383-4c0f-9bfb-3d8281a8e959-utilities" (OuterVolumeSpecName: "utilities") pod "26f13a82-5383-4c0f-9bfb-3d8281a8e959" (UID: "26f13a82-5383-4c0f-9bfb-3d8281a8e959"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 07:44:21 crc kubenswrapper[4933]: I0122 07:44:21.196829 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/26f13a82-5383-4c0f-9bfb-3d8281a8e959-utilities\") on node \"crc\" DevicePath \"\""
Jan 22 07:44:21 crc kubenswrapper[4933]: I0122 07:44:21.203555 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/26f13a82-5383-4c0f-9bfb-3d8281a8e959-kube-api-access-2nmvg" (OuterVolumeSpecName: "kube-api-access-2nmvg") pod "26f13a82-5383-4c0f-9bfb-3d8281a8e959" (UID: "26f13a82-5383-4c0f-9bfb-3d8281a8e959"). InnerVolumeSpecName "kube-api-access-2nmvg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 07:44:21 crc kubenswrapper[4933]: I0122 07:44:21.298548 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2nmvg\" (UniqueName: \"kubernetes.io/projected/26f13a82-5383-4c0f-9bfb-3d8281a8e959-kube-api-access-2nmvg\") on node \"crc\" DevicePath \"\""
Jan 22 07:44:21 crc kubenswrapper[4933]: I0122 07:44:21.343372 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/26f13a82-5383-4c0f-9bfb-3d8281a8e959-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "26f13a82-5383-4c0f-9bfb-3d8281a8e959" (UID: "26f13a82-5383-4c0f-9bfb-3d8281a8e959"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 07:44:21 crc kubenswrapper[4933]: I0122 07:44:21.401284 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/26f13a82-5383-4c0f-9bfb-3d8281a8e959-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 22 07:44:21 crc kubenswrapper[4933]: I0122 07:44:21.577131 4933 generic.go:334] "Generic (PLEG): container finished" podID="26f13a82-5383-4c0f-9bfb-3d8281a8e959" containerID="e4714d23879ac83b109e45d2f1c36b57a6de9df1e70dd5eb10ef9c4d2dd8276f" exitCode=0
Jan 22 07:44:21 crc kubenswrapper[4933]: I0122 07:44:21.577183 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-znpfg" event={"ID":"26f13a82-5383-4c0f-9bfb-3d8281a8e959","Type":"ContainerDied","Data":"e4714d23879ac83b109e45d2f1c36b57a6de9df1e70dd5eb10ef9c4d2dd8276f"}
Jan 22 07:44:21 crc kubenswrapper[4933]: I0122 07:44:21.577212 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-znpfg"
Jan 22 07:44:21 crc kubenswrapper[4933]: I0122 07:44:21.577248 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-znpfg" event={"ID":"26f13a82-5383-4c0f-9bfb-3d8281a8e959","Type":"ContainerDied","Data":"0b54a1bf5abca9b68350794c46906aaa456af551b786b17d1c3541dcd8ceb08e"}
Jan 22 07:44:21 crc kubenswrapper[4933]: I0122 07:44:21.577279 4933 scope.go:117] "RemoveContainer" containerID="e4714d23879ac83b109e45d2f1c36b57a6de9df1e70dd5eb10ef9c4d2dd8276f"
Jan 22 07:44:21 crc kubenswrapper[4933]: I0122 07:44:21.610694 4933 scope.go:117] "RemoveContainer" containerID="d3363ce42ccfdf769f19c678b361ce9070f2d8407ba99ea90162640739094a5c"
Jan 22 07:44:21 crc kubenswrapper[4933]: I0122 07:44:21.627339 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-znpfg"]
Jan 22 07:44:21 crc kubenswrapper[4933]: I0122 07:44:21.641882 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-znpfg"]
Jan 22 07:44:21 crc kubenswrapper[4933]: I0122 07:44:21.649660 4933 scope.go:117] "RemoveContainer" containerID="8eef2971858302bbad8bfa12d25def9b074f2b1a65a64bb26feca5e5bd9aefe8"
Jan 22 07:44:21 crc kubenswrapper[4933]: I0122 07:44:21.693259 4933 scope.go:117] "RemoveContainer" containerID="e4714d23879ac83b109e45d2f1c36b57a6de9df1e70dd5eb10ef9c4d2dd8276f"
Jan 22 07:44:21 crc kubenswrapper[4933]: E0122 07:44:21.693733 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e4714d23879ac83b109e45d2f1c36b57a6de9df1e70dd5eb10ef9c4d2dd8276f\": container with ID starting with e4714d23879ac83b109e45d2f1c36b57a6de9df1e70dd5eb10ef9c4d2dd8276f not found: ID does not exist" containerID="e4714d23879ac83b109e45d2f1c36b57a6de9df1e70dd5eb10ef9c4d2dd8276f"
Jan 22 07:44:21 crc kubenswrapper[4933]: I0122 07:44:21.693778 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e4714d23879ac83b109e45d2f1c36b57a6de9df1e70dd5eb10ef9c4d2dd8276f"} err="failed to get container status \"e4714d23879ac83b109e45d2f1c36b57a6de9df1e70dd5eb10ef9c4d2dd8276f\": rpc error: code = NotFound desc = could not find container \"e4714d23879ac83b109e45d2f1c36b57a6de9df1e70dd5eb10ef9c4d2dd8276f\": container with ID starting with e4714d23879ac83b109e45d2f1c36b57a6de9df1e70dd5eb10ef9c4d2dd8276f not found: ID does not exist"
kubenswrapper[4933]: I0122 07:44:21.693798 4933 scope.go:117] "RemoveContainer" containerID="d3363ce42ccfdf769f19c678b361ce9070f2d8407ba99ea90162640739094a5c" Jan 22 07:44:21 crc kubenswrapper[4933]: E0122 07:44:21.694254 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d3363ce42ccfdf769f19c678b361ce9070f2d8407ba99ea90162640739094a5c\": container with ID starting with d3363ce42ccfdf769f19c678b361ce9070f2d8407ba99ea90162640739094a5c not found: ID does not exist" containerID="d3363ce42ccfdf769f19c678b361ce9070f2d8407ba99ea90162640739094a5c" Jan 22 07:44:21 crc kubenswrapper[4933]: I0122 07:44:21.694310 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d3363ce42ccfdf769f19c678b361ce9070f2d8407ba99ea90162640739094a5c"} err="failed to get container status \"d3363ce42ccfdf769f19c678b361ce9070f2d8407ba99ea90162640739094a5c\": rpc error: code = NotFound desc = could not find container \"d3363ce42ccfdf769f19c678b361ce9070f2d8407ba99ea90162640739094a5c\": container with ID starting with d3363ce42ccfdf769f19c678b361ce9070f2d8407ba99ea90162640739094a5c not found: ID does not exist" Jan 22 07:44:21 crc kubenswrapper[4933]: I0122 07:44:21.694360 4933 scope.go:117] "RemoveContainer" containerID="8eef2971858302bbad8bfa12d25def9b074f2b1a65a64bb26feca5e5bd9aefe8" Jan 22 07:44:21 crc kubenswrapper[4933]: E0122 07:44:21.694709 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8eef2971858302bbad8bfa12d25def9b074f2b1a65a64bb26feca5e5bd9aefe8\": container with ID starting with 8eef2971858302bbad8bfa12d25def9b074f2b1a65a64bb26feca5e5bd9aefe8 not found: ID does not exist" containerID="8eef2971858302bbad8bfa12d25def9b074f2b1a65a64bb26feca5e5bd9aefe8" Jan 22 07:44:21 crc kubenswrapper[4933]: I0122 07:44:21.694767 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8eef2971858302bbad8bfa12d25def9b074f2b1a65a64bb26feca5e5bd9aefe8"} err="failed to get container status \"8eef2971858302bbad8bfa12d25def9b074f2b1a65a64bb26feca5e5bd9aefe8\": rpc error: code = NotFound desc = could not find container \"8eef2971858302bbad8bfa12d25def9b074f2b1a65a64bb26feca5e5bd9aefe8\": container with ID starting with 8eef2971858302bbad8bfa12d25def9b074f2b1a65a64bb26feca5e5bd9aefe8 not found: ID does not exist" Jan 22 07:44:22 crc kubenswrapper[4933]: I0122 07:44:22.503783 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="26f13a82-5383-4c0f-9bfb-3d8281a8e959" path="/var/lib/kubelet/pods/26f13a82-5383-4c0f-9bfb-3d8281a8e959/volumes" Jan 22 07:45:00 crc kubenswrapper[4933]: I0122 07:45:00.146157 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484465-mhcpp"] Jan 22 07:45:00 crc kubenswrapper[4933]: E0122 07:45:00.148857 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26f13a82-5383-4c0f-9bfb-3d8281a8e959" containerName="extract-utilities" Jan 22 07:45:00 crc kubenswrapper[4933]: I0122 07:45:00.148942 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="26f13a82-5383-4c0f-9bfb-3d8281a8e959" containerName="extract-utilities" Jan 22 07:45:00 crc kubenswrapper[4933]: E0122 07:45:00.149110 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26f13a82-5383-4c0f-9bfb-3d8281a8e959" containerName="extract-content" Jan 22 07:45:00 crc kubenswrapper[4933]: I0122 
07:45:00.149207 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="26f13a82-5383-4c0f-9bfb-3d8281a8e959" containerName="extract-content" Jan 22 07:45:00 crc kubenswrapper[4933]: E0122 07:45:00.149369 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26f13a82-5383-4c0f-9bfb-3d8281a8e959" containerName="registry-server" Jan 22 07:45:00 crc kubenswrapper[4933]: I0122 07:45:00.149455 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="26f13a82-5383-4c0f-9bfb-3d8281a8e959" containerName="registry-server" Jan 22 07:45:00 crc kubenswrapper[4933]: I0122 07:45:00.149791 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="26f13a82-5383-4c0f-9bfb-3d8281a8e959" containerName="registry-server" Jan 22 07:45:00 crc kubenswrapper[4933]: I0122 07:45:00.152245 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484465-mhcpp" Jan 22 07:45:00 crc kubenswrapper[4933]: I0122 07:45:00.154821 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 22 07:45:00 crc kubenswrapper[4933]: I0122 07:45:00.155932 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 22 07:45:00 crc kubenswrapper[4933]: I0122 07:45:00.169837 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484465-mhcpp"] Jan 22 07:45:00 crc kubenswrapper[4933]: I0122 07:45:00.185148 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4a655516-5e44-40ee-92f7-129dcfb09834-secret-volume\") pod \"collect-profiles-29484465-mhcpp\" (UID: \"4a655516-5e44-40ee-92f7-129dcfb09834\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484465-mhcpp" Jan 22 07:45:00 crc kubenswrapper[4933]: I0122 07:45:00.185574 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zhpvh\" (UniqueName: \"kubernetes.io/projected/4a655516-5e44-40ee-92f7-129dcfb09834-kube-api-access-zhpvh\") pod \"collect-profiles-29484465-mhcpp\" (UID: \"4a655516-5e44-40ee-92f7-129dcfb09834\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484465-mhcpp" Jan 22 07:45:00 crc kubenswrapper[4933]: I0122 07:45:00.185729 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4a655516-5e44-40ee-92f7-129dcfb09834-config-volume\") pod \"collect-profiles-29484465-mhcpp\" (UID: \"4a655516-5e44-40ee-92f7-129dcfb09834\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484465-mhcpp" Jan 22 07:45:00 crc kubenswrapper[4933]: I0122 07:45:00.287846 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zhpvh\" (UniqueName: \"kubernetes.io/projected/4a655516-5e44-40ee-92f7-129dcfb09834-kube-api-access-zhpvh\") pod \"collect-profiles-29484465-mhcpp\" (UID: \"4a655516-5e44-40ee-92f7-129dcfb09834\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484465-mhcpp" Jan 22 07:45:00 crc kubenswrapper[4933]: I0122 07:45:00.287939 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: 
\"kubernetes.io/configmap/4a655516-5e44-40ee-92f7-129dcfb09834-config-volume\") pod \"collect-profiles-29484465-mhcpp\" (UID: \"4a655516-5e44-40ee-92f7-129dcfb09834\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484465-mhcpp" Jan 22 07:45:00 crc kubenswrapper[4933]: I0122 07:45:00.288040 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4a655516-5e44-40ee-92f7-129dcfb09834-secret-volume\") pod \"collect-profiles-29484465-mhcpp\" (UID: \"4a655516-5e44-40ee-92f7-129dcfb09834\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484465-mhcpp" Jan 22 07:45:00 crc kubenswrapper[4933]: I0122 07:45:00.288967 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4a655516-5e44-40ee-92f7-129dcfb09834-config-volume\") pod \"collect-profiles-29484465-mhcpp\" (UID: \"4a655516-5e44-40ee-92f7-129dcfb09834\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484465-mhcpp" Jan 22 07:45:00 crc kubenswrapper[4933]: I0122 07:45:00.307189 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4a655516-5e44-40ee-92f7-129dcfb09834-secret-volume\") pod \"collect-profiles-29484465-mhcpp\" (UID: \"4a655516-5e44-40ee-92f7-129dcfb09834\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484465-mhcpp" Jan 22 07:45:00 crc kubenswrapper[4933]: I0122 07:45:00.308055 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zhpvh\" (UniqueName: \"kubernetes.io/projected/4a655516-5e44-40ee-92f7-129dcfb09834-kube-api-access-zhpvh\") pod \"collect-profiles-29484465-mhcpp\" (UID: \"4a655516-5e44-40ee-92f7-129dcfb09834\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484465-mhcpp" Jan 22 07:45:00 crc kubenswrapper[4933]: I0122 07:45:00.481819 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484465-mhcpp" Jan 22 07:45:00 crc kubenswrapper[4933]: I0122 07:45:00.973859 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484465-mhcpp"] Jan 22 07:45:00 crc kubenswrapper[4933]: I0122 07:45:00.994630 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484465-mhcpp" event={"ID":"4a655516-5e44-40ee-92f7-129dcfb09834","Type":"ContainerStarted","Data":"412f18d7212772f479dce88c72222695cd986410b895ad5c30284338944e6354"} Jan 22 07:45:02 crc kubenswrapper[4933]: I0122 07:45:02.006754 4933 generic.go:334] "Generic (PLEG): container finished" podID="4a655516-5e44-40ee-92f7-129dcfb09834" containerID="ad8a2b6208aa16a08cef5bd57199b5726c6c45b84b3c389519a0c572a985fc87" exitCode=0 Jan 22 07:45:02 crc kubenswrapper[4933]: I0122 07:45:02.006893 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484465-mhcpp" event={"ID":"4a655516-5e44-40ee-92f7-129dcfb09834","Type":"ContainerDied","Data":"ad8a2b6208aa16a08cef5bd57199b5726c6c45b84b3c389519a0c572a985fc87"} Jan 22 07:45:03 crc kubenswrapper[4933]: I0122 07:45:03.379388 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484465-mhcpp" Jan 22 07:45:03 crc kubenswrapper[4933]: I0122 07:45:03.464250 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zhpvh\" (UniqueName: \"kubernetes.io/projected/4a655516-5e44-40ee-92f7-129dcfb09834-kube-api-access-zhpvh\") pod \"4a655516-5e44-40ee-92f7-129dcfb09834\" (UID: \"4a655516-5e44-40ee-92f7-129dcfb09834\") " Jan 22 07:45:03 crc kubenswrapper[4933]: I0122 07:45:03.464690 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4a655516-5e44-40ee-92f7-129dcfb09834-config-volume\") pod \"4a655516-5e44-40ee-92f7-129dcfb09834\" (UID: \"4a655516-5e44-40ee-92f7-129dcfb09834\") " Jan 22 07:45:03 crc kubenswrapper[4933]: I0122 07:45:03.464899 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4a655516-5e44-40ee-92f7-129dcfb09834-secret-volume\") pod \"4a655516-5e44-40ee-92f7-129dcfb09834\" (UID: \"4a655516-5e44-40ee-92f7-129dcfb09834\") " Jan 22 07:45:03 crc kubenswrapper[4933]: I0122 07:45:03.465215 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4a655516-5e44-40ee-92f7-129dcfb09834-config-volume" (OuterVolumeSpecName: "config-volume") pod "4a655516-5e44-40ee-92f7-129dcfb09834" (UID: "4a655516-5e44-40ee-92f7-129dcfb09834"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:45:03 crc kubenswrapper[4933]: I0122 07:45:03.465644 4933 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4a655516-5e44-40ee-92f7-129dcfb09834-config-volume\") on node \"crc\" DevicePath \"\"" Jan 22 07:45:03 crc kubenswrapper[4933]: I0122 07:45:03.470192 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4a655516-5e44-40ee-92f7-129dcfb09834-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "4a655516-5e44-40ee-92f7-129dcfb09834" (UID: "4a655516-5e44-40ee-92f7-129dcfb09834"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:45:03 crc kubenswrapper[4933]: I0122 07:45:03.470672 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4a655516-5e44-40ee-92f7-129dcfb09834-kube-api-access-zhpvh" (OuterVolumeSpecName: "kube-api-access-zhpvh") pod "4a655516-5e44-40ee-92f7-129dcfb09834" (UID: "4a655516-5e44-40ee-92f7-129dcfb09834"). InnerVolumeSpecName "kube-api-access-zhpvh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:45:03 crc kubenswrapper[4933]: I0122 07:45:03.568660 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zhpvh\" (UniqueName: \"kubernetes.io/projected/4a655516-5e44-40ee-92f7-129dcfb09834-kube-api-access-zhpvh\") on node \"crc\" DevicePath \"\"" Jan 22 07:45:03 crc kubenswrapper[4933]: I0122 07:45:03.568914 4933 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4a655516-5e44-40ee-92f7-129dcfb09834-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 22 07:45:04 crc kubenswrapper[4933]: I0122 07:45:04.025776 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484465-mhcpp" event={"ID":"4a655516-5e44-40ee-92f7-129dcfb09834","Type":"ContainerDied","Data":"412f18d7212772f479dce88c72222695cd986410b895ad5c30284338944e6354"} Jan 22 07:45:04 crc kubenswrapper[4933]: I0122 07:45:04.026101 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="412f18d7212772f479dce88c72222695cd986410b895ad5c30284338944e6354" Jan 22 07:45:04 crc kubenswrapper[4933]: I0122 07:45:04.025844 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484465-mhcpp" Jan 22 07:45:04 crc kubenswrapper[4933]: E0122 07:45:04.256794 4933 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4a655516_5e44_40ee_92f7_129dcfb09834.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4a655516_5e44_40ee_92f7_129dcfb09834.slice/crio-412f18d7212772f479dce88c72222695cd986410b895ad5c30284338944e6354\": RecentStats: unable to find data in memory cache]" Jan 22 07:45:04 crc kubenswrapper[4933]: I0122 07:45:04.466025 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484420-7l26m"] Jan 22 07:45:04 crc kubenswrapper[4933]: I0122 07:45:04.477944 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484420-7l26m"] Jan 22 07:45:04 crc kubenswrapper[4933]: I0122 07:45:04.504958 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="91ecac75-7375-46f4-ab03-d11965c60ca7" path="/var/lib/kubelet/pods/91ecac75-7375-46f4-ab03-d11965c60ca7/volumes" Jan 22 07:45:09 crc kubenswrapper[4933]: I0122 07:45:09.459304 4933 scope.go:117] "RemoveContainer" containerID="383a573a21c2b648d11ce18eff2704d0db9cffb9be1e9d1d6125ea59d687e759" Jan 22 07:45:40 crc kubenswrapper[4933]: I0122 07:45:40.943191 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 07:45:40 crc kubenswrapper[4933]: I0122 07:45:40.943849 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 07:46:03 crc 
Jan 22 07:46:03 crc kubenswrapper[4933]: I0122 07:46:03.554280 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-mlrmq" event={"ID":"c722274d-78ae-420e-8487-f52eac7984d7","Type":"ContainerDied","Data":"a115b66ae22ad5334408def3487df63503be5826a15a2b37c93c3ace15ec2783"}
Jan 22 07:46:04 crc kubenswrapper[4933]: I0122 07:46:04.965472 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-mlrmq"
Jan 22 07:46:05 crc kubenswrapper[4933]: I0122 07:46:05.099147 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c722274d-78ae-420e-8487-f52eac7984d7-tripleo-cleanup-combined-ca-bundle\") pod \"c722274d-78ae-420e-8487-f52eac7984d7\" (UID: \"c722274d-78ae-420e-8487-f52eac7984d7\") "
Jan 22 07:46:05 crc kubenswrapper[4933]: I0122 07:46:05.099309 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mc45l\" (UniqueName: \"kubernetes.io/projected/c722274d-78ae-420e-8487-f52eac7984d7-kube-api-access-mc45l\") pod \"c722274d-78ae-420e-8487-f52eac7984d7\" (UID: \"c722274d-78ae-420e-8487-f52eac7984d7\") "
Jan 22 07:46:05 crc kubenswrapper[4933]: I0122 07:46:05.099519 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c722274d-78ae-420e-8487-f52eac7984d7-inventory\") pod \"c722274d-78ae-420e-8487-f52eac7984d7\" (UID: \"c722274d-78ae-420e-8487-f52eac7984d7\") "
Jan 22 07:46:05 crc kubenswrapper[4933]: I0122 07:46:05.099586 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/c722274d-78ae-420e-8487-f52eac7984d7-ssh-key-openstack-cell1\") pod \"c722274d-78ae-420e-8487-f52eac7984d7\" (UID: \"c722274d-78ae-420e-8487-f52eac7984d7\") "
Jan 22 07:46:05 crc kubenswrapper[4933]: I0122 07:46:05.106377 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c722274d-78ae-420e-8487-f52eac7984d7-tripleo-cleanup-combined-ca-bundle" (OuterVolumeSpecName: "tripleo-cleanup-combined-ca-bundle") pod "c722274d-78ae-420e-8487-f52eac7984d7" (UID: "c722274d-78ae-420e-8487-f52eac7984d7"). InnerVolumeSpecName "tripleo-cleanup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 07:46:05 crc kubenswrapper[4933]: I0122 07:46:05.106861 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c722274d-78ae-420e-8487-f52eac7984d7-kube-api-access-mc45l" (OuterVolumeSpecName: "kube-api-access-mc45l") pod "c722274d-78ae-420e-8487-f52eac7984d7" (UID: "c722274d-78ae-420e-8487-f52eac7984d7"). InnerVolumeSpecName "kube-api-access-mc45l". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 07:46:05 crc kubenswrapper[4933]: I0122 07:46:05.131598 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c722274d-78ae-420e-8487-f52eac7984d7-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "c722274d-78ae-420e-8487-f52eac7984d7" (UID: "c722274d-78ae-420e-8487-f52eac7984d7"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 07:46:05 crc kubenswrapper[4933]: I0122 07:46:05.139333 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c722274d-78ae-420e-8487-f52eac7984d7-inventory" (OuterVolumeSpecName: "inventory") pod "c722274d-78ae-420e-8487-f52eac7984d7" (UID: "c722274d-78ae-420e-8487-f52eac7984d7"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 07:46:05 crc kubenswrapper[4933]: I0122 07:46:05.202021 4933 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c722274d-78ae-420e-8487-f52eac7984d7-inventory\") on node \"crc\" DevicePath \"\""
Jan 22 07:46:05 crc kubenswrapper[4933]: I0122 07:46:05.202051 4933 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/c722274d-78ae-420e-8487-f52eac7984d7-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\""
Jan 22 07:46:05 crc kubenswrapper[4933]: I0122 07:46:05.202062 4933 reconciler_common.go:293] "Volume detached for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c722274d-78ae-420e-8487-f52eac7984d7-tripleo-cleanup-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 22 07:46:05 crc kubenswrapper[4933]: I0122 07:46:05.202070 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mc45l\" (UniqueName: \"kubernetes.io/projected/c722274d-78ae-420e-8487-f52eac7984d7-kube-api-access-mc45l\") on node \"crc\" DevicePath \"\""
Jan 22 07:46:05 crc kubenswrapper[4933]: I0122 07:46:05.581397 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-mlrmq" event={"ID":"c722274d-78ae-420e-8487-f52eac7984d7","Type":"ContainerDied","Data":"9a3f741fdf01b12465a75d10e59014f169788d14ee72c41eee7ea87ef3e27844"}
Jan 22 07:46:05 crc kubenswrapper[4933]: I0122 07:46:05.581457 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9a3f741fdf01b12465a75d10e59014f169788d14ee72c41eee7ea87ef3e27844"
Jan 22 07:46:05 crc kubenswrapper[4933]: I0122 07:46:05.581536 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-mlrmq"
Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-mlrmq" Jan 22 07:46:10 crc kubenswrapper[4933]: I0122 07:46:10.271598 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-openstack-openstack-cell1-mwncg"] Jan 22 07:46:10 crc kubenswrapper[4933]: E0122 07:46:10.272582 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c722274d-78ae-420e-8487-f52eac7984d7" containerName="tripleo-cleanup-tripleo-cleanup-openstack-cell1" Jan 22 07:46:10 crc kubenswrapper[4933]: I0122 07:46:10.272599 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="c722274d-78ae-420e-8487-f52eac7984d7" containerName="tripleo-cleanup-tripleo-cleanup-openstack-cell1" Jan 22 07:46:10 crc kubenswrapper[4933]: E0122 07:46:10.272629 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a655516-5e44-40ee-92f7-129dcfb09834" containerName="collect-profiles" Jan 22 07:46:10 crc kubenswrapper[4933]: I0122 07:46:10.272637 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a655516-5e44-40ee-92f7-129dcfb09834" containerName="collect-profiles" Jan 22 07:46:10 crc kubenswrapper[4933]: I0122 07:46:10.272898 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a655516-5e44-40ee-92f7-129dcfb09834" containerName="collect-profiles" Jan 22 07:46:10 crc kubenswrapper[4933]: I0122 07:46:10.272916 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="c722274d-78ae-420e-8487-f52eac7984d7" containerName="tripleo-cleanup-tripleo-cleanup-openstack-cell1" Jan 22 07:46:10 crc kubenswrapper[4933]: I0122 07:46:10.273891 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-openstack-openstack-cell1-mwncg" Jan 22 07:46:10 crc kubenswrapper[4933]: I0122 07:46:10.278411 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-d9x6d" Jan 22 07:46:10 crc kubenswrapper[4933]: I0122 07:46:10.278655 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 22 07:46:10 crc kubenswrapper[4933]: I0122 07:46:10.278812 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 22 07:46:10 crc kubenswrapper[4933]: I0122 07:46:10.282699 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 07:46:10 crc kubenswrapper[4933]: I0122 07:46:10.291703 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-openstack-openstack-cell1-mwncg"] Jan 22 07:46:10 crc kubenswrapper[4933]: I0122 07:46:10.438186 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/62f31314-79cb-48c7-a585-9fc2096932e3-ssh-key-openstack-cell1\") pod \"bootstrap-openstack-openstack-cell1-mwncg\" (UID: \"62f31314-79cb-48c7-a585-9fc2096932e3\") " pod="openstack/bootstrap-openstack-openstack-cell1-mwncg" Jan 22 07:46:10 crc kubenswrapper[4933]: I0122 07:46:10.438251 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62f31314-79cb-48c7-a585-9fc2096932e3-bootstrap-combined-ca-bundle\") pod \"bootstrap-openstack-openstack-cell1-mwncg\" (UID: \"62f31314-79cb-48c7-a585-9fc2096932e3\") " pod="openstack/bootstrap-openstack-openstack-cell1-mwncg" Jan 22 
07:46:10 crc kubenswrapper[4933]: I0122 07:46:10.438339 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/62f31314-79cb-48c7-a585-9fc2096932e3-inventory\") pod \"bootstrap-openstack-openstack-cell1-mwncg\" (UID: \"62f31314-79cb-48c7-a585-9fc2096932e3\") " pod="openstack/bootstrap-openstack-openstack-cell1-mwncg" Jan 22 07:46:10 crc kubenswrapper[4933]: I0122 07:46:10.438457 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wwptz\" (UniqueName: \"kubernetes.io/projected/62f31314-79cb-48c7-a585-9fc2096932e3-kube-api-access-wwptz\") pod \"bootstrap-openstack-openstack-cell1-mwncg\" (UID: \"62f31314-79cb-48c7-a585-9fc2096932e3\") " pod="openstack/bootstrap-openstack-openstack-cell1-mwncg" Jan 22 07:46:10 crc kubenswrapper[4933]: I0122 07:46:10.540990 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/62f31314-79cb-48c7-a585-9fc2096932e3-ssh-key-openstack-cell1\") pod \"bootstrap-openstack-openstack-cell1-mwncg\" (UID: \"62f31314-79cb-48c7-a585-9fc2096932e3\") " pod="openstack/bootstrap-openstack-openstack-cell1-mwncg" Jan 22 07:46:10 crc kubenswrapper[4933]: I0122 07:46:10.541040 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62f31314-79cb-48c7-a585-9fc2096932e3-bootstrap-combined-ca-bundle\") pod \"bootstrap-openstack-openstack-cell1-mwncg\" (UID: \"62f31314-79cb-48c7-a585-9fc2096932e3\") " pod="openstack/bootstrap-openstack-openstack-cell1-mwncg" Jan 22 07:46:10 crc kubenswrapper[4933]: I0122 07:46:10.541120 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/62f31314-79cb-48c7-a585-9fc2096932e3-inventory\") pod \"bootstrap-openstack-openstack-cell1-mwncg\" (UID: \"62f31314-79cb-48c7-a585-9fc2096932e3\") " pod="openstack/bootstrap-openstack-openstack-cell1-mwncg" Jan 22 07:46:10 crc kubenswrapper[4933]: I0122 07:46:10.541190 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wwptz\" (UniqueName: \"kubernetes.io/projected/62f31314-79cb-48c7-a585-9fc2096932e3-kube-api-access-wwptz\") pod \"bootstrap-openstack-openstack-cell1-mwncg\" (UID: \"62f31314-79cb-48c7-a585-9fc2096932e3\") " pod="openstack/bootstrap-openstack-openstack-cell1-mwncg" Jan 22 07:46:10 crc kubenswrapper[4933]: I0122 07:46:10.548943 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/62f31314-79cb-48c7-a585-9fc2096932e3-inventory\") pod \"bootstrap-openstack-openstack-cell1-mwncg\" (UID: \"62f31314-79cb-48c7-a585-9fc2096932e3\") " pod="openstack/bootstrap-openstack-openstack-cell1-mwncg" Jan 22 07:46:10 crc kubenswrapper[4933]: I0122 07:46:10.548943 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/62f31314-79cb-48c7-a585-9fc2096932e3-ssh-key-openstack-cell1\") pod \"bootstrap-openstack-openstack-cell1-mwncg\" (UID: \"62f31314-79cb-48c7-a585-9fc2096932e3\") " pod="openstack/bootstrap-openstack-openstack-cell1-mwncg" Jan 22 07:46:10 crc kubenswrapper[4933]: I0122 07:46:10.551888 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/62f31314-79cb-48c7-a585-9fc2096932e3-bootstrap-combined-ca-bundle\") pod \"bootstrap-openstack-openstack-cell1-mwncg\" (UID: \"62f31314-79cb-48c7-a585-9fc2096932e3\") " pod="openstack/bootstrap-openstack-openstack-cell1-mwncg" Jan 22 07:46:10 crc kubenswrapper[4933]: I0122 07:46:10.561782 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wwptz\" (UniqueName: \"kubernetes.io/projected/62f31314-79cb-48c7-a585-9fc2096932e3-kube-api-access-wwptz\") pod \"bootstrap-openstack-openstack-cell1-mwncg\" (UID: \"62f31314-79cb-48c7-a585-9fc2096932e3\") " pod="openstack/bootstrap-openstack-openstack-cell1-mwncg" Jan 22 07:46:10 crc kubenswrapper[4933]: I0122 07:46:10.594494 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-openstack-openstack-cell1-mwncg" Jan 22 07:46:10 crc kubenswrapper[4933]: I0122 07:46:10.942788 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 07:46:10 crc kubenswrapper[4933]: I0122 07:46:10.943136 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 07:46:11 crc kubenswrapper[4933]: I0122 07:46:11.316149 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-openstack-openstack-cell1-mwncg"] Jan 22 07:46:11 crc kubenswrapper[4933]: I0122 07:46:11.629757 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-cell1-mwncg" event={"ID":"62f31314-79cb-48c7-a585-9fc2096932e3","Type":"ContainerStarted","Data":"6c4c7fc4b7b2b8149760c8a5b49d895660d8185d601cf0e4618326a5699d78af"} Jan 22 07:46:12 crc kubenswrapper[4933]: I0122 07:46:12.640462 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-cell1-mwncg" event={"ID":"62f31314-79cb-48c7-a585-9fc2096932e3","Type":"ContainerStarted","Data":"4cad9c2ad89397433f11dcc9321d34cb55525d6bdc8e0f521c10f4cc4ba08ed1"} Jan 22 07:46:12 crc kubenswrapper[4933]: I0122 07:46:12.736345 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-openstack-openstack-cell1-mwncg" podStartSLOduration=2.24658859 podStartE2EDuration="2.736317404s" podCreationTimestamp="2026-01-22 07:46:10 +0000 UTC" firstStartedPulling="2026-01-22 07:46:11.311643066 +0000 UTC m=+7219.148768419" lastFinishedPulling="2026-01-22 07:46:11.80137188 +0000 UTC m=+7219.638497233" observedRunningTime="2026-01-22 07:46:12.726875313 +0000 UTC m=+7220.564000666" watchObservedRunningTime="2026-01-22 07:46:12.736317404 +0000 UTC m=+7220.573442767" Jan 22 07:46:40 crc kubenswrapper[4933]: I0122 07:46:40.943288 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 07:46:40 crc kubenswrapper[4933]: I0122 07:46:40.943809 4933 prober.go:107] "Probe failed" 
probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 07:46:40 crc kubenswrapper[4933]: I0122 07:46:40.943868 4933 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" Jan 22 07:46:40 crc kubenswrapper[4933]: I0122 07:46:40.944817 4933 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"00cefbf7cdddf57aeeac4ad5923b7d70d1f5988480bcc20b468032624e763855"} pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 07:46:40 crc kubenswrapper[4933]: I0122 07:46:40.944890 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" containerID="cri-o://00cefbf7cdddf57aeeac4ad5923b7d70d1f5988480bcc20b468032624e763855" gracePeriod=600 Jan 22 07:46:41 crc kubenswrapper[4933]: I0122 07:46:41.920276 4933 generic.go:334] "Generic (PLEG): container finished" podID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerID="00cefbf7cdddf57aeeac4ad5923b7d70d1f5988480bcc20b468032624e763855" exitCode=0 Jan 22 07:46:41 crc kubenswrapper[4933]: I0122 07:46:41.920357 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerDied","Data":"00cefbf7cdddf57aeeac4ad5923b7d70d1f5988480bcc20b468032624e763855"} Jan 22 07:46:41 crc kubenswrapper[4933]: I0122 07:46:41.920887 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerStarted","Data":"67ad87c27db02a9e178a1bcee7c5ada3568216417f71e7c98863e733198e7ab4"} Jan 22 07:46:41 crc kubenswrapper[4933]: I0122 07:46:41.920937 4933 scope.go:117] "RemoveContainer" containerID="7c01cab701a41f6523c6eaebcbbfcdad454c8642cad1bb9ca7d0db276972a804" Jan 22 07:49:09 crc kubenswrapper[4933]: I0122 07:49:09.683186 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-4rhwq"] Jan 22 07:49:09 crc kubenswrapper[4933]: I0122 07:49:09.685907 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-4rhwq" Jan 22 07:49:09 crc kubenswrapper[4933]: I0122 07:49:09.697031 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4rhwq"] Jan 22 07:49:09 crc kubenswrapper[4933]: I0122 07:49:09.763565 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2cb5c247-c0c7-4974-9ec2-90bb418113a0-catalog-content\") pod \"community-operators-4rhwq\" (UID: \"2cb5c247-c0c7-4974-9ec2-90bb418113a0\") " pod="openshift-marketplace/community-operators-4rhwq" Jan 22 07:49:09 crc kubenswrapper[4933]: I0122 07:49:09.763683 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2cb5c247-c0c7-4974-9ec2-90bb418113a0-utilities\") pod \"community-operators-4rhwq\" (UID: \"2cb5c247-c0c7-4974-9ec2-90bb418113a0\") " pod="openshift-marketplace/community-operators-4rhwq" Jan 22 07:49:09 crc kubenswrapper[4933]: I0122 07:49:09.763712 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vd6q2\" (UniqueName: \"kubernetes.io/projected/2cb5c247-c0c7-4974-9ec2-90bb418113a0-kube-api-access-vd6q2\") pod \"community-operators-4rhwq\" (UID: \"2cb5c247-c0c7-4974-9ec2-90bb418113a0\") " pod="openshift-marketplace/community-operators-4rhwq" Jan 22 07:49:09 crc kubenswrapper[4933]: I0122 07:49:09.865285 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2cb5c247-c0c7-4974-9ec2-90bb418113a0-utilities\") pod \"community-operators-4rhwq\" (UID: \"2cb5c247-c0c7-4974-9ec2-90bb418113a0\") " pod="openshift-marketplace/community-operators-4rhwq" Jan 22 07:49:09 crc kubenswrapper[4933]: I0122 07:49:09.865354 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vd6q2\" (UniqueName: \"kubernetes.io/projected/2cb5c247-c0c7-4974-9ec2-90bb418113a0-kube-api-access-vd6q2\") pod \"community-operators-4rhwq\" (UID: \"2cb5c247-c0c7-4974-9ec2-90bb418113a0\") " pod="openshift-marketplace/community-operators-4rhwq" Jan 22 07:49:09 crc kubenswrapper[4933]: I0122 07:49:09.865506 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2cb5c247-c0c7-4974-9ec2-90bb418113a0-catalog-content\") pod \"community-operators-4rhwq\" (UID: \"2cb5c247-c0c7-4974-9ec2-90bb418113a0\") " pod="openshift-marketplace/community-operators-4rhwq" Jan 22 07:49:09 crc kubenswrapper[4933]: I0122 07:49:09.866410 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2cb5c247-c0c7-4974-9ec2-90bb418113a0-catalog-content\") pod \"community-operators-4rhwq\" (UID: \"2cb5c247-c0c7-4974-9ec2-90bb418113a0\") " pod="openshift-marketplace/community-operators-4rhwq" Jan 22 07:49:09 crc kubenswrapper[4933]: I0122 07:49:09.866418 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2cb5c247-c0c7-4974-9ec2-90bb418113a0-utilities\") pod \"community-operators-4rhwq\" (UID: \"2cb5c247-c0c7-4974-9ec2-90bb418113a0\") " pod="openshift-marketplace/community-operators-4rhwq" Jan 22 07:49:09 crc kubenswrapper[4933]: I0122 07:49:09.887104 4933 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-vd6q2\" (UniqueName: \"kubernetes.io/projected/2cb5c247-c0c7-4974-9ec2-90bb418113a0-kube-api-access-vd6q2\") pod \"community-operators-4rhwq\" (UID: \"2cb5c247-c0c7-4974-9ec2-90bb418113a0\") " pod="openshift-marketplace/community-operators-4rhwq" Jan 22 07:49:10 crc kubenswrapper[4933]: I0122 07:49:10.019899 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4rhwq" Jan 22 07:49:10 crc kubenswrapper[4933]: I0122 07:49:10.593767 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4rhwq"] Jan 22 07:49:10 crc kubenswrapper[4933]: I0122 07:49:10.942958 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 07:49:10 crc kubenswrapper[4933]: I0122 07:49:10.944010 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 07:49:11 crc kubenswrapper[4933]: I0122 07:49:11.436323 4933 generic.go:334] "Generic (PLEG): container finished" podID="2cb5c247-c0c7-4974-9ec2-90bb418113a0" containerID="2ae6f9c86313507a68f167b546739a8e29e3744febeab5df9a7ad6396c3697ec" exitCode=0 Jan 22 07:49:11 crc kubenswrapper[4933]: I0122 07:49:11.436618 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4rhwq" event={"ID":"2cb5c247-c0c7-4974-9ec2-90bb418113a0","Type":"ContainerDied","Data":"2ae6f9c86313507a68f167b546739a8e29e3744febeab5df9a7ad6396c3697ec"} Jan 22 07:49:11 crc kubenswrapper[4933]: I0122 07:49:11.436646 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4rhwq" event={"ID":"2cb5c247-c0c7-4974-9ec2-90bb418113a0","Type":"ContainerStarted","Data":"6d879c9fa34a82fa1691a6b777c0ff451e0b8f088a71836a19a5c1c4b63c89f4"} Jan 22 07:49:11 crc kubenswrapper[4933]: I0122 07:49:11.441806 4933 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 07:49:11 crc kubenswrapper[4933]: I0122 07:49:11.863685 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-82r2d"] Jan 22 07:49:11 crc kubenswrapper[4933]: I0122 07:49:11.866404 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-82r2d" Jan 22 07:49:11 crc kubenswrapper[4933]: I0122 07:49:11.879448 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-82r2d"] Jan 22 07:49:11 crc kubenswrapper[4933]: I0122 07:49:11.910576 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a0e93e27-7e33-4723-8e63-8a0e4dbbab3a-utilities\") pod \"redhat-marketplace-82r2d\" (UID: \"a0e93e27-7e33-4723-8e63-8a0e4dbbab3a\") " pod="openshift-marketplace/redhat-marketplace-82r2d" Jan 22 07:49:11 crc kubenswrapper[4933]: I0122 07:49:11.910747 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h4gnn\" (UniqueName: \"kubernetes.io/projected/a0e93e27-7e33-4723-8e63-8a0e4dbbab3a-kube-api-access-h4gnn\") pod \"redhat-marketplace-82r2d\" (UID: \"a0e93e27-7e33-4723-8e63-8a0e4dbbab3a\") " pod="openshift-marketplace/redhat-marketplace-82r2d" Jan 22 07:49:11 crc kubenswrapper[4933]: I0122 07:49:11.910883 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a0e93e27-7e33-4723-8e63-8a0e4dbbab3a-catalog-content\") pod \"redhat-marketplace-82r2d\" (UID: \"a0e93e27-7e33-4723-8e63-8a0e4dbbab3a\") " pod="openshift-marketplace/redhat-marketplace-82r2d" Jan 22 07:49:12 crc kubenswrapper[4933]: I0122 07:49:12.013064 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a0e93e27-7e33-4723-8e63-8a0e4dbbab3a-utilities\") pod \"redhat-marketplace-82r2d\" (UID: \"a0e93e27-7e33-4723-8e63-8a0e4dbbab3a\") " pod="openshift-marketplace/redhat-marketplace-82r2d" Jan 22 07:49:12 crc kubenswrapper[4933]: I0122 07:49:12.013605 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a0e93e27-7e33-4723-8e63-8a0e4dbbab3a-utilities\") pod \"redhat-marketplace-82r2d\" (UID: \"a0e93e27-7e33-4723-8e63-8a0e4dbbab3a\") " pod="openshift-marketplace/redhat-marketplace-82r2d" Jan 22 07:49:12 crc kubenswrapper[4933]: I0122 07:49:12.014052 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h4gnn\" (UniqueName: \"kubernetes.io/projected/a0e93e27-7e33-4723-8e63-8a0e4dbbab3a-kube-api-access-h4gnn\") pod \"redhat-marketplace-82r2d\" (UID: \"a0e93e27-7e33-4723-8e63-8a0e4dbbab3a\") " pod="openshift-marketplace/redhat-marketplace-82r2d" Jan 22 07:49:12 crc kubenswrapper[4933]: I0122 07:49:12.014278 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a0e93e27-7e33-4723-8e63-8a0e4dbbab3a-catalog-content\") pod \"redhat-marketplace-82r2d\" (UID: \"a0e93e27-7e33-4723-8e63-8a0e4dbbab3a\") " pod="openshift-marketplace/redhat-marketplace-82r2d" Jan 22 07:49:12 crc kubenswrapper[4933]: I0122 07:49:12.014662 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a0e93e27-7e33-4723-8e63-8a0e4dbbab3a-catalog-content\") pod \"redhat-marketplace-82r2d\" (UID: \"a0e93e27-7e33-4723-8e63-8a0e4dbbab3a\") " pod="openshift-marketplace/redhat-marketplace-82r2d" Jan 22 07:49:12 crc kubenswrapper[4933]: I0122 07:49:12.044975 4933 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-h4gnn\" (UniqueName: \"kubernetes.io/projected/a0e93e27-7e33-4723-8e63-8a0e4dbbab3a-kube-api-access-h4gnn\") pod \"redhat-marketplace-82r2d\" (UID: \"a0e93e27-7e33-4723-8e63-8a0e4dbbab3a\") " pod="openshift-marketplace/redhat-marketplace-82r2d" Jan 22 07:49:12 crc kubenswrapper[4933]: I0122 07:49:12.074975 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-hc2pj"] Jan 22 07:49:12 crc kubenswrapper[4933]: I0122 07:49:12.078239 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hc2pj" Jan 22 07:49:12 crc kubenswrapper[4933]: I0122 07:49:12.112247 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hc2pj"] Jan 22 07:49:12 crc kubenswrapper[4933]: I0122 07:49:12.116427 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pq9rm\" (UniqueName: \"kubernetes.io/projected/9be31a6a-b0fb-42ec-b92c-acb2a0144d8b-kube-api-access-pq9rm\") pod \"certified-operators-hc2pj\" (UID: \"9be31a6a-b0fb-42ec-b92c-acb2a0144d8b\") " pod="openshift-marketplace/certified-operators-hc2pj" Jan 22 07:49:12 crc kubenswrapper[4933]: I0122 07:49:12.116526 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9be31a6a-b0fb-42ec-b92c-acb2a0144d8b-catalog-content\") pod \"certified-operators-hc2pj\" (UID: \"9be31a6a-b0fb-42ec-b92c-acb2a0144d8b\") " pod="openshift-marketplace/certified-operators-hc2pj" Jan 22 07:49:12 crc kubenswrapper[4933]: I0122 07:49:12.116820 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9be31a6a-b0fb-42ec-b92c-acb2a0144d8b-utilities\") pod \"certified-operators-hc2pj\" (UID: \"9be31a6a-b0fb-42ec-b92c-acb2a0144d8b\") " pod="openshift-marketplace/certified-operators-hc2pj" Jan 22 07:49:12 crc kubenswrapper[4933]: I0122 07:49:12.188712 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-82r2d" Jan 22 07:49:12 crc kubenswrapper[4933]: I0122 07:49:12.219484 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9be31a6a-b0fb-42ec-b92c-acb2a0144d8b-utilities\") pod \"certified-operators-hc2pj\" (UID: \"9be31a6a-b0fb-42ec-b92c-acb2a0144d8b\") " pod="openshift-marketplace/certified-operators-hc2pj" Jan 22 07:49:12 crc kubenswrapper[4933]: I0122 07:49:12.219552 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pq9rm\" (UniqueName: \"kubernetes.io/projected/9be31a6a-b0fb-42ec-b92c-acb2a0144d8b-kube-api-access-pq9rm\") pod \"certified-operators-hc2pj\" (UID: \"9be31a6a-b0fb-42ec-b92c-acb2a0144d8b\") " pod="openshift-marketplace/certified-operators-hc2pj" Jan 22 07:49:12 crc kubenswrapper[4933]: I0122 07:49:12.219598 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9be31a6a-b0fb-42ec-b92c-acb2a0144d8b-catalog-content\") pod \"certified-operators-hc2pj\" (UID: \"9be31a6a-b0fb-42ec-b92c-acb2a0144d8b\") " pod="openshift-marketplace/certified-operators-hc2pj" Jan 22 07:49:12 crc kubenswrapper[4933]: I0122 07:49:12.220224 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9be31a6a-b0fb-42ec-b92c-acb2a0144d8b-catalog-content\") pod \"certified-operators-hc2pj\" (UID: \"9be31a6a-b0fb-42ec-b92c-acb2a0144d8b\") " pod="openshift-marketplace/certified-operators-hc2pj" Jan 22 07:49:12 crc kubenswrapper[4933]: I0122 07:49:12.220496 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9be31a6a-b0fb-42ec-b92c-acb2a0144d8b-utilities\") pod \"certified-operators-hc2pj\" (UID: \"9be31a6a-b0fb-42ec-b92c-acb2a0144d8b\") " pod="openshift-marketplace/certified-operators-hc2pj" Jan 22 07:49:12 crc kubenswrapper[4933]: I0122 07:49:12.245489 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pq9rm\" (UniqueName: \"kubernetes.io/projected/9be31a6a-b0fb-42ec-b92c-acb2a0144d8b-kube-api-access-pq9rm\") pod \"certified-operators-hc2pj\" (UID: \"9be31a6a-b0fb-42ec-b92c-acb2a0144d8b\") " pod="openshift-marketplace/certified-operators-hc2pj" Jan 22 07:49:12 crc kubenswrapper[4933]: I0122 07:49:12.431311 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-hc2pj" Jan 22 07:49:12 crc kubenswrapper[4933]: I0122 07:49:12.739560 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-82r2d"] Jan 22 07:49:13 crc kubenswrapper[4933]: I0122 07:49:13.023290 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hc2pj"] Jan 22 07:49:13 crc kubenswrapper[4933]: I0122 07:49:13.454445 4933 generic.go:334] "Generic (PLEG): container finished" podID="a0e93e27-7e33-4723-8e63-8a0e4dbbab3a" containerID="b4833259c5672a7374f00d6461f7b1508a16a04d2a8494cd134586fc9a03a211" exitCode=0 Jan 22 07:49:13 crc kubenswrapper[4933]: I0122 07:49:13.454517 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-82r2d" event={"ID":"a0e93e27-7e33-4723-8e63-8a0e4dbbab3a","Type":"ContainerDied","Data":"b4833259c5672a7374f00d6461f7b1508a16a04d2a8494cd134586fc9a03a211"} Jan 22 07:49:13 crc kubenswrapper[4933]: I0122 07:49:13.454545 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-82r2d" event={"ID":"a0e93e27-7e33-4723-8e63-8a0e4dbbab3a","Type":"ContainerStarted","Data":"88c9b60fe537db89e6bb406f3870aa61f0294983d464b2d8dcdb5ea60870f4d3"} Jan 22 07:49:13 crc kubenswrapper[4933]: I0122 07:49:13.456423 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4rhwq" event={"ID":"2cb5c247-c0c7-4974-9ec2-90bb418113a0","Type":"ContainerStarted","Data":"d43a99b175ccdf552051be6db76be74d0c58d2a045ef9fa4c0951c55e8080a64"} Jan 22 07:49:13 crc kubenswrapper[4933]: I0122 07:49:13.459985 4933 generic.go:334] "Generic (PLEG): container finished" podID="9be31a6a-b0fb-42ec-b92c-acb2a0144d8b" containerID="ec2fbe040958413d88a67807bbb926dc0a369e8df7ba254316b4110b3dc59e74" exitCode=0 Jan 22 07:49:13 crc kubenswrapper[4933]: I0122 07:49:13.460010 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hc2pj" event={"ID":"9be31a6a-b0fb-42ec-b92c-acb2a0144d8b","Type":"ContainerDied","Data":"ec2fbe040958413d88a67807bbb926dc0a369e8df7ba254316b4110b3dc59e74"} Jan 22 07:49:13 crc kubenswrapper[4933]: I0122 07:49:13.460026 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hc2pj" event={"ID":"9be31a6a-b0fb-42ec-b92c-acb2a0144d8b","Type":"ContainerStarted","Data":"40fa75121fc2ab89ed2228c50252b486e22242152a4dbb3db67f589581225e5e"} Jan 22 07:49:14 crc kubenswrapper[4933]: I0122 07:49:14.479102 4933 generic.go:334] "Generic (PLEG): container finished" podID="2cb5c247-c0c7-4974-9ec2-90bb418113a0" containerID="d43a99b175ccdf552051be6db76be74d0c58d2a045ef9fa4c0951c55e8080a64" exitCode=0 Jan 22 07:49:14 crc kubenswrapper[4933]: I0122 07:49:14.479347 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4rhwq" event={"ID":"2cb5c247-c0c7-4974-9ec2-90bb418113a0","Type":"ContainerDied","Data":"d43a99b175ccdf552051be6db76be74d0c58d2a045ef9fa4c0951c55e8080a64"} Jan 22 07:49:15 crc kubenswrapper[4933]: I0122 07:49:15.492527 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-82r2d" event={"ID":"a0e93e27-7e33-4723-8e63-8a0e4dbbab3a","Type":"ContainerStarted","Data":"6b9bc2567d4d6927e1c8271e7b7fe9b83cc342577588090883c8a86c1b5a67f5"} Jan 22 07:49:15 crc kubenswrapper[4933]: I0122 07:49:15.497432 4933 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-marketplace/certified-operators-hc2pj" event={"ID":"9be31a6a-b0fb-42ec-b92c-acb2a0144d8b","Type":"ContainerStarted","Data":"33b8f421a6cec9f892f446737ffe98a077b260c43dd2fcdf5a35e2722252a95c"} Jan 22 07:49:16 crc kubenswrapper[4933]: I0122 07:49:16.509997 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4rhwq" event={"ID":"2cb5c247-c0c7-4974-9ec2-90bb418113a0","Type":"ContainerStarted","Data":"0f57ca22c1650a0af4990d993c063d53abe0df919f2ed63367356e5632d066f4"} Jan 22 07:49:16 crc kubenswrapper[4933]: I0122 07:49:16.513561 4933 generic.go:334] "Generic (PLEG): container finished" podID="9be31a6a-b0fb-42ec-b92c-acb2a0144d8b" containerID="33b8f421a6cec9f892f446737ffe98a077b260c43dd2fcdf5a35e2722252a95c" exitCode=0 Jan 22 07:49:16 crc kubenswrapper[4933]: I0122 07:49:16.513611 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hc2pj" event={"ID":"9be31a6a-b0fb-42ec-b92c-acb2a0144d8b","Type":"ContainerDied","Data":"33b8f421a6cec9f892f446737ffe98a077b260c43dd2fcdf5a35e2722252a95c"} Jan 22 07:49:16 crc kubenswrapper[4933]: I0122 07:49:16.545800 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-4rhwq" podStartSLOduration=3.440324817 podStartE2EDuration="7.545781656s" podCreationTimestamp="2026-01-22 07:49:09 +0000 UTC" firstStartedPulling="2026-01-22 07:49:11.441420039 +0000 UTC m=+7399.278545392" lastFinishedPulling="2026-01-22 07:49:15.546876878 +0000 UTC m=+7403.384002231" observedRunningTime="2026-01-22 07:49:16.533898206 +0000 UTC m=+7404.371023559" watchObservedRunningTime="2026-01-22 07:49:16.545781656 +0000 UTC m=+7404.382906999" Jan 22 07:49:17 crc kubenswrapper[4933]: I0122 07:49:17.523411 4933 generic.go:334] "Generic (PLEG): container finished" podID="a0e93e27-7e33-4723-8e63-8a0e4dbbab3a" containerID="6b9bc2567d4d6927e1c8271e7b7fe9b83cc342577588090883c8a86c1b5a67f5" exitCode=0 Jan 22 07:49:17 crc kubenswrapper[4933]: I0122 07:49:17.523493 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-82r2d" event={"ID":"a0e93e27-7e33-4723-8e63-8a0e4dbbab3a","Type":"ContainerDied","Data":"6b9bc2567d4d6927e1c8271e7b7fe9b83cc342577588090883c8a86c1b5a67f5"} Jan 22 07:49:17 crc kubenswrapper[4933]: I0122 07:49:17.527880 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hc2pj" event={"ID":"9be31a6a-b0fb-42ec-b92c-acb2a0144d8b","Type":"ContainerStarted","Data":"5eb71ad959b6f2ed909a2096191536496bb840599b0a1797381d0fe954d074f2"} Jan 22 07:49:17 crc kubenswrapper[4933]: I0122 07:49:17.569699 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-hc2pj" podStartSLOduration=1.948473525 podStartE2EDuration="5.56967565s" podCreationTimestamp="2026-01-22 07:49:12 +0000 UTC" firstStartedPulling="2026-01-22 07:49:13.465176505 +0000 UTC m=+7401.302301858" lastFinishedPulling="2026-01-22 07:49:17.08637861 +0000 UTC m=+7404.923503983" observedRunningTime="2026-01-22 07:49:17.563039939 +0000 UTC m=+7405.400165302" watchObservedRunningTime="2026-01-22 07:49:17.56967565 +0000 UTC m=+7405.406801043" Jan 22 07:49:18 crc kubenswrapper[4933]: I0122 07:49:18.538286 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-82r2d" 
event={"ID":"a0e93e27-7e33-4723-8e63-8a0e4dbbab3a","Type":"ContainerStarted","Data":"277cc63826df8916c9d97d631e7b713de46fa585624ed8dc3fb63b91b3d69526"} Jan 22 07:49:18 crc kubenswrapper[4933]: I0122 07:49:18.568919 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-82r2d" podStartSLOduration=3.0815887220000002 podStartE2EDuration="7.568896275s" podCreationTimestamp="2026-01-22 07:49:11 +0000 UTC" firstStartedPulling="2026-01-22 07:49:13.456989586 +0000 UTC m=+7401.294114939" lastFinishedPulling="2026-01-22 07:49:17.944297139 +0000 UTC m=+7405.781422492" observedRunningTime="2026-01-22 07:49:18.556548935 +0000 UTC m=+7406.393674298" watchObservedRunningTime="2026-01-22 07:49:18.568896275 +0000 UTC m=+7406.406021628" Jan 22 07:49:20 crc kubenswrapper[4933]: I0122 07:49:20.020290 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-4rhwq" Jan 22 07:49:20 crc kubenswrapper[4933]: I0122 07:49:20.020688 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-4rhwq" Jan 22 07:49:20 crc kubenswrapper[4933]: I0122 07:49:20.558747 4933 generic.go:334] "Generic (PLEG): container finished" podID="62f31314-79cb-48c7-a585-9fc2096932e3" containerID="4cad9c2ad89397433f11dcc9321d34cb55525d6bdc8e0f521c10f4cc4ba08ed1" exitCode=0 Jan 22 07:49:20 crc kubenswrapper[4933]: I0122 07:49:20.558809 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-cell1-mwncg" event={"ID":"62f31314-79cb-48c7-a585-9fc2096932e3","Type":"ContainerDied","Data":"4cad9c2ad89397433f11dcc9321d34cb55525d6bdc8e0f521c10f4cc4ba08ed1"} Jan 22 07:49:21 crc kubenswrapper[4933]: I0122 07:49:21.083254 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-4rhwq" podUID="2cb5c247-c0c7-4974-9ec2-90bb418113a0" containerName="registry-server" probeResult="failure" output=< Jan 22 07:49:21 crc kubenswrapper[4933]: timeout: failed to connect service ":50051" within 1s Jan 22 07:49:21 crc kubenswrapper[4933]: > Jan 22 07:49:22 crc kubenswrapper[4933]: I0122 07:49:22.000799 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-openstack-openstack-cell1-mwncg" Jan 22 07:49:22 crc kubenswrapper[4933]: I0122 07:49:22.167556 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62f31314-79cb-48c7-a585-9fc2096932e3-bootstrap-combined-ca-bundle\") pod \"62f31314-79cb-48c7-a585-9fc2096932e3\" (UID: \"62f31314-79cb-48c7-a585-9fc2096932e3\") " Jan 22 07:49:22 crc kubenswrapper[4933]: I0122 07:49:22.167738 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wwptz\" (UniqueName: \"kubernetes.io/projected/62f31314-79cb-48c7-a585-9fc2096932e3-kube-api-access-wwptz\") pod \"62f31314-79cb-48c7-a585-9fc2096932e3\" (UID: \"62f31314-79cb-48c7-a585-9fc2096932e3\") " Jan 22 07:49:22 crc kubenswrapper[4933]: I0122 07:49:22.167798 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/62f31314-79cb-48c7-a585-9fc2096932e3-ssh-key-openstack-cell1\") pod \"62f31314-79cb-48c7-a585-9fc2096932e3\" (UID: \"62f31314-79cb-48c7-a585-9fc2096932e3\") " Jan 22 07:49:22 crc kubenswrapper[4933]: I0122 07:49:22.167998 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/62f31314-79cb-48c7-a585-9fc2096932e3-inventory\") pod \"62f31314-79cb-48c7-a585-9fc2096932e3\" (UID: \"62f31314-79cb-48c7-a585-9fc2096932e3\") " Jan 22 07:49:22 crc kubenswrapper[4933]: I0122 07:49:22.175394 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62f31314-79cb-48c7-a585-9fc2096932e3-kube-api-access-wwptz" (OuterVolumeSpecName: "kube-api-access-wwptz") pod "62f31314-79cb-48c7-a585-9fc2096932e3" (UID: "62f31314-79cb-48c7-a585-9fc2096932e3"). InnerVolumeSpecName "kube-api-access-wwptz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:49:22 crc kubenswrapper[4933]: I0122 07:49:22.178342 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62f31314-79cb-48c7-a585-9fc2096932e3-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "62f31314-79cb-48c7-a585-9fc2096932e3" (UID: "62f31314-79cb-48c7-a585-9fc2096932e3"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:49:22 crc kubenswrapper[4933]: I0122 07:49:22.189605 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-82r2d" Jan 22 07:49:22 crc kubenswrapper[4933]: I0122 07:49:22.189676 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-82r2d" Jan 22 07:49:22 crc kubenswrapper[4933]: I0122 07:49:22.208899 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62f31314-79cb-48c7-a585-9fc2096932e3-inventory" (OuterVolumeSpecName: "inventory") pod "62f31314-79cb-48c7-a585-9fc2096932e3" (UID: "62f31314-79cb-48c7-a585-9fc2096932e3"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:49:22 crc kubenswrapper[4933]: I0122 07:49:22.220206 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62f31314-79cb-48c7-a585-9fc2096932e3-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "62f31314-79cb-48c7-a585-9fc2096932e3" (UID: "62f31314-79cb-48c7-a585-9fc2096932e3"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:49:22 crc kubenswrapper[4933]: I0122 07:49:22.250265 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-82r2d" Jan 22 07:49:22 crc kubenswrapper[4933]: I0122 07:49:22.298256 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wwptz\" (UniqueName: \"kubernetes.io/projected/62f31314-79cb-48c7-a585-9fc2096932e3-kube-api-access-wwptz\") on node \"crc\" DevicePath \"\"" Jan 22 07:49:22 crc kubenswrapper[4933]: I0122 07:49:22.298297 4933 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/62f31314-79cb-48c7-a585-9fc2096932e3-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 22 07:49:22 crc kubenswrapper[4933]: I0122 07:49:22.298317 4933 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/62f31314-79cb-48c7-a585-9fc2096932e3-inventory\") on node \"crc\" DevicePath \"\"" Jan 22 07:49:22 crc kubenswrapper[4933]: I0122 07:49:22.298331 4933 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62f31314-79cb-48c7-a585-9fc2096932e3-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:49:22 crc kubenswrapper[4933]: I0122 07:49:22.431805 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-hc2pj" Jan 22 07:49:22 crc kubenswrapper[4933]: I0122 07:49:22.432207 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-hc2pj" Jan 22 07:49:22 crc kubenswrapper[4933]: I0122 07:49:22.479363 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-hc2pj" Jan 22 07:49:22 crc kubenswrapper[4933]: I0122 07:49:22.581174 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-cell1-mwncg" event={"ID":"62f31314-79cb-48c7-a585-9fc2096932e3","Type":"ContainerDied","Data":"6c4c7fc4b7b2b8149760c8a5b49d895660d8185d601cf0e4618326a5699d78af"} Jan 22 07:49:22 crc kubenswrapper[4933]: I0122 07:49:22.581229 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6c4c7fc4b7b2b8149760c8a5b49d895660d8185d601cf0e4618326a5699d78af" Jan 22 07:49:22 crc kubenswrapper[4933]: I0122 07:49:22.581425 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-openstack-openstack-cell1-mwncg" Jan 22 07:49:22 crc kubenswrapper[4933]: I0122 07:49:22.655114 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-hc2pj" Jan 22 07:49:22 crc kubenswrapper[4933]: I0122 07:49:22.700993 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-openstack-openstack-cell1-h4gwh"] Jan 22 07:49:22 crc kubenswrapper[4933]: E0122 07:49:22.701564 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62f31314-79cb-48c7-a585-9fc2096932e3" containerName="bootstrap-openstack-openstack-cell1" Jan 22 07:49:22 crc kubenswrapper[4933]: I0122 07:49:22.701586 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="62f31314-79cb-48c7-a585-9fc2096932e3" containerName="bootstrap-openstack-openstack-cell1" Jan 22 07:49:22 crc kubenswrapper[4933]: I0122 07:49:22.701896 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="62f31314-79cb-48c7-a585-9fc2096932e3" containerName="bootstrap-openstack-openstack-cell1" Jan 22 07:49:22 crc kubenswrapper[4933]: I0122 07:49:22.702821 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-openstack-openstack-cell1-h4gwh" Jan 22 07:49:22 crc kubenswrapper[4933]: I0122 07:49:22.706982 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 07:49:22 crc kubenswrapper[4933]: I0122 07:49:22.707399 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 22 07:49:22 crc kubenswrapper[4933]: I0122 07:49:22.707690 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-d9x6d" Jan 22 07:49:22 crc kubenswrapper[4933]: I0122 07:49:22.707875 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 22 07:49:22 crc kubenswrapper[4933]: I0122 07:49:22.712961 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2kq4v\" (UniqueName: \"kubernetes.io/projected/fba3a485-7601-4024-a522-48d26533750b-kube-api-access-2kq4v\") pod \"download-cache-openstack-openstack-cell1-h4gwh\" (UID: \"fba3a485-7601-4024-a522-48d26533750b\") " pod="openstack/download-cache-openstack-openstack-cell1-h4gwh" Jan 22 07:49:22 crc kubenswrapper[4933]: I0122 07:49:22.713052 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/fba3a485-7601-4024-a522-48d26533750b-ssh-key-openstack-cell1\") pod \"download-cache-openstack-openstack-cell1-h4gwh\" (UID: \"fba3a485-7601-4024-a522-48d26533750b\") " pod="openstack/download-cache-openstack-openstack-cell1-h4gwh" Jan 22 07:49:22 crc kubenswrapper[4933]: I0122 07:49:22.713151 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fba3a485-7601-4024-a522-48d26533750b-inventory\") pod \"download-cache-openstack-openstack-cell1-h4gwh\" (UID: \"fba3a485-7601-4024-a522-48d26533750b\") " pod="openstack/download-cache-openstack-openstack-cell1-h4gwh" Jan 22 07:49:22 crc kubenswrapper[4933]: I0122 07:49:22.727301 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-openstack-openstack-cell1-h4gwh"] Jan 22 
07:49:22 crc kubenswrapper[4933]: I0122 07:49:22.814748 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2kq4v\" (UniqueName: \"kubernetes.io/projected/fba3a485-7601-4024-a522-48d26533750b-kube-api-access-2kq4v\") pod \"download-cache-openstack-openstack-cell1-h4gwh\" (UID: \"fba3a485-7601-4024-a522-48d26533750b\") " pod="openstack/download-cache-openstack-openstack-cell1-h4gwh" Jan 22 07:49:22 crc kubenswrapper[4933]: I0122 07:49:22.814793 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/fba3a485-7601-4024-a522-48d26533750b-ssh-key-openstack-cell1\") pod \"download-cache-openstack-openstack-cell1-h4gwh\" (UID: \"fba3a485-7601-4024-a522-48d26533750b\") " pod="openstack/download-cache-openstack-openstack-cell1-h4gwh" Jan 22 07:49:22 crc kubenswrapper[4933]: I0122 07:49:22.814839 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fba3a485-7601-4024-a522-48d26533750b-inventory\") pod \"download-cache-openstack-openstack-cell1-h4gwh\" (UID: \"fba3a485-7601-4024-a522-48d26533750b\") " pod="openstack/download-cache-openstack-openstack-cell1-h4gwh" Jan 22 07:49:22 crc kubenswrapper[4933]: I0122 07:49:22.819024 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fba3a485-7601-4024-a522-48d26533750b-inventory\") pod \"download-cache-openstack-openstack-cell1-h4gwh\" (UID: \"fba3a485-7601-4024-a522-48d26533750b\") " pod="openstack/download-cache-openstack-openstack-cell1-h4gwh" Jan 22 07:49:22 crc kubenswrapper[4933]: I0122 07:49:22.820060 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/fba3a485-7601-4024-a522-48d26533750b-ssh-key-openstack-cell1\") pod \"download-cache-openstack-openstack-cell1-h4gwh\" (UID: \"fba3a485-7601-4024-a522-48d26533750b\") " pod="openstack/download-cache-openstack-openstack-cell1-h4gwh" Jan 22 07:49:22 crc kubenswrapper[4933]: I0122 07:49:22.836060 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2kq4v\" (UniqueName: \"kubernetes.io/projected/fba3a485-7601-4024-a522-48d26533750b-kube-api-access-2kq4v\") pod \"download-cache-openstack-openstack-cell1-h4gwh\" (UID: \"fba3a485-7601-4024-a522-48d26533750b\") " pod="openstack/download-cache-openstack-openstack-cell1-h4gwh" Jan 22 07:49:23 crc kubenswrapper[4933]: I0122 07:49:23.034273 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-openstack-openstack-cell1-h4gwh" Jan 22 07:49:23 crc kubenswrapper[4933]: I0122 07:49:23.698706 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-openstack-openstack-cell1-h4gwh"] Jan 22 07:49:23 crc kubenswrapper[4933]: W0122 07:49:23.707246 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfba3a485_7601_4024_a522_48d26533750b.slice/crio-3accaa63c0cced60b90304be4fff880cd5d9fc2f7886734fbb4c7fcc3b6d2bc8 WatchSource:0}: Error finding container 3accaa63c0cced60b90304be4fff880cd5d9fc2f7886734fbb4c7fcc3b6d2bc8: Status 404 returned error can't find the container with id 3accaa63c0cced60b90304be4fff880cd5d9fc2f7886734fbb4c7fcc3b6d2bc8 Jan 22 07:49:24 crc kubenswrapper[4933]: I0122 07:49:24.605882 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-cell1-h4gwh" event={"ID":"fba3a485-7601-4024-a522-48d26533750b","Type":"ContainerStarted","Data":"cc5a3101ab2d6ac5b1e24a10b4c9db342d9c22c2afbb6cdf61383154ff09fdf9"} Jan 22 07:49:24 crc kubenswrapper[4933]: I0122 07:49:24.606417 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-cell1-h4gwh" event={"ID":"fba3a485-7601-4024-a522-48d26533750b","Type":"ContainerStarted","Data":"3accaa63c0cced60b90304be4fff880cd5d9fc2f7886734fbb4c7fcc3b6d2bc8"} Jan 22 07:49:24 crc kubenswrapper[4933]: I0122 07:49:24.624213 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-openstack-openstack-cell1-h4gwh" podStartSLOduration=2.159409502 podStartE2EDuration="2.624181562s" podCreationTimestamp="2026-01-22 07:49:22 +0000 UTC" firstStartedPulling="2026-01-22 07:49:23.710420205 +0000 UTC m=+7411.547545558" lastFinishedPulling="2026-01-22 07:49:24.175192265 +0000 UTC m=+7412.012317618" observedRunningTime="2026-01-22 07:49:24.623614208 +0000 UTC m=+7412.460739561" watchObservedRunningTime="2026-01-22 07:49:24.624181562 +0000 UTC m=+7412.461306915" Jan 22 07:49:26 crc kubenswrapper[4933]: I0122 07:49:26.456633 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hc2pj"] Jan 22 07:49:26 crc kubenswrapper[4933]: I0122 07:49:26.457095 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-hc2pj" podUID="9be31a6a-b0fb-42ec-b92c-acb2a0144d8b" containerName="registry-server" containerID="cri-o://5eb71ad959b6f2ed909a2096191536496bb840599b0a1797381d0fe954d074f2" gracePeriod=2 Jan 22 07:49:26 crc kubenswrapper[4933]: I0122 07:49:26.629753 4933 generic.go:334] "Generic (PLEG): container finished" podID="9be31a6a-b0fb-42ec-b92c-acb2a0144d8b" containerID="5eb71ad959b6f2ed909a2096191536496bb840599b0a1797381d0fe954d074f2" exitCode=0 Jan 22 07:49:26 crc kubenswrapper[4933]: I0122 07:49:26.629808 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hc2pj" event={"ID":"9be31a6a-b0fb-42ec-b92c-acb2a0144d8b","Type":"ContainerDied","Data":"5eb71ad959b6f2ed909a2096191536496bb840599b0a1797381d0fe954d074f2"} Jan 22 07:49:27 crc kubenswrapper[4933]: I0122 07:49:27.003959 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-hc2pj" Jan 22 07:49:27 crc kubenswrapper[4933]: I0122 07:49:27.115791 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9be31a6a-b0fb-42ec-b92c-acb2a0144d8b-catalog-content\") pod \"9be31a6a-b0fb-42ec-b92c-acb2a0144d8b\" (UID: \"9be31a6a-b0fb-42ec-b92c-acb2a0144d8b\") " Jan 22 07:49:27 crc kubenswrapper[4933]: I0122 07:49:27.115991 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9be31a6a-b0fb-42ec-b92c-acb2a0144d8b-utilities\") pod \"9be31a6a-b0fb-42ec-b92c-acb2a0144d8b\" (UID: \"9be31a6a-b0fb-42ec-b92c-acb2a0144d8b\") " Jan 22 07:49:27 crc kubenswrapper[4933]: I0122 07:49:27.116155 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pq9rm\" (UniqueName: \"kubernetes.io/projected/9be31a6a-b0fb-42ec-b92c-acb2a0144d8b-kube-api-access-pq9rm\") pod \"9be31a6a-b0fb-42ec-b92c-acb2a0144d8b\" (UID: \"9be31a6a-b0fb-42ec-b92c-acb2a0144d8b\") " Jan 22 07:49:27 crc kubenswrapper[4933]: I0122 07:49:27.117398 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9be31a6a-b0fb-42ec-b92c-acb2a0144d8b-utilities" (OuterVolumeSpecName: "utilities") pod "9be31a6a-b0fb-42ec-b92c-acb2a0144d8b" (UID: "9be31a6a-b0fb-42ec-b92c-acb2a0144d8b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:49:27 crc kubenswrapper[4933]: I0122 07:49:27.124054 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9be31a6a-b0fb-42ec-b92c-acb2a0144d8b-kube-api-access-pq9rm" (OuterVolumeSpecName: "kube-api-access-pq9rm") pod "9be31a6a-b0fb-42ec-b92c-acb2a0144d8b" (UID: "9be31a6a-b0fb-42ec-b92c-acb2a0144d8b"). InnerVolumeSpecName "kube-api-access-pq9rm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:49:27 crc kubenswrapper[4933]: I0122 07:49:27.178633 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9be31a6a-b0fb-42ec-b92c-acb2a0144d8b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9be31a6a-b0fb-42ec-b92c-acb2a0144d8b" (UID: "9be31a6a-b0fb-42ec-b92c-acb2a0144d8b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:49:27 crc kubenswrapper[4933]: I0122 07:49:27.218499 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pq9rm\" (UniqueName: \"kubernetes.io/projected/9be31a6a-b0fb-42ec-b92c-acb2a0144d8b-kube-api-access-pq9rm\") on node \"crc\" DevicePath \"\"" Jan 22 07:49:27 crc kubenswrapper[4933]: I0122 07:49:27.218533 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9be31a6a-b0fb-42ec-b92c-acb2a0144d8b-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 07:49:27 crc kubenswrapper[4933]: I0122 07:49:27.218544 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9be31a6a-b0fb-42ec-b92c-acb2a0144d8b-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 07:49:27 crc kubenswrapper[4933]: I0122 07:49:27.643056 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hc2pj" event={"ID":"9be31a6a-b0fb-42ec-b92c-acb2a0144d8b","Type":"ContainerDied","Data":"40fa75121fc2ab89ed2228c50252b486e22242152a4dbb3db67f589581225e5e"} Jan 22 07:49:27 crc kubenswrapper[4933]: I0122 07:49:27.643126 4933 scope.go:117] "RemoveContainer" containerID="5eb71ad959b6f2ed909a2096191536496bb840599b0a1797381d0fe954d074f2" Jan 22 07:49:27 crc kubenswrapper[4933]: I0122 07:49:27.643283 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hc2pj" Jan 22 07:49:27 crc kubenswrapper[4933]: I0122 07:49:27.671173 4933 scope.go:117] "RemoveContainer" containerID="33b8f421a6cec9f892f446737ffe98a077b260c43dd2fcdf5a35e2722252a95c" Jan 22 07:49:27 crc kubenswrapper[4933]: I0122 07:49:27.684627 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hc2pj"] Jan 22 07:49:27 crc kubenswrapper[4933]: I0122 07:49:27.693159 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-hc2pj"] Jan 22 07:49:27 crc kubenswrapper[4933]: I0122 07:49:27.707772 4933 scope.go:117] "RemoveContainer" containerID="ec2fbe040958413d88a67807bbb926dc0a369e8df7ba254316b4110b3dc59e74" Jan 22 07:49:28 crc kubenswrapper[4933]: I0122 07:49:28.513254 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9be31a6a-b0fb-42ec-b92c-acb2a0144d8b" path="/var/lib/kubelet/pods/9be31a6a-b0fb-42ec-b92c-acb2a0144d8b/volumes" Jan 22 07:49:30 crc kubenswrapper[4933]: I0122 07:49:30.076036 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-4rhwq" Jan 22 07:49:30 crc kubenswrapper[4933]: I0122 07:49:30.131320 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-4rhwq" Jan 22 07:49:31 crc kubenswrapper[4933]: I0122 07:49:30.668511 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4rhwq"] Jan 22 07:49:31 crc kubenswrapper[4933]: I0122 07:49:31.693896 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-4rhwq" podUID="2cb5c247-c0c7-4974-9ec2-90bb418113a0" containerName="registry-server" containerID="cri-o://0f57ca22c1650a0af4990d993c063d53abe0df919f2ed63367356e5632d066f4" gracePeriod=2 Jan 22 07:49:32 crc kubenswrapper[4933]: I0122 07:49:32.243110 4933 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-82r2d" Jan 22 07:49:32 crc kubenswrapper[4933]: I0122 07:49:32.705196 4933 generic.go:334] "Generic (PLEG): container finished" podID="2cb5c247-c0c7-4974-9ec2-90bb418113a0" containerID="0f57ca22c1650a0af4990d993c063d53abe0df919f2ed63367356e5632d066f4" exitCode=0 Jan 22 07:49:32 crc kubenswrapper[4933]: I0122 07:49:32.705319 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4rhwq" event={"ID":"2cb5c247-c0c7-4974-9ec2-90bb418113a0","Type":"ContainerDied","Data":"0f57ca22c1650a0af4990d993c063d53abe0df919f2ed63367356e5632d066f4"} Jan 22 07:49:33 crc kubenswrapper[4933]: I0122 07:49:33.054438 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4rhwq" Jan 22 07:49:33 crc kubenswrapper[4933]: I0122 07:49:33.158570 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2cb5c247-c0c7-4974-9ec2-90bb418113a0-utilities\") pod \"2cb5c247-c0c7-4974-9ec2-90bb418113a0\" (UID: \"2cb5c247-c0c7-4974-9ec2-90bb418113a0\") " Jan 22 07:49:33 crc kubenswrapper[4933]: I0122 07:49:33.158709 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2cb5c247-c0c7-4974-9ec2-90bb418113a0-catalog-content\") pod \"2cb5c247-c0c7-4974-9ec2-90bb418113a0\" (UID: \"2cb5c247-c0c7-4974-9ec2-90bb418113a0\") " Jan 22 07:49:33 crc kubenswrapper[4933]: I0122 07:49:33.158885 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vd6q2\" (UniqueName: \"kubernetes.io/projected/2cb5c247-c0c7-4974-9ec2-90bb418113a0-kube-api-access-vd6q2\") pod \"2cb5c247-c0c7-4974-9ec2-90bb418113a0\" (UID: \"2cb5c247-c0c7-4974-9ec2-90bb418113a0\") " Jan 22 07:49:33 crc kubenswrapper[4933]: I0122 07:49:33.159480 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2cb5c247-c0c7-4974-9ec2-90bb418113a0-utilities" (OuterVolumeSpecName: "utilities") pod "2cb5c247-c0c7-4974-9ec2-90bb418113a0" (UID: "2cb5c247-c0c7-4974-9ec2-90bb418113a0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:49:33 crc kubenswrapper[4933]: I0122 07:49:33.159722 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2cb5c247-c0c7-4974-9ec2-90bb418113a0-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 07:49:33 crc kubenswrapper[4933]: I0122 07:49:33.167117 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2cb5c247-c0c7-4974-9ec2-90bb418113a0-kube-api-access-vd6q2" (OuterVolumeSpecName: "kube-api-access-vd6q2") pod "2cb5c247-c0c7-4974-9ec2-90bb418113a0" (UID: "2cb5c247-c0c7-4974-9ec2-90bb418113a0"). InnerVolumeSpecName "kube-api-access-vd6q2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:49:33 crc kubenswrapper[4933]: I0122 07:49:33.227433 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2cb5c247-c0c7-4974-9ec2-90bb418113a0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2cb5c247-c0c7-4974-9ec2-90bb418113a0" (UID: "2cb5c247-c0c7-4974-9ec2-90bb418113a0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:49:33 crc kubenswrapper[4933]: I0122 07:49:33.261580 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vd6q2\" (UniqueName: \"kubernetes.io/projected/2cb5c247-c0c7-4974-9ec2-90bb418113a0-kube-api-access-vd6q2\") on node \"crc\" DevicePath \"\"" Jan 22 07:49:33 crc kubenswrapper[4933]: I0122 07:49:33.261614 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2cb5c247-c0c7-4974-9ec2-90bb418113a0-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 07:49:33 crc kubenswrapper[4933]: I0122 07:49:33.715659 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4rhwq" event={"ID":"2cb5c247-c0c7-4974-9ec2-90bb418113a0","Type":"ContainerDied","Data":"6d879c9fa34a82fa1691a6b777c0ff451e0b8f088a71836a19a5c1c4b63c89f4"} Jan 22 07:49:33 crc kubenswrapper[4933]: I0122 07:49:33.715722 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4rhwq" Jan 22 07:49:33 crc kubenswrapper[4933]: I0122 07:49:33.715729 4933 scope.go:117] "RemoveContainer" containerID="0f57ca22c1650a0af4990d993c063d53abe0df919f2ed63367356e5632d066f4" Jan 22 07:49:33 crc kubenswrapper[4933]: I0122 07:49:33.765454 4933 scope.go:117] "RemoveContainer" containerID="d43a99b175ccdf552051be6db76be74d0c58d2a045ef9fa4c0951c55e8080a64" Jan 22 07:49:33 crc kubenswrapper[4933]: I0122 07:49:33.776610 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4rhwq"] Jan 22 07:49:33 crc kubenswrapper[4933]: I0122 07:49:33.790675 4933 scope.go:117] "RemoveContainer" containerID="2ae6f9c86313507a68f167b546739a8e29e3744febeab5df9a7ad6396c3697ec" Jan 22 07:49:33 crc kubenswrapper[4933]: I0122 07:49:33.790822 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-4rhwq"] Jan 22 07:49:34 crc kubenswrapper[4933]: I0122 07:49:34.503993 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2cb5c247-c0c7-4974-9ec2-90bb418113a0" path="/var/lib/kubelet/pods/2cb5c247-c0c7-4974-9ec2-90bb418113a0/volumes" Jan 22 07:49:34 crc kubenswrapper[4933]: I0122 07:49:34.654699 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-82r2d"] Jan 22 07:49:34 crc kubenswrapper[4933]: I0122 07:49:34.655150 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-82r2d" podUID="a0e93e27-7e33-4723-8e63-8a0e4dbbab3a" containerName="registry-server" containerID="cri-o://277cc63826df8916c9d97d631e7b713de46fa585624ed8dc3fb63b91b3d69526" gracePeriod=2 Jan 22 07:49:35 crc kubenswrapper[4933]: I0122 07:49:35.738114 4933 generic.go:334] "Generic (PLEG): container finished" podID="a0e93e27-7e33-4723-8e63-8a0e4dbbab3a" containerID="277cc63826df8916c9d97d631e7b713de46fa585624ed8dc3fb63b91b3d69526" exitCode=0 Jan 22 07:49:35 crc kubenswrapper[4933]: I0122 07:49:35.738187 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-82r2d" event={"ID":"a0e93e27-7e33-4723-8e63-8a0e4dbbab3a","Type":"ContainerDied","Data":"277cc63826df8916c9d97d631e7b713de46fa585624ed8dc3fb63b91b3d69526"} Jan 22 07:49:36 crc kubenswrapper[4933]: I0122 07:49:36.187601 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-82r2d" Jan 22 07:49:36 crc kubenswrapper[4933]: I0122 07:49:36.340835 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a0e93e27-7e33-4723-8e63-8a0e4dbbab3a-utilities\") pod \"a0e93e27-7e33-4723-8e63-8a0e4dbbab3a\" (UID: \"a0e93e27-7e33-4723-8e63-8a0e4dbbab3a\") " Jan 22 07:49:36 crc kubenswrapper[4933]: I0122 07:49:36.340974 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a0e93e27-7e33-4723-8e63-8a0e4dbbab3a-catalog-content\") pod \"a0e93e27-7e33-4723-8e63-8a0e4dbbab3a\" (UID: \"a0e93e27-7e33-4723-8e63-8a0e4dbbab3a\") " Jan 22 07:49:36 crc kubenswrapper[4933]: I0122 07:49:36.341221 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h4gnn\" (UniqueName: \"kubernetes.io/projected/a0e93e27-7e33-4723-8e63-8a0e4dbbab3a-kube-api-access-h4gnn\") pod \"a0e93e27-7e33-4723-8e63-8a0e4dbbab3a\" (UID: \"a0e93e27-7e33-4723-8e63-8a0e4dbbab3a\") " Jan 22 07:49:36 crc kubenswrapper[4933]: I0122 07:49:36.341850 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a0e93e27-7e33-4723-8e63-8a0e4dbbab3a-utilities" (OuterVolumeSpecName: "utilities") pod "a0e93e27-7e33-4723-8e63-8a0e4dbbab3a" (UID: "a0e93e27-7e33-4723-8e63-8a0e4dbbab3a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:49:36 crc kubenswrapper[4933]: I0122 07:49:36.349969 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0e93e27-7e33-4723-8e63-8a0e4dbbab3a-kube-api-access-h4gnn" (OuterVolumeSpecName: "kube-api-access-h4gnn") pod "a0e93e27-7e33-4723-8e63-8a0e4dbbab3a" (UID: "a0e93e27-7e33-4723-8e63-8a0e4dbbab3a"). InnerVolumeSpecName "kube-api-access-h4gnn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:49:36 crc kubenswrapper[4933]: I0122 07:49:36.367360 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a0e93e27-7e33-4723-8e63-8a0e4dbbab3a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a0e93e27-7e33-4723-8e63-8a0e4dbbab3a" (UID: "a0e93e27-7e33-4723-8e63-8a0e4dbbab3a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:49:36 crc kubenswrapper[4933]: I0122 07:49:36.446273 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a0e93e27-7e33-4723-8e63-8a0e4dbbab3a-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 07:49:36 crc kubenswrapper[4933]: I0122 07:49:36.446349 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h4gnn\" (UniqueName: \"kubernetes.io/projected/a0e93e27-7e33-4723-8e63-8a0e4dbbab3a-kube-api-access-h4gnn\") on node \"crc\" DevicePath \"\"" Jan 22 07:49:36 crc kubenswrapper[4933]: I0122 07:49:36.446368 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a0e93e27-7e33-4723-8e63-8a0e4dbbab3a-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 07:49:36 crc kubenswrapper[4933]: I0122 07:49:36.751311 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-82r2d" event={"ID":"a0e93e27-7e33-4723-8e63-8a0e4dbbab3a","Type":"ContainerDied","Data":"88c9b60fe537db89e6bb406f3870aa61f0294983d464b2d8dcdb5ea60870f4d3"} Jan 22 07:49:36 crc kubenswrapper[4933]: I0122 07:49:36.751415 4933 scope.go:117] "RemoveContainer" containerID="277cc63826df8916c9d97d631e7b713de46fa585624ed8dc3fb63b91b3d69526" Jan 22 07:49:36 crc kubenswrapper[4933]: I0122 07:49:36.751503 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-82r2d" Jan 22 07:49:36 crc kubenswrapper[4933]: I0122 07:49:36.786995 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-82r2d"] Jan 22 07:49:36 crc kubenswrapper[4933]: I0122 07:49:36.791190 4933 scope.go:117] "RemoveContainer" containerID="6b9bc2567d4d6927e1c8271e7b7fe9b83cc342577588090883c8a86c1b5a67f5" Jan 22 07:49:36 crc kubenswrapper[4933]: I0122 07:49:36.798359 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-82r2d"] Jan 22 07:49:36 crc kubenswrapper[4933]: I0122 07:49:36.813106 4933 scope.go:117] "RemoveContainer" containerID="b4833259c5672a7374f00d6461f7b1508a16a04d2a8494cd134586fc9a03a211" Jan 22 07:49:38 crc kubenswrapper[4933]: I0122 07:49:38.503405 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0e93e27-7e33-4723-8e63-8a0e4dbbab3a" path="/var/lib/kubelet/pods/a0e93e27-7e33-4723-8e63-8a0e4dbbab3a/volumes" Jan 22 07:49:40 crc kubenswrapper[4933]: I0122 07:49:40.943923 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 07:49:40 crc kubenswrapper[4933]: I0122 07:49:40.944848 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 07:50:10 crc kubenswrapper[4933]: I0122 07:50:10.943150 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial 
tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 07:50:10 crc kubenswrapper[4933]: I0122 07:50:10.943621 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 07:50:10 crc kubenswrapper[4933]: I0122 07:50:10.943670 4933 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" Jan 22 07:50:10 crc kubenswrapper[4933]: I0122 07:50:10.944603 4933 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"67ad87c27db02a9e178a1bcee7c5ada3568216417f71e7c98863e733198e7ab4"} pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 07:50:10 crc kubenswrapper[4933]: I0122 07:50:10.944676 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" containerID="cri-o://67ad87c27db02a9e178a1bcee7c5ada3568216417f71e7c98863e733198e7ab4" gracePeriod=600 Jan 22 07:50:11 crc kubenswrapper[4933]: E0122 07:50:11.108920 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:50:11 crc kubenswrapper[4933]: I0122 07:50:11.124849 4933 generic.go:334] "Generic (PLEG): container finished" podID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerID="67ad87c27db02a9e178a1bcee7c5ada3568216417f71e7c98863e733198e7ab4" exitCode=0 Jan 22 07:50:11 crc kubenswrapper[4933]: I0122 07:50:11.124899 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerDied","Data":"67ad87c27db02a9e178a1bcee7c5ada3568216417f71e7c98863e733198e7ab4"} Jan 22 07:50:11 crc kubenswrapper[4933]: I0122 07:50:11.124936 4933 scope.go:117] "RemoveContainer" containerID="00cefbf7cdddf57aeeac4ad5923b7d70d1f5988480bcc20b468032624e763855" Jan 22 07:50:11 crc kubenswrapper[4933]: I0122 07:50:11.125593 4933 scope.go:117] "RemoveContainer" containerID="67ad87c27db02a9e178a1bcee7c5ada3568216417f71e7c98863e733198e7ab4" Jan 22 07:50:11 crc kubenswrapper[4933]: E0122 07:50:11.125900 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:50:26 crc kubenswrapper[4933]: I0122 07:50:26.490885 4933 scope.go:117] "RemoveContainer" 
containerID="67ad87c27db02a9e178a1bcee7c5ada3568216417f71e7c98863e733198e7ab4" Jan 22 07:50:26 crc kubenswrapper[4933]: E0122 07:50:26.491555 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:50:37 crc kubenswrapper[4933]: I0122 07:50:37.490634 4933 scope.go:117] "RemoveContainer" containerID="67ad87c27db02a9e178a1bcee7c5ada3568216417f71e7c98863e733198e7ab4" Jan 22 07:50:37 crc kubenswrapper[4933]: E0122 07:50:37.491571 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:50:52 crc kubenswrapper[4933]: I0122 07:50:52.501380 4933 scope.go:117] "RemoveContainer" containerID="67ad87c27db02a9e178a1bcee7c5ada3568216417f71e7c98863e733198e7ab4" Jan 22 07:50:52 crc kubenswrapper[4933]: E0122 07:50:52.509934 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:50:53 crc kubenswrapper[4933]: I0122 07:50:53.549334 4933 generic.go:334] "Generic (PLEG): container finished" podID="fba3a485-7601-4024-a522-48d26533750b" containerID="cc5a3101ab2d6ac5b1e24a10b4c9db342d9c22c2afbb6cdf61383154ff09fdf9" exitCode=0 Jan 22 07:50:53 crc kubenswrapper[4933]: I0122 07:50:53.549383 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-cell1-h4gwh" event={"ID":"fba3a485-7601-4024-a522-48d26533750b","Type":"ContainerDied","Data":"cc5a3101ab2d6ac5b1e24a10b4c9db342d9c22c2afbb6cdf61383154ff09fdf9"} Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.027714 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-openstack-openstack-cell1-h4gwh" Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.100986 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fba3a485-7601-4024-a522-48d26533750b-inventory\") pod \"fba3a485-7601-4024-a522-48d26533750b\" (UID: \"fba3a485-7601-4024-a522-48d26533750b\") " Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.101150 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/fba3a485-7601-4024-a522-48d26533750b-ssh-key-openstack-cell1\") pod \"fba3a485-7601-4024-a522-48d26533750b\" (UID: \"fba3a485-7601-4024-a522-48d26533750b\") " Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.101284 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2kq4v\" (UniqueName: \"kubernetes.io/projected/fba3a485-7601-4024-a522-48d26533750b-kube-api-access-2kq4v\") pod \"fba3a485-7601-4024-a522-48d26533750b\" (UID: \"fba3a485-7601-4024-a522-48d26533750b\") " Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.110452 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fba3a485-7601-4024-a522-48d26533750b-kube-api-access-2kq4v" (OuterVolumeSpecName: "kube-api-access-2kq4v") pod "fba3a485-7601-4024-a522-48d26533750b" (UID: "fba3a485-7601-4024-a522-48d26533750b"). InnerVolumeSpecName "kube-api-access-2kq4v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.136167 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fba3a485-7601-4024-a522-48d26533750b-inventory" (OuterVolumeSpecName: "inventory") pod "fba3a485-7601-4024-a522-48d26533750b" (UID: "fba3a485-7601-4024-a522-48d26533750b"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.151404 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fba3a485-7601-4024-a522-48d26533750b-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "fba3a485-7601-4024-a522-48d26533750b" (UID: "fba3a485-7601-4024-a522-48d26533750b"). InnerVolumeSpecName "ssh-key-openstack-cell1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.205166 4933 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/fba3a485-7601-4024-a522-48d26533750b-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.205532 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2kq4v\" (UniqueName: \"kubernetes.io/projected/fba3a485-7601-4024-a522-48d26533750b-kube-api-access-2kq4v\") on node \"crc\" DevicePath \"\"" Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.205634 4933 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fba3a485-7601-4024-a522-48d26533750b-inventory\") on node \"crc\" DevicePath \"\"" Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.571913 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-cell1-h4gwh" event={"ID":"fba3a485-7601-4024-a522-48d26533750b","Type":"ContainerDied","Data":"3accaa63c0cced60b90304be4fff880cd5d9fc2f7886734fbb4c7fcc3b6d2bc8"} Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.572315 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3accaa63c0cced60b90304be4fff880cd5d9fc2f7886734fbb4c7fcc3b6d2bc8" Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.572031 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-openstack-openstack-cell1-h4gwh" Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.662911 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-openstack-openstack-cell1-l7b2n"] Jan 22 07:50:55 crc kubenswrapper[4933]: E0122 07:50:55.663501 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9be31a6a-b0fb-42ec-b92c-acb2a0144d8b" containerName="extract-content" Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.663526 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="9be31a6a-b0fb-42ec-b92c-acb2a0144d8b" containerName="extract-content" Jan 22 07:50:55 crc kubenswrapper[4933]: E0122 07:50:55.663544 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9be31a6a-b0fb-42ec-b92c-acb2a0144d8b" containerName="registry-server" Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.663552 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="9be31a6a-b0fb-42ec-b92c-acb2a0144d8b" containerName="registry-server" Jan 22 07:50:55 crc kubenswrapper[4933]: E0122 07:50:55.663587 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0e93e27-7e33-4723-8e63-8a0e4dbbab3a" containerName="extract-utilities" Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.663597 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0e93e27-7e33-4723-8e63-8a0e4dbbab3a" containerName="extract-utilities" Jan 22 07:50:55 crc kubenswrapper[4933]: E0122 07:50:55.663614 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2cb5c247-c0c7-4974-9ec2-90bb418113a0" containerName="registry-server" Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.663622 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="2cb5c247-c0c7-4974-9ec2-90bb418113a0" containerName="registry-server" Jan 22 07:50:55 crc kubenswrapper[4933]: E0122 07:50:55.663638 4933 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="fba3a485-7601-4024-a522-48d26533750b" containerName="download-cache-openstack-openstack-cell1" Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.663647 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="fba3a485-7601-4024-a522-48d26533750b" containerName="download-cache-openstack-openstack-cell1" Jan 22 07:50:55 crc kubenswrapper[4933]: E0122 07:50:55.663673 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2cb5c247-c0c7-4974-9ec2-90bb418113a0" containerName="extract-utilities" Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.663682 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="2cb5c247-c0c7-4974-9ec2-90bb418113a0" containerName="extract-utilities" Jan 22 07:50:55 crc kubenswrapper[4933]: E0122 07:50:55.663700 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9be31a6a-b0fb-42ec-b92c-acb2a0144d8b" containerName="extract-utilities" Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.663708 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="9be31a6a-b0fb-42ec-b92c-acb2a0144d8b" containerName="extract-utilities" Jan 22 07:50:55 crc kubenswrapper[4933]: E0122 07:50:55.663723 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0e93e27-7e33-4723-8e63-8a0e4dbbab3a" containerName="extract-content" Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.663731 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0e93e27-7e33-4723-8e63-8a0e4dbbab3a" containerName="extract-content" Jan 22 07:50:55 crc kubenswrapper[4933]: E0122 07:50:55.663752 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0e93e27-7e33-4723-8e63-8a0e4dbbab3a" containerName="registry-server" Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.663760 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0e93e27-7e33-4723-8e63-8a0e4dbbab3a" containerName="registry-server" Jan 22 07:50:55 crc kubenswrapper[4933]: E0122 07:50:55.663776 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2cb5c247-c0c7-4974-9ec2-90bb418113a0" containerName="extract-content" Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.663784 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="2cb5c247-c0c7-4974-9ec2-90bb418113a0" containerName="extract-content" Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.664026 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="fba3a485-7601-4024-a522-48d26533750b" containerName="download-cache-openstack-openstack-cell1" Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.664051 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0e93e27-7e33-4723-8e63-8a0e4dbbab3a" containerName="registry-server" Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.664071 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="2cb5c247-c0c7-4974-9ec2-90bb418113a0" containerName="registry-server" Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.664115 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="9be31a6a-b0fb-42ec-b92c-acb2a0144d8b" containerName="registry-server" Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.665023 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-openstack-openstack-cell1-l7b2n" Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.668640 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.668733 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-d9x6d" Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.671664 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.672498 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.677699 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-openstack-openstack-cell1-l7b2n"] Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.715480 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/a0667ff9-d0c8-48ae-8389-74bc383cd66b-ssh-key-openstack-cell1\") pod \"configure-network-openstack-openstack-cell1-l7b2n\" (UID: \"a0667ff9-d0c8-48ae-8389-74bc383cd66b\") " pod="openstack/configure-network-openstack-openstack-cell1-l7b2n" Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.715823 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a0667ff9-d0c8-48ae-8389-74bc383cd66b-inventory\") pod \"configure-network-openstack-openstack-cell1-l7b2n\" (UID: \"a0667ff9-d0c8-48ae-8389-74bc383cd66b\") " pod="openstack/configure-network-openstack-openstack-cell1-l7b2n" Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.715989 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r4j9l\" (UniqueName: \"kubernetes.io/projected/a0667ff9-d0c8-48ae-8389-74bc383cd66b-kube-api-access-r4j9l\") pod \"configure-network-openstack-openstack-cell1-l7b2n\" (UID: \"a0667ff9-d0c8-48ae-8389-74bc383cd66b\") " pod="openstack/configure-network-openstack-openstack-cell1-l7b2n" Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.817834 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/a0667ff9-d0c8-48ae-8389-74bc383cd66b-ssh-key-openstack-cell1\") pod \"configure-network-openstack-openstack-cell1-l7b2n\" (UID: \"a0667ff9-d0c8-48ae-8389-74bc383cd66b\") " pod="openstack/configure-network-openstack-openstack-cell1-l7b2n" Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.817993 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a0667ff9-d0c8-48ae-8389-74bc383cd66b-inventory\") pod \"configure-network-openstack-openstack-cell1-l7b2n\" (UID: \"a0667ff9-d0c8-48ae-8389-74bc383cd66b\") " pod="openstack/configure-network-openstack-openstack-cell1-l7b2n" Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.818228 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r4j9l\" (UniqueName: \"kubernetes.io/projected/a0667ff9-d0c8-48ae-8389-74bc383cd66b-kube-api-access-r4j9l\") pod \"configure-network-openstack-openstack-cell1-l7b2n\" (UID: 
\"a0667ff9-d0c8-48ae-8389-74bc383cd66b\") " pod="openstack/configure-network-openstack-openstack-cell1-l7b2n" Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.828723 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a0667ff9-d0c8-48ae-8389-74bc383cd66b-inventory\") pod \"configure-network-openstack-openstack-cell1-l7b2n\" (UID: \"a0667ff9-d0c8-48ae-8389-74bc383cd66b\") " pod="openstack/configure-network-openstack-openstack-cell1-l7b2n" Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.833861 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/a0667ff9-d0c8-48ae-8389-74bc383cd66b-ssh-key-openstack-cell1\") pod \"configure-network-openstack-openstack-cell1-l7b2n\" (UID: \"a0667ff9-d0c8-48ae-8389-74bc383cd66b\") " pod="openstack/configure-network-openstack-openstack-cell1-l7b2n" Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.839641 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r4j9l\" (UniqueName: \"kubernetes.io/projected/a0667ff9-d0c8-48ae-8389-74bc383cd66b-kube-api-access-r4j9l\") pod \"configure-network-openstack-openstack-cell1-l7b2n\" (UID: \"a0667ff9-d0c8-48ae-8389-74bc383cd66b\") " pod="openstack/configure-network-openstack-openstack-cell1-l7b2n" Jan 22 07:50:55 crc kubenswrapper[4933]: I0122 07:50:55.989041 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-openstack-openstack-cell1-l7b2n" Jan 22 07:50:56 crc kubenswrapper[4933]: I0122 07:50:56.606831 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-openstack-openstack-cell1-l7b2n"] Jan 22 07:50:57 crc kubenswrapper[4933]: I0122 07:50:57.590724 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-cell1-l7b2n" event={"ID":"a0667ff9-d0c8-48ae-8389-74bc383cd66b","Type":"ContainerStarted","Data":"062c5eff3465689f441550f9f85433c044c1bbac36ee7a60729f3e75c01622a2"} Jan 22 07:50:57 crc kubenswrapper[4933]: I0122 07:50:57.591221 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-cell1-l7b2n" event={"ID":"a0667ff9-d0c8-48ae-8389-74bc383cd66b","Type":"ContainerStarted","Data":"7c681663b7a73a57b1371230cb7e9641c77b75de403c010f696e5b76ec6086a3"} Jan 22 07:50:57 crc kubenswrapper[4933]: I0122 07:50:57.608963 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-openstack-openstack-cell1-l7b2n" podStartSLOduration=2.163938337 podStartE2EDuration="2.608943487s" podCreationTimestamp="2026-01-22 07:50:55 +0000 UTC" firstStartedPulling="2026-01-22 07:50:56.616045795 +0000 UTC m=+7504.453171148" lastFinishedPulling="2026-01-22 07:50:57.061050945 +0000 UTC m=+7504.898176298" observedRunningTime="2026-01-22 07:50:57.607164733 +0000 UTC m=+7505.444290106" watchObservedRunningTime="2026-01-22 07:50:57.608943487 +0000 UTC m=+7505.446068840" Jan 22 07:51:05 crc kubenswrapper[4933]: I0122 07:51:05.491791 4933 scope.go:117] "RemoveContainer" containerID="67ad87c27db02a9e178a1bcee7c5ada3568216417f71e7c98863e733198e7ab4" Jan 22 07:51:05 crc kubenswrapper[4933]: E0122 07:51:05.492783 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:51:18 crc kubenswrapper[4933]: I0122 07:51:18.491035 4933 scope.go:117] "RemoveContainer" containerID="67ad87c27db02a9e178a1bcee7c5ada3568216417f71e7c98863e733198e7ab4" Jan 22 07:51:18 crc kubenswrapper[4933]: E0122 07:51:18.491834 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:51:30 crc kubenswrapper[4933]: I0122 07:51:30.492103 4933 scope.go:117] "RemoveContainer" containerID="67ad87c27db02a9e178a1bcee7c5ada3568216417f71e7c98863e733198e7ab4" Jan 22 07:51:30 crc kubenswrapper[4933]: E0122 07:51:30.492825 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:51:45 crc kubenswrapper[4933]: I0122 07:51:45.491889 4933 scope.go:117] "RemoveContainer" containerID="67ad87c27db02a9e178a1bcee7c5ada3568216417f71e7c98863e733198e7ab4" Jan 22 07:51:45 crc kubenswrapper[4933]: E0122 07:51:45.493261 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:51:58 crc kubenswrapper[4933]: I0122 07:51:58.491584 4933 scope.go:117] "RemoveContainer" containerID="67ad87c27db02a9e178a1bcee7c5ada3568216417f71e7c98863e733198e7ab4" Jan 22 07:51:58 crc kubenswrapper[4933]: E0122 07:51:58.492693 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:52:12 crc kubenswrapper[4933]: I0122 07:52:12.498235 4933 scope.go:117] "RemoveContainer" containerID="67ad87c27db02a9e178a1bcee7c5ada3568216417f71e7c98863e733198e7ab4" Jan 22 07:52:12 crc kubenswrapper[4933]: E0122 07:52:12.498928 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" 
podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:52:17 crc kubenswrapper[4933]: I0122 07:52:17.360420 4933 generic.go:334] "Generic (PLEG): container finished" podID="a0667ff9-d0c8-48ae-8389-74bc383cd66b" containerID="062c5eff3465689f441550f9f85433c044c1bbac36ee7a60729f3e75c01622a2" exitCode=0 Jan 22 07:52:17 crc kubenswrapper[4933]: I0122 07:52:17.360507 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-cell1-l7b2n" event={"ID":"a0667ff9-d0c8-48ae-8389-74bc383cd66b","Type":"ContainerDied","Data":"062c5eff3465689f441550f9f85433c044c1bbac36ee7a60729f3e75c01622a2"} Jan 22 07:52:18 crc kubenswrapper[4933]: I0122 07:52:18.970922 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-openstack-openstack-cell1-l7b2n" Jan 22 07:52:19 crc kubenswrapper[4933]: I0122 07:52:19.021587 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a0667ff9-d0c8-48ae-8389-74bc383cd66b-inventory\") pod \"a0667ff9-d0c8-48ae-8389-74bc383cd66b\" (UID: \"a0667ff9-d0c8-48ae-8389-74bc383cd66b\") " Jan 22 07:52:19 crc kubenswrapper[4933]: I0122 07:52:19.021695 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/a0667ff9-d0c8-48ae-8389-74bc383cd66b-ssh-key-openstack-cell1\") pod \"a0667ff9-d0c8-48ae-8389-74bc383cd66b\" (UID: \"a0667ff9-d0c8-48ae-8389-74bc383cd66b\") " Jan 22 07:52:19 crc kubenswrapper[4933]: I0122 07:52:19.021869 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r4j9l\" (UniqueName: \"kubernetes.io/projected/a0667ff9-d0c8-48ae-8389-74bc383cd66b-kube-api-access-r4j9l\") pod \"a0667ff9-d0c8-48ae-8389-74bc383cd66b\" (UID: \"a0667ff9-d0c8-48ae-8389-74bc383cd66b\") " Jan 22 07:52:19 crc kubenswrapper[4933]: I0122 07:52:19.041125 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0667ff9-d0c8-48ae-8389-74bc383cd66b-kube-api-access-r4j9l" (OuterVolumeSpecName: "kube-api-access-r4j9l") pod "a0667ff9-d0c8-48ae-8389-74bc383cd66b" (UID: "a0667ff9-d0c8-48ae-8389-74bc383cd66b"). InnerVolumeSpecName "kube-api-access-r4j9l". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:52:19 crc kubenswrapper[4933]: I0122 07:52:19.052243 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0667ff9-d0c8-48ae-8389-74bc383cd66b-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "a0667ff9-d0c8-48ae-8389-74bc383cd66b" (UID: "a0667ff9-d0c8-48ae-8389-74bc383cd66b"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:52:19 crc kubenswrapper[4933]: I0122 07:52:19.058276 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0667ff9-d0c8-48ae-8389-74bc383cd66b-inventory" (OuterVolumeSpecName: "inventory") pod "a0667ff9-d0c8-48ae-8389-74bc383cd66b" (UID: "a0667ff9-d0c8-48ae-8389-74bc383cd66b"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:52:19 crc kubenswrapper[4933]: I0122 07:52:19.123964 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r4j9l\" (UniqueName: \"kubernetes.io/projected/a0667ff9-d0c8-48ae-8389-74bc383cd66b-kube-api-access-r4j9l\") on node \"crc\" DevicePath \"\"" Jan 22 07:52:19 crc kubenswrapper[4933]: I0122 07:52:19.124008 4933 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a0667ff9-d0c8-48ae-8389-74bc383cd66b-inventory\") on node \"crc\" DevicePath \"\"" Jan 22 07:52:19 crc kubenswrapper[4933]: I0122 07:52:19.124019 4933 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/a0667ff9-d0c8-48ae-8389-74bc383cd66b-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 22 07:52:19 crc kubenswrapper[4933]: I0122 07:52:19.378143 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-cell1-l7b2n" event={"ID":"a0667ff9-d0c8-48ae-8389-74bc383cd66b","Type":"ContainerDied","Data":"7c681663b7a73a57b1371230cb7e9641c77b75de403c010f696e5b76ec6086a3"} Jan 22 07:52:19 crc kubenswrapper[4933]: I0122 07:52:19.378561 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7c681663b7a73a57b1371230cb7e9641c77b75de403c010f696e5b76ec6086a3" Jan 22 07:52:19 crc kubenswrapper[4933]: I0122 07:52:19.378190 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-openstack-openstack-cell1-l7b2n" Jan 22 07:52:19 crc kubenswrapper[4933]: I0122 07:52:19.472983 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-openstack-openstack-cell1-ht5r9"] Jan 22 07:52:19 crc kubenswrapper[4933]: E0122 07:52:19.473606 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0667ff9-d0c8-48ae-8389-74bc383cd66b" containerName="configure-network-openstack-openstack-cell1" Jan 22 07:52:19 crc kubenswrapper[4933]: I0122 07:52:19.473623 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0667ff9-d0c8-48ae-8389-74bc383cd66b" containerName="configure-network-openstack-openstack-cell1" Jan 22 07:52:19 crc kubenswrapper[4933]: I0122 07:52:19.473860 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0667ff9-d0c8-48ae-8389-74bc383cd66b" containerName="configure-network-openstack-openstack-cell1" Jan 22 07:52:19 crc kubenswrapper[4933]: I0122 07:52:19.474610 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-openstack-openstack-cell1-ht5r9" Jan 22 07:52:19 crc kubenswrapper[4933]: I0122 07:52:19.478417 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 07:52:19 crc kubenswrapper[4933]: I0122 07:52:19.479106 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 22 07:52:19 crc kubenswrapper[4933]: I0122 07:52:19.479305 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 22 07:52:19 crc kubenswrapper[4933]: I0122 07:52:19.479384 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-d9x6d" Jan 22 07:52:19 crc kubenswrapper[4933]: I0122 07:52:19.482689 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-openstack-openstack-cell1-ht5r9"] Jan 22 07:52:19 crc kubenswrapper[4933]: I0122 07:52:19.531768 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4-ssh-key-openstack-cell1\") pod \"validate-network-openstack-openstack-cell1-ht5r9\" (UID: \"f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4\") " pod="openstack/validate-network-openstack-openstack-cell1-ht5r9" Jan 22 07:52:19 crc kubenswrapper[4933]: I0122 07:52:19.532025 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p7md8\" (UniqueName: \"kubernetes.io/projected/f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4-kube-api-access-p7md8\") pod \"validate-network-openstack-openstack-cell1-ht5r9\" (UID: \"f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4\") " pod="openstack/validate-network-openstack-openstack-cell1-ht5r9" Jan 22 07:52:19 crc kubenswrapper[4933]: I0122 07:52:19.532370 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4-inventory\") pod \"validate-network-openstack-openstack-cell1-ht5r9\" (UID: \"f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4\") " pod="openstack/validate-network-openstack-openstack-cell1-ht5r9" Jan 22 07:52:19 crc kubenswrapper[4933]: I0122 07:52:19.634627 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4-inventory\") pod \"validate-network-openstack-openstack-cell1-ht5r9\" (UID: \"f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4\") " pod="openstack/validate-network-openstack-openstack-cell1-ht5r9" Jan 22 07:52:19 crc kubenswrapper[4933]: I0122 07:52:19.634815 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4-ssh-key-openstack-cell1\") pod \"validate-network-openstack-openstack-cell1-ht5r9\" (UID: \"f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4\") " pod="openstack/validate-network-openstack-openstack-cell1-ht5r9" Jan 22 07:52:19 crc kubenswrapper[4933]: I0122 07:52:19.634840 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p7md8\" (UniqueName: \"kubernetes.io/projected/f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4-kube-api-access-p7md8\") pod \"validate-network-openstack-openstack-cell1-ht5r9\" (UID: 
\"f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4\") " pod="openstack/validate-network-openstack-openstack-cell1-ht5r9" Jan 22 07:52:19 crc kubenswrapper[4933]: I0122 07:52:19.641316 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4-ssh-key-openstack-cell1\") pod \"validate-network-openstack-openstack-cell1-ht5r9\" (UID: \"f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4\") " pod="openstack/validate-network-openstack-openstack-cell1-ht5r9" Jan 22 07:52:19 crc kubenswrapper[4933]: I0122 07:52:19.643665 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4-inventory\") pod \"validate-network-openstack-openstack-cell1-ht5r9\" (UID: \"f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4\") " pod="openstack/validate-network-openstack-openstack-cell1-ht5r9" Jan 22 07:52:19 crc kubenswrapper[4933]: I0122 07:52:19.664464 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p7md8\" (UniqueName: \"kubernetes.io/projected/f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4-kube-api-access-p7md8\") pod \"validate-network-openstack-openstack-cell1-ht5r9\" (UID: \"f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4\") " pod="openstack/validate-network-openstack-openstack-cell1-ht5r9" Jan 22 07:52:19 crc kubenswrapper[4933]: I0122 07:52:19.803714 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-openstack-openstack-cell1-ht5r9" Jan 22 07:52:20 crc kubenswrapper[4933]: I0122 07:52:20.312769 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-openstack-openstack-cell1-ht5r9"] Jan 22 07:52:20 crc kubenswrapper[4933]: I0122 07:52:20.389478 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-cell1-ht5r9" event={"ID":"f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4","Type":"ContainerStarted","Data":"26159d96b7c710cb7c2004cb25065eda9a44213a52361d10abc160adf853ee39"} Jan 22 07:52:21 crc kubenswrapper[4933]: I0122 07:52:21.401486 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-cell1-ht5r9" event={"ID":"f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4","Type":"ContainerStarted","Data":"92a7157db3bec9399e89a51ab8b125cdb6b82cf4044e2196bc21aaf49d336ec9"} Jan 22 07:52:21 crc kubenswrapper[4933]: I0122 07:52:21.420646 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-openstack-openstack-cell1-ht5r9" podStartSLOduration=1.7125865139999998 podStartE2EDuration="2.42063138s" podCreationTimestamp="2026-01-22 07:52:19 +0000 UTC" firstStartedPulling="2026-01-22 07:52:20.317622551 +0000 UTC m=+7588.154747904" lastFinishedPulling="2026-01-22 07:52:21.025667407 +0000 UTC m=+7588.862792770" observedRunningTime="2026-01-22 07:52:21.417203637 +0000 UTC m=+7589.254329020" watchObservedRunningTime="2026-01-22 07:52:21.42063138 +0000 UTC m=+7589.257756733" Jan 22 07:52:26 crc kubenswrapper[4933]: I0122 07:52:26.457180 4933 generic.go:334] "Generic (PLEG): container finished" podID="f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4" containerID="92a7157db3bec9399e89a51ab8b125cdb6b82cf4044e2196bc21aaf49d336ec9" exitCode=0 Jan 22 07:52:26 crc kubenswrapper[4933]: I0122 07:52:26.457304 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-cell1-ht5r9" 
event={"ID":"f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4","Type":"ContainerDied","Data":"92a7157db3bec9399e89a51ab8b125cdb6b82cf4044e2196bc21aaf49d336ec9"} Jan 22 07:52:26 crc kubenswrapper[4933]: I0122 07:52:26.491313 4933 scope.go:117] "RemoveContainer" containerID="67ad87c27db02a9e178a1bcee7c5ada3568216417f71e7c98863e733198e7ab4" Jan 22 07:52:26 crc kubenswrapper[4933]: E0122 07:52:26.491551 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:52:28 crc kubenswrapper[4933]: I0122 07:52:28.001744 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-openstack-openstack-cell1-ht5r9" Jan 22 07:52:28 crc kubenswrapper[4933]: I0122 07:52:28.119338 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p7md8\" (UniqueName: \"kubernetes.io/projected/f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4-kube-api-access-p7md8\") pod \"f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4\" (UID: \"f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4\") " Jan 22 07:52:28 crc kubenswrapper[4933]: I0122 07:52:28.119510 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4-inventory\") pod \"f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4\" (UID: \"f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4\") " Jan 22 07:52:28 crc kubenswrapper[4933]: I0122 07:52:28.119595 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4-ssh-key-openstack-cell1\") pod \"f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4\" (UID: \"f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4\") " Jan 22 07:52:28 crc kubenswrapper[4933]: I0122 07:52:28.127363 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4-kube-api-access-p7md8" (OuterVolumeSpecName: "kube-api-access-p7md8") pod "f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4" (UID: "f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4"). InnerVolumeSpecName "kube-api-access-p7md8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:52:28 crc kubenswrapper[4933]: I0122 07:52:28.150392 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4" (UID: "f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:52:28 crc kubenswrapper[4933]: I0122 07:52:28.157844 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4-inventory" (OuterVolumeSpecName: "inventory") pod "f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4" (UID: "f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:52:28 crc kubenswrapper[4933]: I0122 07:52:28.223506 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p7md8\" (UniqueName: \"kubernetes.io/projected/f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4-kube-api-access-p7md8\") on node \"crc\" DevicePath \"\"" Jan 22 07:52:28 crc kubenswrapper[4933]: I0122 07:52:28.223559 4933 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4-inventory\") on node \"crc\" DevicePath \"\"" Jan 22 07:52:28 crc kubenswrapper[4933]: I0122 07:52:28.223573 4933 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 22 07:52:28 crc kubenswrapper[4933]: I0122 07:52:28.475850 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-cell1-ht5r9" event={"ID":"f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4","Type":"ContainerDied","Data":"26159d96b7c710cb7c2004cb25065eda9a44213a52361d10abc160adf853ee39"} Jan 22 07:52:28 crc kubenswrapper[4933]: I0122 07:52:28.475902 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="26159d96b7c710cb7c2004cb25065eda9a44213a52361d10abc160adf853ee39" Jan 22 07:52:28 crc kubenswrapper[4933]: I0122 07:52:28.475924 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-openstack-openstack-cell1-ht5r9" Jan 22 07:52:28 crc kubenswrapper[4933]: I0122 07:52:28.553333 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-openstack-openstack-cell1-6b67h"] Jan 22 07:52:28 crc kubenswrapper[4933]: E0122 07:52:28.553994 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4" containerName="validate-network-openstack-openstack-cell1" Jan 22 07:52:28 crc kubenswrapper[4933]: I0122 07:52:28.554010 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4" containerName="validate-network-openstack-openstack-cell1" Jan 22 07:52:28 crc kubenswrapper[4933]: I0122 07:52:28.554277 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4" containerName="validate-network-openstack-openstack-cell1" Jan 22 07:52:28 crc kubenswrapper[4933]: I0122 07:52:28.555614 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-openstack-openstack-cell1-6b67h" Jan 22 07:52:28 crc kubenswrapper[4933]: I0122 07:52:28.557422 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 22 07:52:28 crc kubenswrapper[4933]: I0122 07:52:28.558355 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-d9x6d" Jan 22 07:52:28 crc kubenswrapper[4933]: I0122 07:52:28.558361 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 07:52:28 crc kubenswrapper[4933]: I0122 07:52:28.558895 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 22 07:52:28 crc kubenswrapper[4933]: I0122 07:52:28.570211 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-openstack-openstack-cell1-6b67h"] Jan 22 07:52:28 crc kubenswrapper[4933]: I0122 07:52:28.631920 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rmrck\" (UniqueName: \"kubernetes.io/projected/ea30a0b1-7265-4e11-b38d-36fa9c2e08e3-kube-api-access-rmrck\") pod \"install-os-openstack-openstack-cell1-6b67h\" (UID: \"ea30a0b1-7265-4e11-b38d-36fa9c2e08e3\") " pod="openstack/install-os-openstack-openstack-cell1-6b67h" Jan 22 07:52:28 crc kubenswrapper[4933]: I0122 07:52:28.632030 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ea30a0b1-7265-4e11-b38d-36fa9c2e08e3-inventory\") pod \"install-os-openstack-openstack-cell1-6b67h\" (UID: \"ea30a0b1-7265-4e11-b38d-36fa9c2e08e3\") " pod="openstack/install-os-openstack-openstack-cell1-6b67h" Jan 22 07:52:28 crc kubenswrapper[4933]: I0122 07:52:28.632421 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/ea30a0b1-7265-4e11-b38d-36fa9c2e08e3-ssh-key-openstack-cell1\") pod \"install-os-openstack-openstack-cell1-6b67h\" (UID: \"ea30a0b1-7265-4e11-b38d-36fa9c2e08e3\") " pod="openstack/install-os-openstack-openstack-cell1-6b67h" Jan 22 07:52:28 crc kubenswrapper[4933]: I0122 07:52:28.734784 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/ea30a0b1-7265-4e11-b38d-36fa9c2e08e3-ssh-key-openstack-cell1\") pod \"install-os-openstack-openstack-cell1-6b67h\" (UID: \"ea30a0b1-7265-4e11-b38d-36fa9c2e08e3\") " pod="openstack/install-os-openstack-openstack-cell1-6b67h" Jan 22 07:52:28 crc kubenswrapper[4933]: I0122 07:52:28.734921 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rmrck\" (UniqueName: \"kubernetes.io/projected/ea30a0b1-7265-4e11-b38d-36fa9c2e08e3-kube-api-access-rmrck\") pod \"install-os-openstack-openstack-cell1-6b67h\" (UID: \"ea30a0b1-7265-4e11-b38d-36fa9c2e08e3\") " pod="openstack/install-os-openstack-openstack-cell1-6b67h" Jan 22 07:52:28 crc kubenswrapper[4933]: I0122 07:52:28.734961 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ea30a0b1-7265-4e11-b38d-36fa9c2e08e3-inventory\") pod \"install-os-openstack-openstack-cell1-6b67h\" (UID: \"ea30a0b1-7265-4e11-b38d-36fa9c2e08e3\") " pod="openstack/install-os-openstack-openstack-cell1-6b67h" Jan 
22 07:52:28 crc kubenswrapper[4933]: I0122 07:52:28.738882 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ea30a0b1-7265-4e11-b38d-36fa9c2e08e3-inventory\") pod \"install-os-openstack-openstack-cell1-6b67h\" (UID: \"ea30a0b1-7265-4e11-b38d-36fa9c2e08e3\") " pod="openstack/install-os-openstack-openstack-cell1-6b67h" Jan 22 07:52:28 crc kubenswrapper[4933]: I0122 07:52:28.739357 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/ea30a0b1-7265-4e11-b38d-36fa9c2e08e3-ssh-key-openstack-cell1\") pod \"install-os-openstack-openstack-cell1-6b67h\" (UID: \"ea30a0b1-7265-4e11-b38d-36fa9c2e08e3\") " pod="openstack/install-os-openstack-openstack-cell1-6b67h" Jan 22 07:52:28 crc kubenswrapper[4933]: I0122 07:52:28.751326 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rmrck\" (UniqueName: \"kubernetes.io/projected/ea30a0b1-7265-4e11-b38d-36fa9c2e08e3-kube-api-access-rmrck\") pod \"install-os-openstack-openstack-cell1-6b67h\" (UID: \"ea30a0b1-7265-4e11-b38d-36fa9c2e08e3\") " pod="openstack/install-os-openstack-openstack-cell1-6b67h" Jan 22 07:52:28 crc kubenswrapper[4933]: I0122 07:52:28.882224 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-openstack-openstack-cell1-6b67h" Jan 22 07:52:29 crc kubenswrapper[4933]: I0122 07:52:29.443214 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-openstack-openstack-cell1-6b67h"] Jan 22 07:52:29 crc kubenswrapper[4933]: I0122 07:52:29.486547 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-cell1-6b67h" event={"ID":"ea30a0b1-7265-4e11-b38d-36fa9c2e08e3","Type":"ContainerStarted","Data":"b69ba3d3e33b9ac3b9a3d2fd35b28d24bc9b6b833a432f9d7b655fc9a55ac6ab"} Jan 22 07:52:30 crc kubenswrapper[4933]: I0122 07:52:30.506478 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-cell1-6b67h" event={"ID":"ea30a0b1-7265-4e11-b38d-36fa9c2e08e3","Type":"ContainerStarted","Data":"891a6adab40e871584949134ad91db7b5608314b966b06b91e7b828c1df225fa"} Jan 22 07:52:30 crc kubenswrapper[4933]: I0122 07:52:30.519629 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-openstack-openstack-cell1-6b67h" podStartSLOduration=2.121821498 podStartE2EDuration="2.51960449s" podCreationTimestamp="2026-01-22 07:52:28 +0000 UTC" firstStartedPulling="2026-01-22 07:52:29.439877308 +0000 UTC m=+7597.277002661" lastFinishedPulling="2026-01-22 07:52:29.8376603 +0000 UTC m=+7597.674785653" observedRunningTime="2026-01-22 07:52:30.519409396 +0000 UTC m=+7598.356534759" watchObservedRunningTime="2026-01-22 07:52:30.51960449 +0000 UTC m=+7598.356729843" Jan 22 07:52:37 crc kubenswrapper[4933]: I0122 07:52:37.491271 4933 scope.go:117] "RemoveContainer" containerID="67ad87c27db02a9e178a1bcee7c5ada3568216417f71e7c98863e733198e7ab4" Jan 22 07:52:37 crc kubenswrapper[4933]: E0122 07:52:37.492377 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" 
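The recurring "back-off 5m0s" errors show machine-config-daemon pinned at the kubelet's restart-backoff cap. As I understand the defaults (worth confirming for this kubelet build), the delay starts at 10s and doubles per restart until it is capped at 5m; a sketch of that schedule:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Assumed defaults: initial 10s backoff, doubling, capped at 5m
	// (kubelet's MaxContainerBackOff). Verify against your kubelet version.
	backoff, max := 10*time.Second, 5*time.Minute
	for restart := 1; restart <= 8; restart++ {
		fmt.Printf("restart %d: wait %v\n", restart, backoff)
		backoff *= 2
		if backoff > max {
			backoff = max
		}
	}
	// By the sixth restart the delay pins at 5m0s, the figure repeated
	// in the log every time the pod worker syncs this pod.
}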
podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:52:52 crc kubenswrapper[4933]: I0122 07:52:52.510167 4933 scope.go:117] "RemoveContainer" containerID="67ad87c27db02a9e178a1bcee7c5ada3568216417f71e7c98863e733198e7ab4" Jan 22 07:52:52 crc kubenswrapper[4933]: E0122 07:52:52.511411 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:53:06 crc kubenswrapper[4933]: I0122 07:53:06.492727 4933 scope.go:117] "RemoveContainer" containerID="67ad87c27db02a9e178a1bcee7c5ada3568216417f71e7c98863e733198e7ab4" Jan 22 07:53:06 crc kubenswrapper[4933]: E0122 07:53:06.493804 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:53:14 crc kubenswrapper[4933]: I0122 07:53:14.955422 4933 generic.go:334] "Generic (PLEG): container finished" podID="ea30a0b1-7265-4e11-b38d-36fa9c2e08e3" containerID="891a6adab40e871584949134ad91db7b5608314b966b06b91e7b828c1df225fa" exitCode=0 Jan 22 07:53:14 crc kubenswrapper[4933]: I0122 07:53:14.955487 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-cell1-6b67h" event={"ID":"ea30a0b1-7265-4e11-b38d-36fa9c2e08e3","Type":"ContainerDied","Data":"891a6adab40e871584949134ad91db7b5608314b966b06b91e7b828c1df225fa"} Jan 22 07:53:16 crc kubenswrapper[4933]: I0122 07:53:16.513733 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-openstack-openstack-cell1-6b67h" Jan 22 07:53:16 crc kubenswrapper[4933]: I0122 07:53:16.534193 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/ea30a0b1-7265-4e11-b38d-36fa9c2e08e3-ssh-key-openstack-cell1\") pod \"ea30a0b1-7265-4e11-b38d-36fa9c2e08e3\" (UID: \"ea30a0b1-7265-4e11-b38d-36fa9c2e08e3\") " Jan 22 07:53:16 crc kubenswrapper[4933]: I0122 07:53:16.534307 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rmrck\" (UniqueName: \"kubernetes.io/projected/ea30a0b1-7265-4e11-b38d-36fa9c2e08e3-kube-api-access-rmrck\") pod \"ea30a0b1-7265-4e11-b38d-36fa9c2e08e3\" (UID: \"ea30a0b1-7265-4e11-b38d-36fa9c2e08e3\") " Jan 22 07:53:16 crc kubenswrapper[4933]: I0122 07:53:16.534454 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ea30a0b1-7265-4e11-b38d-36fa9c2e08e3-inventory\") pod \"ea30a0b1-7265-4e11-b38d-36fa9c2e08e3\" (UID: \"ea30a0b1-7265-4e11-b38d-36fa9c2e08e3\") " Jan 22 07:53:16 crc kubenswrapper[4933]: I0122 07:53:16.541834 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ea30a0b1-7265-4e11-b38d-36fa9c2e08e3-kube-api-access-rmrck" (OuterVolumeSpecName: "kube-api-access-rmrck") pod "ea30a0b1-7265-4e11-b38d-36fa9c2e08e3" (UID: "ea30a0b1-7265-4e11-b38d-36fa9c2e08e3"). InnerVolumeSpecName "kube-api-access-rmrck". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:53:16 crc kubenswrapper[4933]: I0122 07:53:16.576898 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ea30a0b1-7265-4e11-b38d-36fa9c2e08e3-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "ea30a0b1-7265-4e11-b38d-36fa9c2e08e3" (UID: "ea30a0b1-7265-4e11-b38d-36fa9c2e08e3"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:53:16 crc kubenswrapper[4933]: I0122 07:53:16.578236 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ea30a0b1-7265-4e11-b38d-36fa9c2e08e3-inventory" (OuterVolumeSpecName: "inventory") pod "ea30a0b1-7265-4e11-b38d-36fa9c2e08e3" (UID: "ea30a0b1-7265-4e11-b38d-36fa9c2e08e3"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:53:16 crc kubenswrapper[4933]: I0122 07:53:16.636950 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rmrck\" (UniqueName: \"kubernetes.io/projected/ea30a0b1-7265-4e11-b38d-36fa9c2e08e3-kube-api-access-rmrck\") on node \"crc\" DevicePath \"\"" Jan 22 07:53:16 crc kubenswrapper[4933]: I0122 07:53:16.636994 4933 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ea30a0b1-7265-4e11-b38d-36fa9c2e08e3-inventory\") on node \"crc\" DevicePath \"\"" Jan 22 07:53:16 crc kubenswrapper[4933]: I0122 07:53:16.637006 4933 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/ea30a0b1-7265-4e11-b38d-36fa9c2e08e3-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 22 07:53:16 crc kubenswrapper[4933]: I0122 07:53:16.974712 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-cell1-6b67h" event={"ID":"ea30a0b1-7265-4e11-b38d-36fa9c2e08e3","Type":"ContainerDied","Data":"b69ba3d3e33b9ac3b9a3d2fd35b28d24bc9b6b833a432f9d7b655fc9a55ac6ab"} Jan 22 07:53:16 crc kubenswrapper[4933]: I0122 07:53:16.974760 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b69ba3d3e33b9ac3b9a3d2fd35b28d24bc9b6b833a432f9d7b655fc9a55ac6ab" Jan 22 07:53:16 crc kubenswrapper[4933]: I0122 07:53:16.974770 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-openstack-openstack-cell1-6b67h" Jan 22 07:53:17 crc kubenswrapper[4933]: I0122 07:53:17.091478 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-openstack-openstack-cell1-jrjd6"] Jan 22 07:53:17 crc kubenswrapper[4933]: E0122 07:53:17.092593 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea30a0b1-7265-4e11-b38d-36fa9c2e08e3" containerName="install-os-openstack-openstack-cell1" Jan 22 07:53:17 crc kubenswrapper[4933]: I0122 07:53:17.092701 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea30a0b1-7265-4e11-b38d-36fa9c2e08e3" containerName="install-os-openstack-openstack-cell1" Jan 22 07:53:17 crc kubenswrapper[4933]: I0122 07:53:17.093008 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="ea30a0b1-7265-4e11-b38d-36fa9c2e08e3" containerName="install-os-openstack-openstack-cell1" Jan 22 07:53:17 crc kubenswrapper[4933]: I0122 07:53:17.094036 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-jrjd6" Jan 22 07:53:17 crc kubenswrapper[4933]: I0122 07:53:17.096524 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 07:53:17 crc kubenswrapper[4933]: I0122 07:53:17.096850 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 22 07:53:17 crc kubenswrapper[4933]: I0122 07:53:17.096986 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-d9x6d" Jan 22 07:53:17 crc kubenswrapper[4933]: I0122 07:53:17.097123 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 22 07:53:17 crc kubenswrapper[4933]: I0122 07:53:17.107852 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-openstack-openstack-cell1-jrjd6"] Jan 22 07:53:17 crc kubenswrapper[4933]: I0122 07:53:17.146141 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/7ac0961f-f1e7-4eeb-905a-67edf5e3c46a-ssh-key-openstack-cell1\") pod \"configure-os-openstack-openstack-cell1-jrjd6\" (UID: \"7ac0961f-f1e7-4eeb-905a-67edf5e3c46a\") " pod="openstack/configure-os-openstack-openstack-cell1-jrjd6" Jan 22 07:53:17 crc kubenswrapper[4933]: I0122 07:53:17.146421 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7ac0961f-f1e7-4eeb-905a-67edf5e3c46a-inventory\") pod \"configure-os-openstack-openstack-cell1-jrjd6\" (UID: \"7ac0961f-f1e7-4eeb-905a-67edf5e3c46a\") " pod="openstack/configure-os-openstack-openstack-cell1-jrjd6" Jan 22 07:53:17 crc kubenswrapper[4933]: I0122 07:53:17.146517 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ntw7w\" (UniqueName: \"kubernetes.io/projected/7ac0961f-f1e7-4eeb-905a-67edf5e3c46a-kube-api-access-ntw7w\") pod \"configure-os-openstack-openstack-cell1-jrjd6\" (UID: \"7ac0961f-f1e7-4eeb-905a-67edf5e3c46a\") " pod="openstack/configure-os-openstack-openstack-cell1-jrjd6" Jan 22 07:53:17 crc kubenswrapper[4933]: I0122 07:53:17.248668 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7ac0961f-f1e7-4eeb-905a-67edf5e3c46a-inventory\") pod \"configure-os-openstack-openstack-cell1-jrjd6\" (UID: \"7ac0961f-f1e7-4eeb-905a-67edf5e3c46a\") " pod="openstack/configure-os-openstack-openstack-cell1-jrjd6" Jan 22 07:53:17 crc kubenswrapper[4933]: I0122 07:53:17.248781 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ntw7w\" (UniqueName: \"kubernetes.io/projected/7ac0961f-f1e7-4eeb-905a-67edf5e3c46a-kube-api-access-ntw7w\") pod \"configure-os-openstack-openstack-cell1-jrjd6\" (UID: \"7ac0961f-f1e7-4eeb-905a-67edf5e3c46a\") " pod="openstack/configure-os-openstack-openstack-cell1-jrjd6" Jan 22 07:53:17 crc kubenswrapper[4933]: I0122 07:53:17.249031 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/7ac0961f-f1e7-4eeb-905a-67edf5e3c46a-ssh-key-openstack-cell1\") pod \"configure-os-openstack-openstack-cell1-jrjd6\" (UID: \"7ac0961f-f1e7-4eeb-905a-67edf5e3c46a\") " 
pod="openstack/configure-os-openstack-openstack-cell1-jrjd6" Jan 22 07:53:17 crc kubenswrapper[4933]: I0122 07:53:17.260304 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7ac0961f-f1e7-4eeb-905a-67edf5e3c46a-inventory\") pod \"configure-os-openstack-openstack-cell1-jrjd6\" (UID: \"7ac0961f-f1e7-4eeb-905a-67edf5e3c46a\") " pod="openstack/configure-os-openstack-openstack-cell1-jrjd6" Jan 22 07:53:17 crc kubenswrapper[4933]: I0122 07:53:17.260343 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/7ac0961f-f1e7-4eeb-905a-67edf5e3c46a-ssh-key-openstack-cell1\") pod \"configure-os-openstack-openstack-cell1-jrjd6\" (UID: \"7ac0961f-f1e7-4eeb-905a-67edf5e3c46a\") " pod="openstack/configure-os-openstack-openstack-cell1-jrjd6" Jan 22 07:53:17 crc kubenswrapper[4933]: I0122 07:53:17.270372 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ntw7w\" (UniqueName: \"kubernetes.io/projected/7ac0961f-f1e7-4eeb-905a-67edf5e3c46a-kube-api-access-ntw7w\") pod \"configure-os-openstack-openstack-cell1-jrjd6\" (UID: \"7ac0961f-f1e7-4eeb-905a-67edf5e3c46a\") " pod="openstack/configure-os-openstack-openstack-cell1-jrjd6" Jan 22 07:53:17 crc kubenswrapper[4933]: I0122 07:53:17.422623 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-jrjd6" Jan 22 07:53:17 crc kubenswrapper[4933]: I0122 07:53:17.966038 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-openstack-openstack-cell1-jrjd6"] Jan 22 07:53:17 crc kubenswrapper[4933]: I0122 07:53:17.985183 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-jrjd6" event={"ID":"7ac0961f-f1e7-4eeb-905a-67edf5e3c46a","Type":"ContainerStarted","Data":"f3a4784f475b1bede8ee5bf3382b1ddaeeccb9da90456ae8bc054848b6c12956"} Jan 22 07:53:20 crc kubenswrapper[4933]: I0122 07:53:20.003739 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-jrjd6" event={"ID":"7ac0961f-f1e7-4eeb-905a-67edf5e3c46a","Type":"ContainerStarted","Data":"65ac665e86e9504383e3e8498693a17f9f8714717a9879e8ba213e2a74c8d407"} Jan 22 07:53:20 crc kubenswrapper[4933]: I0122 07:53:20.034018 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-openstack-openstack-cell1-jrjd6" podStartSLOduration=2.218123933 podStartE2EDuration="3.033999549s" podCreationTimestamp="2026-01-22 07:53:17 +0000 UTC" firstStartedPulling="2026-01-22 07:53:17.971724607 +0000 UTC m=+7645.808849960" lastFinishedPulling="2026-01-22 07:53:18.787600223 +0000 UTC m=+7646.624725576" observedRunningTime="2026-01-22 07:53:20.02458965 +0000 UTC m=+7647.861715013" watchObservedRunningTime="2026-01-22 07:53:20.033999549 +0000 UTC m=+7647.871124912" Jan 22 07:53:21 crc kubenswrapper[4933]: I0122 07:53:21.490695 4933 scope.go:117] "RemoveContainer" containerID="67ad87c27db02a9e178a1bcee7c5ada3568216417f71e7c98863e733198e7ab4" Jan 22 07:53:21 crc kubenswrapper[4933]: E0122 07:53:21.491211 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:53:34 crc kubenswrapper[4933]: I0122 07:53:34.491445 4933 scope.go:117] "RemoveContainer" containerID="67ad87c27db02a9e178a1bcee7c5ada3568216417f71e7c98863e733198e7ab4" Jan 22 07:53:34 crc kubenswrapper[4933]: E0122 07:53:34.492271 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:53:48 crc kubenswrapper[4933]: I0122 07:53:48.490801 4933 scope.go:117] "RemoveContainer" containerID="67ad87c27db02a9e178a1bcee7c5ada3568216417f71e7c98863e733198e7ab4" Jan 22 07:53:48 crc kubenswrapper[4933]: E0122 07:53:48.491659 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:53:59 crc kubenswrapper[4933]: I0122 07:53:59.491831 4933 scope.go:117] "RemoveContainer" containerID="67ad87c27db02a9e178a1bcee7c5ada3568216417f71e7c98863e733198e7ab4" Jan 22 07:53:59 crc kubenswrapper[4933]: E0122 07:53:59.492826 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:54:00 crc kubenswrapper[4933]: I0122 07:54:00.377258 4933 generic.go:334] "Generic (PLEG): container finished" podID="7ac0961f-f1e7-4eeb-905a-67edf5e3c46a" containerID="65ac665e86e9504383e3e8498693a17f9f8714717a9879e8ba213e2a74c8d407" exitCode=2 Jan 22 07:54:00 crc kubenswrapper[4933]: I0122 07:54:00.377335 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-jrjd6" event={"ID":"7ac0961f-f1e7-4eeb-905a-67edf5e3c46a","Type":"ContainerDied","Data":"65ac665e86e9504383e3e8498693a17f9f8714717a9879e8ba213e2a74c8d407"} Jan 22 07:54:01 crc kubenswrapper[4933]: I0122 07:54:01.901188 4933 util.go:48] "No ready sandbox for pod can be found. 
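Here configure-os exits with code 2 and the controller replaces the whole pod (jrjd6, then bkcjm and m56mj below). One way to follow such retries from outside the node is a client-go watch that reports terminated container states; a minimal sketch assuming in-cluster credentials and the openstack namespace:

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig() // or clientcmd for a kubeconfig
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)
	w, err := cs.CoreV1().Pods("openstack").Watch(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for ev := range w.ResultChan() {
		pod, ok := ev.Object.(*corev1.Pod)
		if !ok {
			continue
		}
		for _, st := range pod.Status.ContainerStatuses {
			if t := st.State.Terminated; t != nil {
				// exit code 2 here corresponds to a failed ansible run in these jobs
				fmt.Printf("%s/%s exited %d\n", pod.Name, st.Name, t.ExitCode)
			}
		}
	}
}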
Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-jrjd6" Jan 22 07:54:02 crc kubenswrapper[4933]: I0122 07:54:02.013593 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ntw7w\" (UniqueName: \"kubernetes.io/projected/7ac0961f-f1e7-4eeb-905a-67edf5e3c46a-kube-api-access-ntw7w\") pod \"7ac0961f-f1e7-4eeb-905a-67edf5e3c46a\" (UID: \"7ac0961f-f1e7-4eeb-905a-67edf5e3c46a\") " Jan 22 07:54:02 crc kubenswrapper[4933]: I0122 07:54:02.014467 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7ac0961f-f1e7-4eeb-905a-67edf5e3c46a-inventory\") pod \"7ac0961f-f1e7-4eeb-905a-67edf5e3c46a\" (UID: \"7ac0961f-f1e7-4eeb-905a-67edf5e3c46a\") " Jan 22 07:54:02 crc kubenswrapper[4933]: I0122 07:54:02.014724 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/7ac0961f-f1e7-4eeb-905a-67edf5e3c46a-ssh-key-openstack-cell1\") pod \"7ac0961f-f1e7-4eeb-905a-67edf5e3c46a\" (UID: \"7ac0961f-f1e7-4eeb-905a-67edf5e3c46a\") " Jan 22 07:54:02 crc kubenswrapper[4933]: I0122 07:54:02.021710 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7ac0961f-f1e7-4eeb-905a-67edf5e3c46a-kube-api-access-ntw7w" (OuterVolumeSpecName: "kube-api-access-ntw7w") pod "7ac0961f-f1e7-4eeb-905a-67edf5e3c46a" (UID: "7ac0961f-f1e7-4eeb-905a-67edf5e3c46a"). InnerVolumeSpecName "kube-api-access-ntw7w". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:54:02 crc kubenswrapper[4933]: I0122 07:54:02.055970 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ac0961f-f1e7-4eeb-905a-67edf5e3c46a-inventory" (OuterVolumeSpecName: "inventory") pod "7ac0961f-f1e7-4eeb-905a-67edf5e3c46a" (UID: "7ac0961f-f1e7-4eeb-905a-67edf5e3c46a"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:54:02 crc kubenswrapper[4933]: I0122 07:54:02.057013 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ac0961f-f1e7-4eeb-905a-67edf5e3c46a-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "7ac0961f-f1e7-4eeb-905a-67edf5e3c46a" (UID: "7ac0961f-f1e7-4eeb-905a-67edf5e3c46a"). InnerVolumeSpecName "ssh-key-openstack-cell1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:54:02 crc kubenswrapper[4933]: I0122 07:54:02.117307 4933 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7ac0961f-f1e7-4eeb-905a-67edf5e3c46a-inventory\") on node \"crc\" DevicePath \"\"" Jan 22 07:54:02 crc kubenswrapper[4933]: I0122 07:54:02.117339 4933 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/7ac0961f-f1e7-4eeb-905a-67edf5e3c46a-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 22 07:54:02 crc kubenswrapper[4933]: I0122 07:54:02.117351 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ntw7w\" (UniqueName: \"kubernetes.io/projected/7ac0961f-f1e7-4eeb-905a-67edf5e3c46a-kube-api-access-ntw7w\") on node \"crc\" DevicePath \"\"" Jan 22 07:54:02 crc kubenswrapper[4933]: I0122 07:54:02.441529 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-jrjd6" event={"ID":"7ac0961f-f1e7-4eeb-905a-67edf5e3c46a","Type":"ContainerDied","Data":"f3a4784f475b1bede8ee5bf3382b1ddaeeccb9da90456ae8bc054848b6c12956"} Jan 22 07:54:02 crc kubenswrapper[4933]: I0122 07:54:02.441583 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f3a4784f475b1bede8ee5bf3382b1ddaeeccb9da90456ae8bc054848b6c12956" Jan 22 07:54:02 crc kubenswrapper[4933]: I0122 07:54:02.441665 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-jrjd6" Jan 22 07:54:10 crc kubenswrapper[4933]: I0122 07:54:10.050235 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-openstack-openstack-cell1-bkcjm"] Jan 22 07:54:10 crc kubenswrapper[4933]: E0122 07:54:10.051286 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ac0961f-f1e7-4eeb-905a-67edf5e3c46a" containerName="configure-os-openstack-openstack-cell1" Jan 22 07:54:10 crc kubenswrapper[4933]: I0122 07:54:10.051303 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ac0961f-f1e7-4eeb-905a-67edf5e3c46a" containerName="configure-os-openstack-openstack-cell1" Jan 22 07:54:10 crc kubenswrapper[4933]: I0122 07:54:10.051674 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ac0961f-f1e7-4eeb-905a-67edf5e3c46a" containerName="configure-os-openstack-openstack-cell1" Jan 22 07:54:10 crc kubenswrapper[4933]: I0122 07:54:10.052542 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-bkcjm" Jan 22 07:54:10 crc kubenswrapper[4933]: I0122 07:54:10.056595 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 07:54:10 crc kubenswrapper[4933]: I0122 07:54:10.056856 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-d9x6d" Jan 22 07:54:10 crc kubenswrapper[4933]: I0122 07:54:10.057025 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 22 07:54:10 crc kubenswrapper[4933]: I0122 07:54:10.066640 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 22 07:54:10 crc kubenswrapper[4933]: I0122 07:54:10.076894 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-openstack-openstack-cell1-bkcjm"] Jan 22 07:54:10 crc kubenswrapper[4933]: I0122 07:54:10.114058 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lh4dc\" (UniqueName: \"kubernetes.io/projected/094d9352-b3a7-4f18-9802-646c4b82bf27-kube-api-access-lh4dc\") pod \"configure-os-openstack-openstack-cell1-bkcjm\" (UID: \"094d9352-b3a7-4f18-9802-646c4b82bf27\") " pod="openstack/configure-os-openstack-openstack-cell1-bkcjm" Jan 22 07:54:10 crc kubenswrapper[4933]: I0122 07:54:10.114134 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/094d9352-b3a7-4f18-9802-646c4b82bf27-ssh-key-openstack-cell1\") pod \"configure-os-openstack-openstack-cell1-bkcjm\" (UID: \"094d9352-b3a7-4f18-9802-646c4b82bf27\") " pod="openstack/configure-os-openstack-openstack-cell1-bkcjm" Jan 22 07:54:10 crc kubenswrapper[4933]: I0122 07:54:10.114266 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/094d9352-b3a7-4f18-9802-646c4b82bf27-inventory\") pod \"configure-os-openstack-openstack-cell1-bkcjm\" (UID: \"094d9352-b3a7-4f18-9802-646c4b82bf27\") " pod="openstack/configure-os-openstack-openstack-cell1-bkcjm" Jan 22 07:54:10 crc kubenswrapper[4933]: I0122 07:54:10.216346 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/094d9352-b3a7-4f18-9802-646c4b82bf27-inventory\") pod \"configure-os-openstack-openstack-cell1-bkcjm\" (UID: \"094d9352-b3a7-4f18-9802-646c4b82bf27\") " pod="openstack/configure-os-openstack-openstack-cell1-bkcjm" Jan 22 07:54:10 crc kubenswrapper[4933]: I0122 07:54:10.216658 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lh4dc\" (UniqueName: \"kubernetes.io/projected/094d9352-b3a7-4f18-9802-646c4b82bf27-kube-api-access-lh4dc\") pod \"configure-os-openstack-openstack-cell1-bkcjm\" (UID: \"094d9352-b3a7-4f18-9802-646c4b82bf27\") " pod="openstack/configure-os-openstack-openstack-cell1-bkcjm" Jan 22 07:54:10 crc kubenswrapper[4933]: I0122 07:54:10.216688 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/094d9352-b3a7-4f18-9802-646c4b82bf27-ssh-key-openstack-cell1\") pod \"configure-os-openstack-openstack-cell1-bkcjm\" (UID: \"094d9352-b3a7-4f18-9802-646c4b82bf27\") " 
pod="openstack/configure-os-openstack-openstack-cell1-bkcjm" Jan 22 07:54:10 crc kubenswrapper[4933]: I0122 07:54:10.224117 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/094d9352-b3a7-4f18-9802-646c4b82bf27-ssh-key-openstack-cell1\") pod \"configure-os-openstack-openstack-cell1-bkcjm\" (UID: \"094d9352-b3a7-4f18-9802-646c4b82bf27\") " pod="openstack/configure-os-openstack-openstack-cell1-bkcjm" Jan 22 07:54:10 crc kubenswrapper[4933]: I0122 07:54:10.230492 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/094d9352-b3a7-4f18-9802-646c4b82bf27-inventory\") pod \"configure-os-openstack-openstack-cell1-bkcjm\" (UID: \"094d9352-b3a7-4f18-9802-646c4b82bf27\") " pod="openstack/configure-os-openstack-openstack-cell1-bkcjm" Jan 22 07:54:10 crc kubenswrapper[4933]: I0122 07:54:10.234739 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lh4dc\" (UniqueName: \"kubernetes.io/projected/094d9352-b3a7-4f18-9802-646c4b82bf27-kube-api-access-lh4dc\") pod \"configure-os-openstack-openstack-cell1-bkcjm\" (UID: \"094d9352-b3a7-4f18-9802-646c4b82bf27\") " pod="openstack/configure-os-openstack-openstack-cell1-bkcjm" Jan 22 07:54:10 crc kubenswrapper[4933]: I0122 07:54:10.377304 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-bkcjm" Jan 22 07:54:10 crc kubenswrapper[4933]: I0122 07:54:10.956162 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-openstack-openstack-cell1-bkcjm"] Jan 22 07:54:11 crc kubenswrapper[4933]: I0122 07:54:11.528769 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-bkcjm" event={"ID":"094d9352-b3a7-4f18-9802-646c4b82bf27","Type":"ContainerStarted","Data":"061256ab1dc0ac453c6698ae007ad7e1c16c037487d22d343d78a76ef890c321"} Jan 22 07:54:12 crc kubenswrapper[4933]: I0122 07:54:12.541920 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-bkcjm" event={"ID":"094d9352-b3a7-4f18-9802-646c4b82bf27","Type":"ContainerStarted","Data":"17b16eadb789d96673e3182525cb758fa9c05a8575fee1f7ac88c9cee3afb283"} Jan 22 07:54:12 crc kubenswrapper[4933]: I0122 07:54:12.566776 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-openstack-openstack-cell1-bkcjm" podStartSLOduration=1.710191888 podStartE2EDuration="2.566757935s" podCreationTimestamp="2026-01-22 07:54:10 +0000 UTC" firstStartedPulling="2026-01-22 07:54:10.965128223 +0000 UTC m=+7698.802253576" lastFinishedPulling="2026-01-22 07:54:11.82169425 +0000 UTC m=+7699.658819623" observedRunningTime="2026-01-22 07:54:12.560949084 +0000 UTC m=+7700.398074507" watchObservedRunningTime="2026-01-22 07:54:12.566757935 +0000 UTC m=+7700.403883298" Jan 22 07:54:13 crc kubenswrapper[4933]: I0122 07:54:13.491572 4933 scope.go:117] "RemoveContainer" containerID="67ad87c27db02a9e178a1bcee7c5ada3568216417f71e7c98863e733198e7ab4" Jan 22 07:54:13 crc kubenswrapper[4933]: E0122 07:54:13.492219 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:54:15 crc kubenswrapper[4933]: I0122 07:54:15.956663 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-65dd78db8b-67tnw" podUID="37f95090-a891-4563-adcf-7aa34d7ff34c" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 502" Jan 22 07:54:26 crc kubenswrapper[4933]: I0122 07:54:26.491590 4933 scope.go:117] "RemoveContainer" containerID="67ad87c27db02a9e178a1bcee7c5ada3568216417f71e7c98863e733198e7ab4" Jan 22 07:54:26 crc kubenswrapper[4933]: E0122 07:54:26.493158 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:54:38 crc kubenswrapper[4933]: I0122 07:54:38.490929 4933 scope.go:117] "RemoveContainer" containerID="67ad87c27db02a9e178a1bcee7c5ada3568216417f71e7c98863e733198e7ab4" Jan 22 07:54:38 crc kubenswrapper[4933]: E0122 07:54:38.491843 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:54:47 crc kubenswrapper[4933]: I0122 07:54:47.887495 4933 generic.go:334] "Generic (PLEG): container finished" podID="094d9352-b3a7-4f18-9802-646c4b82bf27" containerID="17b16eadb789d96673e3182525cb758fa9c05a8575fee1f7ac88c9cee3afb283" exitCode=2 Jan 22 07:54:47 crc kubenswrapper[4933]: I0122 07:54:47.887600 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-bkcjm" event={"ID":"094d9352-b3a7-4f18-9802-646c4b82bf27","Type":"ContainerDied","Data":"17b16eadb789d96673e3182525cb758fa9c05a8575fee1f7ac88c9cee3afb283"} Jan 22 07:54:49 crc kubenswrapper[4933]: I0122 07:54:49.288116 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-bkcjm" Jan 22 07:54:49 crc kubenswrapper[4933]: I0122 07:54:49.418927 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/094d9352-b3a7-4f18-9802-646c4b82bf27-inventory\") pod \"094d9352-b3a7-4f18-9802-646c4b82bf27\" (UID: \"094d9352-b3a7-4f18-9802-646c4b82bf27\") " Jan 22 07:54:49 crc kubenswrapper[4933]: I0122 07:54:49.419426 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lh4dc\" (UniqueName: \"kubernetes.io/projected/094d9352-b3a7-4f18-9802-646c4b82bf27-kube-api-access-lh4dc\") pod \"094d9352-b3a7-4f18-9802-646c4b82bf27\" (UID: \"094d9352-b3a7-4f18-9802-646c4b82bf27\") " Jan 22 07:54:49 crc kubenswrapper[4933]: I0122 07:54:49.419500 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/094d9352-b3a7-4f18-9802-646c4b82bf27-ssh-key-openstack-cell1\") pod \"094d9352-b3a7-4f18-9802-646c4b82bf27\" (UID: \"094d9352-b3a7-4f18-9802-646c4b82bf27\") " Jan 22 07:54:49 crc kubenswrapper[4933]: I0122 07:54:49.428031 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/094d9352-b3a7-4f18-9802-646c4b82bf27-kube-api-access-lh4dc" (OuterVolumeSpecName: "kube-api-access-lh4dc") pod "094d9352-b3a7-4f18-9802-646c4b82bf27" (UID: "094d9352-b3a7-4f18-9802-646c4b82bf27"). InnerVolumeSpecName "kube-api-access-lh4dc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:54:49 crc kubenswrapper[4933]: I0122 07:54:49.451070 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/094d9352-b3a7-4f18-9802-646c4b82bf27-inventory" (OuterVolumeSpecName: "inventory") pod "094d9352-b3a7-4f18-9802-646c4b82bf27" (UID: "094d9352-b3a7-4f18-9802-646c4b82bf27"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:54:49 crc kubenswrapper[4933]: I0122 07:54:49.454753 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/094d9352-b3a7-4f18-9802-646c4b82bf27-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "094d9352-b3a7-4f18-9802-646c4b82bf27" (UID: "094d9352-b3a7-4f18-9802-646c4b82bf27"). InnerVolumeSpecName "ssh-key-openstack-cell1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:54:49 crc kubenswrapper[4933]: I0122 07:54:49.523443 4933 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/094d9352-b3a7-4f18-9802-646c4b82bf27-inventory\") on node \"crc\" DevicePath \"\"" Jan 22 07:54:49 crc kubenswrapper[4933]: I0122 07:54:49.523667 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lh4dc\" (UniqueName: \"kubernetes.io/projected/094d9352-b3a7-4f18-9802-646c4b82bf27-kube-api-access-lh4dc\") on node \"crc\" DevicePath \"\"" Jan 22 07:54:49 crc kubenswrapper[4933]: I0122 07:54:49.523766 4933 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/094d9352-b3a7-4f18-9802-646c4b82bf27-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Jan 22 07:54:49 crc kubenswrapper[4933]: I0122 07:54:49.909711 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-bkcjm" event={"ID":"094d9352-b3a7-4f18-9802-646c4b82bf27","Type":"ContainerDied","Data":"061256ab1dc0ac453c6698ae007ad7e1c16c037487d22d343d78a76ef890c321"} Jan 22 07:54:49 crc kubenswrapper[4933]: I0122 07:54:49.910321 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="061256ab1dc0ac453c6698ae007ad7e1c16c037487d22d343d78a76ef890c321" Jan 22 07:54:49 crc kubenswrapper[4933]: I0122 07:54:49.909755 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-bkcjm" Jan 22 07:54:53 crc kubenswrapper[4933]: I0122 07:54:53.491487 4933 scope.go:117] "RemoveContainer" containerID="67ad87c27db02a9e178a1bcee7c5ada3568216417f71e7c98863e733198e7ab4" Jan 22 07:54:53 crc kubenswrapper[4933]: E0122 07:54:53.492234 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:55:07 crc kubenswrapper[4933]: I0122 07:55:07.037567 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-openstack-openstack-cell1-m56mj"] Jan 22 07:55:07 crc kubenswrapper[4933]: E0122 07:55:07.038691 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="094d9352-b3a7-4f18-9802-646c4b82bf27" containerName="configure-os-openstack-openstack-cell1" Jan 22 07:55:07 crc kubenswrapper[4933]: I0122 07:55:07.038716 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="094d9352-b3a7-4f18-9802-646c4b82bf27" containerName="configure-os-openstack-openstack-cell1" Jan 22 07:55:07 crc kubenswrapper[4933]: I0122 07:55:07.039114 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="094d9352-b3a7-4f18-9802-646c4b82bf27" containerName="configure-os-openstack-openstack-cell1" Jan 22 07:55:07 crc kubenswrapper[4933]: I0122 07:55:07.040491 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-m56mj" Jan 22 07:55:07 crc kubenswrapper[4933]: I0122 07:55:07.059783 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 07:55:07 crc kubenswrapper[4933]: I0122 07:55:07.065718 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Jan 22 07:55:07 crc kubenswrapper[4933]: I0122 07:55:07.066103 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-d9x6d" Jan 22 07:55:07 crc kubenswrapper[4933]: I0122 07:55:07.066306 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Jan 22 07:55:07 crc kubenswrapper[4933]: I0122 07:55:07.098843 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-openstack-openstack-cell1-m56mj"] Jan 22 07:55:07 crc kubenswrapper[4933]: I0122 07:55:07.138262 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8-inventory\") pod \"configure-os-openstack-openstack-cell1-m56mj\" (UID: \"dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8\") " pod="openstack/configure-os-openstack-openstack-cell1-m56mj" Jan 22 07:55:07 crc kubenswrapper[4933]: I0122 07:55:07.138321 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8-ssh-key-openstack-cell1\") pod \"configure-os-openstack-openstack-cell1-m56mj\" (UID: \"dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8\") " pod="openstack/configure-os-openstack-openstack-cell1-m56mj" Jan 22 07:55:07 crc kubenswrapper[4933]: I0122 07:55:07.138497 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6mlmb\" (UniqueName: \"kubernetes.io/projected/dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8-kube-api-access-6mlmb\") pod \"configure-os-openstack-openstack-cell1-m56mj\" (UID: \"dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8\") " pod="openstack/configure-os-openstack-openstack-cell1-m56mj" Jan 22 07:55:07 crc kubenswrapper[4933]: I0122 07:55:07.239778 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6mlmb\" (UniqueName: \"kubernetes.io/projected/dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8-kube-api-access-6mlmb\") pod \"configure-os-openstack-openstack-cell1-m56mj\" (UID: \"dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8\") " pod="openstack/configure-os-openstack-openstack-cell1-m56mj" Jan 22 07:55:07 crc kubenswrapper[4933]: I0122 07:55:07.240137 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8-inventory\") pod \"configure-os-openstack-openstack-cell1-m56mj\" (UID: \"dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8\") " pod="openstack/configure-os-openstack-openstack-cell1-m56mj" Jan 22 07:55:07 crc kubenswrapper[4933]: I0122 07:55:07.240261 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8-ssh-key-openstack-cell1\") pod \"configure-os-openstack-openstack-cell1-m56mj\" (UID: \"dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8\") " 
pod="openstack/configure-os-openstack-openstack-cell1-m56mj" Jan 22 07:55:07 crc kubenswrapper[4933]: I0122 07:55:07.253754 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8-inventory\") pod \"configure-os-openstack-openstack-cell1-m56mj\" (UID: \"dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8\") " pod="openstack/configure-os-openstack-openstack-cell1-m56mj" Jan 22 07:55:07 crc kubenswrapper[4933]: I0122 07:55:07.253820 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8-ssh-key-openstack-cell1\") pod \"configure-os-openstack-openstack-cell1-m56mj\" (UID: \"dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8\") " pod="openstack/configure-os-openstack-openstack-cell1-m56mj" Jan 22 07:55:07 crc kubenswrapper[4933]: I0122 07:55:07.256233 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6mlmb\" (UniqueName: \"kubernetes.io/projected/dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8-kube-api-access-6mlmb\") pod \"configure-os-openstack-openstack-cell1-m56mj\" (UID: \"dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8\") " pod="openstack/configure-os-openstack-openstack-cell1-m56mj" Jan 22 07:55:07 crc kubenswrapper[4933]: I0122 07:55:07.391668 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-m56mj" Jan 22 07:55:07 crc kubenswrapper[4933]: I0122 07:55:07.491892 4933 scope.go:117] "RemoveContainer" containerID="67ad87c27db02a9e178a1bcee7c5ada3568216417f71e7c98863e733198e7ab4" Jan 22 07:55:07 crc kubenswrapper[4933]: E0122 07:55:07.492312 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 07:55:07 crc kubenswrapper[4933]: I0122 07:55:07.993467 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-openstack-openstack-cell1-m56mj"] Jan 22 07:55:08 crc kubenswrapper[4933]: I0122 07:55:08.003138 4933 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 07:55:08 crc kubenswrapper[4933]: I0122 07:55:08.105096 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-m56mj" event={"ID":"dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8","Type":"ContainerStarted","Data":"344f0987819aeb0768b473ad166396ef021c2f8a5652613ab3393477cd7d3fd0"} Jan 22 07:55:09 crc kubenswrapper[4933]: I0122 07:55:09.120811 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-m56mj" event={"ID":"dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8","Type":"ContainerStarted","Data":"03ac3b44a6f074b5d1ee810b048063e86207dbb8004c2e77e11243b316324c4d"} Jan 22 07:55:09 crc kubenswrapper[4933]: I0122 07:55:09.145266 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-openstack-openstack-cell1-m56mj" podStartSLOduration=1.684622308 podStartE2EDuration="2.145241077s" podCreationTimestamp="2026-01-22 07:55:07 +0000 UTC" firstStartedPulling="2026-01-22 
07:55:08.002831391 +0000 UTC m=+7755.839956764" lastFinishedPulling="2026-01-22 07:55:08.46345018 +0000 UTC m=+7756.300575533" observedRunningTime="2026-01-22 07:55:09.140365199 +0000 UTC m=+7756.977490572" watchObservedRunningTime="2026-01-22 07:55:09.145241077 +0000 UTC m=+7756.982366440" Jan 22 07:55:21 crc kubenswrapper[4933]: I0122 07:55:21.490839 4933 scope.go:117] "RemoveContainer" containerID="67ad87c27db02a9e178a1bcee7c5ada3568216417f71e7c98863e733198e7ab4" Jan 22 07:55:22 crc kubenswrapper[4933]: I0122 07:55:22.262456 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerStarted","Data":"c681476a698bdfba0829493019ebf1b8e76064c7d6a7c94d045eb0a2b4992611"} Jan 22 07:55:44 crc kubenswrapper[4933]: I0122 07:55:44.507753 4933 generic.go:334] "Generic (PLEG): container finished" podID="dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8" containerID="03ac3b44a6f074b5d1ee810b048063e86207dbb8004c2e77e11243b316324c4d" exitCode=2 Jan 22 07:55:44 crc kubenswrapper[4933]: I0122 07:55:44.509472 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-m56mj" event={"ID":"dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8","Type":"ContainerDied","Data":"03ac3b44a6f074b5d1ee810b048063e86207dbb8004c2e77e11243b316324c4d"} Jan 22 07:55:45 crc kubenswrapper[4933]: I0122 07:55:45.245836 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-5v4z7"] Jan 22 07:55:45 crc kubenswrapper[4933]: I0122 07:55:45.248990 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5v4z7" Jan 22 07:55:45 crc kubenswrapper[4933]: I0122 07:55:45.276836 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5v4z7"] Jan 22 07:55:45 crc kubenswrapper[4933]: I0122 07:55:45.338971 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nmffs\" (UniqueName: \"kubernetes.io/projected/e709caff-122d-4470-8c64-d378215a3f77-kube-api-access-nmffs\") pod \"redhat-operators-5v4z7\" (UID: \"e709caff-122d-4470-8c64-d378215a3f77\") " pod="openshift-marketplace/redhat-operators-5v4z7" Jan 22 07:55:45 crc kubenswrapper[4933]: I0122 07:55:45.339147 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e709caff-122d-4470-8c64-d378215a3f77-utilities\") pod \"redhat-operators-5v4z7\" (UID: \"e709caff-122d-4470-8c64-d378215a3f77\") " pod="openshift-marketplace/redhat-operators-5v4z7" Jan 22 07:55:45 crc kubenswrapper[4933]: I0122 07:55:45.339700 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e709caff-122d-4470-8c64-d378215a3f77-catalog-content\") pod \"redhat-operators-5v4z7\" (UID: \"e709caff-122d-4470-8c64-d378215a3f77\") " pod="openshift-marketplace/redhat-operators-5v4z7" Jan 22 07:55:45 crc kubenswrapper[4933]: I0122 07:55:45.442130 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nmffs\" (UniqueName: \"kubernetes.io/projected/e709caff-122d-4470-8c64-d378215a3f77-kube-api-access-nmffs\") pod \"redhat-operators-5v4z7\" (UID: \"e709caff-122d-4470-8c64-d378215a3f77\") " pod="openshift-marketplace/redhat-operators-5v4z7" 
Jan 22 07:55:45 crc kubenswrapper[4933]: I0122 07:55:45.442197 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e709caff-122d-4470-8c64-d378215a3f77-utilities\") pod \"redhat-operators-5v4z7\" (UID: \"e709caff-122d-4470-8c64-d378215a3f77\") " pod="openshift-marketplace/redhat-operators-5v4z7"
Jan 22 07:55:45 crc kubenswrapper[4933]: I0122 07:55:45.442331 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e709caff-122d-4470-8c64-d378215a3f77-catalog-content\") pod \"redhat-operators-5v4z7\" (UID: \"e709caff-122d-4470-8c64-d378215a3f77\") " pod="openshift-marketplace/redhat-operators-5v4z7"
Jan 22 07:55:45 crc kubenswrapper[4933]: I0122 07:55:45.442852 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e709caff-122d-4470-8c64-d378215a3f77-utilities\") pod \"redhat-operators-5v4z7\" (UID: \"e709caff-122d-4470-8c64-d378215a3f77\") " pod="openshift-marketplace/redhat-operators-5v4z7"
Jan 22 07:55:45 crc kubenswrapper[4933]: I0122 07:55:45.442881 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e709caff-122d-4470-8c64-d378215a3f77-catalog-content\") pod \"redhat-operators-5v4z7\" (UID: \"e709caff-122d-4470-8c64-d378215a3f77\") " pod="openshift-marketplace/redhat-operators-5v4z7"
Jan 22 07:55:45 crc kubenswrapper[4933]: I0122 07:55:45.462374 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nmffs\" (UniqueName: \"kubernetes.io/projected/e709caff-122d-4470-8c64-d378215a3f77-kube-api-access-nmffs\") pod \"redhat-operators-5v4z7\" (UID: \"e709caff-122d-4470-8c64-d378215a3f77\") " pod="openshift-marketplace/redhat-operators-5v4z7"
Jan 22 07:55:45 crc kubenswrapper[4933]: I0122 07:55:45.577635 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5v4z7"
Jan 22 07:55:46 crc kubenswrapper[4933]: I0122 07:55:46.009475 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-m56mj"
Jan 22 07:55:46 crc kubenswrapper[4933]: I0122 07:55:46.157034 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5v4z7"]
Jan 22 07:55:46 crc kubenswrapper[4933]: I0122 07:55:46.166876 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6mlmb\" (UniqueName: \"kubernetes.io/projected/dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8-kube-api-access-6mlmb\") pod \"dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8\" (UID: \"dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8\") "
Jan 22 07:55:46 crc kubenswrapper[4933]: I0122 07:55:46.167178 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8-ssh-key-openstack-cell1\") pod \"dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8\" (UID: \"dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8\") "
Jan 22 07:55:46 crc kubenswrapper[4933]: I0122 07:55:46.167258 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8-inventory\") pod \"dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8\" (UID: \"dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8\") "
Jan 22 07:55:46 crc kubenswrapper[4933]: I0122 07:55:46.176386 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8-kube-api-access-6mlmb" (OuterVolumeSpecName: "kube-api-access-6mlmb") pod "dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8" (UID: "dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8"). InnerVolumeSpecName "kube-api-access-6mlmb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 07:55:46 crc kubenswrapper[4933]: I0122 07:55:46.208391 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8-inventory" (OuterVolumeSpecName: "inventory") pod "dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8" (UID: "dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 07:55:46 crc kubenswrapper[4933]: I0122 07:55:46.218231 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8" (UID: "dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 07:55:46 crc kubenswrapper[4933]: I0122 07:55:46.269485 4933 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\""
Jan 22 07:55:46 crc kubenswrapper[4933]: I0122 07:55:46.269782 4933 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8-inventory\") on node \"crc\" DevicePath \"\""
Jan 22 07:55:46 crc kubenswrapper[4933]: I0122 07:55:46.269895 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6mlmb\" (UniqueName: \"kubernetes.io/projected/dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8-kube-api-access-6mlmb\") on node \"crc\" DevicePath \"\""
Jan 22 07:55:46 crc kubenswrapper[4933]: I0122 07:55:46.527125 4933 generic.go:334] "Generic (PLEG): container finished" podID="e709caff-122d-4470-8c64-d378215a3f77" containerID="7dd314eb29397ebdac9ff4ab0650699804a104b0aef97c5efb2c34f83f1017c8" exitCode=0
Jan 22 07:55:46 crc kubenswrapper[4933]: I0122 07:55:46.527175 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5v4z7" event={"ID":"e709caff-122d-4470-8c64-d378215a3f77","Type":"ContainerDied","Data":"7dd314eb29397ebdac9ff4ab0650699804a104b0aef97c5efb2c34f83f1017c8"}
Jan 22 07:55:46 crc kubenswrapper[4933]: I0122 07:55:46.527515 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5v4z7" event={"ID":"e709caff-122d-4470-8c64-d378215a3f77","Type":"ContainerStarted","Data":"e96c3c5d209d20edd868c0410a04e53329e33e65455e2cc7a36f6b5952263e67"}
Jan 22 07:55:46 crc kubenswrapper[4933]: I0122 07:55:46.534429 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-m56mj" event={"ID":"dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8","Type":"ContainerDied","Data":"344f0987819aeb0768b473ad166396ef021c2f8a5652613ab3393477cd7d3fd0"}
Jan 22 07:55:46 crc kubenswrapper[4933]: I0122 07:55:46.534510 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="344f0987819aeb0768b473ad166396ef021c2f8a5652613ab3393477cd7d3fd0"
Jan 22 07:55:46 crc kubenswrapper[4933]: I0122 07:55:46.534477 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-m56mj"
Jan 22 07:55:48 crc kubenswrapper[4933]: I0122 07:55:48.573530 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5v4z7" event={"ID":"e709caff-122d-4470-8c64-d378215a3f77","Type":"ContainerStarted","Data":"c015b78a700a2d87494acf0698c4c59cea74d2a8d284222102b3628441fde262"}
Jan 22 07:55:49 crc kubenswrapper[4933]: I0122 07:55:49.582933 4933 generic.go:334] "Generic (PLEG): container finished" podID="e709caff-122d-4470-8c64-d378215a3f77" containerID="c015b78a700a2d87494acf0698c4c59cea74d2a8d284222102b3628441fde262" exitCode=0
Jan 22 07:55:49 crc kubenswrapper[4933]: I0122 07:55:49.583010 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5v4z7" event={"ID":"e709caff-122d-4470-8c64-d378215a3f77","Type":"ContainerDied","Data":"c015b78a700a2d87494acf0698c4c59cea74d2a8d284222102b3628441fde262"}
Jan 22 07:55:51 crc kubenswrapper[4933]: I0122 07:55:51.604828 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5v4z7" event={"ID":"e709caff-122d-4470-8c64-d378215a3f77","Type":"ContainerStarted","Data":"99f6250d615a16a66ea45ee2a1ca1e3a1b598e7de6b6228de90697684b859142"}
Jan 22 07:55:51 crc kubenswrapper[4933]: I0122 07:55:51.630387 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-5v4z7" podStartSLOduration=1.863641952 podStartE2EDuration="6.630367569s" podCreationTimestamp="2026-01-22 07:55:45 +0000 UTC" firstStartedPulling="2026-01-22 07:55:46.531470406 +0000 UTC m=+7794.368595759" lastFinishedPulling="2026-01-22 07:55:51.298196013 +0000 UTC m=+7799.135321376" observedRunningTime="2026-01-22 07:55:51.626108296 +0000 UTC m=+7799.463233649" watchObservedRunningTime="2026-01-22 07:55:51.630367569 +0000 UTC m=+7799.467492922"
Jan 22 07:55:55 crc kubenswrapper[4933]: I0122 07:55:55.577806 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-5v4z7"
Jan 22 07:55:55 crc kubenswrapper[4933]: I0122 07:55:55.578275 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-5v4z7"
Jan 22 07:55:56 crc kubenswrapper[4933]: I0122 07:55:56.640725 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-5v4z7" podUID="e709caff-122d-4470-8c64-d378215a3f77" containerName="registry-server" probeResult="failure" output=<
Jan 22 07:55:56 crc kubenswrapper[4933]: timeout: failed to connect service ":50051" within 1s
Jan 22 07:55:56 crc kubenswrapper[4933]: >
Jan 22 07:56:05 crc kubenswrapper[4933]: I0122 07:56:05.639014 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-5v4z7"
Jan 22 07:56:05 crc kubenswrapper[4933]: I0122 07:56:05.702877 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-5v4z7"
Jan 22 07:56:05 crc kubenswrapper[4933]: I0122 07:56:05.893478 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5v4z7"]
Jan 22 07:56:06 crc kubenswrapper[4933]: I0122 07:56:06.743973 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-5v4z7" podUID="e709caff-122d-4470-8c64-d378215a3f77" containerName="registry-server" containerID="cri-o://99f6250d615a16a66ea45ee2a1ca1e3a1b598e7de6b6228de90697684b859142" gracePeriod=2
Jan 22 07:56:07 crc kubenswrapper[4933]: I0122 07:56:07.262315 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5v4z7"
Jan 22 07:56:07 crc kubenswrapper[4933]: I0122 07:56:07.349979 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e709caff-122d-4470-8c64-d378215a3f77-catalog-content\") pod \"e709caff-122d-4470-8c64-d378215a3f77\" (UID: \"e709caff-122d-4470-8c64-d378215a3f77\") "
Jan 22 07:56:07 crc kubenswrapper[4933]: I0122 07:56:07.350056 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e709caff-122d-4470-8c64-d378215a3f77-utilities\") pod \"e709caff-122d-4470-8c64-d378215a3f77\" (UID: \"e709caff-122d-4470-8c64-d378215a3f77\") "
Jan 22 07:56:07 crc kubenswrapper[4933]: I0122 07:56:07.350220 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nmffs\" (UniqueName: \"kubernetes.io/projected/e709caff-122d-4470-8c64-d378215a3f77-kube-api-access-nmffs\") pod \"e709caff-122d-4470-8c64-d378215a3f77\" (UID: \"e709caff-122d-4470-8c64-d378215a3f77\") "
Jan 22 07:56:07 crc kubenswrapper[4933]: I0122 07:56:07.350985 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e709caff-122d-4470-8c64-d378215a3f77-utilities" (OuterVolumeSpecName: "utilities") pod "e709caff-122d-4470-8c64-d378215a3f77" (UID: "e709caff-122d-4470-8c64-d378215a3f77"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 07:56:07 crc kubenswrapper[4933]: I0122 07:56:07.357782 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e709caff-122d-4470-8c64-d378215a3f77-kube-api-access-nmffs" (OuterVolumeSpecName: "kube-api-access-nmffs") pod "e709caff-122d-4470-8c64-d378215a3f77" (UID: "e709caff-122d-4470-8c64-d378215a3f77"). InnerVolumeSpecName "kube-api-access-nmffs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 07:56:07 crc kubenswrapper[4933]: I0122 07:56:07.452468 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e709caff-122d-4470-8c64-d378215a3f77-utilities\") on node \"crc\" DevicePath \"\""
Jan 22 07:56:07 crc kubenswrapper[4933]: I0122 07:56:07.452512 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nmffs\" (UniqueName: \"kubernetes.io/projected/e709caff-122d-4470-8c64-d378215a3f77-kube-api-access-nmffs\") on node \"crc\" DevicePath \"\""
Jan 22 07:56:07 crc kubenswrapper[4933]: I0122 07:56:07.464643 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e709caff-122d-4470-8c64-d378215a3f77-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e709caff-122d-4470-8c64-d378215a3f77" (UID: "e709caff-122d-4470-8c64-d378215a3f77"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 07:56:07 crc kubenswrapper[4933]: I0122 07:56:07.554217 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e709caff-122d-4470-8c64-d378215a3f77-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 22 07:56:07 crc kubenswrapper[4933]: I0122 07:56:07.766563 4933 generic.go:334] "Generic (PLEG): container finished" podID="e709caff-122d-4470-8c64-d378215a3f77" containerID="99f6250d615a16a66ea45ee2a1ca1e3a1b598e7de6b6228de90697684b859142" exitCode=0
Jan 22 07:56:07 crc kubenswrapper[4933]: I0122 07:56:07.766614 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5v4z7" event={"ID":"e709caff-122d-4470-8c64-d378215a3f77","Type":"ContainerDied","Data":"99f6250d615a16a66ea45ee2a1ca1e3a1b598e7de6b6228de90697684b859142"}
Jan 22 07:56:07 crc kubenswrapper[4933]: I0122 07:56:07.766653 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5v4z7" event={"ID":"e709caff-122d-4470-8c64-d378215a3f77","Type":"ContainerDied","Data":"e96c3c5d209d20edd868c0410a04e53329e33e65455e2cc7a36f6b5952263e67"}
Jan 22 07:56:07 crc kubenswrapper[4933]: I0122 07:56:07.766664 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5v4z7"
Jan 22 07:56:07 crc kubenswrapper[4933]: I0122 07:56:07.766675 4933 scope.go:117] "RemoveContainer" containerID="99f6250d615a16a66ea45ee2a1ca1e3a1b598e7de6b6228de90697684b859142"
Jan 22 07:56:07 crc kubenswrapper[4933]: I0122 07:56:07.801335 4933 scope.go:117] "RemoveContainer" containerID="c015b78a700a2d87494acf0698c4c59cea74d2a8d284222102b3628441fde262"
Jan 22 07:56:07 crc kubenswrapper[4933]: I0122 07:56:07.811063 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5v4z7"]
Jan 22 07:56:07 crc kubenswrapper[4933]: I0122 07:56:07.821523 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-5v4z7"]
Jan 22 07:56:07 crc kubenswrapper[4933]: I0122 07:56:07.840983 4933 scope.go:117] "RemoveContainer" containerID="7dd314eb29397ebdac9ff4ab0650699804a104b0aef97c5efb2c34f83f1017c8"
Jan 22 07:56:07 crc kubenswrapper[4933]: I0122 07:56:07.890619 4933 scope.go:117] "RemoveContainer" containerID="99f6250d615a16a66ea45ee2a1ca1e3a1b598e7de6b6228de90697684b859142"
Jan 22 07:56:07 crc kubenswrapper[4933]: E0122 07:56:07.890985 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"99f6250d615a16a66ea45ee2a1ca1e3a1b598e7de6b6228de90697684b859142\": container with ID starting with 99f6250d615a16a66ea45ee2a1ca1e3a1b598e7de6b6228de90697684b859142 not found: ID does not exist" containerID="99f6250d615a16a66ea45ee2a1ca1e3a1b598e7de6b6228de90697684b859142"
Jan 22 07:56:07 crc kubenswrapper[4933]: I0122 07:56:07.891021 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"99f6250d615a16a66ea45ee2a1ca1e3a1b598e7de6b6228de90697684b859142"} err="failed to get container status \"99f6250d615a16a66ea45ee2a1ca1e3a1b598e7de6b6228de90697684b859142\": rpc error: code = NotFound desc = could not find container \"99f6250d615a16a66ea45ee2a1ca1e3a1b598e7de6b6228de90697684b859142\": container with ID starting with 99f6250d615a16a66ea45ee2a1ca1e3a1b598e7de6b6228de90697684b859142 not found: ID does not exist"
Jan 22 07:56:07 crc kubenswrapper[4933]: I0122 07:56:07.891042 4933 scope.go:117] "RemoveContainer" containerID="c015b78a700a2d87494acf0698c4c59cea74d2a8d284222102b3628441fde262"
Jan 22 07:56:07 crc kubenswrapper[4933]: E0122 07:56:07.894039 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c015b78a700a2d87494acf0698c4c59cea74d2a8d284222102b3628441fde262\": container with ID starting with c015b78a700a2d87494acf0698c4c59cea74d2a8d284222102b3628441fde262 not found: ID does not exist" containerID="c015b78a700a2d87494acf0698c4c59cea74d2a8d284222102b3628441fde262"
Jan 22 07:56:07 crc kubenswrapper[4933]: I0122 07:56:07.894128 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c015b78a700a2d87494acf0698c4c59cea74d2a8d284222102b3628441fde262"} err="failed to get container status \"c015b78a700a2d87494acf0698c4c59cea74d2a8d284222102b3628441fde262\": rpc error: code = NotFound desc = could not find container \"c015b78a700a2d87494acf0698c4c59cea74d2a8d284222102b3628441fde262\": container with ID starting with c015b78a700a2d87494acf0698c4c59cea74d2a8d284222102b3628441fde262 not found: ID does not exist"
Jan 22 07:56:07 crc kubenswrapper[4933]: I0122 07:56:07.894160 4933 scope.go:117] "RemoveContainer" containerID="7dd314eb29397ebdac9ff4ab0650699804a104b0aef97c5efb2c34f83f1017c8"
Jan 22 07:56:07 crc kubenswrapper[4933]: E0122 07:56:07.895227 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7dd314eb29397ebdac9ff4ab0650699804a104b0aef97c5efb2c34f83f1017c8\": container with ID starting with 7dd314eb29397ebdac9ff4ab0650699804a104b0aef97c5efb2c34f83f1017c8 not found: ID does not exist" containerID="7dd314eb29397ebdac9ff4ab0650699804a104b0aef97c5efb2c34f83f1017c8"
Jan 22 07:56:07 crc kubenswrapper[4933]: I0122 07:56:07.895270 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7dd314eb29397ebdac9ff4ab0650699804a104b0aef97c5efb2c34f83f1017c8"} err="failed to get container status \"7dd314eb29397ebdac9ff4ab0650699804a104b0aef97c5efb2c34f83f1017c8\": rpc error: code = NotFound desc = could not find container \"7dd314eb29397ebdac9ff4ab0650699804a104b0aef97c5efb2c34f83f1017c8\": container with ID starting with 7dd314eb29397ebdac9ff4ab0650699804a104b0aef97c5efb2c34f83f1017c8 not found: ID does not exist"
Jan 22 07:56:08 crc kubenswrapper[4933]: I0122 07:56:08.504005 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e709caff-122d-4470-8c64-d378215a3f77" path="/var/lib/kubelet/pods/e709caff-122d-4470-8c64-d378215a3f77/volumes"
Jan 22 07:56:23 crc kubenswrapper[4933]: I0122 07:56:23.044352 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-openstack-openstack-cell1-z8t8s"]
Jan 22 07:56:23 crc kubenswrapper[4933]: E0122 07:56:23.045578 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e709caff-122d-4470-8c64-d378215a3f77" containerName="registry-server"
Jan 22 07:56:23 crc kubenswrapper[4933]: I0122 07:56:23.045610 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="e709caff-122d-4470-8c64-d378215a3f77" containerName="registry-server"
Jan 22 07:56:23 crc kubenswrapper[4933]: E0122 07:56:23.045641 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e709caff-122d-4470-8c64-d378215a3f77" containerName="extract-content"
Jan 22 07:56:23 crc kubenswrapper[4933]: I0122 07:56:23.045654 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="e709caff-122d-4470-8c64-d378215a3f77" containerName="extract-content"
Jan 22 07:56:23 crc kubenswrapper[4933]: E0122 07:56:23.045683 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e709caff-122d-4470-8c64-d378215a3f77" containerName="extract-utilities"
Jan 22 07:56:23 crc kubenswrapper[4933]: I0122 07:56:23.045695 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="e709caff-122d-4470-8c64-d378215a3f77" containerName="extract-utilities"
Jan 22 07:56:23 crc kubenswrapper[4933]: E0122 07:56:23.045727 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8" containerName="configure-os-openstack-openstack-cell1"
Jan 22 07:56:23 crc kubenswrapper[4933]: I0122 07:56:23.045738 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8" containerName="configure-os-openstack-openstack-cell1"
Jan 22 07:56:23 crc kubenswrapper[4933]: I0122 07:56:23.046138 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="e709caff-122d-4470-8c64-d378215a3f77" containerName="registry-server"
Jan 22 07:56:23 crc kubenswrapper[4933]: I0122 07:56:23.046174 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8" containerName="configure-os-openstack-openstack-cell1"
Jan 22 07:56:23 crc kubenswrapper[4933]: I0122 07:56:23.047504 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-z8t8s"
Jan 22 07:56:23 crc kubenswrapper[4933]: I0122 07:56:23.056337 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-openstack-openstack-cell1-z8t8s"]
Jan 22 07:56:23 crc kubenswrapper[4933]: I0122 07:56:23.093930 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Jan 22 07:56:23 crc kubenswrapper[4933]: I0122 07:56:23.095755 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret"
Jan 22 07:56:23 crc kubenswrapper[4933]: I0122 07:56:23.096603 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1"
Jan 22 07:56:23 crc kubenswrapper[4933]: I0122 07:56:23.097134 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-d9x6d"
Jan 22 07:56:23 crc kubenswrapper[4933]: I0122 07:56:23.106278 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kpglf\" (UniqueName: \"kubernetes.io/projected/e8b546e4-9a72-4e15-afbe-62d02ed4cc00-kube-api-access-kpglf\") pod \"configure-os-openstack-openstack-cell1-z8t8s\" (UID: \"e8b546e4-9a72-4e15-afbe-62d02ed4cc00\") " pod="openstack/configure-os-openstack-openstack-cell1-z8t8s"
Jan 22 07:56:23 crc kubenswrapper[4933]: I0122 07:56:23.106532 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e8b546e4-9a72-4e15-afbe-62d02ed4cc00-inventory\") pod \"configure-os-openstack-openstack-cell1-z8t8s\" (UID: \"e8b546e4-9a72-4e15-afbe-62d02ed4cc00\") " pod="openstack/configure-os-openstack-openstack-cell1-z8t8s"
Jan 22 07:56:23 crc kubenswrapper[4933]: I0122 07:56:23.106719 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/e8b546e4-9a72-4e15-afbe-62d02ed4cc00-ssh-key-openstack-cell1\") pod \"configure-os-openstack-openstack-cell1-z8t8s\" (UID: \"e8b546e4-9a72-4e15-afbe-62d02ed4cc00\") " pod="openstack/configure-os-openstack-openstack-cell1-z8t8s"
Jan 22 07:56:23 crc kubenswrapper[4933]: I0122 07:56:23.209494 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kpglf\" (UniqueName: \"kubernetes.io/projected/e8b546e4-9a72-4e15-afbe-62d02ed4cc00-kube-api-access-kpglf\") pod \"configure-os-openstack-openstack-cell1-z8t8s\" (UID: \"e8b546e4-9a72-4e15-afbe-62d02ed4cc00\") " pod="openstack/configure-os-openstack-openstack-cell1-z8t8s"
Jan 22 07:56:23 crc kubenswrapper[4933]: I0122 07:56:23.209711 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e8b546e4-9a72-4e15-afbe-62d02ed4cc00-inventory\") pod \"configure-os-openstack-openstack-cell1-z8t8s\" (UID: \"e8b546e4-9a72-4e15-afbe-62d02ed4cc00\") " pod="openstack/configure-os-openstack-openstack-cell1-z8t8s"
Jan 22 07:56:23 crc kubenswrapper[4933]: I0122 07:56:23.209789 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/e8b546e4-9a72-4e15-afbe-62d02ed4cc00-ssh-key-openstack-cell1\") pod \"configure-os-openstack-openstack-cell1-z8t8s\" (UID: \"e8b546e4-9a72-4e15-afbe-62d02ed4cc00\") " pod="openstack/configure-os-openstack-openstack-cell1-z8t8s"
Jan 22 07:56:23 crc kubenswrapper[4933]: I0122 07:56:23.218312 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/e8b546e4-9a72-4e15-afbe-62d02ed4cc00-ssh-key-openstack-cell1\") pod \"configure-os-openstack-openstack-cell1-z8t8s\" (UID: \"e8b546e4-9a72-4e15-afbe-62d02ed4cc00\") " pod="openstack/configure-os-openstack-openstack-cell1-z8t8s"
Jan 22 07:56:23 crc kubenswrapper[4933]: I0122 07:56:23.220691 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e8b546e4-9a72-4e15-afbe-62d02ed4cc00-inventory\") pod \"configure-os-openstack-openstack-cell1-z8t8s\" (UID: \"e8b546e4-9a72-4e15-afbe-62d02ed4cc00\") " pod="openstack/configure-os-openstack-openstack-cell1-z8t8s"
Jan 22 07:56:23 crc kubenswrapper[4933]: I0122 07:56:23.225372 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kpglf\" (UniqueName: \"kubernetes.io/projected/e8b546e4-9a72-4e15-afbe-62d02ed4cc00-kube-api-access-kpglf\") pod \"configure-os-openstack-openstack-cell1-z8t8s\" (UID: \"e8b546e4-9a72-4e15-afbe-62d02ed4cc00\") " pod="openstack/configure-os-openstack-openstack-cell1-z8t8s"
Jan 22 07:56:23 crc kubenswrapper[4933]: I0122 07:56:23.413478 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-z8t8s"
Jan 22 07:56:23 crc kubenswrapper[4933]: I0122 07:56:23.978737 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-openstack-openstack-cell1-z8t8s"]
Jan 22 07:56:24 crc kubenswrapper[4933]: I0122 07:56:24.938997 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-z8t8s" event={"ID":"e8b546e4-9a72-4e15-afbe-62d02ed4cc00","Type":"ContainerStarted","Data":"59c9bc16d57fc0841e5eb00cdde29ac5b3e3a7f3d85ce05ed6020a8ea91b5bd6"}
Jan 22 07:56:24 crc kubenswrapper[4933]: I0122 07:56:24.939569 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-z8t8s" event={"ID":"e8b546e4-9a72-4e15-afbe-62d02ed4cc00","Type":"ContainerStarted","Data":"d659f6731beea542dbe8f4aa03f30d8db2d49a1f1c29ef8ec21bb0ad3b54b028"}
Jan 22 07:56:24 crc kubenswrapper[4933]: I0122 07:56:24.966369 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-openstack-openstack-cell1-z8t8s" podStartSLOduration=1.556984059 podStartE2EDuration="1.966347911s" podCreationTimestamp="2026-01-22 07:56:23 +0000 UTC" firstStartedPulling="2026-01-22 07:56:23.990409743 +0000 UTC m=+7831.827535096" lastFinishedPulling="2026-01-22 07:56:24.399773585 +0000 UTC m=+7832.236898948" observedRunningTime="2026-01-22 07:56:24.957586369 +0000 UTC m=+7832.794711722" watchObservedRunningTime="2026-01-22 07:56:24.966347911 +0000 UTC m=+7832.803473264"
Jan 22 07:56:59 crc kubenswrapper[4933]: I0122 07:56:59.318424 4933 generic.go:334] "Generic (PLEG): container finished" podID="e8b546e4-9a72-4e15-afbe-62d02ed4cc00" containerID="59c9bc16d57fc0841e5eb00cdde29ac5b3e3a7f3d85ce05ed6020a8ea91b5bd6" exitCode=2
Jan 22 07:56:59 crc kubenswrapper[4933]: I0122 07:56:59.318541 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-z8t8s" event={"ID":"e8b546e4-9a72-4e15-afbe-62d02ed4cc00","Type":"ContainerDied","Data":"59c9bc16d57fc0841e5eb00cdde29ac5b3e3a7f3d85ce05ed6020a8ea91b5bd6"}
Jan 22 07:57:00 crc kubenswrapper[4933]: I0122 07:57:00.836219 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-z8t8s"
Jan 22 07:57:00 crc kubenswrapper[4933]: I0122 07:57:00.889194 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e8b546e4-9a72-4e15-afbe-62d02ed4cc00-inventory\") pod \"e8b546e4-9a72-4e15-afbe-62d02ed4cc00\" (UID: \"e8b546e4-9a72-4e15-afbe-62d02ed4cc00\") "
Jan 22 07:57:00 crc kubenswrapper[4933]: I0122 07:57:00.889706 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/e8b546e4-9a72-4e15-afbe-62d02ed4cc00-ssh-key-openstack-cell1\") pod \"e8b546e4-9a72-4e15-afbe-62d02ed4cc00\" (UID: \"e8b546e4-9a72-4e15-afbe-62d02ed4cc00\") "
Jan 22 07:57:00 crc kubenswrapper[4933]: I0122 07:57:00.889789 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kpglf\" (UniqueName: \"kubernetes.io/projected/e8b546e4-9a72-4e15-afbe-62d02ed4cc00-kube-api-access-kpglf\") pod \"e8b546e4-9a72-4e15-afbe-62d02ed4cc00\" (UID: \"e8b546e4-9a72-4e15-afbe-62d02ed4cc00\") "
Jan 22 07:57:00 crc kubenswrapper[4933]: I0122 07:57:00.895983 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8b546e4-9a72-4e15-afbe-62d02ed4cc00-kube-api-access-kpglf" (OuterVolumeSpecName: "kube-api-access-kpglf") pod "e8b546e4-9a72-4e15-afbe-62d02ed4cc00" (UID: "e8b546e4-9a72-4e15-afbe-62d02ed4cc00"). InnerVolumeSpecName "kube-api-access-kpglf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 07:57:00 crc kubenswrapper[4933]: I0122 07:57:00.930863 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8b546e4-9a72-4e15-afbe-62d02ed4cc00-inventory" (OuterVolumeSpecName: "inventory") pod "e8b546e4-9a72-4e15-afbe-62d02ed4cc00" (UID: "e8b546e4-9a72-4e15-afbe-62d02ed4cc00"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 07:57:00 crc kubenswrapper[4933]: I0122 07:57:00.939905 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8b546e4-9a72-4e15-afbe-62d02ed4cc00-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "e8b546e4-9a72-4e15-afbe-62d02ed4cc00" (UID: "e8b546e4-9a72-4e15-afbe-62d02ed4cc00"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 07:57:00 crc kubenswrapper[4933]: I0122 07:57:00.992420 4933 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/e8b546e4-9a72-4e15-afbe-62d02ed4cc00-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\""
Jan 22 07:57:00 crc kubenswrapper[4933]: I0122 07:57:00.992452 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kpglf\" (UniqueName: \"kubernetes.io/projected/e8b546e4-9a72-4e15-afbe-62d02ed4cc00-kube-api-access-kpglf\") on node \"crc\" DevicePath \"\""
Jan 22 07:57:00 crc kubenswrapper[4933]: I0122 07:57:00.992464 4933 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e8b546e4-9a72-4e15-afbe-62d02ed4cc00-inventory\") on node \"crc\" DevicePath \"\""
Jan 22 07:57:01 crc kubenswrapper[4933]: I0122 07:57:01.346915 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-z8t8s" event={"ID":"e8b546e4-9a72-4e15-afbe-62d02ed4cc00","Type":"ContainerDied","Data":"d659f6731beea542dbe8f4aa03f30d8db2d49a1f1c29ef8ec21bb0ad3b54b028"}
Jan 22 07:57:01 crc kubenswrapper[4933]: I0122 07:57:01.347468 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d659f6731beea542dbe8f4aa03f30d8db2d49a1f1c29ef8ec21bb0ad3b54b028"
Jan 22 07:57:01 crc kubenswrapper[4933]: I0122 07:57:01.346994 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-z8t8s"
Jan 22 07:57:40 crc kubenswrapper[4933]: I0122 07:57:40.942911 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 07:57:40 crc kubenswrapper[4933]: I0122 07:57:40.943354 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 07:58:10 crc kubenswrapper[4933]: I0122 07:58:10.449210 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-8mj5q/must-gather-vwq5b"]
Jan 22 07:58:10 crc kubenswrapper[4933]: E0122 07:58:10.450399 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8b546e4-9a72-4e15-afbe-62d02ed4cc00" containerName="configure-os-openstack-openstack-cell1"
Jan 22 07:58:10 crc kubenswrapper[4933]: I0122 07:58:10.450417 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8b546e4-9a72-4e15-afbe-62d02ed4cc00" containerName="configure-os-openstack-openstack-cell1"
Jan 22 07:58:10 crc kubenswrapper[4933]: I0122 07:58:10.450666 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8b546e4-9a72-4e15-afbe-62d02ed4cc00" containerName="configure-os-openstack-openstack-cell1"
Jan 22 07:58:10 crc kubenswrapper[4933]: I0122 07:58:10.451948 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-8mj5q/must-gather-vwq5b"
Jan 22 07:58:10 crc kubenswrapper[4933]: I0122 07:58:10.454300 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-8mj5q"/"kube-root-ca.crt"
Jan 22 07:58:10 crc kubenswrapper[4933]: I0122 07:58:10.455404 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-8mj5q"/"openshift-service-ca.crt"
Jan 22 07:58:10 crc kubenswrapper[4933]: I0122 07:58:10.455716 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-8mj5q"/"default-dockercfg-rh6f2"
Jan 22 07:58:10 crc kubenswrapper[4933]: I0122 07:58:10.471240 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-8mj5q/must-gather-vwq5b"]
Jan 22 07:58:10 crc kubenswrapper[4933]: I0122 07:58:10.475806 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/215998a7-626c-40ce-967d-4606db8fe835-must-gather-output\") pod \"must-gather-vwq5b\" (UID: \"215998a7-626c-40ce-967d-4606db8fe835\") " pod="openshift-must-gather-8mj5q/must-gather-vwq5b"
Jan 22 07:58:10 crc kubenswrapper[4933]: I0122 07:58:10.476022 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-44z5r\" (UniqueName: \"kubernetes.io/projected/215998a7-626c-40ce-967d-4606db8fe835-kube-api-access-44z5r\") pod \"must-gather-vwq5b\" (UID: \"215998a7-626c-40ce-967d-4606db8fe835\") " pod="openshift-must-gather-8mj5q/must-gather-vwq5b"
Jan 22 07:58:10 crc kubenswrapper[4933]: I0122 07:58:10.578482 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/215998a7-626c-40ce-967d-4606db8fe835-must-gather-output\") pod \"must-gather-vwq5b\" (UID: \"215998a7-626c-40ce-967d-4606db8fe835\") " pod="openshift-must-gather-8mj5q/must-gather-vwq5b"
Jan 22 07:58:10 crc kubenswrapper[4933]: I0122 07:58:10.579001 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-44z5r\" (UniqueName: \"kubernetes.io/projected/215998a7-626c-40ce-967d-4606db8fe835-kube-api-access-44z5r\") pod \"must-gather-vwq5b\" (UID: \"215998a7-626c-40ce-967d-4606db8fe835\") " pod="openshift-must-gather-8mj5q/must-gather-vwq5b"
Jan 22 07:58:10 crc kubenswrapper[4933]: I0122 07:58:10.579109 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/215998a7-626c-40ce-967d-4606db8fe835-must-gather-output\") pod \"must-gather-vwq5b\" (UID: \"215998a7-626c-40ce-967d-4606db8fe835\") " pod="openshift-must-gather-8mj5q/must-gather-vwq5b"
Jan 22 07:58:10 crc kubenswrapper[4933]: I0122 07:58:10.616733 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-44z5r\" (UniqueName: \"kubernetes.io/projected/215998a7-626c-40ce-967d-4606db8fe835-kube-api-access-44z5r\") pod \"must-gather-vwq5b\" (UID: \"215998a7-626c-40ce-967d-4606db8fe835\") " pod="openshift-must-gather-8mj5q/must-gather-vwq5b"
Jan 22 07:58:10 crc kubenswrapper[4933]: I0122 07:58:10.785566 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-8mj5q/must-gather-vwq5b"
Jan 22 07:58:10 crc kubenswrapper[4933]: I0122 07:58:10.943742 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 07:58:10 crc kubenswrapper[4933]: I0122 07:58:10.943810 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 07:58:11 crc kubenswrapper[4933]: I0122 07:58:11.307741 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-8mj5q/must-gather-vwq5b"]
Jan 22 07:58:12 crc kubenswrapper[4933]: I0122 07:58:12.204597 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-8mj5q/must-gather-vwq5b" event={"ID":"215998a7-626c-40ce-967d-4606db8fe835","Type":"ContainerStarted","Data":"8d41ef7e75fc63e41618e1866ee20fae0773be7918490bd6a48cf0510efa92db"}
Jan 22 07:58:18 crc kubenswrapper[4933]: I0122 07:58:18.284189 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-8mj5q/must-gather-vwq5b" event={"ID":"215998a7-626c-40ce-967d-4606db8fe835","Type":"ContainerStarted","Data":"bea88fc1be4524d3d0cb5bd600612fcf0d007d01d3fb7a81be657038cfd01ca9"}
Jan 22 07:58:19 crc kubenswrapper[4933]: I0122 07:58:19.293631 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-8mj5q/must-gather-vwq5b" event={"ID":"215998a7-626c-40ce-967d-4606db8fe835","Type":"ContainerStarted","Data":"d7d1245058758ec67fb6486260b6d91499421d410b4f54d3421ca2b9db8afafb"}
Jan 22 07:58:19 crc kubenswrapper[4933]: I0122 07:58:19.310775 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-8mj5q/must-gather-vwq5b" podStartSLOduration=2.720576296 podStartE2EDuration="9.310755228s" podCreationTimestamp="2026-01-22 07:58:10 +0000 UTC" firstStartedPulling="2026-01-22 07:58:11.307393397 +0000 UTC m=+7939.144518760" lastFinishedPulling="2026-01-22 07:58:17.897572339 +0000 UTC m=+7945.734697692" observedRunningTime="2026-01-22 07:58:19.307362786 +0000 UTC m=+7947.144488159" watchObservedRunningTime="2026-01-22 07:58:19.310755228 +0000 UTC m=+7947.147880581"
Jan 22 07:58:22 crc kubenswrapper[4933]: I0122 07:58:22.418317 4933 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-zxdg4 container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.23:5443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 22 07:58:22 crc kubenswrapper[4933]: I0122 07:58:22.418819 4933 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zxdg4" podUID="0aaa5850-80d2-4e5c-bf1e-8115fe8b19a2" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.23:5443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 22 07:58:22 crc kubenswrapper[4933]: I0122 07:58:22.418724 4933 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-zxdg4 container/packageserver namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.23:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 22 07:58:22 crc kubenswrapper[4933]: I0122 07:58:22.418950 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-zxdg4" podUID="0aaa5850-80d2-4e5c-bf1e-8115fe8b19a2" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.23:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 22 07:58:25 crc kubenswrapper[4933]: I0122 07:58:25.621245 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-8mj5q/crc-debug-hb65h"]
Jan 22 07:58:25 crc kubenswrapper[4933]: I0122 07:58:25.623101 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-8mj5q/crc-debug-hb65h"
Jan 22 07:58:25 crc kubenswrapper[4933]: I0122 07:58:25.731598 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nnjvb\" (UniqueName: \"kubernetes.io/projected/85e2bf0f-5d72-494c-a679-f42da8e067b9-kube-api-access-nnjvb\") pod \"crc-debug-hb65h\" (UID: \"85e2bf0f-5d72-494c-a679-f42da8e067b9\") " pod="openshift-must-gather-8mj5q/crc-debug-hb65h"
Jan 22 07:58:25 crc kubenswrapper[4933]: I0122 07:58:25.731670 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/85e2bf0f-5d72-494c-a679-f42da8e067b9-host\") pod \"crc-debug-hb65h\" (UID: \"85e2bf0f-5d72-494c-a679-f42da8e067b9\") " pod="openshift-must-gather-8mj5q/crc-debug-hb65h"
Jan 22 07:58:25 crc kubenswrapper[4933]: I0122 07:58:25.833428 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nnjvb\" (UniqueName: \"kubernetes.io/projected/85e2bf0f-5d72-494c-a679-f42da8e067b9-kube-api-access-nnjvb\") pod \"crc-debug-hb65h\" (UID: \"85e2bf0f-5d72-494c-a679-f42da8e067b9\") " pod="openshift-must-gather-8mj5q/crc-debug-hb65h"
Jan 22 07:58:25 crc kubenswrapper[4933]: I0122 07:58:25.833774 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/85e2bf0f-5d72-494c-a679-f42da8e067b9-host\") pod \"crc-debug-hb65h\" (UID: \"85e2bf0f-5d72-494c-a679-f42da8e067b9\") " pod="openshift-must-gather-8mj5q/crc-debug-hb65h"
Jan 22 07:58:25 crc kubenswrapper[4933]: I0122 07:58:25.833907 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/85e2bf0f-5d72-494c-a679-f42da8e067b9-host\") pod \"crc-debug-hb65h\" (UID: \"85e2bf0f-5d72-494c-a679-f42da8e067b9\") " pod="openshift-must-gather-8mj5q/crc-debug-hb65h"
Jan 22 07:58:25 crc kubenswrapper[4933]: I0122 07:58:25.856828 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nnjvb\" (UniqueName: \"kubernetes.io/projected/85e2bf0f-5d72-494c-a679-f42da8e067b9-kube-api-access-nnjvb\") pod \"crc-debug-hb65h\" (UID: \"85e2bf0f-5d72-494c-a679-f42da8e067b9\") " pod="openshift-must-gather-8mj5q/crc-debug-hb65h"
Jan 22 07:58:25 crc kubenswrapper[4933]: I0122 07:58:25.941926 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-8mj5q/crc-debug-hb65h"
Jan 22 07:58:25 crc kubenswrapper[4933]: W0122 07:58:25.975664 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod85e2bf0f_5d72_494c_a679_f42da8e067b9.slice/crio-bf2a70bc32fa3d8cb2af67e293280d31cb4c15b66692aaa402ac1ac4207083ff WatchSource:0}: Error finding container bf2a70bc32fa3d8cb2af67e293280d31cb4c15b66692aaa402ac1ac4207083ff: Status 404 returned error can't find the container with id bf2a70bc32fa3d8cb2af67e293280d31cb4c15b66692aaa402ac1ac4207083ff
Jan 22 07:58:26 crc kubenswrapper[4933]: I0122 07:58:26.365208 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-8mj5q/crc-debug-hb65h" event={"ID":"85e2bf0f-5d72-494c-a679-f42da8e067b9","Type":"ContainerStarted","Data":"bf2a70bc32fa3d8cb2af67e293280d31cb4c15b66692aaa402ac1ac4207083ff"}
Jan 22 07:58:28 crc kubenswrapper[4933]: I0122 07:58:28.420534 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_d1dd44ad-b83c-4130-8d7f-d42bca2a3113/alertmanager/0.log"
Jan 22 07:58:28 crc kubenswrapper[4933]: I0122 07:58:28.430405 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_d1dd44ad-b83c-4130-8d7f-d42bca2a3113/config-reloader/0.log"
Jan 22 07:58:28 crc kubenswrapper[4933]: I0122 07:58:28.442008 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_d1dd44ad-b83c-4130-8d7f-d42bca2a3113/init-config-reloader/0.log"
Jan 22 07:58:28 crc kubenswrapper[4933]: I0122 07:58:28.482619 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_efa9140a-0903-4fe7-8bc6-6f82d7e27e56/aodh-api/0.log"
Jan 22 07:58:28 crc kubenswrapper[4933]: I0122 07:58:28.600572 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_efa9140a-0903-4fe7-8bc6-6f82d7e27e56/aodh-evaluator/0.log"
Jan 22 07:58:28 crc kubenswrapper[4933]: I0122 07:58:28.606673 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_efa9140a-0903-4fe7-8bc6-6f82d7e27e56/aodh-notifier/0.log"
Jan 22 07:58:28 crc kubenswrapper[4933]: I0122 07:58:28.619447 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_efa9140a-0903-4fe7-8bc6-6f82d7e27e56/aodh-listener/0.log"
Jan 22 07:58:28 crc kubenswrapper[4933]: I0122 07:58:28.640001 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-5cbf848d96-gz47x_505d78b7-992d-4d4c-891f-86a61305f83d/barbican-api-log/0.log"
Jan 22 07:58:28 crc kubenswrapper[4933]: I0122 07:58:28.647667 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-5cbf848d96-gz47x_505d78b7-992d-4d4c-891f-86a61305f83d/barbican-api/0.log"
Jan 22 07:58:28 crc kubenswrapper[4933]: I0122 07:58:28.675714 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-6486c94768-wxz8r_0fd2303a-3ca1-4c57-bfc3-ea8b63a8c629/barbican-keystone-listener-log/0.log"
Jan 22 07:58:28 crc kubenswrapper[4933]: I0122 07:58:28.681954 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-6486c94768-wxz8r_0fd2303a-3ca1-4c57-bfc3-ea8b63a8c629/barbican-keystone-listener/0.log"
Jan 22 07:58:28 crc kubenswrapper[4933]: I0122 07:58:28.716319 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-69cb547445-wv7p7_a650d0c9-5c44-41b3-b8f6-8b584d976e40/barbican-worker-log/0.log"
Jan 22 07:58:28 crc kubenswrapper[4933]: I0122 07:58:28.722592 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-69cb547445-wv7p7_a650d0c9-5c44-41b3-b8f6-8b584d976e40/barbican-worker/0.log"
Jan 22 07:58:28 crc kubenswrapper[4933]: I0122 07:58:28.763592 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-openstack-openstack-cell1-mwncg_62f31314-79cb-48c7-a585-9fc2096932e3/bootstrap-openstack-openstack-cell1/0.log"
Jan 22 07:58:28 crc kubenswrapper[4933]: I0122 07:58:28.796832 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_8c4ae160-7c59-4237-8476-9d3da141fa96/ceilometer-central-agent/0.log"
Jan 22 07:58:28 crc kubenswrapper[4933]: I0122 07:58:28.840546 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_8c4ae160-7c59-4237-8476-9d3da141fa96/ceilometer-notification-agent/0.log"
Jan 22 07:58:28 crc kubenswrapper[4933]: I0122 07:58:28.846847 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_8c4ae160-7c59-4237-8476-9d3da141fa96/sg-core/0.log"
Jan 22 07:58:28 crc kubenswrapper[4933]: I0122 07:58:28.880676 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_8c4ae160-7c59-4237-8476-9d3da141fa96/proxy-httpd/0.log"
Jan 22 07:58:28 crc kubenswrapper[4933]: I0122 07:58:28.899060 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca/cinder-api-log/0.log"
Jan 22 07:58:28 crc kubenswrapper[4933]: I0122 07:58:28.955327 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_b3d8f772-24e9-4f04-ba84-6f8f1c36d9ca/cinder-api/0.log"
Jan 22 07:58:29 crc kubenswrapper[4933]: I0122 07:58:29.016998 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_716d4067-62fa-47c4-9874-078e19672c93/cinder-scheduler/0.log"
Jan 22 07:58:29 crc kubenswrapper[4933]: I0122 07:58:29.044058 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_716d4067-62fa-47c4-9874-078e19672c93/probe/0.log"
Jan 22 07:58:29 crc kubenswrapper[4933]: I0122 07:58:29.068293 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-openstack-openstack-cell1-l7b2n_a0667ff9-d0c8-48ae-8389-74bc383cd66b/configure-network-openstack-openstack-cell1/0.log"
Jan 22 07:58:29 crc kubenswrapper[4933]: I0122 07:58:29.091257 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-openstack-openstack-cell1-bkcjm_094d9352-b3a7-4f18-9802-646c4b82bf27/configure-os-openstack-openstack-cell1/0.log"
Jan 22 07:58:29 crc kubenswrapper[4933]: I0122 07:58:29.111568 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-openstack-openstack-cell1-jrjd6_7ac0961f-f1e7-4eeb-905a-67edf5e3c46a/configure-os-openstack-openstack-cell1/0.log"
Jan 22 07:58:29 crc kubenswrapper[4933]: I0122 07:58:29.132008 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-openstack-openstack-cell1-m56mj_dc0a7bd6-8a24-4e74-b1fd-64c8b78de6f8/configure-os-openstack-openstack-cell1/0.log"
Jan 22 07:58:29 crc kubenswrapper[4933]: I0122 07:58:29.152366 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-openstack-openstack-cell1-z8t8s_e8b546e4-9a72-4e15-afbe-62d02ed4cc00/configure-os-openstack-openstack-cell1/0.log"
Jan 22 07:58:29 crc kubenswrapper[4933]: I0122 07:58:29.169265 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-9c79574d7-xmjkq_84a13c36-50da-4671-ac3a-fc75c150adfc/dnsmasq-dns/0.log"
Jan 22 07:58:29 crc kubenswrapper[4933]: I0122 07:58:29.180520 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-9c79574d7-xmjkq_84a13c36-50da-4671-ac3a-fc75c150adfc/init/0.log"
Jan 22 07:58:29 crc kubenswrapper[4933]: I0122 07:58:29.203187 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-openstack-openstack-cell1-h4gwh_fba3a485-7601-4024-a522-48d26533750b/download-cache-openstack-openstack-cell1/0.log"
Jan 22 07:58:29 crc kubenswrapper[4933]: I0122 07:58:29.222520 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_ef4fb634-4e08-44e1-9f52-c0ceeadefbd4/glance-log/0.log"
Jan 22 07:58:29 crc kubenswrapper[4933]: I0122 07:58:29.245212 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_ef4fb634-4e08-44e1-9f52-c0ceeadefbd4/glance-httpd/0.log"
Jan 22 07:58:29 crc kubenswrapper[4933]: I0122 07:58:29.261684 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_8526897f-7543-4b1c-979b-601c17f31f54/glance-log/0.log"
Jan 22 07:58:29 crc kubenswrapper[4933]: I0122 07:58:29.278853 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_8526897f-7543-4b1c-979b-601c17f31f54/glance-httpd/0.log"
Jan 22 07:58:29 crc kubenswrapper[4933]: I0122 07:58:29.486684 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-api-76899c7dc6-c4kht_0ae1c552-787d-4702-bbe7-2019f8f35738/heat-api/0.log"
Jan 22 07:58:29 crc kubenswrapper[4933]: I0122 07:58:29.709020 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-cfnapi-5649c5cc46-vf4xq_8524beec-3d96-43ec-9a41-0ce2a961831a/heat-cfnapi/0.log"
Jan 22 07:58:29 crc kubenswrapper[4933]: I0122 07:58:29.729747 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-engine-577d6f656f-2c5lp_657490c7-ddc0-439c-bf19-ebffbfc535bb/heat-engine/0.log"
Jan 22 07:58:30 crc kubenswrapper[4933]: I0122 07:58:30.006522 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-6d688745d-jhzmc_d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa/horizon-log/0.log"
Jan 22 07:58:30 crc kubenswrapper[4933]: I0122 07:58:30.095578 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-6d688745d-jhzmc_d12f5902-fdce-431a-b6b2-4fc3eaa8ebaa/horizon/0.log"
Jan 22 07:58:30 crc kubenswrapper[4933]: I0122 07:58:30.147645 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-openstack-openstack-cell1-6b67h_ea30a0b1-7265-4e11-b38d-36fa9c2e08e3/install-os-openstack-openstack-cell1/0.log"
Jan 22 07:58:30 crc kubenswrapper[4933]: I0122 07:58:30.294298 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-7fdd56f9f4-vndmm_935cd692-2979-4e50-81e2-47c2af0738d1/keystone-api/0.log"
Jan 22 07:58:30 crc kubenswrapper[4933]: I0122 07:58:30.301897 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_7328c52a-a9e0-4042-9f6c-007d4480f97f/kube-state-metrics/0.log"
Jan 22 07:58:30 crc kubenswrapper[4933]: I0122 07:58:30.311722 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-copy-data_17c00104-0b53-4111-a850-25ee465eb8ad/adoption/0.log"
Jan 22 07:58:36 crc kubenswrapper[4933]: I0122 07:58:36.049736 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_74eb606e-de69-4502-b07d-e11628d32afc/memcached/0.log"
Jan 22 07:58:36 crc kubenswrapper[4933]: I0122 07:58:36.124984 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5dd49c8657-ww7nj_e5da1fca-9300-498a-834d-0a3eed388385/neutron-api/0.log"
Jan 22 07:58:36 crc kubenswrapper[4933]: I0122 07:58:36.162030 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5dd49c8657-ww7nj_e5da1fca-9300-498a-834d-0a3eed388385/neutron-httpd/0.log"
Jan 22 07:58:36 crc kubenswrapper[4933]: I0122 07:58:36.249222 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_e856da6e-d00f-4b01-af9f-257c680f9882/nova-api-log/0.log"
Jan 22 07:58:36 crc kubenswrapper[4933]: I0122 07:58:36.504706 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_e856da6e-d00f-4b01-af9f-257c680f9882/nova-api-api/0.log"
Jan 22 07:58:36 crc kubenswrapper[4933]: I0122 07:58:36.578689 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_db56d8cd-22b3-464a-a259-4f7e6c01c565/nova-cell0-conductor-conductor/0.log"
Jan 22 07:58:36 crc kubenswrapper[4933]: I0122 07:58:36.654160 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_ee69d559-ca87-4c15-9311-94390d23e206/nova-cell1-conductor-conductor/0.log"
Jan 22 07:58:36 crc kubenswrapper[4933]: I0122 07:58:36.733264 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_2411e726-603b-497a-966b-f0519bdef29a/nova-cell1-novncproxy-novncproxy/0.log"
Jan 22 07:58:36 crc kubenswrapper[4933]: I0122 07:58:36.812470 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_49c83965-c822-4b11-8c4d-b2a19ec0fc03/nova-metadata-log/0.log"
Jan 22 07:58:37 crc kubenswrapper[4933]: I0122 07:58:37.592782 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_49c83965-c822-4b11-8c4d-b2a19ec0fc03/nova-metadata-metadata/0.log"
Jan 22 07:58:38 crc kubenswrapper[4933]: I0122 07:58:38.609310 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_524eab2d-c8d7-40dd-b27c-64b020aa9118/nova-scheduler-scheduler/0.log"
Jan 22 07:58:38 crc kubenswrapper[4933]: I0122 07:58:38.795479 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-api-68d465b9c5-6lv2f_67ea81f4-bc8e-4883-9053-410468b0f4f6/octavia-api/0.log"
Jan 22 07:58:38 crc kubenswrapper[4933]: I0122 07:58:38.973469 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-api-68d465b9c5-6lv2f_67ea81f4-bc8e-4883-9053-410468b0f4f6/octavia-api-provider-agent/0.log"
Jan 22 07:58:39 crc kubenswrapper[4933]: I0122 07:58:39.006059 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-api-68d465b9c5-6lv2f_67ea81f4-bc8e-4883-9053-410468b0f4f6/init/0.log"
Jan 22 07:58:39 crc kubenswrapper[4933]: I0122 07:58:39.050649 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-healthmanager-sd9kx_12794d9c-3ddc-4003-a7fa-d8aad66d74ac/octavia-healthmanager/0.log"
Jan 22 07:58:39 crc
kubenswrapper[4933]: I0122 07:58:39.135837 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-healthmanager-sd9kx_12794d9c-3ddc-4003-a7fa-d8aad66d74ac/init/0.log" Jan 22 07:58:39 crc kubenswrapper[4933]: I0122 07:58:39.186602 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-housekeeping-27ltq_2d46e8ea-1174-431e-8fa3-8ffc44be7919/octavia-housekeeping/0.log" Jan 22 07:58:39 crc kubenswrapper[4933]: I0122 07:58:39.198184 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-housekeeping-27ltq_2d46e8ea-1174-431e-8fa3-8ffc44be7919/init/0.log" Jan 22 07:58:39 crc kubenswrapper[4933]: I0122 07:58:39.211556 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-image-upload-7b97d6bc64-wqqfs_b81027bd-04de-4c5c-a898-7c2d11d62abb/octavia-amphora-httpd/0.log" Jan 22 07:58:39 crc kubenswrapper[4933]: I0122 07:58:39.232596 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-image-upload-7b97d6bc64-wqqfs_b81027bd-04de-4c5c-a898-7c2d11d62abb/init/0.log" Jan 22 07:58:39 crc kubenswrapper[4933]: I0122 07:58:39.378942 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-rsyslog-bpbrn_73086f28-1dbc-4a12-afa9-8440c178dae1/octavia-rsyslog/0.log" Jan 22 07:58:39 crc kubenswrapper[4933]: I0122 07:58:39.475424 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-rsyslog-bpbrn_73086f28-1dbc-4a12-afa9-8440c178dae1/init/0.log" Jan 22 07:58:39 crc kubenswrapper[4933]: I0122 07:58:39.623026 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-worker-gkn68_09aef9f6-a8ee-4751-8a6b-64c323cb7bce/octavia-worker/0.log" Jan 22 07:58:39 crc kubenswrapper[4933]: I0122 07:58:39.632361 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-worker-gkn68_09aef9f6-a8ee-4751-8a6b-64c323cb7bce/init/0.log" Jan 22 07:58:39 crc kubenswrapper[4933]: I0122 07:58:39.661788 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_cb145d5a-1e4b-43f9-8a29-009a5a89ea2f/galera/0.log" Jan 22 07:58:39 crc kubenswrapper[4933]: I0122 07:58:39.675838 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_cb145d5a-1e4b-43f9-8a29-009a5a89ea2f/mysql-bootstrap/0.log" Jan 22 07:58:39 crc kubenswrapper[4933]: I0122 07:58:39.698485 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_7d7f26cd-e0e4-414c-96a2-55ce3f6495af/galera/0.log" Jan 22 07:58:39 crc kubenswrapper[4933]: I0122 07:58:39.711309 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_7d7f26cd-e0e4-414c-96a2-55ce3f6495af/mysql-bootstrap/0.log" Jan 22 07:58:39 crc kubenswrapper[4933]: I0122 07:58:39.721524 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_2b45d5fe-96da-4cf8-bdc7-986fc63c2071/openstackclient/0.log" Jan 22 07:58:39 crc kubenswrapper[4933]: I0122 07:58:39.731659 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-5brn2_03290ff4-6e07-4c4d-8e4d-33a5e1977c4f/openstack-network-exporter/0.log" Jan 22 07:58:39 crc kubenswrapper[4933]: I0122 07:58:39.745257 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-ctqtw_71d2c844-a72f-49fa-97d9-1d9f236823c4/ovsdb-server/0.log" Jan 22 07:58:39 crc kubenswrapper[4933]: I0122 07:58:39.758375 4933 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-ctqtw_71d2c844-a72f-49fa-97d9-1d9f236823c4/ovs-vswitchd/0.log" Jan 22 07:58:39 crc kubenswrapper[4933]: I0122 07:58:39.765001 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-ctqtw_71d2c844-a72f-49fa-97d9-1d9f236823c4/ovsdb-server-init/0.log" Jan 22 07:58:39 crc kubenswrapper[4933]: I0122 07:58:39.782482 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-qxtvn_308f49ae-ef97-4833-b9b5-e3cef66d305f/ovn-controller/0.log" Jan 22 07:58:39 crc kubenswrapper[4933]: I0122 07:58:39.794715 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-copy-data_6fabaea7-ea14-4e59-ba7e-c60429929e8c/adoption/0.log" Jan 22 07:58:39 crc kubenswrapper[4933]: I0122 07:58:39.807344 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_989ef748-4e1c-41fe-b299-90edf5c5b618/ovn-northd/0.log" Jan 22 07:58:39 crc kubenswrapper[4933]: I0122 07:58:39.817784 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_989ef748-4e1c-41fe-b299-90edf5c5b618/openstack-network-exporter/0.log" Jan 22 07:58:39 crc kubenswrapper[4933]: I0122 07:58:39.851053 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_1333917e-870b-4157-8a9b-8e799f5ce1e9/ovsdbserver-nb/0.log" Jan 22 07:58:39 crc kubenswrapper[4933]: I0122 07:58:39.857369 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_1333917e-870b-4157-8a9b-8e799f5ce1e9/openstack-network-exporter/0.log" Jan 22 07:58:39 crc kubenswrapper[4933]: I0122 07:58:39.885585 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-1_7d8d8036-af27-4990-972d-42a2137818fe/ovsdbserver-nb/0.log" Jan 22 07:58:39 crc kubenswrapper[4933]: I0122 07:58:39.892015 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-1_7d8d8036-af27-4990-972d-42a2137818fe/openstack-network-exporter/0.log" Jan 22 07:58:40 crc kubenswrapper[4933]: I0122 07:58:40.153920 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-2_66c1626a-9ba7-4e87-ba85-60e7b300579c/ovsdbserver-nb/0.log" Jan 22 07:58:40 crc kubenswrapper[4933]: I0122 07:58:40.160476 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-2_66c1626a-9ba7-4e87-ba85-60e7b300579c/openstack-network-exporter/0.log" Jan 22 07:58:40 crc kubenswrapper[4933]: I0122 07:58:40.299794 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_28e80b7a-c02f-42ef-ac07-ceaa85aa37ed/ovsdbserver-sb/0.log" Jan 22 07:58:40 crc kubenswrapper[4933]: I0122 07:58:40.304543 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_28e80b7a-c02f-42ef-ac07-ceaa85aa37ed/openstack-network-exporter/0.log" Jan 22 07:58:40 crc kubenswrapper[4933]: I0122 07:58:40.319993 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-1_4edbbe04-3891-42df-b567-72040af96322/ovsdbserver-sb/0.log" Jan 22 07:58:40 crc kubenswrapper[4933]: I0122 07:58:40.327184 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-1_4edbbe04-3891-42df-b567-72040af96322/openstack-network-exporter/0.log" Jan 22 07:58:40 crc kubenswrapper[4933]: I0122 07:58:40.350883 4933 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ovsdbserver-sb-2_1e17d563-a1a2-4165-b91e-af8182d086bf/ovsdbserver-sb/0.log" Jan 22 07:58:40 crc kubenswrapper[4933]: I0122 07:58:40.358821 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-2_1e17d563-a1a2-4165-b91e-af8182d086bf/openstack-network-exporter/0.log" Jan 22 07:58:40 crc kubenswrapper[4933]: I0122 07:58:40.548936 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-56899cfc48-xqft6_54e27225-7863-4049-9383-3c57a391d7e0/placement-log/0.log" Jan 22 07:58:40 crc kubenswrapper[4933]: I0122 07:58:40.670489 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-56899cfc48-xqft6_54e27225-7863-4049-9383-3c57a391d7e0/placement-api/0.log" Jan 22 07:58:40 crc kubenswrapper[4933]: I0122 07:58:40.690579 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_pre-adoption-validation-openstack-pre-adoption-openstack-ctnkcf_2e0dfb8c-dfb3-487d-8391-78d4a3bee130/pre-adoption-validation-openstack-pre-adoption-openstack-cell1/0.log" Jan 22 07:58:40 crc kubenswrapper[4933]: I0122 07:58:40.709175 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_92749fdb-fe24-4dcd-ba3a-bf8a89509f23/prometheus/0.log" Jan 22 07:58:40 crc kubenswrapper[4933]: I0122 07:58:40.716952 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_92749fdb-fe24-4dcd-ba3a-bf8a89509f23/config-reloader/0.log" Jan 22 07:58:40 crc kubenswrapper[4933]: I0122 07:58:40.724089 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_92749fdb-fe24-4dcd-ba3a-bf8a89509f23/thanos-sidecar/0.log" Jan 22 07:58:40 crc kubenswrapper[4933]: I0122 07:58:40.791616 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_92749fdb-fe24-4dcd-ba3a-bf8a89509f23/init-config-reloader/0.log" Jan 22 07:58:40 crc kubenswrapper[4933]: I0122 07:58:40.823597 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_200c4924-621c-4dbc-8b31-626f46b61d15/rabbitmq/0.log" Jan 22 07:58:40 crc kubenswrapper[4933]: I0122 07:58:40.828367 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_200c4924-621c-4dbc-8b31-626f46b61d15/setup-container/0.log" Jan 22 07:58:40 crc kubenswrapper[4933]: I0122 07:58:40.917275 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_c6e1501e-9b96-439d-8f12-26df5455b9d1/rabbitmq/0.log" Jan 22 07:58:40 crc kubenswrapper[4933]: I0122 07:58:40.922724 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_c6e1501e-9b96-439d-8f12-26df5455b9d1/setup-container/0.log" Jan 22 07:58:40 crc kubenswrapper[4933]: I0122 07:58:40.942710 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 07:58:40 crc kubenswrapper[4933]: I0122 07:58:40.942754 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" Jan 22 07:58:40 crc kubenswrapper[4933]: I0122 07:58:40.942800 4933 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" Jan 22 07:58:40 crc kubenswrapper[4933]: I0122 07:58:40.943643 4933 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c681476a698bdfba0829493019ebf1b8e76064c7d6a7c94d045eb0a2b4992611"} pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 07:58:40 crc kubenswrapper[4933]: I0122 07:58:40.943697 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" containerID="cri-o://c681476a698bdfba0829493019ebf1b8e76064c7d6a7c94d045eb0a2b4992611" gracePeriod=600 Jan 22 07:58:41 crc kubenswrapper[4933]: I0122 07:58:41.065532 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-65dd78db8b-67tnw_37f95090-a891-4563-adcf-7aa34d7ff34c/proxy-httpd/0.log" Jan 22 07:58:41 crc kubenswrapper[4933]: I0122 07:58:41.187759 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-65dd78db8b-67tnw_37f95090-a891-4563-adcf-7aa34d7ff34c/proxy-server/0.log" Jan 22 07:58:41 crc kubenswrapper[4933]: I0122 07:58:41.249110 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-gtw5n_a2c3e8e6-9165-4104-b7d2-3940c4801bb3/swift-ring-rebalance/0.log" Jan 22 07:58:41 crc kubenswrapper[4933]: I0122 07:58:41.433087 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tripleo-cleanup-tripleo-cleanup-openstack-cell1-mlrmq_c722274d-78ae-420e-8487-f52eac7984d7/tripleo-cleanup-tripleo-cleanup-openstack-cell1/0.log" Jan 22 07:58:41 crc kubenswrapper[4933]: I0122 07:58:41.492234 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-openstack-openstack-cell1-ht5r9_f691c83f-aa79-4aeb-b6e5-eb7c08edf9d4/validate-network-openstack-openstack-cell1/0.log" Jan 22 07:58:41 crc kubenswrapper[4933]: I0122 07:58:41.611149 4933 generic.go:334] "Generic (PLEG): container finished" podID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerID="c681476a698bdfba0829493019ebf1b8e76064c7d6a7c94d045eb0a2b4992611" exitCode=0 Jan 22 07:58:41 crc kubenswrapper[4933]: I0122 07:58:41.611190 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerDied","Data":"c681476a698bdfba0829493019ebf1b8e76064c7d6a7c94d045eb0a2b4992611"} Jan 22 07:58:41 crc kubenswrapper[4933]: I0122 07:58:41.611220 4933 scope.go:117] "RemoveContainer" containerID="67ad87c27db02a9e178a1bcee7c5ada3568216417f71e7c98863e733198e7ab4" Jan 22 07:58:42 crc kubenswrapper[4933]: I0122 07:58:42.621950 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-8mj5q/crc-debug-hb65h" event={"ID":"85e2bf0f-5d72-494c-a679-f42da8e067b9","Type":"ContainerStarted","Data":"4f82c1cf1dd84cd3e01694c88dcccc118b39f10a528b449e8d219d37ee1c95b3"} Jan 22 07:58:42 crc kubenswrapper[4933]: I0122 07:58:42.624624 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerStarted","Data":"cfab95e41af998dabe63c89a764948bcf7a828c9de3e04ff4298ff3b6fe7d56b"} Jan 22 07:58:42 crc kubenswrapper[4933]: I0122 07:58:42.658243 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-8mj5q/crc-debug-hb65h" podStartSLOduration=2.017681881 podStartE2EDuration="17.658203041s" podCreationTimestamp="2026-01-22 07:58:25 +0000 UTC" firstStartedPulling="2026-01-22 07:58:25.97785475 +0000 UTC m=+7953.814980103" lastFinishedPulling="2026-01-22 07:58:41.61837591 +0000 UTC m=+7969.455501263" observedRunningTime="2026-01-22 07:58:42.651723475 +0000 UTC m=+7970.488848858" watchObservedRunningTime="2026-01-22 07:58:42.658203041 +0000 UTC m=+7970.495328394" Jan 22 07:58:53 crc kubenswrapper[4933]: I0122 07:58:53.439538 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-2stg8_29f4dd7e-c2cf-4f64-87e8-2201fe99e751/controller/0.log" Jan 22 07:58:53 crc kubenswrapper[4933]: I0122 07:58:53.455420 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-2stg8_29f4dd7e-c2cf-4f64-87e8-2201fe99e751/kube-rbac-proxy/0.log" Jan 22 07:58:53 crc kubenswrapper[4933]: I0122 07:58:53.480756 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tgvxb_f619cfe1-61b2-4726-a08f-b41cc24ae488/controller/0.log" Jan 22 07:58:56 crc kubenswrapper[4933]: I0122 07:58:56.378998 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tgvxb_f619cfe1-61b2-4726-a08f-b41cc24ae488/frr/0.log" Jan 22 07:58:56 crc kubenswrapper[4933]: I0122 07:58:56.390764 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tgvxb_f619cfe1-61b2-4726-a08f-b41cc24ae488/reloader/0.log" Jan 22 07:58:56 crc kubenswrapper[4933]: I0122 07:58:56.395347 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tgvxb_f619cfe1-61b2-4726-a08f-b41cc24ae488/frr-metrics/0.log" Jan 22 07:58:56 crc kubenswrapper[4933]: I0122 07:58:56.406904 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tgvxb_f619cfe1-61b2-4726-a08f-b41cc24ae488/kube-rbac-proxy/0.log" Jan 22 07:58:56 crc kubenswrapper[4933]: I0122 07:58:56.419625 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tgvxb_f619cfe1-61b2-4726-a08f-b41cc24ae488/kube-rbac-proxy-frr/0.log" Jan 22 07:58:56 crc kubenswrapper[4933]: I0122 07:58:56.428042 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tgvxb_f619cfe1-61b2-4726-a08f-b41cc24ae488/cp-frr-files/0.log" Jan 22 07:58:56 crc kubenswrapper[4933]: I0122 07:58:56.446902 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tgvxb_f619cfe1-61b2-4726-a08f-b41cc24ae488/cp-reloader/0.log" Jan 22 07:58:56 crc kubenswrapper[4933]: I0122 07:58:56.457993 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tgvxb_f619cfe1-61b2-4726-a08f-b41cc24ae488/cp-metrics/0.log" Jan 22 07:58:56 crc kubenswrapper[4933]: I0122 07:58:56.484495 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7df86c4f6c-rjx6j_42d0dcd8-97a6-489b-9b19-43fd22936816/frr-k8s-webhook-server/0.log" Jan 22 07:58:56 crc kubenswrapper[4933]: I0122 07:58:56.528768 4933 log.go:25] "Finished 
parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-6c8b57987c-qr4jc_9f8987dd-79ca-4569-8000-088df75be06e/manager/0.log" Jan 22 07:58:56 crc kubenswrapper[4933]: I0122 07:58:56.541965 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-688dc4b4d8-9z74g_fe7410b0-e355-498e-841e-89ae7f5e56de/webhook-server/0.log" Jan 22 07:58:57 crc kubenswrapper[4933]: I0122 07:58:57.813498 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-xlk7v_57ec97d6-d16e-4069-98cc-7dcf56910fad/speaker/0.log" Jan 22 07:58:57 crc kubenswrapper[4933]: I0122 07:58:57.820007 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-xlk7v_57ec97d6-d16e-4069-98cc-7dcf56910fad/kube-rbac-proxy/0.log" Jan 22 07:59:10 crc kubenswrapper[4933]: I0122 07:59:10.600943 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_3078a1b977323d6d8f95a4018ef06f377c198eb1741282ac05e933b603vrtpk_6a552e55-190e-4f4c-9234-ebd6d63ee4ad/extract/0.log" Jan 22 07:59:10 crc kubenswrapper[4933]: I0122 07:59:10.609987 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_3078a1b977323d6d8f95a4018ef06f377c198eb1741282ac05e933b603vrtpk_6a552e55-190e-4f4c-9234-ebd6d63ee4ad/util/0.log" Jan 22 07:59:10 crc kubenswrapper[4933]: I0122 07:59:10.622085 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_3078a1b977323d6d8f95a4018ef06f377c198eb1741282ac05e933b603vrtpk_6a552e55-190e-4f4c-9234-ebd6d63ee4ad/pull/0.log" Jan 22 07:59:10 crc kubenswrapper[4933]: I0122 07:59:10.746417 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-59dd8b7cbf-z6g5l_78b94bce-a7a5-471f-bab2-f57baeff12b6/manager/0.log" Jan 22 07:59:10 crc kubenswrapper[4933]: I0122 07:59:10.828882 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-69cf5d4557-6ktqg_53b0d937-20e6-4a4b-b61b-f172c672c43f/manager/0.log" Jan 22 07:59:10 crc kubenswrapper[4933]: I0122 07:59:10.840298 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-b45d7bf98-sb8mm_c2a2b482-2d1c-4c3b-b1dc-7d3f291b665e/manager/0.log" Jan 22 07:59:10 crc kubenswrapper[4933]: I0122 07:59:10.994995 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-78fdd796fd-x7t54_f4f610a0-0ede-4d86-b1d4-fcd4a70d9c1c/manager/0.log" Jan 22 07:59:11 crc kubenswrapper[4933]: I0122 07:59:11.033416 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-594c8c9d5d-p6w9k_10fc162d-8e83-4741-88be-c1e8dd9f291a/manager/0.log" Jan 22 07:59:11 crc kubenswrapper[4933]: I0122 07:59:11.050364 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-77d5c5b54f-ghk9g_45469fd5-7d9d-44f4-82a1-61d82f8e2dc8/manager/0.log" Jan 22 07:59:11 crc kubenswrapper[4933]: I0122 07:59:11.644895 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-54ccf4f85d-5vl5w_bee42408-9ab2-4e83-a06b-8cb123a853f9/manager/0.log" Jan 22 07:59:11 crc kubenswrapper[4933]: I0122 07:59:11.660650 4933 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-69d6c9f5b8-dmwlv_ab3830be-ff45-443a-9089-29438fca5c75/manager/0.log" Jan 22 07:59:11 crc kubenswrapper[4933]: I0122 07:59:11.800058 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-b8b6d4659-pkpcl_d3f5d746-ae61-4d36-bcce-4530c7f7a899/manager/0.log" Jan 22 07:59:11 crc kubenswrapper[4933]: I0122 07:59:11.817107 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-78c6999f6f-dwrg6_34f0d75a-dc72-4dad-82a6-512c1351210b/manager/0.log" Jan 22 07:59:11 crc kubenswrapper[4933]: I0122 07:59:11.893296 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-c87fff755-dtg9q_e0335df2-7bd1-4b61-8057-7663a730d2ff/manager/0.log" Jan 22 07:59:11 crc kubenswrapper[4933]: I0122 07:59:11.989620 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-5d8f59fb49-nkc7j_21941523-226c-4a4c-a099-51df0766a712/manager/0.log" Jan 22 07:59:12 crc kubenswrapper[4933]: I0122 07:59:12.169834 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-6b8bc8d87d-czc9v_300ef4d3-6c13-4a9d-96e6-a707abccca2c/manager/0.log" Jan 22 07:59:12 crc kubenswrapper[4933]: I0122 07:59:12.227162 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-7bd9774b6-w25vn_04683325-6972-455c-9ca5-ddf1fd4b9862/manager/0.log" Jan 22 07:59:12 crc kubenswrapper[4933]: I0122 07:59:12.249170 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns_ae3ccc66-eed1-4750-8af1-7f99673b1323/manager/0.log" Jan 22 07:59:12 crc kubenswrapper[4933]: I0122 07:59:12.443913 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-init-5cd76577f9-bqlhh_df54862e-d3c4-4068-9560-93833cf75eae/operator/0.log" Jan 22 07:59:14 crc kubenswrapper[4933]: I0122 07:59:14.588097 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-647bb87bbd-xc9z6_07b078fc-4665-4e58-934d-f606471d5942/manager/0.log" Jan 22 07:59:14 crc kubenswrapper[4933]: I0122 07:59:14.750336 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-2cblb_283252a7-ff09-4856-9249-7c6cd70dff99/registry-server/0.log" Jan 22 07:59:14 crc kubenswrapper[4933]: I0122 07:59:14.847965 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-55db956ddc-7q7hb_289a66a7-9513-4b66-990a-3d9f11919531/manager/0.log" Jan 22 07:59:14 crc kubenswrapper[4933]: I0122 07:59:14.880950 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5d646b7d76-m6g48_2892be27-6da5-4a19-a30e-36f5907f5d70/manager/0.log" Jan 22 07:59:14 crc kubenswrapper[4933]: I0122 07:59:14.903564 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-rdbn6_3028718b-d03f-414e-834d-93eb28eeb369/operator/0.log" Jan 22 07:59:14 crc kubenswrapper[4933]: I0122 07:59:14.938522 4933 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_swift-operator-controller-manager-547cbdb99f-2wjhd_48e1a8f3-00fd-48a6-be02-7c61f0425809/manager/0.log" Jan 22 07:59:15 crc kubenswrapper[4933]: I0122 07:59:15.107453 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-85cd9769bb-556x5_f5292b84-8cb2-4f43-96f9-6304705b15bc/manager/0.log" Jan 22 07:59:15 crc kubenswrapper[4933]: I0122 07:59:15.118114 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-69797bbcbd-9rc7q_0a5c558d-f69d-4299-97e2-00326ec7e416/manager/0.log" Jan 22 07:59:15 crc kubenswrapper[4933]: I0122 07:59:15.132756 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-5ffb9c6597-gtcc5_b5af85fb-e3ef-41b7-8c6b-7afddd5200bd/manager/0.log" Jan 22 07:59:19 crc kubenswrapper[4933]: I0122 07:59:19.149199 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-kq95z_8796a7df-de04-490e-b5a9-c9fad5483d61/control-plane-machine-set-operator/0.log" Jan 22 07:59:19 crc kubenswrapper[4933]: I0122 07:59:19.168684 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-6r979_c800ab14-5d0a-4078-91f5-b47d05d15ccc/kube-rbac-proxy/0.log" Jan 22 07:59:19 crc kubenswrapper[4933]: I0122 07:59:19.180241 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-6r979_c800ab14-5d0a-4078-91f5-b47d05d15ccc/machine-api-operator/0.log" Jan 22 07:59:22 crc kubenswrapper[4933]: I0122 07:59:22.086263 4933 generic.go:334] "Generic (PLEG): container finished" podID="85e2bf0f-5d72-494c-a679-f42da8e067b9" containerID="4f82c1cf1dd84cd3e01694c88dcccc118b39f10a528b449e8d219d37ee1c95b3" exitCode=0 Jan 22 07:59:22 crc kubenswrapper[4933]: I0122 07:59:22.086359 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-8mj5q/crc-debug-hb65h" event={"ID":"85e2bf0f-5d72-494c-a679-f42da8e067b9","Type":"ContainerDied","Data":"4f82c1cf1dd84cd3e01694c88dcccc118b39f10a528b449e8d219d37ee1c95b3"} Jan 22 07:59:23 crc kubenswrapper[4933]: I0122 07:59:23.214038 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-8mj5q/crc-debug-hb65h" Jan 22 07:59:23 crc kubenswrapper[4933]: I0122 07:59:23.252670 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-8mj5q/crc-debug-hb65h"] Jan 22 07:59:23 crc kubenswrapper[4933]: I0122 07:59:23.263012 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-8mj5q/crc-debug-hb65h"] Jan 22 07:59:23 crc kubenswrapper[4933]: I0122 07:59:23.361467 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nnjvb\" (UniqueName: \"kubernetes.io/projected/85e2bf0f-5d72-494c-a679-f42da8e067b9-kube-api-access-nnjvb\") pod \"85e2bf0f-5d72-494c-a679-f42da8e067b9\" (UID: \"85e2bf0f-5d72-494c-a679-f42da8e067b9\") " Jan 22 07:59:23 crc kubenswrapper[4933]: I0122 07:59:23.361565 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/85e2bf0f-5d72-494c-a679-f42da8e067b9-host\") pod \"85e2bf0f-5d72-494c-a679-f42da8e067b9\" (UID: \"85e2bf0f-5d72-494c-a679-f42da8e067b9\") " Jan 22 07:59:23 crc kubenswrapper[4933]: I0122 07:59:23.361684 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/85e2bf0f-5d72-494c-a679-f42da8e067b9-host" (OuterVolumeSpecName: "host") pod "85e2bf0f-5d72-494c-a679-f42da8e067b9" (UID: "85e2bf0f-5d72-494c-a679-f42da8e067b9"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 07:59:23 crc kubenswrapper[4933]: I0122 07:59:23.362145 4933 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/85e2bf0f-5d72-494c-a679-f42da8e067b9-host\") on node \"crc\" DevicePath \"\"" Jan 22 07:59:23 crc kubenswrapper[4933]: I0122 07:59:23.369315 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/85e2bf0f-5d72-494c-a679-f42da8e067b9-kube-api-access-nnjvb" (OuterVolumeSpecName: "kube-api-access-nnjvb") pod "85e2bf0f-5d72-494c-a679-f42da8e067b9" (UID: "85e2bf0f-5d72-494c-a679-f42da8e067b9"). InnerVolumeSpecName "kube-api-access-nnjvb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:59:23 crc kubenswrapper[4933]: I0122 07:59:23.464103 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nnjvb\" (UniqueName: \"kubernetes.io/projected/85e2bf0f-5d72-494c-a679-f42da8e067b9-kube-api-access-nnjvb\") on node \"crc\" DevicePath \"\"" Jan 22 07:59:24 crc kubenswrapper[4933]: I0122 07:59:24.108155 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bf2a70bc32fa3d8cb2af67e293280d31cb4c15b66692aaa402ac1ac4207083ff" Jan 22 07:59:24 crc kubenswrapper[4933]: I0122 07:59:24.108216 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-8mj5q/crc-debug-hb65h" Jan 22 07:59:24 crc kubenswrapper[4933]: I0122 07:59:24.414215 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-8mj5q/crc-debug-cf8pb"] Jan 22 07:59:24 crc kubenswrapper[4933]: E0122 07:59:24.414694 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85e2bf0f-5d72-494c-a679-f42da8e067b9" containerName="container-00" Jan 22 07:59:24 crc kubenswrapper[4933]: I0122 07:59:24.414708 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="85e2bf0f-5d72-494c-a679-f42da8e067b9" containerName="container-00" Jan 22 07:59:24 crc kubenswrapper[4933]: I0122 07:59:24.414925 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="85e2bf0f-5d72-494c-a679-f42da8e067b9" containerName="container-00" Jan 22 07:59:24 crc kubenswrapper[4933]: I0122 07:59:24.415725 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-8mj5q/crc-debug-cf8pb" Jan 22 07:59:24 crc kubenswrapper[4933]: I0122 07:59:24.502206 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="85e2bf0f-5d72-494c-a679-f42da8e067b9" path="/var/lib/kubelet/pods/85e2bf0f-5d72-494c-a679-f42da8e067b9/volumes" Jan 22 07:59:24 crc kubenswrapper[4933]: I0122 07:59:24.587415 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6e59068e-738d-4209-8f35-667a17d6e943-host\") pod \"crc-debug-cf8pb\" (UID: \"6e59068e-738d-4209-8f35-667a17d6e943\") " pod="openshift-must-gather-8mj5q/crc-debug-cf8pb" Jan 22 07:59:24 crc kubenswrapper[4933]: I0122 07:59:24.587692 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xr5rb\" (UniqueName: \"kubernetes.io/projected/6e59068e-738d-4209-8f35-667a17d6e943-kube-api-access-xr5rb\") pod \"crc-debug-cf8pb\" (UID: \"6e59068e-738d-4209-8f35-667a17d6e943\") " pod="openshift-must-gather-8mj5q/crc-debug-cf8pb" Jan 22 07:59:24 crc kubenswrapper[4933]: I0122 07:59:24.689910 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xr5rb\" (UniqueName: \"kubernetes.io/projected/6e59068e-738d-4209-8f35-667a17d6e943-kube-api-access-xr5rb\") pod \"crc-debug-cf8pb\" (UID: \"6e59068e-738d-4209-8f35-667a17d6e943\") " pod="openshift-must-gather-8mj5q/crc-debug-cf8pb" Jan 22 07:59:24 crc kubenswrapper[4933]: I0122 07:59:24.690438 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6e59068e-738d-4209-8f35-667a17d6e943-host\") pod \"crc-debug-cf8pb\" (UID: \"6e59068e-738d-4209-8f35-667a17d6e943\") " pod="openshift-must-gather-8mj5q/crc-debug-cf8pb" Jan 22 07:59:24 crc kubenswrapper[4933]: I0122 07:59:24.690558 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6e59068e-738d-4209-8f35-667a17d6e943-host\") pod \"crc-debug-cf8pb\" (UID: \"6e59068e-738d-4209-8f35-667a17d6e943\") " pod="openshift-must-gather-8mj5q/crc-debug-cf8pb" Jan 22 07:59:24 crc kubenswrapper[4933]: I0122 07:59:24.707762 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xr5rb\" (UniqueName: \"kubernetes.io/projected/6e59068e-738d-4209-8f35-667a17d6e943-kube-api-access-xr5rb\") pod \"crc-debug-cf8pb\" (UID: \"6e59068e-738d-4209-8f35-667a17d6e943\") " 
pod="openshift-must-gather-8mj5q/crc-debug-cf8pb" Jan 22 07:59:24 crc kubenswrapper[4933]: I0122 07:59:24.732179 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-8mj5q/crc-debug-cf8pb" Jan 22 07:59:25 crc kubenswrapper[4933]: I0122 07:59:25.119271 4933 generic.go:334] "Generic (PLEG): container finished" podID="6e59068e-738d-4209-8f35-667a17d6e943" containerID="08cfb74ce83030b542966c1bc1912384b007a3297e9271c65505563f58419c7a" exitCode=0 Jan 22 07:59:25 crc kubenswrapper[4933]: I0122 07:59:25.119479 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-8mj5q/crc-debug-cf8pb" event={"ID":"6e59068e-738d-4209-8f35-667a17d6e943","Type":"ContainerDied","Data":"08cfb74ce83030b542966c1bc1912384b007a3297e9271c65505563f58419c7a"} Jan 22 07:59:25 crc kubenswrapper[4933]: I0122 07:59:25.119755 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-8mj5q/crc-debug-cf8pb" event={"ID":"6e59068e-738d-4209-8f35-667a17d6e943","Type":"ContainerStarted","Data":"75cbd44b192ac35f1bf491a55982c0276588f083b5ffca3507aa530ecd5db42b"} Jan 22 07:59:25 crc kubenswrapper[4933]: I0122 07:59:25.635774 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-8mj5q/crc-debug-cf8pb"] Jan 22 07:59:25 crc kubenswrapper[4933]: I0122 07:59:25.643995 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-8mj5q/crc-debug-cf8pb"] Jan 22 07:59:26 crc kubenswrapper[4933]: I0122 07:59:26.251724 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-8mj5q/crc-debug-cf8pb" Jan 22 07:59:26 crc kubenswrapper[4933]: I0122 07:59:26.421642 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6e59068e-738d-4209-8f35-667a17d6e943-host\") pod \"6e59068e-738d-4209-8f35-667a17d6e943\" (UID: \"6e59068e-738d-4209-8f35-667a17d6e943\") " Jan 22 07:59:26 crc kubenswrapper[4933]: I0122 07:59:26.421697 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6e59068e-738d-4209-8f35-667a17d6e943-host" (OuterVolumeSpecName: "host") pod "6e59068e-738d-4209-8f35-667a17d6e943" (UID: "6e59068e-738d-4209-8f35-667a17d6e943"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 07:59:26 crc kubenswrapper[4933]: I0122 07:59:26.421972 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xr5rb\" (UniqueName: \"kubernetes.io/projected/6e59068e-738d-4209-8f35-667a17d6e943-kube-api-access-xr5rb\") pod \"6e59068e-738d-4209-8f35-667a17d6e943\" (UID: \"6e59068e-738d-4209-8f35-667a17d6e943\") " Jan 22 07:59:26 crc kubenswrapper[4933]: I0122 07:59:26.422441 4933 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6e59068e-738d-4209-8f35-667a17d6e943-host\") on node \"crc\" DevicePath \"\"" Jan 22 07:59:26 crc kubenswrapper[4933]: I0122 07:59:26.441324 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e59068e-738d-4209-8f35-667a17d6e943-kube-api-access-xr5rb" (OuterVolumeSpecName: "kube-api-access-xr5rb") pod "6e59068e-738d-4209-8f35-667a17d6e943" (UID: "6e59068e-738d-4209-8f35-667a17d6e943"). InnerVolumeSpecName "kube-api-access-xr5rb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:59:26 crc kubenswrapper[4933]: I0122 07:59:26.502743 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6e59068e-738d-4209-8f35-667a17d6e943" path="/var/lib/kubelet/pods/6e59068e-738d-4209-8f35-667a17d6e943/volumes" Jan 22 07:59:26 crc kubenswrapper[4933]: I0122 07:59:26.524106 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xr5rb\" (UniqueName: \"kubernetes.io/projected/6e59068e-738d-4209-8f35-667a17d6e943-kube-api-access-xr5rb\") on node \"crc\" DevicePath \"\"" Jan 22 07:59:26 crc kubenswrapper[4933]: I0122 07:59:26.802652 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-8mj5q/crc-debug-c8h87"] Jan 22 07:59:26 crc kubenswrapper[4933]: E0122 07:59:26.803372 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e59068e-738d-4209-8f35-667a17d6e943" containerName="container-00" Jan 22 07:59:26 crc kubenswrapper[4933]: I0122 07:59:26.803396 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e59068e-738d-4209-8f35-667a17d6e943" containerName="container-00" Jan 22 07:59:26 crc kubenswrapper[4933]: I0122 07:59:26.803720 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e59068e-738d-4209-8f35-667a17d6e943" containerName="container-00" Jan 22 07:59:26 crc kubenswrapper[4933]: I0122 07:59:26.804769 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-8mj5q/crc-debug-c8h87" Jan 22 07:59:26 crc kubenswrapper[4933]: I0122 07:59:26.932683 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c7738d7f-9c01-4e7f-839a-79d0a0c40b24-host\") pod \"crc-debug-c8h87\" (UID: \"c7738d7f-9c01-4e7f-839a-79d0a0c40b24\") " pod="openshift-must-gather-8mj5q/crc-debug-c8h87" Jan 22 07:59:26 crc kubenswrapper[4933]: I0122 07:59:26.932982 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4r4z4\" (UniqueName: \"kubernetes.io/projected/c7738d7f-9c01-4e7f-839a-79d0a0c40b24-kube-api-access-4r4z4\") pod \"crc-debug-c8h87\" (UID: \"c7738d7f-9c01-4e7f-839a-79d0a0c40b24\") " pod="openshift-must-gather-8mj5q/crc-debug-c8h87" Jan 22 07:59:27 crc kubenswrapper[4933]: I0122 07:59:27.035399 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c7738d7f-9c01-4e7f-839a-79d0a0c40b24-host\") pod \"crc-debug-c8h87\" (UID: \"c7738d7f-9c01-4e7f-839a-79d0a0c40b24\") " pod="openshift-must-gather-8mj5q/crc-debug-c8h87" Jan 22 07:59:27 crc kubenswrapper[4933]: I0122 07:59:27.035765 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4r4z4\" (UniqueName: \"kubernetes.io/projected/c7738d7f-9c01-4e7f-839a-79d0a0c40b24-kube-api-access-4r4z4\") pod \"crc-debug-c8h87\" (UID: \"c7738d7f-9c01-4e7f-839a-79d0a0c40b24\") " pod="openshift-must-gather-8mj5q/crc-debug-c8h87" Jan 22 07:59:27 crc kubenswrapper[4933]: I0122 07:59:27.035614 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c7738d7f-9c01-4e7f-839a-79d0a0c40b24-host\") pod \"crc-debug-c8h87\" (UID: \"c7738d7f-9c01-4e7f-839a-79d0a0c40b24\") " pod="openshift-must-gather-8mj5q/crc-debug-c8h87" Jan 22 07:59:27 crc kubenswrapper[4933]: I0122 07:59:27.055705 4933 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-4r4z4\" (UniqueName: \"kubernetes.io/projected/c7738d7f-9c01-4e7f-839a-79d0a0c40b24-kube-api-access-4r4z4\") pod \"crc-debug-c8h87\" (UID: \"c7738d7f-9c01-4e7f-839a-79d0a0c40b24\") " pod="openshift-must-gather-8mj5q/crc-debug-c8h87" Jan 22 07:59:27 crc kubenswrapper[4933]: I0122 07:59:27.121804 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-8mj5q/crc-debug-c8h87" Jan 22 07:59:27 crc kubenswrapper[4933]: I0122 07:59:27.139055 4933 scope.go:117] "RemoveContainer" containerID="08cfb74ce83030b542966c1bc1912384b007a3297e9271c65505563f58419c7a" Jan 22 07:59:27 crc kubenswrapper[4933]: I0122 07:59:27.139202 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-8mj5q/crc-debug-cf8pb" Jan 22 07:59:28 crc kubenswrapper[4933]: I0122 07:59:28.153442 4933 generic.go:334] "Generic (PLEG): container finished" podID="c7738d7f-9c01-4e7f-839a-79d0a0c40b24" containerID="b9258a1f5f5911e4a2f9c9d090828a871a5ea9d480563bf2775f4871e90b3f61" exitCode=0 Jan 22 07:59:28 crc kubenswrapper[4933]: I0122 07:59:28.153591 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-8mj5q/crc-debug-c8h87" event={"ID":"c7738d7f-9c01-4e7f-839a-79d0a0c40b24","Type":"ContainerDied","Data":"b9258a1f5f5911e4a2f9c9d090828a871a5ea9d480563bf2775f4871e90b3f61"} Jan 22 07:59:28 crc kubenswrapper[4933]: I0122 07:59:28.153947 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-8mj5q/crc-debug-c8h87" event={"ID":"c7738d7f-9c01-4e7f-839a-79d0a0c40b24","Type":"ContainerStarted","Data":"168818e9bfd455d700d47b7951c3c8509d2ba8aeb86ba18589918c1e058c4222"} Jan 22 07:59:28 crc kubenswrapper[4933]: I0122 07:59:28.195052 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-8mj5q/crc-debug-c8h87"] Jan 22 07:59:28 crc kubenswrapper[4933]: I0122 07:59:28.203935 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-8mj5q/crc-debug-c8h87"] Jan 22 07:59:29 crc kubenswrapper[4933]: I0122 07:59:29.273157 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-8mj5q/crc-debug-c8h87" Jan 22 07:59:29 crc kubenswrapper[4933]: I0122 07:59:29.395969 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4r4z4\" (UniqueName: \"kubernetes.io/projected/c7738d7f-9c01-4e7f-839a-79d0a0c40b24-kube-api-access-4r4z4\") pod \"c7738d7f-9c01-4e7f-839a-79d0a0c40b24\" (UID: \"c7738d7f-9c01-4e7f-839a-79d0a0c40b24\") " Jan 22 07:59:29 crc kubenswrapper[4933]: I0122 07:59:29.396144 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c7738d7f-9c01-4e7f-839a-79d0a0c40b24-host\") pod \"c7738d7f-9c01-4e7f-839a-79d0a0c40b24\" (UID: \"c7738d7f-9c01-4e7f-839a-79d0a0c40b24\") " Jan 22 07:59:29 crc kubenswrapper[4933]: I0122 07:59:29.396189 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c7738d7f-9c01-4e7f-839a-79d0a0c40b24-host" (OuterVolumeSpecName: "host") pod "c7738d7f-9c01-4e7f-839a-79d0a0c40b24" (UID: "c7738d7f-9c01-4e7f-839a-79d0a0c40b24"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 07:59:29 crc kubenswrapper[4933]: I0122 07:59:29.396704 4933 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c7738d7f-9c01-4e7f-839a-79d0a0c40b24-host\") on node \"crc\" DevicePath \"\"" Jan 22 07:59:29 crc kubenswrapper[4933]: I0122 07:59:29.401349 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7738d7f-9c01-4e7f-839a-79d0a0c40b24-kube-api-access-4r4z4" (OuterVolumeSpecName: "kube-api-access-4r4z4") pod "c7738d7f-9c01-4e7f-839a-79d0a0c40b24" (UID: "c7738d7f-9c01-4e7f-839a-79d0a0c40b24"). InnerVolumeSpecName "kube-api-access-4r4z4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:59:29 crc kubenswrapper[4933]: I0122 07:59:29.498676 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4r4z4\" (UniqueName: \"kubernetes.io/projected/c7738d7f-9c01-4e7f-839a-79d0a0c40b24-kube-api-access-4r4z4\") on node \"crc\" DevicePath \"\"" Jan 22 07:59:30 crc kubenswrapper[4933]: I0122 07:59:30.173964 4933 scope.go:117] "RemoveContainer" containerID="b9258a1f5f5911e4a2f9c9d090828a871a5ea9d480563bf2775f4871e90b3f61" Jan 22 07:59:30 crc kubenswrapper[4933]: I0122 07:59:30.174359 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-8mj5q/crc-debug-c8h87" Jan 22 07:59:30 crc kubenswrapper[4933]: I0122 07:59:30.430431 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-cwmfv"] Jan 22 07:59:30 crc kubenswrapper[4933]: E0122 07:59:30.432609 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7738d7f-9c01-4e7f-839a-79d0a0c40b24" containerName="container-00" Jan 22 07:59:30 crc kubenswrapper[4933]: I0122 07:59:30.432636 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7738d7f-9c01-4e7f-839a-79d0a0c40b24" containerName="container-00" Jan 22 07:59:30 crc kubenswrapper[4933]: I0122 07:59:30.433026 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7738d7f-9c01-4e7f-839a-79d0a0c40b24" containerName="container-00" Jan 22 07:59:30 crc kubenswrapper[4933]: I0122 07:59:30.436128 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-cwmfv" Jan 22 07:59:30 crc kubenswrapper[4933]: I0122 07:59:30.466786 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-cwmfv"] Jan 22 07:59:30 crc kubenswrapper[4933]: I0122 07:59:30.502853 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c7738d7f-9c01-4e7f-839a-79d0a0c40b24" path="/var/lib/kubelet/pods/c7738d7f-9c01-4e7f-839a-79d0a0c40b24/volumes" Jan 22 07:59:30 crc kubenswrapper[4933]: I0122 07:59:30.623135 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101-utilities\") pod \"certified-operators-cwmfv\" (UID: \"cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101\") " pod="openshift-marketplace/certified-operators-cwmfv" Jan 22 07:59:30 crc kubenswrapper[4933]: I0122 07:59:30.623316 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101-catalog-content\") pod \"certified-operators-cwmfv\" (UID: \"cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101\") " pod="openshift-marketplace/certified-operators-cwmfv" Jan 22 07:59:30 crc kubenswrapper[4933]: I0122 07:59:30.623935 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h7wgf\" (UniqueName: \"kubernetes.io/projected/cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101-kube-api-access-h7wgf\") pod \"certified-operators-cwmfv\" (UID: \"cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101\") " pod="openshift-marketplace/certified-operators-cwmfv" Jan 22 07:59:30 crc kubenswrapper[4933]: I0122 07:59:30.726156 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101-catalog-content\") pod \"certified-operators-cwmfv\" (UID: \"cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101\") " pod="openshift-marketplace/certified-operators-cwmfv" Jan 22 07:59:30 crc kubenswrapper[4933]: I0122 07:59:30.726576 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h7wgf\" (UniqueName: \"kubernetes.io/projected/cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101-kube-api-access-h7wgf\") pod \"certified-operators-cwmfv\" (UID: \"cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101\") " pod="openshift-marketplace/certified-operators-cwmfv" Jan 22 07:59:30 crc kubenswrapper[4933]: I0122 07:59:30.726853 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101-catalog-content\") pod \"certified-operators-cwmfv\" (UID: \"cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101\") " pod="openshift-marketplace/certified-operators-cwmfv" Jan 22 07:59:30 crc kubenswrapper[4933]: I0122 07:59:30.727060 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101-utilities\") pod \"certified-operators-cwmfv\" (UID: \"cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101\") " pod="openshift-marketplace/certified-operators-cwmfv" Jan 22 07:59:30 crc kubenswrapper[4933]: I0122 07:59:30.727468 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101-utilities\") pod \"certified-operators-cwmfv\" (UID: \"cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101\") " pod="openshift-marketplace/certified-operators-cwmfv" Jan 22 07:59:30 crc kubenswrapper[4933]: I0122 07:59:30.749846 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h7wgf\" (UniqueName: \"kubernetes.io/projected/cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101-kube-api-access-h7wgf\") pod \"certified-operators-cwmfv\" (UID: \"cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101\") " pod="openshift-marketplace/certified-operators-cwmfv" Jan 22 07:59:30 crc kubenswrapper[4933]: I0122 07:59:30.762043 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cwmfv" Jan 22 07:59:31 crc kubenswrapper[4933]: I0122 07:59:31.280328 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-cwmfv"] Jan 22 07:59:32 crc kubenswrapper[4933]: I0122 07:59:32.203785 4933 generic.go:334] "Generic (PLEG): container finished" podID="cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101" containerID="63b350086cd6cf01279c35cb99c52b6bbf341dd95152e56b5348fbfc424fc22d" exitCode=0 Jan 22 07:59:32 crc kubenswrapper[4933]: I0122 07:59:32.204388 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cwmfv" event={"ID":"cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101","Type":"ContainerDied","Data":"63b350086cd6cf01279c35cb99c52b6bbf341dd95152e56b5348fbfc424fc22d"} Jan 22 07:59:32 crc kubenswrapper[4933]: I0122 07:59:32.204591 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cwmfv" event={"ID":"cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101","Type":"ContainerStarted","Data":"d8807fa894e85cebe960a878fbf6afd9245ef5710ff54a977d1c26904ed93f08"} Jan 22 07:59:33 crc kubenswrapper[4933]: I0122 07:59:33.214346 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cwmfv" event={"ID":"cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101","Type":"ContainerStarted","Data":"51a2afc8f6eb23476f846ee0d058e6fa44e23f8071be79d3e07112fb14108d86"} Jan 22 07:59:34 crc kubenswrapper[4933]: I0122 07:59:34.225238 4933 generic.go:334] "Generic (PLEG): container finished" podID="cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101" containerID="51a2afc8f6eb23476f846ee0d058e6fa44e23f8071be79d3e07112fb14108d86" exitCode=0 Jan 22 07:59:34 crc kubenswrapper[4933]: I0122 07:59:34.225372 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cwmfv" event={"ID":"cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101","Type":"ContainerDied","Data":"51a2afc8f6eb23476f846ee0d058e6fa44e23f8071be79d3e07112fb14108d86"} Jan 22 07:59:34 crc kubenswrapper[4933]: I0122 07:59:34.410881 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-lsdxv"] Jan 22 07:59:34 crc kubenswrapper[4933]: I0122 07:59:34.414391 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lsdxv" Jan 22 07:59:34 crc kubenswrapper[4933]: I0122 07:59:34.435706 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lsdxv"] Jan 22 07:59:34 crc kubenswrapper[4933]: I0122 07:59:34.438271 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/699435fa-7d6e-4ba1-96f2-46403415349e-utilities\") pod \"redhat-marketplace-lsdxv\" (UID: \"699435fa-7d6e-4ba1-96f2-46403415349e\") " pod="openshift-marketplace/redhat-marketplace-lsdxv" Jan 22 07:59:34 crc kubenswrapper[4933]: I0122 07:59:34.438350 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/699435fa-7d6e-4ba1-96f2-46403415349e-catalog-content\") pod \"redhat-marketplace-lsdxv\" (UID: \"699435fa-7d6e-4ba1-96f2-46403415349e\") " pod="openshift-marketplace/redhat-marketplace-lsdxv" Jan 22 07:59:34 crc kubenswrapper[4933]: I0122 07:59:34.438558 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sq2rp\" (UniqueName: \"kubernetes.io/projected/699435fa-7d6e-4ba1-96f2-46403415349e-kube-api-access-sq2rp\") pod \"redhat-marketplace-lsdxv\" (UID: \"699435fa-7d6e-4ba1-96f2-46403415349e\") " pod="openshift-marketplace/redhat-marketplace-lsdxv" Jan 22 07:59:34 crc kubenswrapper[4933]: I0122 07:59:34.541195 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/699435fa-7d6e-4ba1-96f2-46403415349e-utilities\") pod \"redhat-marketplace-lsdxv\" (UID: \"699435fa-7d6e-4ba1-96f2-46403415349e\") " pod="openshift-marketplace/redhat-marketplace-lsdxv" Jan 22 07:59:34 crc kubenswrapper[4933]: I0122 07:59:34.541555 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/699435fa-7d6e-4ba1-96f2-46403415349e-catalog-content\") pod \"redhat-marketplace-lsdxv\" (UID: \"699435fa-7d6e-4ba1-96f2-46403415349e\") " pod="openshift-marketplace/redhat-marketplace-lsdxv" Jan 22 07:59:34 crc kubenswrapper[4933]: I0122 07:59:34.541676 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/699435fa-7d6e-4ba1-96f2-46403415349e-utilities\") pod \"redhat-marketplace-lsdxv\" (UID: \"699435fa-7d6e-4ba1-96f2-46403415349e\") " pod="openshift-marketplace/redhat-marketplace-lsdxv" Jan 22 07:59:34 crc kubenswrapper[4933]: I0122 07:59:34.542043 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/699435fa-7d6e-4ba1-96f2-46403415349e-catalog-content\") pod \"redhat-marketplace-lsdxv\" (UID: \"699435fa-7d6e-4ba1-96f2-46403415349e\") " pod="openshift-marketplace/redhat-marketplace-lsdxv" Jan 22 07:59:34 crc kubenswrapper[4933]: I0122 07:59:34.542148 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sq2rp\" (UniqueName: \"kubernetes.io/projected/699435fa-7d6e-4ba1-96f2-46403415349e-kube-api-access-sq2rp\") pod \"redhat-marketplace-lsdxv\" (UID: \"699435fa-7d6e-4ba1-96f2-46403415349e\") " pod="openshift-marketplace/redhat-marketplace-lsdxv" Jan 22 07:59:34 crc kubenswrapper[4933]: I0122 07:59:34.560985 4933 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-sq2rp\" (UniqueName: \"kubernetes.io/projected/699435fa-7d6e-4ba1-96f2-46403415349e-kube-api-access-sq2rp\") pod \"redhat-marketplace-lsdxv\" (UID: \"699435fa-7d6e-4ba1-96f2-46403415349e\") " pod="openshift-marketplace/redhat-marketplace-lsdxv" Jan 22 07:59:34 crc kubenswrapper[4933]: I0122 07:59:34.749306 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lsdxv" Jan 22 07:59:35 crc kubenswrapper[4933]: I0122 07:59:35.224017 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lsdxv"] Jan 22 07:59:35 crc kubenswrapper[4933]: I0122 07:59:35.236654 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cwmfv" event={"ID":"cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101","Type":"ContainerStarted","Data":"9a3f173d24fbbb6e9c78aa181de9eb003826fc0d8cc92c810bf60f5de37fef48"} Jan 22 07:59:35 crc kubenswrapper[4933]: I0122 07:59:35.261808 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-cwmfv" podStartSLOduration=2.821130279 podStartE2EDuration="5.26178821s" podCreationTimestamp="2026-01-22 07:59:30 +0000 UTC" firstStartedPulling="2026-01-22 07:59:32.206319481 +0000 UTC m=+8020.043444834" lastFinishedPulling="2026-01-22 07:59:34.646977412 +0000 UTC m=+8022.484102765" observedRunningTime="2026-01-22 07:59:35.253448037 +0000 UTC m=+8023.090573431" watchObservedRunningTime="2026-01-22 07:59:35.26178821 +0000 UTC m=+8023.098913553" Jan 22 07:59:36 crc kubenswrapper[4933]: I0122 07:59:36.250841 4933 generic.go:334] "Generic (PLEG): container finished" podID="699435fa-7d6e-4ba1-96f2-46403415349e" containerID="06305ea82b4be2d722d0df3f2dbf73f04d67999d8750dbdcc7c197054e6abafd" exitCode=0 Jan 22 07:59:36 crc kubenswrapper[4933]: I0122 07:59:36.251005 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lsdxv" event={"ID":"699435fa-7d6e-4ba1-96f2-46403415349e","Type":"ContainerDied","Data":"06305ea82b4be2d722d0df3f2dbf73f04d67999d8750dbdcc7c197054e6abafd"} Jan 22 07:59:36 crc kubenswrapper[4933]: I0122 07:59:36.251039 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lsdxv" event={"ID":"699435fa-7d6e-4ba1-96f2-46403415349e","Type":"ContainerStarted","Data":"291d2fc5e19e1740e212b46599f4acbb497630c352610163b64580ca1ed064b9"} Jan 22 07:59:37 crc kubenswrapper[4933]: I0122 07:59:37.261014 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lsdxv" event={"ID":"699435fa-7d6e-4ba1-96f2-46403415349e","Type":"ContainerStarted","Data":"ab455306d4c3abc42794d67988cf341973ab0fbb27c9d306edd379b31dd98656"} Jan 22 07:59:38 crc kubenswrapper[4933]: I0122 07:59:38.274898 4933 generic.go:334] "Generic (PLEG): container finished" podID="699435fa-7d6e-4ba1-96f2-46403415349e" containerID="ab455306d4c3abc42794d67988cf341973ab0fbb27c9d306edd379b31dd98656" exitCode=0 Jan 22 07:59:38 crc kubenswrapper[4933]: I0122 07:59:38.274998 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lsdxv" event={"ID":"699435fa-7d6e-4ba1-96f2-46403415349e","Type":"ContainerDied","Data":"ab455306d4c3abc42794d67988cf341973ab0fbb27c9d306edd379b31dd98656"} Jan 22 07:59:39 crc kubenswrapper[4933]: I0122 07:59:39.291217 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-lsdxv" event={"ID":"699435fa-7d6e-4ba1-96f2-46403415349e","Type":"ContainerStarted","Data":"888b4c07e150de999ec96adb47acf7859bf7a97cb6da29a896721a302c76a494"} Jan 22 07:59:39 crc kubenswrapper[4933]: I0122 07:59:39.315465 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-lsdxv" podStartSLOduration=2.85344046 podStartE2EDuration="5.315442931s" podCreationTimestamp="2026-01-22 07:59:34 +0000 UTC" firstStartedPulling="2026-01-22 07:59:36.254372184 +0000 UTC m=+8024.091497547" lastFinishedPulling="2026-01-22 07:59:38.716374665 +0000 UTC m=+8026.553500018" observedRunningTime="2026-01-22 07:59:39.308555673 +0000 UTC m=+8027.145681106" watchObservedRunningTime="2026-01-22 07:59:39.315442931 +0000 UTC m=+8027.152568284" Jan 22 07:59:40 crc kubenswrapper[4933]: I0122 07:59:40.762631 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-cwmfv" Jan 22 07:59:40 crc kubenswrapper[4933]: I0122 07:59:40.763053 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-cwmfv" Jan 22 07:59:40 crc kubenswrapper[4933]: I0122 07:59:40.814257 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-cwmfv" Jan 22 07:59:41 crc kubenswrapper[4933]: I0122 07:59:41.382755 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-cwmfv" Jan 22 07:59:41 crc kubenswrapper[4933]: I0122 07:59:41.992237 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-cwmfv"] Jan 22 07:59:43 crc kubenswrapper[4933]: I0122 07:59:43.336068 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-cwmfv" podUID="cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101" containerName="registry-server" containerID="cri-o://9a3f173d24fbbb6e9c78aa181de9eb003826fc0d8cc92c810bf60f5de37fef48" gracePeriod=2 Jan 22 07:59:44 crc kubenswrapper[4933]: I0122 07:59:44.347957 4933 generic.go:334] "Generic (PLEG): container finished" podID="cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101" containerID="9a3f173d24fbbb6e9c78aa181de9eb003826fc0d8cc92c810bf60f5de37fef48" exitCode=0 Jan 22 07:59:44 crc kubenswrapper[4933]: I0122 07:59:44.348021 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cwmfv" event={"ID":"cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101","Type":"ContainerDied","Data":"9a3f173d24fbbb6e9c78aa181de9eb003826fc0d8cc92c810bf60f5de37fef48"} Jan 22 07:59:44 crc kubenswrapper[4933]: I0122 07:59:44.348789 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cwmfv" event={"ID":"cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101","Type":"ContainerDied","Data":"d8807fa894e85cebe960a878fbf6afd9245ef5710ff54a977d1c26904ed93f08"} Jan 22 07:59:44 crc kubenswrapper[4933]: I0122 07:59:44.348814 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d8807fa894e85cebe960a878fbf6afd9245ef5710ff54a977d1c26904ed93f08" Jan 22 07:59:44 crc kubenswrapper[4933]: I0122 07:59:44.349687 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-cwmfv" Jan 22 07:59:44 crc kubenswrapper[4933]: I0122 07:59:44.473141 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101-catalog-content\") pod \"cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101\" (UID: \"cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101\") " Jan 22 07:59:44 crc kubenswrapper[4933]: I0122 07:59:44.473287 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h7wgf\" (UniqueName: \"kubernetes.io/projected/cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101-kube-api-access-h7wgf\") pod \"cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101\" (UID: \"cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101\") " Jan 22 07:59:44 crc kubenswrapper[4933]: I0122 07:59:44.473353 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101-utilities\") pod \"cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101\" (UID: \"cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101\") " Jan 22 07:59:44 crc kubenswrapper[4933]: I0122 07:59:44.474427 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101-utilities" (OuterVolumeSpecName: "utilities") pod "cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101" (UID: "cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:59:44 crc kubenswrapper[4933]: I0122 07:59:44.483848 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101-kube-api-access-h7wgf" (OuterVolumeSpecName: "kube-api-access-h7wgf") pod "cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101" (UID: "cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101"). InnerVolumeSpecName "kube-api-access-h7wgf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:59:44 crc kubenswrapper[4933]: I0122 07:59:44.576285 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h7wgf\" (UniqueName: \"kubernetes.io/projected/cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101-kube-api-access-h7wgf\") on node \"crc\" DevicePath \"\"" Jan 22 07:59:44 crc kubenswrapper[4933]: I0122 07:59:44.576333 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 07:59:44 crc kubenswrapper[4933]: I0122 07:59:44.583995 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101" (UID: "cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:59:44 crc kubenswrapper[4933]: I0122 07:59:44.678522 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 07:59:44 crc kubenswrapper[4933]: I0122 07:59:44.750911 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-lsdxv" Jan 22 07:59:44 crc kubenswrapper[4933]: I0122 07:59:44.750974 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-lsdxv" Jan 22 07:59:44 crc kubenswrapper[4933]: I0122 07:59:44.811631 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-lsdxv" Jan 22 07:59:45 crc kubenswrapper[4933]: I0122 07:59:45.362810 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cwmfv" Jan 22 07:59:45 crc kubenswrapper[4933]: I0122 07:59:45.405191 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-cwmfv"] Jan 22 07:59:45 crc kubenswrapper[4933]: I0122 07:59:45.415924 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-cwmfv"] Jan 22 07:59:45 crc kubenswrapper[4933]: I0122 07:59:45.447611 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-lsdxv" Jan 22 07:59:46 crc kubenswrapper[4933]: I0122 07:59:46.507546 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101" path="/var/lib/kubelet/pods/cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101/volumes" Jan 22 07:59:47 crc kubenswrapper[4933]: I0122 07:59:47.193756 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lsdxv"] Jan 22 07:59:47 crc kubenswrapper[4933]: I0122 07:59:47.387912 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-lsdxv" podUID="699435fa-7d6e-4ba1-96f2-46403415349e" containerName="registry-server" containerID="cri-o://888b4c07e150de999ec96adb47acf7859bf7a97cb6da29a896721a302c76a494" gracePeriod=2 Jan 22 07:59:47 crc kubenswrapper[4933]: I0122 07:59:47.879001 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lsdxv" Jan 22 07:59:47 crc kubenswrapper[4933]: I0122 07:59:47.952191 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sq2rp\" (UniqueName: \"kubernetes.io/projected/699435fa-7d6e-4ba1-96f2-46403415349e-kube-api-access-sq2rp\") pod \"699435fa-7d6e-4ba1-96f2-46403415349e\" (UID: \"699435fa-7d6e-4ba1-96f2-46403415349e\") " Jan 22 07:59:47 crc kubenswrapper[4933]: I0122 07:59:47.952399 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/699435fa-7d6e-4ba1-96f2-46403415349e-catalog-content\") pod \"699435fa-7d6e-4ba1-96f2-46403415349e\" (UID: \"699435fa-7d6e-4ba1-96f2-46403415349e\") " Jan 22 07:59:47 crc kubenswrapper[4933]: I0122 07:59:47.952554 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/699435fa-7d6e-4ba1-96f2-46403415349e-utilities\") pod \"699435fa-7d6e-4ba1-96f2-46403415349e\" (UID: \"699435fa-7d6e-4ba1-96f2-46403415349e\") " Jan 22 07:59:47 crc kubenswrapper[4933]: I0122 07:59:47.953665 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/699435fa-7d6e-4ba1-96f2-46403415349e-utilities" (OuterVolumeSpecName: "utilities") pod "699435fa-7d6e-4ba1-96f2-46403415349e" (UID: "699435fa-7d6e-4ba1-96f2-46403415349e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:59:47 crc kubenswrapper[4933]: I0122 07:59:47.959372 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/699435fa-7d6e-4ba1-96f2-46403415349e-kube-api-access-sq2rp" (OuterVolumeSpecName: "kube-api-access-sq2rp") pod "699435fa-7d6e-4ba1-96f2-46403415349e" (UID: "699435fa-7d6e-4ba1-96f2-46403415349e"). InnerVolumeSpecName "kube-api-access-sq2rp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:59:47 crc kubenswrapper[4933]: I0122 07:59:47.991965 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/699435fa-7d6e-4ba1-96f2-46403415349e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "699435fa-7d6e-4ba1-96f2-46403415349e" (UID: "699435fa-7d6e-4ba1-96f2-46403415349e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:59:48 crc kubenswrapper[4933]: I0122 07:59:48.056794 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/699435fa-7d6e-4ba1-96f2-46403415349e-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 07:59:48 crc kubenswrapper[4933]: I0122 07:59:48.056840 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/699435fa-7d6e-4ba1-96f2-46403415349e-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 07:59:48 crc kubenswrapper[4933]: I0122 07:59:48.056855 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sq2rp\" (UniqueName: \"kubernetes.io/projected/699435fa-7d6e-4ba1-96f2-46403415349e-kube-api-access-sq2rp\") on node \"crc\" DevicePath \"\"" Jan 22 07:59:48 crc kubenswrapper[4933]: I0122 07:59:48.402576 4933 generic.go:334] "Generic (PLEG): container finished" podID="699435fa-7d6e-4ba1-96f2-46403415349e" containerID="888b4c07e150de999ec96adb47acf7859bf7a97cb6da29a896721a302c76a494" exitCode=0 Jan 22 07:59:48 crc kubenswrapper[4933]: I0122 07:59:48.402632 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lsdxv" event={"ID":"699435fa-7d6e-4ba1-96f2-46403415349e","Type":"ContainerDied","Data":"888b4c07e150de999ec96adb47acf7859bf7a97cb6da29a896721a302c76a494"} Jan 22 07:59:48 crc kubenswrapper[4933]: I0122 07:59:48.402667 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lsdxv" event={"ID":"699435fa-7d6e-4ba1-96f2-46403415349e","Type":"ContainerDied","Data":"291d2fc5e19e1740e212b46599f4acbb497630c352610163b64580ca1ed064b9"} Jan 22 07:59:48 crc kubenswrapper[4933]: I0122 07:59:48.402687 4933 scope.go:117] "RemoveContainer" containerID="888b4c07e150de999ec96adb47acf7859bf7a97cb6da29a896721a302c76a494" Jan 22 07:59:48 crc kubenswrapper[4933]: I0122 07:59:48.402872 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lsdxv" Jan 22 07:59:48 crc kubenswrapper[4933]: I0122 07:59:48.436366 4933 scope.go:117] "RemoveContainer" containerID="ab455306d4c3abc42794d67988cf341973ab0fbb27c9d306edd379b31dd98656" Jan 22 07:59:48 crc kubenswrapper[4933]: I0122 07:59:48.443090 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lsdxv"] Jan 22 07:59:48 crc kubenswrapper[4933]: I0122 07:59:48.452994 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-lsdxv"] Jan 22 07:59:48 crc kubenswrapper[4933]: I0122 07:59:48.462373 4933 scope.go:117] "RemoveContainer" containerID="06305ea82b4be2d722d0df3f2dbf73f04d67999d8750dbdcc7c197054e6abafd" Jan 22 07:59:48 crc kubenswrapper[4933]: I0122 07:59:48.505007 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="699435fa-7d6e-4ba1-96f2-46403415349e" path="/var/lib/kubelet/pods/699435fa-7d6e-4ba1-96f2-46403415349e/volumes" Jan 22 07:59:48 crc kubenswrapper[4933]: I0122 07:59:48.511962 4933 scope.go:117] "RemoveContainer" containerID="888b4c07e150de999ec96adb47acf7859bf7a97cb6da29a896721a302c76a494" Jan 22 07:59:48 crc kubenswrapper[4933]: E0122 07:59:48.512607 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"888b4c07e150de999ec96adb47acf7859bf7a97cb6da29a896721a302c76a494\": container with ID starting with 888b4c07e150de999ec96adb47acf7859bf7a97cb6da29a896721a302c76a494 not found: ID does not exist" containerID="888b4c07e150de999ec96adb47acf7859bf7a97cb6da29a896721a302c76a494" Jan 22 07:59:48 crc kubenswrapper[4933]: I0122 07:59:48.512652 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"888b4c07e150de999ec96adb47acf7859bf7a97cb6da29a896721a302c76a494"} err="failed to get container status \"888b4c07e150de999ec96adb47acf7859bf7a97cb6da29a896721a302c76a494\": rpc error: code = NotFound desc = could not find container \"888b4c07e150de999ec96adb47acf7859bf7a97cb6da29a896721a302c76a494\": container with ID starting with 888b4c07e150de999ec96adb47acf7859bf7a97cb6da29a896721a302c76a494 not found: ID does not exist" Jan 22 07:59:48 crc kubenswrapper[4933]: I0122 07:59:48.512678 4933 scope.go:117] "RemoveContainer" containerID="ab455306d4c3abc42794d67988cf341973ab0fbb27c9d306edd379b31dd98656" Jan 22 07:59:48 crc kubenswrapper[4933]: E0122 07:59:48.512882 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ab455306d4c3abc42794d67988cf341973ab0fbb27c9d306edd379b31dd98656\": container with ID starting with ab455306d4c3abc42794d67988cf341973ab0fbb27c9d306edd379b31dd98656 not found: ID does not exist" containerID="ab455306d4c3abc42794d67988cf341973ab0fbb27c9d306edd379b31dd98656" Jan 22 07:59:48 crc kubenswrapper[4933]: I0122 07:59:48.512907 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ab455306d4c3abc42794d67988cf341973ab0fbb27c9d306edd379b31dd98656"} err="failed to get container status \"ab455306d4c3abc42794d67988cf341973ab0fbb27c9d306edd379b31dd98656\": rpc error: code = NotFound desc = could not find container \"ab455306d4c3abc42794d67988cf341973ab0fbb27c9d306edd379b31dd98656\": container with ID starting with ab455306d4c3abc42794d67988cf341973ab0fbb27c9d306edd379b31dd98656 not found: ID does not exist" Jan 22 07:59:48 crc kubenswrapper[4933]: I0122 
07:59:48.512923 4933 scope.go:117] "RemoveContainer" containerID="06305ea82b4be2d722d0df3f2dbf73f04d67999d8750dbdcc7c197054e6abafd" Jan 22 07:59:48 crc kubenswrapper[4933]: E0122 07:59:48.513168 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"06305ea82b4be2d722d0df3f2dbf73f04d67999d8750dbdcc7c197054e6abafd\": container with ID starting with 06305ea82b4be2d722d0df3f2dbf73f04d67999d8750dbdcc7c197054e6abafd not found: ID does not exist" containerID="06305ea82b4be2d722d0df3f2dbf73f04d67999d8750dbdcc7c197054e6abafd" Jan 22 07:59:48 crc kubenswrapper[4933]: I0122 07:59:48.513201 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"06305ea82b4be2d722d0df3f2dbf73f04d67999d8750dbdcc7c197054e6abafd"} err="failed to get container status \"06305ea82b4be2d722d0df3f2dbf73f04d67999d8750dbdcc7c197054e6abafd\": rpc error: code = NotFound desc = could not find container \"06305ea82b4be2d722d0df3f2dbf73f04d67999d8750dbdcc7c197054e6abafd\": container with ID starting with 06305ea82b4be2d722d0df3f2dbf73f04d67999d8750dbdcc7c197054e6abafd not found: ID does not exist" Jan 22 08:00:00 crc kubenswrapper[4933]: I0122 08:00:00.183789 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484480-rvn9h"] Jan 22 08:00:00 crc kubenswrapper[4933]: E0122 08:00:00.184833 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="699435fa-7d6e-4ba1-96f2-46403415349e" containerName="registry-server" Jan 22 08:00:00 crc kubenswrapper[4933]: I0122 08:00:00.184847 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="699435fa-7d6e-4ba1-96f2-46403415349e" containerName="registry-server" Jan 22 08:00:00 crc kubenswrapper[4933]: E0122 08:00:00.184871 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101" containerName="extract-content" Jan 22 08:00:00 crc kubenswrapper[4933]: I0122 08:00:00.184877 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101" containerName="extract-content" Jan 22 08:00:00 crc kubenswrapper[4933]: E0122 08:00:00.184894 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101" containerName="extract-utilities" Jan 22 08:00:00 crc kubenswrapper[4933]: I0122 08:00:00.184900 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101" containerName="extract-utilities" Jan 22 08:00:00 crc kubenswrapper[4933]: E0122 08:00:00.184912 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101" containerName="registry-server" Jan 22 08:00:00 crc kubenswrapper[4933]: I0122 08:00:00.184918 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101" containerName="registry-server" Jan 22 08:00:00 crc kubenswrapper[4933]: E0122 08:00:00.184936 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="699435fa-7d6e-4ba1-96f2-46403415349e" containerName="extract-utilities" Jan 22 08:00:00 crc kubenswrapper[4933]: I0122 08:00:00.184945 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="699435fa-7d6e-4ba1-96f2-46403415349e" containerName="extract-utilities" Jan 22 08:00:00 crc kubenswrapper[4933]: E0122 08:00:00.184960 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="699435fa-7d6e-4ba1-96f2-46403415349e" 
containerName="extract-content" Jan 22 08:00:00 crc kubenswrapper[4933]: I0122 08:00:00.184967 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="699435fa-7d6e-4ba1-96f2-46403415349e" containerName="extract-content" Jan 22 08:00:00 crc kubenswrapper[4933]: I0122 08:00:00.185188 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="699435fa-7d6e-4ba1-96f2-46403415349e" containerName="registry-server" Jan 22 08:00:00 crc kubenswrapper[4933]: I0122 08:00:00.185198 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc1d62f6-f8fb-4d98-8f3d-25a2e78b8101" containerName="registry-server" Jan 22 08:00:00 crc kubenswrapper[4933]: I0122 08:00:00.185926 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484480-rvn9h" Jan 22 08:00:00 crc kubenswrapper[4933]: I0122 08:00:00.187907 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 22 08:00:00 crc kubenswrapper[4933]: I0122 08:00:00.188102 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 22 08:00:00 crc kubenswrapper[4933]: I0122 08:00:00.201278 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484480-rvn9h"] Jan 22 08:00:00 crc kubenswrapper[4933]: I0122 08:00:00.241371 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/52cc129c-befa-4407-9cda-1b6773ce71d0-config-volume\") pod \"collect-profiles-29484480-rvn9h\" (UID: \"52cc129c-befa-4407-9cda-1b6773ce71d0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484480-rvn9h" Jan 22 08:00:00 crc kubenswrapper[4933]: I0122 08:00:00.241656 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ks9df\" (UniqueName: \"kubernetes.io/projected/52cc129c-befa-4407-9cda-1b6773ce71d0-kube-api-access-ks9df\") pod \"collect-profiles-29484480-rvn9h\" (UID: \"52cc129c-befa-4407-9cda-1b6773ce71d0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484480-rvn9h" Jan 22 08:00:00 crc kubenswrapper[4933]: I0122 08:00:00.241807 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/52cc129c-befa-4407-9cda-1b6773ce71d0-secret-volume\") pod \"collect-profiles-29484480-rvn9h\" (UID: \"52cc129c-befa-4407-9cda-1b6773ce71d0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484480-rvn9h" Jan 22 08:00:00 crc kubenswrapper[4933]: I0122 08:00:00.343821 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/52cc129c-befa-4407-9cda-1b6773ce71d0-config-volume\") pod \"collect-profiles-29484480-rvn9h\" (UID: \"52cc129c-befa-4407-9cda-1b6773ce71d0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484480-rvn9h" Jan 22 08:00:00 crc kubenswrapper[4933]: I0122 08:00:00.343915 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ks9df\" (UniqueName: \"kubernetes.io/projected/52cc129c-befa-4407-9cda-1b6773ce71d0-kube-api-access-ks9df\") pod \"collect-profiles-29484480-rvn9h\" (UID: \"52cc129c-befa-4407-9cda-1b6773ce71d0\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29484480-rvn9h" Jan 22 08:00:00 crc kubenswrapper[4933]: I0122 08:00:00.343958 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/52cc129c-befa-4407-9cda-1b6773ce71d0-secret-volume\") pod \"collect-profiles-29484480-rvn9h\" (UID: \"52cc129c-befa-4407-9cda-1b6773ce71d0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484480-rvn9h" Jan 22 08:00:00 crc kubenswrapper[4933]: I0122 08:00:00.344786 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/52cc129c-befa-4407-9cda-1b6773ce71d0-config-volume\") pod \"collect-profiles-29484480-rvn9h\" (UID: \"52cc129c-befa-4407-9cda-1b6773ce71d0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484480-rvn9h" Jan 22 08:00:00 crc kubenswrapper[4933]: I0122 08:00:00.349501 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/52cc129c-befa-4407-9cda-1b6773ce71d0-secret-volume\") pod \"collect-profiles-29484480-rvn9h\" (UID: \"52cc129c-befa-4407-9cda-1b6773ce71d0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484480-rvn9h" Jan 22 08:00:00 crc kubenswrapper[4933]: I0122 08:00:00.362313 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ks9df\" (UniqueName: \"kubernetes.io/projected/52cc129c-befa-4407-9cda-1b6773ce71d0-kube-api-access-ks9df\") pod \"collect-profiles-29484480-rvn9h\" (UID: \"52cc129c-befa-4407-9cda-1b6773ce71d0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484480-rvn9h" Jan 22 08:00:00 crc kubenswrapper[4933]: I0122 08:00:00.509381 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484480-rvn9h" Jan 22 08:00:00 crc kubenswrapper[4933]: I0122 08:00:00.976288 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484480-rvn9h"] Jan 22 08:00:00 crc kubenswrapper[4933]: W0122 08:00:00.979257 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod52cc129c_befa_4407_9cda_1b6773ce71d0.slice/crio-d4cd9f617e8742992c067dc52e6b1a377b5b905a5e3e9e9854d5768acd1ed2b9 WatchSource:0}: Error finding container d4cd9f617e8742992c067dc52e6b1a377b5b905a5e3e9e9854d5768acd1ed2b9: Status 404 returned error can't find the container with id d4cd9f617e8742992c067dc52e6b1a377b5b905a5e3e9e9854d5768acd1ed2b9 Jan 22 08:00:01 crc kubenswrapper[4933]: I0122 08:00:01.546267 4933 generic.go:334] "Generic (PLEG): container finished" podID="52cc129c-befa-4407-9cda-1b6773ce71d0" containerID="481c0cea3203437e4c89157235c523a5a1f197629a5048d28dcbbbb19ae90ba4" exitCode=0 Jan 22 08:00:01 crc kubenswrapper[4933]: I0122 08:00:01.546398 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484480-rvn9h" event={"ID":"52cc129c-befa-4407-9cda-1b6773ce71d0","Type":"ContainerDied","Data":"481c0cea3203437e4c89157235c523a5a1f197629a5048d28dcbbbb19ae90ba4"} Jan 22 08:00:01 crc kubenswrapper[4933]: I0122 08:00:01.546618 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484480-rvn9h" event={"ID":"52cc129c-befa-4407-9cda-1b6773ce71d0","Type":"ContainerStarted","Data":"d4cd9f617e8742992c067dc52e6b1a377b5b905a5e3e9e9854d5768acd1ed2b9"} Jan 22 08:00:02 crc kubenswrapper[4933]: I0122 08:00:02.888876 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484480-rvn9h" Jan 22 08:00:02 crc kubenswrapper[4933]: I0122 08:00:02.907218 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/52cc129c-befa-4407-9cda-1b6773ce71d0-config-volume\") pod \"52cc129c-befa-4407-9cda-1b6773ce71d0\" (UID: \"52cc129c-befa-4407-9cda-1b6773ce71d0\") " Jan 22 08:00:02 crc kubenswrapper[4933]: I0122 08:00:02.907518 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ks9df\" (UniqueName: \"kubernetes.io/projected/52cc129c-befa-4407-9cda-1b6773ce71d0-kube-api-access-ks9df\") pod \"52cc129c-befa-4407-9cda-1b6773ce71d0\" (UID: \"52cc129c-befa-4407-9cda-1b6773ce71d0\") " Jan 22 08:00:02 crc kubenswrapper[4933]: I0122 08:00:02.907582 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/52cc129c-befa-4407-9cda-1b6773ce71d0-secret-volume\") pod \"52cc129c-befa-4407-9cda-1b6773ce71d0\" (UID: \"52cc129c-befa-4407-9cda-1b6773ce71d0\") " Jan 22 08:00:02 crc kubenswrapper[4933]: I0122 08:00:02.908061 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/52cc129c-befa-4407-9cda-1b6773ce71d0-config-volume" (OuterVolumeSpecName: "config-volume") pod "52cc129c-befa-4407-9cda-1b6773ce71d0" (UID: "52cc129c-befa-4407-9cda-1b6773ce71d0"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 08:00:02 crc kubenswrapper[4933]: I0122 08:00:02.908224 4933 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/52cc129c-befa-4407-9cda-1b6773ce71d0-config-volume\") on node \"crc\" DevicePath \"\"" Jan 22 08:00:02 crc kubenswrapper[4933]: I0122 08:00:02.914402 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52cc129c-befa-4407-9cda-1b6773ce71d0-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "52cc129c-befa-4407-9cda-1b6773ce71d0" (UID: "52cc129c-befa-4407-9cda-1b6773ce71d0"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 08:00:02 crc kubenswrapper[4933]: I0122 08:00:02.916339 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52cc129c-befa-4407-9cda-1b6773ce71d0-kube-api-access-ks9df" (OuterVolumeSpecName: "kube-api-access-ks9df") pod "52cc129c-befa-4407-9cda-1b6773ce71d0" (UID: "52cc129c-befa-4407-9cda-1b6773ce71d0"). InnerVolumeSpecName "kube-api-access-ks9df". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 08:00:03 crc kubenswrapper[4933]: I0122 08:00:03.012057 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ks9df\" (UniqueName: \"kubernetes.io/projected/52cc129c-befa-4407-9cda-1b6773ce71d0-kube-api-access-ks9df\") on node \"crc\" DevicePath \"\"" Jan 22 08:00:03 crc kubenswrapper[4933]: I0122 08:00:03.012152 4933 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/52cc129c-befa-4407-9cda-1b6773ce71d0-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 22 08:00:03 crc kubenswrapper[4933]: I0122 08:00:03.569718 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484480-rvn9h" event={"ID":"52cc129c-befa-4407-9cda-1b6773ce71d0","Type":"ContainerDied","Data":"d4cd9f617e8742992c067dc52e6b1a377b5b905a5e3e9e9854d5768acd1ed2b9"} Jan 22 08:00:03 crc kubenswrapper[4933]: I0122 08:00:03.569775 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d4cd9f617e8742992c067dc52e6b1a377b5b905a5e3e9e9854d5768acd1ed2b9" Jan 22 08:00:03 crc kubenswrapper[4933]: I0122 08:00:03.569791 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484480-rvn9h" Jan 22 08:00:03 crc kubenswrapper[4933]: I0122 08:00:03.962380 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484435-45t74"] Jan 22 08:00:03 crc kubenswrapper[4933]: I0122 08:00:03.972290 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484435-45t74"] Jan 22 08:00:04 crc kubenswrapper[4933]: I0122 08:00:04.511927 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b83d43bd-6f8b-472a-b999-f7b90507e14b" path="/var/lib/kubelet/pods/b83d43bd-6f8b-472a-b999-f7b90507e14b/volumes" Jan 22 08:00:09 crc kubenswrapper[4933]: I0122 08:00:09.958247 4933 scope.go:117] "RemoveContainer" containerID="b04e7526db1898c258136ecf504d626753553d30f6199ea9a6de4f149a84d89f" Jan 22 08:01:00 crc kubenswrapper[4933]: I0122 08:01:00.181097 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29484481-7tmrk"] Jan 22 08:01:00 crc kubenswrapper[4933]: E0122 08:01:00.182166 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52cc129c-befa-4407-9cda-1b6773ce71d0" containerName="collect-profiles" Jan 22 08:01:00 crc kubenswrapper[4933]: I0122 08:01:00.182183 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="52cc129c-befa-4407-9cda-1b6773ce71d0" containerName="collect-profiles" Jan 22 08:01:00 crc kubenswrapper[4933]: I0122 08:01:00.182448 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="52cc129c-befa-4407-9cda-1b6773ce71d0" containerName="collect-profiles" Jan 22 08:01:00 crc kubenswrapper[4933]: I0122 08:01:00.183326 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29484481-7tmrk" Jan 22 08:01:00 crc kubenswrapper[4933]: I0122 08:01:00.191227 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29484481-7tmrk"] Jan 22 08:01:00 crc kubenswrapper[4933]: I0122 08:01:00.313775 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d-combined-ca-bundle\") pod \"keystone-cron-29484481-7tmrk\" (UID: \"914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d\") " pod="openstack/keystone-cron-29484481-7tmrk" Jan 22 08:01:00 crc kubenswrapper[4933]: I0122 08:01:00.313932 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nmmpz\" (UniqueName: \"kubernetes.io/projected/914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d-kube-api-access-nmmpz\") pod \"keystone-cron-29484481-7tmrk\" (UID: \"914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d\") " pod="openstack/keystone-cron-29484481-7tmrk" Jan 22 08:01:00 crc kubenswrapper[4933]: I0122 08:01:00.314051 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d-config-data\") pod \"keystone-cron-29484481-7tmrk\" (UID: \"914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d\") " pod="openstack/keystone-cron-29484481-7tmrk" Jan 22 08:01:00 crc kubenswrapper[4933]: I0122 08:01:00.314312 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d-fernet-keys\") pod \"keystone-cron-29484481-7tmrk\" (UID: \"914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d\") " pod="openstack/keystone-cron-29484481-7tmrk" Jan 22 08:01:00 crc kubenswrapper[4933]: I0122 08:01:00.416246 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d-combined-ca-bundle\") pod \"keystone-cron-29484481-7tmrk\" (UID: \"914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d\") " pod="openstack/keystone-cron-29484481-7tmrk" Jan 22 08:01:00 crc kubenswrapper[4933]: I0122 08:01:00.416295 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nmmpz\" (UniqueName: \"kubernetes.io/projected/914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d-kube-api-access-nmmpz\") pod \"keystone-cron-29484481-7tmrk\" (UID: \"914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d\") " pod="openstack/keystone-cron-29484481-7tmrk" Jan 22 08:01:00 crc kubenswrapper[4933]: I0122 08:01:00.416342 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d-config-data\") pod \"keystone-cron-29484481-7tmrk\" (UID: \"914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d\") " pod="openstack/keystone-cron-29484481-7tmrk" Jan 22 08:01:00 crc kubenswrapper[4933]: I0122 08:01:00.416429 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d-fernet-keys\") pod \"keystone-cron-29484481-7tmrk\" (UID: \"914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d\") " pod="openstack/keystone-cron-29484481-7tmrk" Jan 22 08:01:00 crc kubenswrapper[4933]: I0122 08:01:00.422749 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d-combined-ca-bundle\") pod \"keystone-cron-29484481-7tmrk\" (UID: \"914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d\") " pod="openstack/keystone-cron-29484481-7tmrk" Jan 22 08:01:00 crc kubenswrapper[4933]: I0122 08:01:00.423519 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d-fernet-keys\") pod \"keystone-cron-29484481-7tmrk\" (UID: \"914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d\") " pod="openstack/keystone-cron-29484481-7tmrk" Jan 22 08:01:00 crc kubenswrapper[4933]: I0122 08:01:00.423682 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d-config-data\") pod \"keystone-cron-29484481-7tmrk\" (UID: \"914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d\") " pod="openstack/keystone-cron-29484481-7tmrk" Jan 22 08:01:00 crc kubenswrapper[4933]: I0122 08:01:00.435464 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nmmpz\" (UniqueName: \"kubernetes.io/projected/914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d-kube-api-access-nmmpz\") pod \"keystone-cron-29484481-7tmrk\" (UID: \"914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d\") " pod="openstack/keystone-cron-29484481-7tmrk" Jan 22 08:01:00 crc kubenswrapper[4933]: I0122 08:01:00.518471 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29484481-7tmrk" Jan 22 08:01:00 crc kubenswrapper[4933]: I0122 08:01:00.999371 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29484481-7tmrk"] Jan 22 08:01:01 crc kubenswrapper[4933]: I0122 08:01:01.210315 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29484481-7tmrk" event={"ID":"914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d","Type":"ContainerStarted","Data":"f0e7dd72e4b6cf7547d6bf8e9c8da611586eef96d852c2157125587b293f9697"} Jan 22 08:01:02 crc kubenswrapper[4933]: I0122 08:01:02.220594 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29484481-7tmrk" event={"ID":"914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d","Type":"ContainerStarted","Data":"0837945dc8606b0b32e56a0ec70ceed024717e949b30288c98dd2ccda9135896"} Jan 22 08:01:02 crc kubenswrapper[4933]: I0122 08:01:02.240125 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29484481-7tmrk" podStartSLOduration=2.240104516 podStartE2EDuration="2.240104516s" podCreationTimestamp="2026-01-22 08:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 08:01:02.233496075 +0000 UTC m=+8110.070621428" watchObservedRunningTime="2026-01-22 08:01:02.240104516 +0000 UTC m=+8110.077229869" Jan 22 08:01:02 crc kubenswrapper[4933]: I0122 08:01:02.351796 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-86cb77c54b-fdmt6_cfa84f31-ab21-494f-932c-77b809b656c0/cert-manager-controller/0.log" Jan 22 08:01:02 crc kubenswrapper[4933]: I0122 08:01:02.372580 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-855d9ccff4-mrwsn_06a56c7d-200d-4473-8766-ddb0f9f75cd4/cert-manager-cainjector/0.log" Jan 22 08:01:02 crc kubenswrapper[4933]: I0122 08:01:02.384736 4933 log.go:25] "Finished parsing log file" 
path="/var/log/pods/cert-manager_cert-manager-webhook-f4fb5df64-m2xft_f81fa480-c38d-4a0e-8adc-51332ceab483/cert-manager-webhook/0.log" Jan 22 08:01:04 crc kubenswrapper[4933]: I0122 08:01:04.238277 4933 generic.go:334] "Generic (PLEG): container finished" podID="914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d" containerID="0837945dc8606b0b32e56a0ec70ceed024717e949b30288c98dd2ccda9135896" exitCode=0 Jan 22 08:01:04 crc kubenswrapper[4933]: I0122 08:01:04.238362 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29484481-7tmrk" event={"ID":"914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d","Type":"ContainerDied","Data":"0837945dc8606b0b32e56a0ec70ceed024717e949b30288c98dd2ccda9135896"} Jan 22 08:01:05 crc kubenswrapper[4933]: I0122 08:01:05.597291 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29484481-7tmrk" Jan 22 08:01:05 crc kubenswrapper[4933]: I0122 08:01:05.725895 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nmmpz\" (UniqueName: \"kubernetes.io/projected/914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d-kube-api-access-nmmpz\") pod \"914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d\" (UID: \"914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d\") " Jan 22 08:01:05 crc kubenswrapper[4933]: I0122 08:01:05.726023 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d-combined-ca-bundle\") pod \"914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d\" (UID: \"914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d\") " Jan 22 08:01:05 crc kubenswrapper[4933]: I0122 08:01:05.726258 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d-fernet-keys\") pod \"914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d\" (UID: \"914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d\") " Jan 22 08:01:05 crc kubenswrapper[4933]: I0122 08:01:05.726307 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d-config-data\") pod \"914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d\" (UID: \"914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d\") " Jan 22 08:01:05 crc kubenswrapper[4933]: I0122 08:01:05.731836 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d" (UID: "914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 08:01:05 crc kubenswrapper[4933]: I0122 08:01:05.732597 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d-kube-api-access-nmmpz" (OuterVolumeSpecName: "kube-api-access-nmmpz") pod "914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d" (UID: "914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d"). InnerVolumeSpecName "kube-api-access-nmmpz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 08:01:05 crc kubenswrapper[4933]: I0122 08:01:05.764369 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d" (UID: "914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 08:01:05 crc kubenswrapper[4933]: I0122 08:01:05.794013 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d-config-data" (OuterVolumeSpecName: "config-data") pod "914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d" (UID: "914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 08:01:05 crc kubenswrapper[4933]: I0122 08:01:05.828472 4933 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 22 08:01:05 crc kubenswrapper[4933]: I0122 08:01:05.828506 4933 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 08:01:05 crc kubenswrapper[4933]: I0122 08:01:05.828519 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nmmpz\" (UniqueName: \"kubernetes.io/projected/914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d-kube-api-access-nmmpz\") on node \"crc\" DevicePath \"\"" Jan 22 08:01:05 crc kubenswrapper[4933]: I0122 08:01:05.828531 4933 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 08:01:06 crc kubenswrapper[4933]: I0122 08:01:06.266544 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29484481-7tmrk" event={"ID":"914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d","Type":"ContainerDied","Data":"f0e7dd72e4b6cf7547d6bf8e9c8da611586eef96d852c2157125587b293f9697"} Jan 22 08:01:06 crc kubenswrapper[4933]: I0122 08:01:06.266786 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f0e7dd72e4b6cf7547d6bf8e9c8da611586eef96d852c2157125587b293f9697" Jan 22 08:01:06 crc kubenswrapper[4933]: I0122 08:01:06.266924 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29484481-7tmrk" Jan 22 08:01:07 crc kubenswrapper[4933]: I0122 08:01:07.772875 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7754f76f8b-9b5pb_bad89ee9-f9e4-4a74-9f79-5a9ef46b9ec0/nmstate-console-plugin/0.log" Jan 22 08:01:07 crc kubenswrapper[4933]: I0122 08:01:07.800418 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-z4cfk_95abb851-2f05-43e0-8c35-92027baf4a2c/nmstate-handler/0.log" Jan 22 08:01:07 crc kubenswrapper[4933]: I0122 08:01:07.810999 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-88t8r_7f8f46df-3251-4fc5-87fb-10b877fe5878/nmstate-metrics/0.log" Jan 22 08:01:07 crc kubenswrapper[4933]: I0122 08:01:07.817750 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-88t8r_7f8f46df-3251-4fc5-87fb-10b877fe5878/kube-rbac-proxy/0.log" Jan 22 08:01:07 crc kubenswrapper[4933]: I0122 08:01:07.831001 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-646758c888-49j2k_85167be7-eaf1-4931-ac95-f80007d35c42/nmstate-operator/0.log" Jan 22 08:01:07 crc kubenswrapper[4933]: I0122 08:01:07.840370 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-8474b5b9d8-dtghh_40102dae-8b84-4138-bd5d-4a3b7c1b3492/nmstate-webhook/0.log" Jan 22 08:01:10 crc kubenswrapper[4933]: I0122 08:01:10.943016 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 08:01:10 crc kubenswrapper[4933]: I0122 08:01:10.943633 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 08:01:13 crc kubenswrapper[4933]: I0122 08:01:13.306236 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-68bc856cb9-jxw4c_a79423c3-5348-464f-9866-425625f2a8f1/prometheus-operator/0.log" Jan 22 08:01:13 crc kubenswrapper[4933]: I0122 08:01:13.326941 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-697444475f-hzfjv_f4cb4026-e4bf-45f2-9373-71f24b0ecd16/prometheus-operator-admission-webhook/0.log" Jan 22 08:01:13 crc kubenswrapper[4933]: I0122 08:01:13.340441 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-697444475f-jv7mp_f6675820-e9ac-4e84-8e2f-2971304513ea/prometheus-operator-admission-webhook/0.log" Jan 22 08:01:13 crc kubenswrapper[4933]: I0122 08:01:13.361950 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-59bdc8b94-2n7hh_1ec7b8e4-e17f-4d56-8f1c-b04751ad3bf8/operator/0.log" Jan 22 08:01:13 crc kubenswrapper[4933]: I0122 08:01:13.372449 4933 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-operators_perses-operator-5bf474d74f-5b8t8_cc705ce5-1fcd-4528-b638-c5e1d062c035/perses-operator/0.log" Jan 22 08:01:19 crc kubenswrapper[4933]: I0122 08:01:19.429186 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-2stg8_29f4dd7e-c2cf-4f64-87e8-2201fe99e751/controller/0.log" Jan 22 08:01:19 crc kubenswrapper[4933]: I0122 08:01:19.438621 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-2stg8_29f4dd7e-c2cf-4f64-87e8-2201fe99e751/kube-rbac-proxy/0.log" Jan 22 08:01:19 crc kubenswrapper[4933]: I0122 08:01:19.466994 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tgvxb_f619cfe1-61b2-4726-a08f-b41cc24ae488/controller/0.log" Jan 22 08:01:22 crc kubenswrapper[4933]: I0122 08:01:22.537873 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tgvxb_f619cfe1-61b2-4726-a08f-b41cc24ae488/frr/0.log" Jan 22 08:01:22 crc kubenswrapper[4933]: I0122 08:01:22.552937 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tgvxb_f619cfe1-61b2-4726-a08f-b41cc24ae488/reloader/0.log" Jan 22 08:01:22 crc kubenswrapper[4933]: I0122 08:01:22.557336 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tgvxb_f619cfe1-61b2-4726-a08f-b41cc24ae488/frr-metrics/0.log" Jan 22 08:01:22 crc kubenswrapper[4933]: I0122 08:01:22.568704 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tgvxb_f619cfe1-61b2-4726-a08f-b41cc24ae488/kube-rbac-proxy/0.log" Jan 22 08:01:22 crc kubenswrapper[4933]: I0122 08:01:22.577508 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tgvxb_f619cfe1-61b2-4726-a08f-b41cc24ae488/kube-rbac-proxy-frr/0.log" Jan 22 08:01:22 crc kubenswrapper[4933]: I0122 08:01:22.582823 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tgvxb_f619cfe1-61b2-4726-a08f-b41cc24ae488/cp-frr-files/0.log" Jan 22 08:01:22 crc kubenswrapper[4933]: I0122 08:01:22.589855 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tgvxb_f619cfe1-61b2-4726-a08f-b41cc24ae488/cp-reloader/0.log" Jan 22 08:01:22 crc kubenswrapper[4933]: I0122 08:01:22.600693 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tgvxb_f619cfe1-61b2-4726-a08f-b41cc24ae488/cp-metrics/0.log" Jan 22 08:01:22 crc kubenswrapper[4933]: I0122 08:01:22.613762 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7df86c4f6c-rjx6j_42d0dcd8-97a6-489b-9b19-43fd22936816/frr-k8s-webhook-server/0.log" Jan 22 08:01:22 crc kubenswrapper[4933]: I0122 08:01:22.644923 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-6c8b57987c-qr4jc_9f8987dd-79ca-4569-8000-088df75be06e/manager/0.log" Jan 22 08:01:22 crc kubenswrapper[4933]: I0122 08:01:22.654268 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-688dc4b4d8-9z74g_fe7410b0-e355-498e-841e-89ae7f5e56de/webhook-server/0.log" Jan 22 08:01:23 crc kubenswrapper[4933]: I0122 08:01:23.322952 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-xlk7v_57ec97d6-d16e-4069-98cc-7dcf56910fad/speaker/0.log" Jan 22 08:01:23 crc kubenswrapper[4933]: I0122 08:01:23.332079 4933 log.go:25] "Finished parsing log 
file" path="/var/log/pods/metallb-system_speaker-xlk7v_57ec97d6-d16e-4069-98cc-7dcf56910fad/kube-rbac-proxy/0.log" Jan 22 08:01:25 crc kubenswrapper[4933]: I0122 08:01:25.201506 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asr2rj_3b3b2cb0-2124-4792-b8fa-7c0a3438c186/extract/0.log" Jan 22 08:01:25 crc kubenswrapper[4933]: I0122 08:01:25.224451 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asr2rj_3b3b2cb0-2124-4792-b8fa-7c0a3438c186/util/0.log" Jan 22 08:01:25 crc kubenswrapper[4933]: I0122 08:01:25.248727 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asr2rj_3b3b2cb0-2124-4792-b8fa-7c0a3438c186/pull/0.log" Jan 22 08:01:25 crc kubenswrapper[4933]: I0122 08:01:25.262303 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrnt57_d078e767-9507-4d8c-a559-3c9df7a99923/extract/0.log" Jan 22 08:01:25 crc kubenswrapper[4933]: I0122 08:01:25.272298 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrnt57_d078e767-9507-4d8c-a559-3c9df7a99923/util/0.log" Jan 22 08:01:25 crc kubenswrapper[4933]: I0122 08:01:25.280631 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcrnt57_d078e767-9507-4d8c-a559-3c9df7a99923/pull/0.log" Jan 22 08:01:25 crc kubenswrapper[4933]: I0122 08:01:25.293765 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138bnpj_5c82fbe7-be8e-4ffd-8c03-f0270785d0fa/extract/0.log" Jan 22 08:01:25 crc kubenswrapper[4933]: I0122 08:01:25.307402 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138bnpj_5c82fbe7-be8e-4ffd-8c03-f0270785d0fa/util/0.log" Jan 22 08:01:25 crc kubenswrapper[4933]: I0122 08:01:25.320431 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7138bnpj_5c82fbe7-be8e-4ffd-8c03-f0270785d0fa/pull/0.log" Jan 22 08:01:25 crc kubenswrapper[4933]: I0122 08:01:25.335899 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f088jdw5_627af3d3-01cc-49d7-833f-7a9f5f313302/extract/0.log" Jan 22 08:01:25 crc kubenswrapper[4933]: I0122 08:01:25.347581 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f088jdw5_627af3d3-01cc-49d7-833f-7a9f5f313302/util/0.log" Jan 22 08:01:25 crc kubenswrapper[4933]: I0122 08:01:25.371783 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f088jdw5_627af3d3-01cc-49d7-833f-7a9f5f313302/pull/0.log" Jan 22 08:01:26 crc kubenswrapper[4933]: I0122 08:01:26.410502 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-5jcpg_65b68271-f234-4a74-aa04-b5113c8c1d89/registry-server/0.log" Jan 22 08:01:26 crc kubenswrapper[4933]: I0122 
Jan 22 08:01:26 crc kubenswrapper[4933]: I0122 08:01:26.463439 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-5jcpg_65b68271-f234-4a74-aa04-b5113c8c1d89/extract-content/0.log"
Jan 22 08:01:27 crc kubenswrapper[4933]: I0122 08:01:27.064241 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-rjpmf_4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5/registry-server/0.log"
Jan 22 08:01:27 crc kubenswrapper[4933]: I0122 08:01:27.073047 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-rjpmf_4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5/extract-utilities/0.log"
Jan 22 08:01:27 crc kubenswrapper[4933]: I0122 08:01:27.087983 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-rjpmf_4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5/extract-content/0.log"
Jan 22 08:01:27 crc kubenswrapper[4933]: I0122 08:01:27.107203 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-9pxct_48949ed4-e60d-474a-9b57-1cf96d9428d6/marketplace-operator/0.log"
Jan 22 08:01:27 crc kubenswrapper[4933]: I0122 08:01:27.434023 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-kvffj_ed5c6166-9794-440e-9f36-6ae46897815b/registry-server/0.log"
Jan 22 08:01:27 crc kubenswrapper[4933]: I0122 08:01:27.440751 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-kvffj_ed5c6166-9794-440e-9f36-6ae46897815b/extract-utilities/0.log"
Jan 22 08:01:27 crc kubenswrapper[4933]: I0122 08:01:27.447620 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-kvffj_ed5c6166-9794-440e-9f36-6ae46897815b/extract-content/0.log"
Jan 22 08:01:28 crc kubenswrapper[4933]: I0122 08:01:28.602836 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-fcs2s_4902f901-8853-4071-bc3a-0a5b32065bf3/registry-server/0.log"
Jan 22 08:01:28 crc kubenswrapper[4933]: I0122 08:01:28.610773 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-fcs2s_4902f901-8853-4071-bc3a-0a5b32065bf3/extract-utilities/0.log"
Jan 22 08:01:28 crc kubenswrapper[4933]: I0122 08:01:28.618820 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-fcs2s_4902f901-8853-4071-bc3a-0a5b32065bf3/extract-content/0.log"
Jan 22 08:01:32 crc kubenswrapper[4933]: I0122 08:01:32.157627 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-68bc856cb9-jxw4c_a79423c3-5348-464f-9866-425625f2a8f1/prometheus-operator/0.log"
Jan 22 08:01:32 crc kubenswrapper[4933]: I0122 08:01:32.168003 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-697444475f-hzfjv_f4cb4026-e4bf-45f2-9373-71f24b0ecd16/prometheus-operator-admission-webhook/0.log"
Jan 22 08:01:32 crc kubenswrapper[4933]: I0122 08:01:32.177585 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-697444475f-jv7mp_f6675820-e9ac-4e84-8e2f-2971304513ea/prometheus-operator-admission-webhook/0.log"
Jan 22 08:01:32 crc kubenswrapper[4933]: I0122 08:01:32.206189 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-59bdc8b94-2n7hh_1ec7b8e4-e17f-4d56-8f1c-b04751ad3bf8/operator/0.log"
Jan 22 08:01:32 crc kubenswrapper[4933]: I0122 08:01:32.216918 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5bf474d74f-5b8t8_cc705ce5-1fcd-4528-b638-c5e1d062c035/perses-operator/0.log"
Jan 22 08:01:40 crc kubenswrapper[4933]: I0122 08:01:40.942701 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 08:01:40 crc kubenswrapper[4933]: I0122 08:01:40.943257 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 08:02:10 crc kubenswrapper[4933]: I0122 08:02:10.943420 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 08:02:10 crc kubenswrapper[4933]: I0122 08:02:10.944212 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 08:02:10 crc kubenswrapper[4933]: I0122 08:02:10.944278 4933 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx"
Jan 22 08:02:10 crc kubenswrapper[4933]: I0122 08:02:10.945619 4933 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"cfab95e41af998dabe63c89a764948bcf7a828c9de3e04ff4298ff3b6fe7d56b"} pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 22 08:02:10 crc kubenswrapper[4933]: I0122 08:02:10.945683 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" containerID="cri-o://cfab95e41af998dabe63c89a764948bcf7a828c9de3e04ff4298ff3b6fe7d56b" gracePeriod=600
Jan 22 08:02:11 crc kubenswrapper[4933]: E0122 08:02:11.070690 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:02:11 crc kubenswrapper[4933]: I0122 08:02:11.644577 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-vxdf2"]
Jan 22 08:02:11 crc kubenswrapper[4933]: E0122 08:02:11.645259 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d" containerName="keystone-cron"
Jan 22 08:02:11 crc kubenswrapper[4933]: I0122 08:02:11.645343 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d" containerName="keystone-cron"
Jan 22 08:02:11 crc kubenswrapper[4933]: I0122 08:02:11.645635 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="914c73cc-c8f0-4f6c-9301-ec11c2fd3b1d" containerName="keystone-cron"
Jan 22 08:02:11 crc kubenswrapper[4933]: I0122 08:02:11.650675 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vxdf2"
Jan 22 08:02:11 crc kubenswrapper[4933]: I0122 08:02:11.663154 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vxdf2"]
Jan 22 08:02:11 crc kubenswrapper[4933]: I0122 08:02:11.716664 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0396623b-d6eb-467b-9210-5710a76d959a-catalog-content\") pod \"community-operators-vxdf2\" (UID: \"0396623b-d6eb-467b-9210-5710a76d959a\") " pod="openshift-marketplace/community-operators-vxdf2"
Jan 22 08:02:11 crc kubenswrapper[4933]: I0122 08:02:11.717031 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0396623b-d6eb-467b-9210-5710a76d959a-utilities\") pod \"community-operators-vxdf2\" (UID: \"0396623b-d6eb-467b-9210-5710a76d959a\") " pod="openshift-marketplace/community-operators-vxdf2"
Jan 22 08:02:11 crc kubenswrapper[4933]: I0122 08:02:11.717451 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hvq6m\" (UniqueName: \"kubernetes.io/projected/0396623b-d6eb-467b-9210-5710a76d959a-kube-api-access-hvq6m\") pod \"community-operators-vxdf2\" (UID: \"0396623b-d6eb-467b-9210-5710a76d959a\") " pod="openshift-marketplace/community-operators-vxdf2"
Jan 22 08:02:11 crc kubenswrapper[4933]: I0122 08:02:11.819582 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hvq6m\" (UniqueName: \"kubernetes.io/projected/0396623b-d6eb-467b-9210-5710a76d959a-kube-api-access-hvq6m\") pod \"community-operators-vxdf2\" (UID: \"0396623b-d6eb-467b-9210-5710a76d959a\") " pod="openshift-marketplace/community-operators-vxdf2"
Jan 22 08:02:11 crc kubenswrapper[4933]: I0122 08:02:11.819716 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0396623b-d6eb-467b-9210-5710a76d959a-catalog-content\") pod \"community-operators-vxdf2\" (UID: \"0396623b-d6eb-467b-9210-5710a76d959a\") " pod="openshift-marketplace/community-operators-vxdf2"
Jan 22 08:02:11 crc kubenswrapper[4933]: I0122 08:02:11.819751 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0396623b-d6eb-467b-9210-5710a76d959a-utilities\") pod \"community-operators-vxdf2\" (UID: \"0396623b-d6eb-467b-9210-5710a76d959a\") " pod="openshift-marketplace/community-operators-vxdf2"
Jan 22 08:02:11 crc kubenswrapper[4933]: I0122 08:02:11.820506 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0396623b-d6eb-467b-9210-5710a76d959a-utilities\") pod \"community-operators-vxdf2\" (UID: \"0396623b-d6eb-467b-9210-5710a76d959a\") " pod="openshift-marketplace/community-operators-vxdf2"
Jan 22 08:02:11 crc kubenswrapper[4933]: I0122 08:02:11.821159 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0396623b-d6eb-467b-9210-5710a76d959a-catalog-content\") pod \"community-operators-vxdf2\" (UID: \"0396623b-d6eb-467b-9210-5710a76d959a\") " pod="openshift-marketplace/community-operators-vxdf2"
Jan 22 08:02:11 crc kubenswrapper[4933]: I0122 08:02:11.852005 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hvq6m\" (UniqueName: \"kubernetes.io/projected/0396623b-d6eb-467b-9210-5710a76d959a-kube-api-access-hvq6m\") pod \"community-operators-vxdf2\" (UID: \"0396623b-d6eb-467b-9210-5710a76d959a\") " pod="openshift-marketplace/community-operators-vxdf2"
Jan 22 08:02:11 crc kubenswrapper[4933]: I0122 08:02:11.938099 4933 generic.go:334] "Generic (PLEG): container finished" podID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerID="cfab95e41af998dabe63c89a764948bcf7a828c9de3e04ff4298ff3b6fe7d56b" exitCode=0
Jan 22 08:02:11 crc kubenswrapper[4933]: I0122 08:02:11.938162 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerDied","Data":"cfab95e41af998dabe63c89a764948bcf7a828c9de3e04ff4298ff3b6fe7d56b"}
Jan 22 08:02:11 crc kubenswrapper[4933]: I0122 08:02:11.938209 4933 scope.go:117] "RemoveContainer" containerID="c681476a698bdfba0829493019ebf1b8e76064c7d6a7c94d045eb0a2b4992611"
Jan 22 08:02:11 crc kubenswrapper[4933]: I0122 08:02:11.939021 4933 scope.go:117] "RemoveContainer" containerID="cfab95e41af998dabe63c89a764948bcf7a828c9de3e04ff4298ff3b6fe7d56b"
Jan 22 08:02:11 crc kubenswrapper[4933]: E0122 08:02:11.939410 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:02:11 crc kubenswrapper[4933]: I0122 08:02:11.990677 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vxdf2"
Need to start a new one" pod="openshift-marketplace/community-operators-vxdf2" Jan 22 08:02:12 crc kubenswrapper[4933]: I0122 08:02:12.673466 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vxdf2"] Jan 22 08:02:12 crc kubenswrapper[4933]: I0122 08:02:12.955273 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vxdf2" event={"ID":"0396623b-d6eb-467b-9210-5710a76d959a","Type":"ContainerStarted","Data":"e8d2c6da89abfa99702745dacbcbe29b2d2a65824b6fc0de48826aff29a77a77"} Jan 22 08:02:12 crc kubenswrapper[4933]: I0122 08:02:12.955602 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vxdf2" event={"ID":"0396623b-d6eb-467b-9210-5710a76d959a","Type":"ContainerStarted","Data":"6613c70af33ce64537315711eb00883ef1239e237ccf3038d477caf21096bc54"} Jan 22 08:02:13 crc kubenswrapper[4933]: I0122 08:02:13.970671 4933 generic.go:334] "Generic (PLEG): container finished" podID="0396623b-d6eb-467b-9210-5710a76d959a" containerID="e8d2c6da89abfa99702745dacbcbe29b2d2a65824b6fc0de48826aff29a77a77" exitCode=0 Jan 22 08:02:13 crc kubenswrapper[4933]: I0122 08:02:13.971174 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vxdf2" event={"ID":"0396623b-d6eb-467b-9210-5710a76d959a","Type":"ContainerDied","Data":"e8d2c6da89abfa99702745dacbcbe29b2d2a65824b6fc0de48826aff29a77a77"} Jan 22 08:02:13 crc kubenswrapper[4933]: I0122 08:02:13.977625 4933 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 08:02:21 crc kubenswrapper[4933]: I0122 08:02:21.031594 4933 generic.go:334] "Generic (PLEG): container finished" podID="0396623b-d6eb-467b-9210-5710a76d959a" containerID="bd2fe57b81c85a90d03aa53cf92be42d48a94941e76d72edae2ddfc7f8e81b60" exitCode=0 Jan 22 08:02:21 crc kubenswrapper[4933]: I0122 08:02:21.031765 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vxdf2" event={"ID":"0396623b-d6eb-467b-9210-5710a76d959a","Type":"ContainerDied","Data":"bd2fe57b81c85a90d03aa53cf92be42d48a94941e76d72edae2ddfc7f8e81b60"} Jan 22 08:02:22 crc kubenswrapper[4933]: I0122 08:02:22.501958 4933 scope.go:117] "RemoveContainer" containerID="cfab95e41af998dabe63c89a764948bcf7a828c9de3e04ff4298ff3b6fe7d56b" Jan 22 08:02:22 crc kubenswrapper[4933]: E0122 08:02:22.502733 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:02:24 crc kubenswrapper[4933]: I0122 08:02:24.059163 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vxdf2" event={"ID":"0396623b-d6eb-467b-9210-5710a76d959a","Type":"ContainerStarted","Data":"1dfc7aa94afebcbd5e82ec469b473d06f686a301ae7c0c5bbd7babfc577d9367"} Jan 22 08:02:24 crc kubenswrapper[4933]: I0122 08:02:24.079263 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-vxdf2" podStartSLOduration=3.872245783 podStartE2EDuration="13.07924339s" podCreationTimestamp="2026-01-22 08:02:11 +0000 UTC" 
firstStartedPulling="2026-01-22 08:02:13.977279123 +0000 UTC m=+8181.814404486" lastFinishedPulling="2026-01-22 08:02:23.18427674 +0000 UTC m=+8191.021402093" observedRunningTime="2026-01-22 08:02:24.074493115 +0000 UTC m=+8191.911618468" watchObservedRunningTime="2026-01-22 08:02:24.07924339 +0000 UTC m=+8191.916368743" Jan 22 08:02:31 crc kubenswrapper[4933]: I0122 08:02:31.990833 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-vxdf2" Jan 22 08:02:31 crc kubenswrapper[4933]: I0122 08:02:31.991453 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-vxdf2" Jan 22 08:02:32 crc kubenswrapper[4933]: I0122 08:02:32.036348 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-vxdf2" Jan 22 08:02:32 crc kubenswrapper[4933]: I0122 08:02:32.189658 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-vxdf2" Jan 22 08:02:32 crc kubenswrapper[4933]: I0122 08:02:32.307443 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vxdf2"] Jan 22 08:02:32 crc kubenswrapper[4933]: I0122 08:02:32.356704 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rjpmf"] Jan 22 08:02:32 crc kubenswrapper[4933]: I0122 08:02:32.357306 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-rjpmf" podUID="4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5" containerName="registry-server" containerID="cri-o://be140cd4b37c2c9b24a2cd46714214d1fea8a2bb6a24eae56d0b90e9f8f20892" gracePeriod=2 Jan 22 08:02:33 crc kubenswrapper[4933]: I0122 08:02:33.149460 4933 generic.go:334] "Generic (PLEG): container finished" podID="4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5" containerID="be140cd4b37c2c9b24a2cd46714214d1fea8a2bb6a24eae56d0b90e9f8f20892" exitCode=0 Jan 22 08:02:33 crc kubenswrapper[4933]: I0122 08:02:33.149571 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rjpmf" event={"ID":"4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5","Type":"ContainerDied","Data":"be140cd4b37c2c9b24a2cd46714214d1fea8a2bb6a24eae56d0b90e9f8f20892"} Jan 22 08:02:33 crc kubenswrapper[4933]: I0122 08:02:33.639727 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-rjpmf" Jan 22 08:02:33 crc kubenswrapper[4933]: I0122 08:02:33.804577 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5-catalog-content\") pod \"4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5\" (UID: \"4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5\") " Jan 22 08:02:33 crc kubenswrapper[4933]: I0122 08:02:33.805033 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6pk99\" (UniqueName: \"kubernetes.io/projected/4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5-kube-api-access-6pk99\") pod \"4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5\" (UID: \"4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5\") " Jan 22 08:02:33 crc kubenswrapper[4933]: I0122 08:02:33.809352 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5-utilities\") pod \"4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5\" (UID: \"4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5\") " Jan 22 08:02:33 crc kubenswrapper[4933]: I0122 08:02:33.810857 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5-utilities" (OuterVolumeSpecName: "utilities") pod "4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5" (UID: "4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 08:02:33 crc kubenswrapper[4933]: I0122 08:02:33.827022 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5-kube-api-access-6pk99" (OuterVolumeSpecName: "kube-api-access-6pk99") pod "4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5" (UID: "4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5"). InnerVolumeSpecName "kube-api-access-6pk99". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 08:02:33 crc kubenswrapper[4933]: I0122 08:02:33.914563 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6pk99\" (UniqueName: \"kubernetes.io/projected/4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5-kube-api-access-6pk99\") on node \"crc\" DevicePath \"\"" Jan 22 08:02:33 crc kubenswrapper[4933]: I0122 08:02:33.914602 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 08:02:33 crc kubenswrapper[4933]: I0122 08:02:33.916675 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5" (UID: "4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 08:02:34 crc kubenswrapper[4933]: I0122 08:02:34.016963 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 08:02:34 crc kubenswrapper[4933]: I0122 08:02:34.191178 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rjpmf" event={"ID":"4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5","Type":"ContainerDied","Data":"69574079243eb4100c9ee0116e21e160b05155c56819a6cba0876ce9b89a8b92"} Jan 22 08:02:34 crc kubenswrapper[4933]: I0122 08:02:34.191226 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rjpmf" Jan 22 08:02:34 crc kubenswrapper[4933]: I0122 08:02:34.191269 4933 scope.go:117] "RemoveContainer" containerID="be140cd4b37c2c9b24a2cd46714214d1fea8a2bb6a24eae56d0b90e9f8f20892" Jan 22 08:02:34 crc kubenswrapper[4933]: I0122 08:02:34.219729 4933 scope.go:117] "RemoveContainer" containerID="c7357cd665ad54998be4b28d0f6baef02e78862766ef793107ca722870f0bcec" Jan 22 08:02:34 crc kubenswrapper[4933]: I0122 08:02:34.242738 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rjpmf"] Jan 22 08:02:34 crc kubenswrapper[4933]: I0122 08:02:34.250931 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-rjpmf"] Jan 22 08:02:34 crc kubenswrapper[4933]: I0122 08:02:34.261348 4933 scope.go:117] "RemoveContainer" containerID="95827192ac5e21d7fd699ce05d24b8a882085b16008c8c4e05b7e0a015ce4193" Jan 22 08:02:34 crc kubenswrapper[4933]: I0122 08:02:34.507902 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5" path="/var/lib/kubelet/pods/4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5/volumes" Jan 22 08:02:36 crc kubenswrapper[4933]: I0122 08:02:36.490746 4933 scope.go:117] "RemoveContainer" containerID="cfab95e41af998dabe63c89a764948bcf7a828c9de3e04ff4298ff3b6fe7d56b" Jan 22 08:02:36 crc kubenswrapper[4933]: E0122 08:02:36.491542 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:02:49 crc kubenswrapper[4933]: I0122 08:02:49.491207 4933 scope.go:117] "RemoveContainer" containerID="cfab95e41af998dabe63c89a764948bcf7a828c9de3e04ff4298ff3b6fe7d56b" Jan 22 08:02:49 crc kubenswrapper[4933]: E0122 08:02:49.491960 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:03:01 crc kubenswrapper[4933]: I0122 08:03:01.490683 4933 scope.go:117] "RemoveContainer" containerID="cfab95e41af998dabe63c89a764948bcf7a828c9de3e04ff4298ff3b6fe7d56b" Jan 22 08:03:01 crc kubenswrapper[4933]: E0122 
Jan 22 08:03:13 crc kubenswrapper[4933]: I0122 08:03:13.490664 4933 scope.go:117] "RemoveContainer" containerID="cfab95e41af998dabe63c89a764948bcf7a828c9de3e04ff4298ff3b6fe7d56b"
Jan 22 08:03:13 crc kubenswrapper[4933]: E0122 08:03:13.491615 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:03:27 crc kubenswrapper[4933]: I0122 08:03:27.491028 4933 scope.go:117] "RemoveContainer" containerID="cfab95e41af998dabe63c89a764948bcf7a828c9de3e04ff4298ff3b6fe7d56b"
Jan 22 08:03:27 crc kubenswrapper[4933]: E0122 08:03:27.491802 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:03:29 crc kubenswrapper[4933]: I0122 08:03:29.339840 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-68bc856cb9-jxw4c_a79423c3-5348-464f-9866-425625f2a8f1/prometheus-operator/0.log"
Jan 22 08:03:29 crc kubenswrapper[4933]: I0122 08:03:29.357371 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-697444475f-hzfjv_f4cb4026-e4bf-45f2-9373-71f24b0ecd16/prometheus-operator-admission-webhook/0.log"
Jan 22 08:03:29 crc kubenswrapper[4933]: I0122 08:03:29.368604 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-697444475f-jv7mp_f6675820-e9ac-4e84-8e2f-2971304513ea/prometheus-operator-admission-webhook/0.log"
Jan 22 08:03:29 crc kubenswrapper[4933]: I0122 08:03:29.389914 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-59bdc8b94-2n7hh_1ec7b8e4-e17f-4d56-8f1c-b04751ad3bf8/operator/0.log"
Jan 22 08:03:29 crc kubenswrapper[4933]: I0122 08:03:29.401332 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5bf474d74f-5b8t8_cc705ce5-1fcd-4528-b638-c5e1d062c035/perses-operator/0.log"
Jan 22 08:03:29 crc kubenswrapper[4933]: I0122 08:03:29.559772 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-86cb77c54b-fdmt6_cfa84f31-ab21-494f-932c-77b809b656c0/cert-manager-controller/0.log"
Jan 22 08:03:29 crc kubenswrapper[4933]: I0122 08:03:29.581853 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-855d9ccff4-mrwsn_06a56c7d-200d-4473-8766-ddb0f9f75cd4/cert-manager-cainjector/0.log"
Jan 22 08:03:29 crc kubenswrapper[4933]: I0122 08:03:29.591053 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-f4fb5df64-m2xft_f81fa480-c38d-4a0e-8adc-51332ceab483/cert-manager-webhook/0.log"
Jan 22 08:03:30 crc kubenswrapper[4933]: I0122 08:03:30.589266 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_3078a1b977323d6d8f95a4018ef06f377c198eb1741282ac05e933b603vrtpk_6a552e55-190e-4f4c-9234-ebd6d63ee4ad/extract/0.log"
Jan 22 08:03:30 crc kubenswrapper[4933]: I0122 08:03:30.614276 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_3078a1b977323d6d8f95a4018ef06f377c198eb1741282ac05e933b603vrtpk_6a552e55-190e-4f4c-9234-ebd6d63ee4ad/util/0.log"
Jan 22 08:03:30 crc kubenswrapper[4933]: I0122 08:03:30.629844 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_3078a1b977323d6d8f95a4018ef06f377c198eb1741282ac05e933b603vrtpk_6a552e55-190e-4f4c-9234-ebd6d63ee4ad/pull/0.log"
Jan 22 08:03:30 crc kubenswrapper[4933]: I0122 08:03:30.842718 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-59dd8b7cbf-z6g5l_78b94bce-a7a5-471f-bab2-f57baeff12b6/manager/0.log"
Jan 22 08:03:31 crc kubenswrapper[4933]: I0122 08:03:31.002725 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-69cf5d4557-6ktqg_53b0d937-20e6-4a4b-b61b-f172c672c43f/manager/0.log"
Jan 22 08:03:31 crc kubenswrapper[4933]: I0122 08:03:31.020900 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-b45d7bf98-sb8mm_c2a2b482-2d1c-4c3b-b1dc-7d3f291b665e/manager/0.log"
Jan 22 08:03:31 crc kubenswrapper[4933]: I0122 08:03:31.229246 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-2stg8_29f4dd7e-c2cf-4f64-87e8-2201fe99e751/controller/0.log"
Jan 22 08:03:31 crc kubenswrapper[4933]: I0122 08:03:31.238359 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-2stg8_29f4dd7e-c2cf-4f64-87e8-2201fe99e751/kube-rbac-proxy/0.log"
Jan 22 08:03:31 crc kubenswrapper[4933]: I0122 08:03:31.244374 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-78fdd796fd-x7t54_f4f610a0-0ede-4d86-b1d4-fcd4a70d9c1c/manager/0.log"
Jan 22 08:03:31 crc kubenswrapper[4933]: I0122 08:03:31.263001 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tgvxb_f619cfe1-61b2-4726-a08f-b41cc24ae488/controller/0.log"
Jan 22 08:03:31 crc kubenswrapper[4933]: I0122 08:03:31.316300 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-594c8c9d5d-p6w9k_10fc162d-8e83-4741-88be-c1e8dd9f291a/manager/0.log"
Jan 22 08:03:31 crc kubenswrapper[4933]: I0122 08:03:31.371615 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-77d5c5b54f-ghk9g_45469fd5-7d9d-44f4-82a1-61d82f8e2dc8/manager/0.log"
Jan 22 08:03:32 crc kubenswrapper[4933]: I0122 08:03:32.257028 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-54ccf4f85d-5vl5w_bee42408-9ab2-4e83-a06b-8cb123a853f9/manager/0.log"
path="/var/log/pods/openstack-operators_infra-operator-controller-manager-54ccf4f85d-5vl5w_bee42408-9ab2-4e83-a06b-8cb123a853f9/manager/0.log" Jan 22 08:03:32 crc kubenswrapper[4933]: I0122 08:03:32.271174 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-69d6c9f5b8-dmwlv_ab3830be-ff45-443a-9089-29438fca5c75/manager/0.log" Jan 22 08:03:32 crc kubenswrapper[4933]: I0122 08:03:32.440886 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-b8b6d4659-pkpcl_d3f5d746-ae61-4d36-bcce-4530c7f7a899/manager/0.log" Jan 22 08:03:32 crc kubenswrapper[4933]: I0122 08:03:32.466721 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-78c6999f6f-dwrg6_34f0d75a-dc72-4dad-82a6-512c1351210b/manager/0.log" Jan 22 08:03:32 crc kubenswrapper[4933]: I0122 08:03:32.547929 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-c87fff755-dtg9q_e0335df2-7bd1-4b61-8057-7663a730d2ff/manager/0.log" Jan 22 08:03:32 crc kubenswrapper[4933]: I0122 08:03:32.649475 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-5d8f59fb49-nkc7j_21941523-226c-4a4c-a099-51df0766a712/manager/0.log" Jan 22 08:03:32 crc kubenswrapper[4933]: I0122 08:03:32.881594 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-6b8bc8d87d-czc9v_300ef4d3-6c13-4a9d-96e6-a707abccca2c/manager/0.log" Jan 22 08:03:32 crc kubenswrapper[4933]: I0122 08:03:32.966015 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-7bd9774b6-w25vn_04683325-6972-455c-9ca5-ddf1fd4b9862/manager/0.log" Jan 22 08:03:32 crc kubenswrapper[4933]: I0122 08:03:32.980846 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns_ae3ccc66-eed1-4750-8af1-7f99673b1323/manager/0.log" Jan 22 08:03:33 crc kubenswrapper[4933]: I0122 08:03:33.226837 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-init-5cd76577f9-bqlhh_df54862e-d3c4-4068-9560-93833cf75eae/operator/0.log" Jan 22 08:03:36 crc kubenswrapper[4933]: I0122 08:03:36.535133 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tgvxb_f619cfe1-61b2-4726-a08f-b41cc24ae488/frr/0.log" Jan 22 08:03:36 crc kubenswrapper[4933]: I0122 08:03:36.554899 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tgvxb_f619cfe1-61b2-4726-a08f-b41cc24ae488/reloader/0.log" Jan 22 08:03:36 crc kubenswrapper[4933]: I0122 08:03:36.563324 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tgvxb_f619cfe1-61b2-4726-a08f-b41cc24ae488/frr-metrics/0.log" Jan 22 08:03:36 crc kubenswrapper[4933]: I0122 08:03:36.578788 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tgvxb_f619cfe1-61b2-4726-a08f-b41cc24ae488/kube-rbac-proxy/0.log" Jan 22 08:03:36 crc kubenswrapper[4933]: I0122 08:03:36.593154 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tgvxb_f619cfe1-61b2-4726-a08f-b41cc24ae488/kube-rbac-proxy-frr/0.log" Jan 22 08:03:36 crc kubenswrapper[4933]: I0122 08:03:36.602298 4933 log.go:25] 
"Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tgvxb_f619cfe1-61b2-4726-a08f-b41cc24ae488/cp-frr-files/0.log" Jan 22 08:03:36 crc kubenswrapper[4933]: I0122 08:03:36.614174 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tgvxb_f619cfe1-61b2-4726-a08f-b41cc24ae488/cp-reloader/0.log" Jan 22 08:03:36 crc kubenswrapper[4933]: I0122 08:03:36.619609 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-tgvxb_f619cfe1-61b2-4726-a08f-b41cc24ae488/cp-metrics/0.log" Jan 22 08:03:36 crc kubenswrapper[4933]: I0122 08:03:36.632995 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7df86c4f6c-rjx6j_42d0dcd8-97a6-489b-9b19-43fd22936816/frr-k8s-webhook-server/0.log" Jan 22 08:03:36 crc kubenswrapper[4933]: I0122 08:03:36.667991 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-6c8b57987c-qr4jc_9f8987dd-79ca-4569-8000-088df75be06e/manager/0.log" Jan 22 08:03:36 crc kubenswrapper[4933]: I0122 08:03:36.681227 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-688dc4b4d8-9z74g_fe7410b0-e355-498e-841e-89ae7f5e56de/webhook-server/0.log" Jan 22 08:03:36 crc kubenswrapper[4933]: I0122 08:03:36.753718 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-647bb87bbd-xc9z6_07b078fc-4665-4e58-934d-f606471d5942/manager/0.log" Jan 22 08:03:36 crc kubenswrapper[4933]: I0122 08:03:36.996652 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-2cblb_283252a7-ff09-4856-9249-7c6cd70dff99/registry-server/0.log" Jan 22 08:03:37 crc kubenswrapper[4933]: I0122 08:03:37.165385 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-55db956ddc-7q7hb_289a66a7-9513-4b66-990a-3d9f11919531/manager/0.log" Jan 22 08:03:37 crc kubenswrapper[4933]: I0122 08:03:37.205958 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5d646b7d76-m6g48_2892be27-6da5-4a19-a30e-36f5907f5d70/manager/0.log" Jan 22 08:03:37 crc kubenswrapper[4933]: I0122 08:03:37.235066 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-rdbn6_3028718b-d03f-414e-834d-93eb28eeb369/operator/0.log" Jan 22 08:03:37 crc kubenswrapper[4933]: I0122 08:03:37.281358 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-547cbdb99f-2wjhd_48e1a8f3-00fd-48a6-be02-7c61f0425809/manager/0.log" Jan 22 08:03:37 crc kubenswrapper[4933]: I0122 08:03:37.545490 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-85cd9769bb-556x5_f5292b84-8cb2-4f43-96f9-6304705b15bc/manager/0.log" Jan 22 08:03:37 crc kubenswrapper[4933]: I0122 08:03:37.555627 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-69797bbcbd-9rc7q_0a5c558d-f69d-4299-97e2-00326ec7e416/manager/0.log" Jan 22 08:03:37 crc kubenswrapper[4933]: I0122 08:03:37.566878 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-5ffb9c6597-gtcc5_b5af85fb-e3ef-41b7-8c6b-7afddd5200bd/manager/0.log" 
Jan 22 08:03:37 crc kubenswrapper[4933]: I0122 08:03:37.658315 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-xlk7v_57ec97d6-d16e-4069-98cc-7dcf56910fad/speaker/0.log"
Jan 22 08:03:37 crc kubenswrapper[4933]: I0122 08:03:37.667300 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-xlk7v_57ec97d6-d16e-4069-98cc-7dcf56910fad/kube-rbac-proxy/0.log"
Jan 22 08:03:38 crc kubenswrapper[4933]: I0122 08:03:38.604364 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-86cb77c54b-fdmt6_cfa84f31-ab21-494f-932c-77b809b656c0/cert-manager-controller/0.log"
Jan 22 08:03:38 crc kubenswrapper[4933]: I0122 08:03:38.621602 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-855d9ccff4-mrwsn_06a56c7d-200d-4473-8766-ddb0f9f75cd4/cert-manager-cainjector/0.log"
Jan 22 08:03:38 crc kubenswrapper[4933]: I0122 08:03:38.631684 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-f4fb5df64-m2xft_f81fa480-c38d-4a0e-8adc-51332ceab483/cert-manager-webhook/0.log"
Jan 22 08:03:38 crc kubenswrapper[4933]: I0122 08:03:38.984821 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7754f76f8b-9b5pb_bad89ee9-f9e4-4a74-9f79-5a9ef46b9ec0/nmstate-console-plugin/0.log"
Jan 22 08:03:39 crc kubenswrapper[4933]: I0122 08:03:39.007107 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-z4cfk_95abb851-2f05-43e0-8c35-92027baf4a2c/nmstate-handler/0.log"
Jan 22 08:03:39 crc kubenswrapper[4933]: I0122 08:03:39.023231 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-88t8r_7f8f46df-3251-4fc5-87fb-10b877fe5878/nmstate-metrics/0.log"
Jan 22 08:03:39 crc kubenswrapper[4933]: I0122 08:03:39.031403 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-88t8r_7f8f46df-3251-4fc5-87fb-10b877fe5878/kube-rbac-proxy/0.log"
Jan 22 08:03:39 crc kubenswrapper[4933]: I0122 08:03:39.044716 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-646758c888-49j2k_85167be7-eaf1-4931-ac95-f80007d35c42/nmstate-operator/0.log"
Jan 22 08:03:39 crc kubenswrapper[4933]: I0122 08:03:39.060851 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-8474b5b9d8-dtghh_40102dae-8b84-4138-bd5d-4a3b7c1b3492/nmstate-webhook/0.log"
Jan 22 08:03:39 crc kubenswrapper[4933]: I0122 08:03:39.298928 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-kq95z_8796a7df-de04-490e-b5a9-c9fad5483d61/control-plane-machine-set-operator/0.log"
Jan 22 08:03:39 crc kubenswrapper[4933]: I0122 08:03:39.319907 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-6r979_c800ab14-5d0a-4078-91f5-b47d05d15ccc/kube-rbac-proxy/0.log"
Jan 22 08:03:39 crc kubenswrapper[4933]: I0122 08:03:39.332227 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-6r979_c800ab14-5d0a-4078-91f5-b47d05d15ccc/machine-api-operator/0.log"
Jan 22 08:03:40 crc kubenswrapper[4933]: I0122 08:03:40.148915 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_3078a1b977323d6d8f95a4018ef06f377c198eb1741282ac05e933b603vrtpk_6a552e55-190e-4f4c-9234-ebd6d63ee4ad/extract/0.log"
Jan 22 08:03:40 crc kubenswrapper[4933]: I0122 08:03:40.157854 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_3078a1b977323d6d8f95a4018ef06f377c198eb1741282ac05e933b603vrtpk_6a552e55-190e-4f4c-9234-ebd6d63ee4ad/util/0.log"
Jan 22 08:03:40 crc kubenswrapper[4933]: I0122 08:03:40.169629 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_3078a1b977323d6d8f95a4018ef06f377c198eb1741282ac05e933b603vrtpk_6a552e55-190e-4f4c-9234-ebd6d63ee4ad/pull/0.log"
Jan 22 08:03:40 crc kubenswrapper[4933]: I0122 08:03:40.309648 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-59dd8b7cbf-z6g5l_78b94bce-a7a5-471f-bab2-f57baeff12b6/manager/0.log"
Jan 22 08:03:40 crc kubenswrapper[4933]: I0122 08:03:40.398434 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-69cf5d4557-6ktqg_53b0d937-20e6-4a4b-b61b-f172c672c43f/manager/0.log"
Jan 22 08:03:40 crc kubenswrapper[4933]: I0122 08:03:40.409682 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-b45d7bf98-sb8mm_c2a2b482-2d1c-4c3b-b1dc-7d3f291b665e/manager/0.log"
Jan 22 08:03:40 crc kubenswrapper[4933]: I0122 08:03:40.492219 4933 scope.go:117] "RemoveContainer" containerID="cfab95e41af998dabe63c89a764948bcf7a828c9de3e04ff4298ff3b6fe7d56b"
Jan 22 08:03:40 crc kubenswrapper[4933]: E0122 08:03:40.492594 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:03:40 crc kubenswrapper[4933]: I0122 08:03:40.567443 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-78fdd796fd-x7t54_f4f610a0-0ede-4d86-b1d4-fcd4a70d9c1c/manager/0.log"
Jan 22 08:03:40 crc kubenswrapper[4933]: I0122 08:03:40.606681 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-594c8c9d5d-p6w9k_10fc162d-8e83-4741-88be-c1e8dd9f291a/manager/0.log"
Jan 22 08:03:40 crc kubenswrapper[4933]: I0122 08:03:40.631264 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-77d5c5b54f-ghk9g_45469fd5-7d9d-44f4-82a1-61d82f8e2dc8/manager/0.log"
Jan 22 08:03:41 crc kubenswrapper[4933]: I0122 08:03:41.317830 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-54ccf4f85d-5vl5w_bee42408-9ab2-4e83-a06b-8cb123a853f9/manager/0.log"
Jan 22 08:03:41 crc kubenswrapper[4933]: I0122 08:03:41.329142 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-69d6c9f5b8-dmwlv_ab3830be-ff45-443a-9089-29438fca5c75/manager/0.log"
Jan 22 08:03:41 crc kubenswrapper[4933]: I0122 08:03:41.469143 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-b8b6d4659-pkpcl_d3f5d746-ae61-4d36-bcce-4530c7f7a899/manager/0.log"
path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-b8b6d4659-pkpcl_d3f5d746-ae61-4d36-bcce-4530c7f7a899/manager/0.log" Jan 22 08:03:41 crc kubenswrapper[4933]: I0122 08:03:41.478025 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-78c6999f6f-dwrg6_34f0d75a-dc72-4dad-82a6-512c1351210b/manager/0.log" Jan 22 08:03:41 crc kubenswrapper[4933]: I0122 08:03:41.536784 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-c87fff755-dtg9q_e0335df2-7bd1-4b61-8057-7663a730d2ff/manager/0.log" Jan 22 08:03:41 crc kubenswrapper[4933]: I0122 08:03:41.611931 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-5d8f59fb49-nkc7j_21941523-226c-4a4c-a099-51df0766a712/manager/0.log" Jan 22 08:03:41 crc kubenswrapper[4933]: I0122 08:03:41.749122 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-6b8bc8d87d-czc9v_300ef4d3-6c13-4a9d-96e6-a707abccca2c/manager/0.log" Jan 22 08:03:41 crc kubenswrapper[4933]: I0122 08:03:41.805217 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-7bd9774b6-w25vn_04683325-6972-455c-9ca5-ddf1fd4b9862/manager/0.log" Jan 22 08:03:41 crc kubenswrapper[4933]: I0122 08:03:41.821860 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-7fbbcdb4d6vmqns_ae3ccc66-eed1-4750-8af1-7f99673b1323/manager/0.log" Jan 22 08:03:41 crc kubenswrapper[4933]: I0122 08:03:41.991443 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-init-5cd76577f9-bqlhh_df54862e-d3c4-4068-9560-93833cf75eae/operator/0.log" Jan 22 08:03:44 crc kubenswrapper[4933]: I0122 08:03:44.480253 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-647bb87bbd-xc9z6_07b078fc-4665-4e58-934d-f606471d5942/manager/0.log" Jan 22 08:03:44 crc kubenswrapper[4933]: I0122 08:03:44.608849 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-2cblb_283252a7-ff09-4856-9249-7c6cd70dff99/registry-server/0.log" Jan 22 08:03:44 crc kubenswrapper[4933]: I0122 08:03:44.709975 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-55db956ddc-7q7hb_289a66a7-9513-4b66-990a-3d9f11919531/manager/0.log" Jan 22 08:03:44 crc kubenswrapper[4933]: I0122 08:03:44.747791 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5d646b7d76-m6g48_2892be27-6da5-4a19-a30e-36f5907f5d70/manager/0.log" Jan 22 08:03:44 crc kubenswrapper[4933]: I0122 08:03:44.784623 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-rdbn6_3028718b-d03f-414e-834d-93eb28eeb369/operator/0.log" Jan 22 08:03:44 crc kubenswrapper[4933]: I0122 08:03:44.815702 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-547cbdb99f-2wjhd_48e1a8f3-00fd-48a6-be02-7c61f0425809/manager/0.log" Jan 22 08:03:45 crc kubenswrapper[4933]: I0122 08:03:45.018022 4933 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-85cd9769bb-556x5_f5292b84-8cb2-4f43-96f9-6304705b15bc/manager/0.log" Jan 22 08:03:45 crc kubenswrapper[4933]: I0122 08:03:45.026952 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-69797bbcbd-9rc7q_0a5c558d-f69d-4299-97e2-00326ec7e416/manager/0.log" Jan 22 08:03:45 crc kubenswrapper[4933]: I0122 08:03:45.039717 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-5ffb9c6597-gtcc5_b5af85fb-e3ef-41b7-8c6b-7afddd5200bd/manager/0.log" Jan 22 08:03:47 crc kubenswrapper[4933]: I0122 08:03:47.001110 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-q8l78_83dfdde7-cd49-49e0-85a0-0165d464b2c7/kube-multus-additional-cni-plugins/0.log" Jan 22 08:03:47 crc kubenswrapper[4933]: I0122 08:03:47.011485 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-q8l78_83dfdde7-cd49-49e0-85a0-0165d464b2c7/egress-router-binary-copy/0.log" Jan 22 08:03:47 crc kubenswrapper[4933]: I0122 08:03:47.020254 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-q8l78_83dfdde7-cd49-49e0-85a0-0165d464b2c7/cni-plugins/0.log" Jan 22 08:03:47 crc kubenswrapper[4933]: I0122 08:03:47.030336 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-q8l78_83dfdde7-cd49-49e0-85a0-0165d464b2c7/bond-cni-plugin/0.log" Jan 22 08:03:47 crc kubenswrapper[4933]: I0122 08:03:47.040913 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-q8l78_83dfdde7-cd49-49e0-85a0-0165d464b2c7/routeoverride-cni/0.log" Jan 22 08:03:47 crc kubenswrapper[4933]: I0122 08:03:47.052941 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-q8l78_83dfdde7-cd49-49e0-85a0-0165d464b2c7/whereabouts-cni-bincopy/0.log" Jan 22 08:03:47 crc kubenswrapper[4933]: I0122 08:03:47.062414 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-q8l78_83dfdde7-cd49-49e0-85a0-0165d464b2c7/whereabouts-cni/0.log" Jan 22 08:03:47 crc kubenswrapper[4933]: I0122 08:03:47.103164 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-admission-controller-857f4d67dd-sk5qf_5f757f81-9a44-488a-8a60-4814d2bc418d/multus-admission-controller/0.log" Jan 22 08:03:47 crc kubenswrapper[4933]: I0122 08:03:47.112524 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-admission-controller-857f4d67dd-sk5qf_5f757f81-9a44-488a-8a60-4814d2bc418d/kube-rbac-proxy/0.log" Jan 22 08:03:47 crc kubenswrapper[4933]: I0122 08:03:47.165663 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-jr6rw_f066dd84-0cd5-4e8c-8411-cf12cc83ea7d/kube-multus/2.log" Jan 22 08:03:47 crc kubenswrapper[4933]: I0122 08:03:47.309239 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-jr6rw_f066dd84-0cd5-4e8c-8411-cf12cc83ea7d/kube-multus/3.log" Jan 22 08:03:47 crc kubenswrapper[4933]: I0122 08:03:47.368125 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_network-metrics-daemon-t8rgm_0902347a-c5e2-4891-812b-cfe6efc32261/network-metrics-daemon/0.log" Jan 22 08:03:47 
Jan 22 08:03:47 crc kubenswrapper[4933]: I0122 08:03:47.375834 4933 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_network-metrics-daemon-t8rgm_0902347a-c5e2-4891-812b-cfe6efc32261/kube-rbac-proxy/0.log"
Jan 22 08:03:54 crc kubenswrapper[4933]: I0122 08:03:54.492228 4933 scope.go:117] "RemoveContainer" containerID="cfab95e41af998dabe63c89a764948bcf7a828c9de3e04ff4298ff3b6fe7d56b"
Jan 22 08:03:54 crc kubenswrapper[4933]: E0122 08:03:54.492971 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:04:06 crc kubenswrapper[4933]: I0122 08:04:06.490401 4933 scope.go:117] "RemoveContainer" containerID="cfab95e41af998dabe63c89a764948bcf7a828c9de3e04ff4298ff3b6fe7d56b"
Jan 22 08:04:06 crc kubenswrapper[4933]: E0122 08:04:06.491105 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:04:17 crc kubenswrapper[4933]: I0122 08:04:17.491756 4933 scope.go:117] "RemoveContainer" containerID="cfab95e41af998dabe63c89a764948bcf7a828c9de3e04ff4298ff3b6fe7d56b"
Jan 22 08:04:17 crc kubenswrapper[4933]: E0122 08:04:17.493040 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:04:29 crc kubenswrapper[4933]: I0122 08:04:29.490920 4933 scope.go:117] "RemoveContainer" containerID="cfab95e41af998dabe63c89a764948bcf7a828c9de3e04ff4298ff3b6fe7d56b"
Jan 22 08:04:29 crc kubenswrapper[4933]: E0122 08:04:29.491912 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:04:41 crc kubenswrapper[4933]: I0122 08:04:41.492907 4933 scope.go:117] "RemoveContainer" containerID="cfab95e41af998dabe63c89a764948bcf7a828c9de3e04ff4298ff3b6fe7d56b"
Jan 22 08:04:41 crc kubenswrapper[4933]: E0122 08:04:41.493727 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:04:56 crc kubenswrapper[4933]: I0122 08:04:56.491061 4933 scope.go:117] "RemoveContainer" containerID="cfab95e41af998dabe63c89a764948bcf7a828c9de3e04ff4298ff3b6fe7d56b"
Jan 22 08:04:56 crc kubenswrapper[4933]: E0122 08:04:56.491848 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:05:07 crc kubenswrapper[4933]: I0122 08:05:07.491555 4933 scope.go:117] "RemoveContainer" containerID="cfab95e41af998dabe63c89a764948bcf7a828c9de3e04ff4298ff3b6fe7d56b"
Jan 22 08:05:07 crc kubenswrapper[4933]: E0122 08:05:07.492474 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:05:10 crc kubenswrapper[4933]: I0122 08:05:10.182709 4933 scope.go:117] "RemoveContainer" containerID="4f82c1cf1dd84cd3e01694c88dcccc118b39f10a528b449e8d219d37ee1c95b3"
Jan 22 08:05:21 crc kubenswrapper[4933]: I0122 08:05:21.073975 4933 scope.go:117] "RemoveContainer" containerID="cfab95e41af998dabe63c89a764948bcf7a828c9de3e04ff4298ff3b6fe7d56b"
Jan 22 08:05:21 crc kubenswrapper[4933]: E0122 08:05:21.075459 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:05:31 crc kubenswrapper[4933]: I0122 08:05:31.491620 4933 scope.go:117] "RemoveContainer" containerID="cfab95e41af998dabe63c89a764948bcf7a828c9de3e04ff4298ff3b6fe7d56b"
Jan 22 08:05:31 crc kubenswrapper[4933]: E0122 08:05:31.492642 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:05:45 crc kubenswrapper[4933]: I0122 08:05:45.491375 4933 scope.go:117] "RemoveContainer" containerID="cfab95e41af998dabe63c89a764948bcf7a828c9de3e04ff4298ff3b6fe7d56b"
Jan 22 08:05:45 crc kubenswrapper[4933]: E0122 08:05:45.492428 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:05:59 crc kubenswrapper[4933]: I0122 08:05:59.491211 4933 scope.go:117] "RemoveContainer" containerID="cfab95e41af998dabe63c89a764948bcf7a828c9de3e04ff4298ff3b6fe7d56b"
Jan 22 08:05:59 crc kubenswrapper[4933]: E0122 08:05:59.492042 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:06:10 crc kubenswrapper[4933]: I0122 08:06:10.258535 4933 scope.go:117] "RemoveContainer" containerID="63b350086cd6cf01279c35cb99c52b6bbf341dd95152e56b5348fbfc424fc22d"
Jan 22 08:06:10 crc kubenswrapper[4933]: I0122 08:06:10.320583 4933 scope.go:117] "RemoveContainer" containerID="9a3f173d24fbbb6e9c78aa181de9eb003826fc0d8cc92c810bf60f5de37fef48"
Jan 22 08:06:10 crc kubenswrapper[4933]: I0122 08:06:10.357663 4933 scope.go:117] "RemoveContainer" containerID="51a2afc8f6eb23476f846ee0d058e6fa44e23f8071be79d3e07112fb14108d86"
Jan 22 08:06:13 crc kubenswrapper[4933]: I0122 08:06:13.491343 4933 scope.go:117] "RemoveContainer" containerID="cfab95e41af998dabe63c89a764948bcf7a828c9de3e04ff4298ff3b6fe7d56b"
Jan 22 08:06:13 crc kubenswrapper[4933]: E0122 08:06:13.491995 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:06:27 crc kubenswrapper[4933]: I0122 08:06:27.491917 4933 scope.go:117] "RemoveContainer" containerID="cfab95e41af998dabe63c89a764948bcf7a828c9de3e04ff4298ff3b6fe7d56b"
Jan 22 08:06:27 crc kubenswrapper[4933]: E0122 08:06:27.494205 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:06:39 crc kubenswrapper[4933]: I0122 08:06:39.491923 4933 scope.go:117] "RemoveContainer" containerID="cfab95e41af998dabe63c89a764948bcf7a828c9de3e04ff4298ff3b6fe7d56b"
Jan 22 08:06:39 crc kubenswrapper[4933]: E0122 08:06:39.492810 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:06:51 crc kubenswrapper[4933]: I0122 08:06:51.493557 4933 scope.go:117] "RemoveContainer" containerID="cfab95e41af998dabe63c89a764948bcf7a828c9de3e04ff4298ff3b6fe7d56b"
Jan 22 08:06:51 crc kubenswrapper[4933]: E0122 08:06:51.494811 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:07:04 crc kubenswrapper[4933]: I0122 08:07:04.491822 4933 scope.go:117] "RemoveContainer" containerID="cfab95e41af998dabe63c89a764948bcf7a828c9de3e04ff4298ff3b6fe7d56b"
Jan 22 08:07:04 crc kubenswrapper[4933]: E0122 08:07:04.492667 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:07:16 crc kubenswrapper[4933]: I0122 08:07:16.491304 4933 scope.go:117] "RemoveContainer" containerID="cfab95e41af998dabe63c89a764948bcf7a828c9de3e04ff4298ff3b6fe7d56b"
Jan 22 08:07:17 crc kubenswrapper[4933]: I0122 08:07:17.367801 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerStarted","Data":"0eb1b1ad00eba263f6765d2eb1d795c255fb8b5e6c5cd831ddc2b8fc4281ee98"}
Jan 22 08:09:31 crc kubenswrapper[4933]: I0122 08:09:31.991407 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-829br"]
Jan 22 08:09:31 crc kubenswrapper[4933]: E0122 08:09:31.998225 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5" containerName="extract-content"
Jan 22 08:09:31 crc kubenswrapper[4933]: I0122 08:09:31.998290 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5" containerName="extract-content"
Jan 22 08:09:31 crc kubenswrapper[4933]: E0122 08:09:31.998318 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5" containerName="registry-server"
Jan 22 08:09:31 crc kubenswrapper[4933]: I0122 08:09:31.998326 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5" containerName="registry-server"
Jan 22 08:09:31 crc kubenswrapper[4933]: E0122 08:09:31.998370 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5" containerName="extract-utilities"
Jan 22 08:09:31 crc kubenswrapper[4933]: I0122 08:09:31.998382 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5" containerName="extract-utilities"
Jan 22 08:09:31 crc kubenswrapper[4933]: I0122 08:09:31.999456 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ea2db51-8bf0-44ca-9f17-6bb2fef3cbb5" containerName="registry-server"
Jan 22 08:09:32 crc kubenswrapper[4933]: I0122 08:09:32.001382 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-829br"
Jan 22 08:09:32 crc kubenswrapper[4933]: I0122 08:09:32.005483 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e64d6e2b-7db7-426f-a7c3-9ddd3590d507-utilities\") pod \"certified-operators-829br\" (UID: \"e64d6e2b-7db7-426f-a7c3-9ddd3590d507\") " pod="openshift-marketplace/certified-operators-829br"
Jan 22 08:09:32 crc kubenswrapper[4933]: I0122 08:09:32.005752 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e64d6e2b-7db7-426f-a7c3-9ddd3590d507-catalog-content\") pod \"certified-operators-829br\" (UID: \"e64d6e2b-7db7-426f-a7c3-9ddd3590d507\") " pod="openshift-marketplace/certified-operators-829br"
Jan 22 08:09:32 crc kubenswrapper[4933]: I0122 08:09:32.006226 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xzl6j\" (UniqueName: \"kubernetes.io/projected/e64d6e2b-7db7-426f-a7c3-9ddd3590d507-kube-api-access-xzl6j\") pod \"certified-operators-829br\" (UID: \"e64d6e2b-7db7-426f-a7c3-9ddd3590d507\") " pod="openshift-marketplace/certified-operators-829br"
Jan 22 08:09:32 crc kubenswrapper[4933]: I0122 08:09:32.020724 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-829br"]
Jan 22 08:09:32 crc kubenswrapper[4933]: I0122 08:09:32.108841 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xzl6j\" (UniqueName: \"kubernetes.io/projected/e64d6e2b-7db7-426f-a7c3-9ddd3590d507-kube-api-access-xzl6j\") pod \"certified-operators-829br\" (UID: \"e64d6e2b-7db7-426f-a7c3-9ddd3590d507\") " pod="openshift-marketplace/certified-operators-829br"
Jan 22 08:09:32 crc kubenswrapper[4933]: I0122 08:09:32.108935 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e64d6e2b-7db7-426f-a7c3-9ddd3590d507-utilities\") pod \"certified-operators-829br\" (UID: \"e64d6e2b-7db7-426f-a7c3-9ddd3590d507\") " pod="openshift-marketplace/certified-operators-829br"
Jan 22 08:09:32 crc kubenswrapper[4933]: I0122 08:09:32.109005 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e64d6e2b-7db7-426f-a7c3-9ddd3590d507-catalog-content\") pod \"certified-operators-829br\" (UID: \"e64d6e2b-7db7-426f-a7c3-9ddd3590d507\") " pod="openshift-marketplace/certified-operators-829br"
Jan 22 08:09:32 crc kubenswrapper[4933]: I0122 08:09:32.109640 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e64d6e2b-7db7-426f-a7c3-9ddd3590d507-catalog-content\") pod \"certified-operators-829br\" (UID: \"e64d6e2b-7db7-426f-a7c3-9ddd3590d507\") " pod="openshift-marketplace/certified-operators-829br"
Jan 22 08:09:32 crc kubenswrapper[4933]: I0122 08:09:32.110264 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e64d6e2b-7db7-426f-a7c3-9ddd3590d507-utilities\") pod \"certified-operators-829br\" (UID: \"e64d6e2b-7db7-426f-a7c3-9ddd3590d507\") " pod="openshift-marketplace/certified-operators-829br"
Jan 22 08:09:32 crc kubenswrapper[4933]: I0122 08:09:32.132600 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xzl6j\" (UniqueName: \"kubernetes.io/projected/e64d6e2b-7db7-426f-a7c3-9ddd3590d507-kube-api-access-xzl6j\") pod \"certified-operators-829br\" (UID: \"e64d6e2b-7db7-426f-a7c3-9ddd3590d507\") " pod="openshift-marketplace/certified-operators-829br"
Jan 22 08:09:32 crc kubenswrapper[4933]: I0122 08:09:32.327610 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-829br"
Jan 22 08:09:33 crc kubenswrapper[4933]: I0122 08:09:32.908489 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-829br"]
Jan 22 08:09:33 crc kubenswrapper[4933]: I0122 08:09:33.698538 4933 generic.go:334] "Generic (PLEG): container finished" podID="e64d6e2b-7db7-426f-a7c3-9ddd3590d507" containerID="9ceddf770c1cc891a5461621052b4a9d2268026e6bb06205e722e3ada6de18f4" exitCode=0
Jan 22 08:09:33 crc kubenswrapper[4933]: I0122 08:09:33.698621 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-829br" event={"ID":"e64d6e2b-7db7-426f-a7c3-9ddd3590d507","Type":"ContainerDied","Data":"9ceddf770c1cc891a5461621052b4a9d2268026e6bb06205e722e3ada6de18f4"}
Jan 22 08:09:33 crc kubenswrapper[4933]: I0122 08:09:33.699018 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-829br" event={"ID":"e64d6e2b-7db7-426f-a7c3-9ddd3590d507","Type":"ContainerStarted","Data":"1753be06af3ba2caf25b1f55e90f53e5907cc7a2c2cdd9f1058713300ffc97ca"}
Jan 22 08:09:33 crc kubenswrapper[4933]: I0122 08:09:33.700985 4933 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 22 08:09:34 crc kubenswrapper[4933]: I0122 08:09:34.714766 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-829br" event={"ID":"e64d6e2b-7db7-426f-a7c3-9ddd3590d507","Type":"ContainerStarted","Data":"eace8775166113b63c9677bc371f9113e11773ddd137994d845dcbd963519ee3"}
Jan 22 08:09:35 crc kubenswrapper[4933]: I0122 08:09:35.729973 4933 generic.go:334] "Generic (PLEG): container finished" podID="e64d6e2b-7db7-426f-a7c3-9ddd3590d507" containerID="eace8775166113b63c9677bc371f9113e11773ddd137994d845dcbd963519ee3" exitCode=0
Jan 22 08:09:35 crc kubenswrapper[4933]: I0122 08:09:35.730043 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-829br" event={"ID":"e64d6e2b-7db7-426f-a7c3-9ddd3590d507","Type":"ContainerDied","Data":"eace8775166113b63c9677bc371f9113e11773ddd137994d845dcbd963519ee3"}
Jan 22 08:09:36 crc kubenswrapper[4933]: I0122 08:09:36.740965 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-829br" event={"ID":"e64d6e2b-7db7-426f-a7c3-9ddd3590d507","Type":"ContainerStarted","Data":"30d79ed60c8a06e464a9ed8f37270259e20d6cca48efa4b93fdc9cebb62b3947"}
Jan 22 08:09:36 crc kubenswrapper[4933]: I0122 08:09:36.760734 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-829br" podStartSLOduration=3.326568923 podStartE2EDuration="5.760717215s" podCreationTimestamp="2026-01-22 08:09:31 +0000 UTC" firstStartedPulling="2026-01-22 08:09:33.700686815 +0000 UTC m=+8621.537812168" lastFinishedPulling="2026-01-22 08:09:36.134835117 +0000 UTC m=+8623.971960460" observedRunningTime="2026-01-22 08:09:36.759766722 +0000 UTC m=+8624.596892075" watchObservedRunningTime="2026-01-22 08:09:36.760717215 +0000 UTC m=+8624.597842568"
Jan 22 08:09:40 crc kubenswrapper[4933]: I0122 08:09:40.943518 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 08:09:40 crc kubenswrapper[4933]: I0122 08:09:40.944878 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 08:09:42 crc kubenswrapper[4933]: I0122 08:09:42.328925 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-829br"
Jan 22 08:09:42 crc kubenswrapper[4933]: I0122 08:09:42.328987 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-829br"
Jan 22 08:09:42 crc kubenswrapper[4933]: I0122 08:09:42.404990 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-829br"
Jan 22 08:09:42 crc kubenswrapper[4933]: I0122 08:09:42.872322 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-829br"
Jan 22 08:09:42 crc kubenswrapper[4933]: I0122 08:09:42.954320 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-829br"]
Jan 22 08:09:44 crc kubenswrapper[4933]: I0122 08:09:44.834787 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-829br" podUID="e64d6e2b-7db7-426f-a7c3-9ddd3590d507" containerName="registry-server" containerID="cri-o://30d79ed60c8a06e464a9ed8f37270259e20d6cca48efa4b93fdc9cebb62b3947" gracePeriod=2
Jan 22 08:09:45 crc kubenswrapper[4933]: I0122 08:09:45.318374 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-829br"
Jan 22 08:09:45 crc kubenswrapper[4933]: I0122 08:09:45.325526 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e64d6e2b-7db7-426f-a7c3-9ddd3590d507-utilities\") pod \"e64d6e2b-7db7-426f-a7c3-9ddd3590d507\" (UID: \"e64d6e2b-7db7-426f-a7c3-9ddd3590d507\") "
Jan 22 08:09:45 crc kubenswrapper[4933]: I0122 08:09:45.325609 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e64d6e2b-7db7-426f-a7c3-9ddd3590d507-catalog-content\") pod \"e64d6e2b-7db7-426f-a7c3-9ddd3590d507\" (UID: \"e64d6e2b-7db7-426f-a7c3-9ddd3590d507\") "
Jan 22 08:09:45 crc kubenswrapper[4933]: I0122 08:09:45.328254 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e64d6e2b-7db7-426f-a7c3-9ddd3590d507-utilities" (OuterVolumeSpecName: "utilities") pod "e64d6e2b-7db7-426f-a7c3-9ddd3590d507" (UID: "e64d6e2b-7db7-426f-a7c3-9ddd3590d507"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 08:09:45 crc kubenswrapper[4933]: I0122 08:09:45.397443 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e64d6e2b-7db7-426f-a7c3-9ddd3590d507-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e64d6e2b-7db7-426f-a7c3-9ddd3590d507" (UID: "e64d6e2b-7db7-426f-a7c3-9ddd3590d507"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 08:09:45 crc kubenswrapper[4933]: I0122 08:09:45.427440 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xzl6j\" (UniqueName: \"kubernetes.io/projected/e64d6e2b-7db7-426f-a7c3-9ddd3590d507-kube-api-access-xzl6j\") pod \"e64d6e2b-7db7-426f-a7c3-9ddd3590d507\" (UID: \"e64d6e2b-7db7-426f-a7c3-9ddd3590d507\") "
Jan 22 08:09:45 crc kubenswrapper[4933]: I0122 08:09:45.427942 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e64d6e2b-7db7-426f-a7c3-9ddd3590d507-utilities\") on node \"crc\" DevicePath \"\""
Jan 22 08:09:45 crc kubenswrapper[4933]: I0122 08:09:45.427962 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e64d6e2b-7db7-426f-a7c3-9ddd3590d507-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 22 08:09:45 crc kubenswrapper[4933]: I0122 08:09:45.433325 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e64d6e2b-7db7-426f-a7c3-9ddd3590d507-kube-api-access-xzl6j" (OuterVolumeSpecName: "kube-api-access-xzl6j") pod "e64d6e2b-7db7-426f-a7c3-9ddd3590d507" (UID: "e64d6e2b-7db7-426f-a7c3-9ddd3590d507"). InnerVolumeSpecName "kube-api-access-xzl6j". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 08:09:45 crc kubenswrapper[4933]: I0122 08:09:45.530447 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xzl6j\" (UniqueName: \"kubernetes.io/projected/e64d6e2b-7db7-426f-a7c3-9ddd3590d507-kube-api-access-xzl6j\") on node \"crc\" DevicePath \"\""
Jan 22 08:09:45 crc kubenswrapper[4933]: I0122 08:09:45.848339 4933 generic.go:334] "Generic (PLEG): container finished" podID="e64d6e2b-7db7-426f-a7c3-9ddd3590d507" containerID="30d79ed60c8a06e464a9ed8f37270259e20d6cca48efa4b93fdc9cebb62b3947" exitCode=0
Jan 22 08:09:45 crc kubenswrapper[4933]: I0122 08:09:45.848395 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-829br" event={"ID":"e64d6e2b-7db7-426f-a7c3-9ddd3590d507","Type":"ContainerDied","Data":"30d79ed60c8a06e464a9ed8f37270259e20d6cca48efa4b93fdc9cebb62b3947"}
Jan 22 08:09:45 crc kubenswrapper[4933]: I0122 08:09:45.848424 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-829br" event={"ID":"e64d6e2b-7db7-426f-a7c3-9ddd3590d507","Type":"ContainerDied","Data":"1753be06af3ba2caf25b1f55e90f53e5907cc7a2c2cdd9f1058713300ffc97ca"}
Jan 22 08:09:45 crc kubenswrapper[4933]: I0122 08:09:45.848444 4933 scope.go:117] "RemoveContainer" containerID="30d79ed60c8a06e464a9ed8f37270259e20d6cca48efa4b93fdc9cebb62b3947"
Jan 22 08:09:45 crc kubenswrapper[4933]: I0122 08:09:45.848608 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-829br"
Jan 22 08:09:45 crc kubenswrapper[4933]: I0122 08:09:45.895753 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-829br"]
Jan 22 08:09:45 crc kubenswrapper[4933]: I0122 08:09:45.902605 4933 scope.go:117] "RemoveContainer" containerID="eace8775166113b63c9677bc371f9113e11773ddd137994d845dcbd963519ee3"
Jan 22 08:09:45 crc kubenswrapper[4933]: I0122 08:09:45.905721 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-829br"]
Jan 22 08:09:45 crc kubenswrapper[4933]: I0122 08:09:45.924739 4933 scope.go:117] "RemoveContainer" containerID="9ceddf770c1cc891a5461621052b4a9d2268026e6bb06205e722e3ada6de18f4"
Jan 22 08:09:45 crc kubenswrapper[4933]: I0122 08:09:45.982729 4933 scope.go:117] "RemoveContainer" containerID="30d79ed60c8a06e464a9ed8f37270259e20d6cca48efa4b93fdc9cebb62b3947"
Jan 22 08:09:45 crc kubenswrapper[4933]: E0122 08:09:45.983202 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"30d79ed60c8a06e464a9ed8f37270259e20d6cca48efa4b93fdc9cebb62b3947\": container with ID starting with 30d79ed60c8a06e464a9ed8f37270259e20d6cca48efa4b93fdc9cebb62b3947 not found: ID does not exist" containerID="30d79ed60c8a06e464a9ed8f37270259e20d6cca48efa4b93fdc9cebb62b3947"
Jan 22 08:09:45 crc kubenswrapper[4933]: I0122 08:09:45.983251 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"30d79ed60c8a06e464a9ed8f37270259e20d6cca48efa4b93fdc9cebb62b3947"} err="failed to get container status \"30d79ed60c8a06e464a9ed8f37270259e20d6cca48efa4b93fdc9cebb62b3947\": rpc error: code = NotFound desc = could not find container \"30d79ed60c8a06e464a9ed8f37270259e20d6cca48efa4b93fdc9cebb62b3947\": container with ID starting with 30d79ed60c8a06e464a9ed8f37270259e20d6cca48efa4b93fdc9cebb62b3947 not found: ID does not exist"
Jan 22 08:09:45 crc kubenswrapper[4933]: I0122 08:09:45.983282 4933 scope.go:117] "RemoveContainer" containerID="eace8775166113b63c9677bc371f9113e11773ddd137994d845dcbd963519ee3"
Jan 22 08:09:45 crc kubenswrapper[4933]: E0122 08:09:45.983623 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eace8775166113b63c9677bc371f9113e11773ddd137994d845dcbd963519ee3\": container with ID starting with eace8775166113b63c9677bc371f9113e11773ddd137994d845dcbd963519ee3 not found: ID does not exist" containerID="eace8775166113b63c9677bc371f9113e11773ddd137994d845dcbd963519ee3"
Jan 22 08:09:45 crc kubenswrapper[4933]: I0122 08:09:45.983693 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eace8775166113b63c9677bc371f9113e11773ddd137994d845dcbd963519ee3"} err="failed to get container status \"eace8775166113b63c9677bc371f9113e11773ddd137994d845dcbd963519ee3\": rpc error: code = NotFound desc = could not find container \"eace8775166113b63c9677bc371f9113e11773ddd137994d845dcbd963519ee3\": container with ID starting with eace8775166113b63c9677bc371f9113e11773ddd137994d845dcbd963519ee3 not found: ID does not exist"
Jan 22 08:09:45 crc kubenswrapper[4933]: I0122 08:09:45.983729 4933 scope.go:117] "RemoveContainer" containerID="9ceddf770c1cc891a5461621052b4a9d2268026e6bb06205e722e3ada6de18f4"
Jan 22 08:09:45 crc kubenswrapper[4933]: E0122 08:09:45.984106 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ceddf770c1cc891a5461621052b4a9d2268026e6bb06205e722e3ada6de18f4\": container with ID starting with 9ceddf770c1cc891a5461621052b4a9d2268026e6bb06205e722e3ada6de18f4 not found: ID does not exist" containerID="9ceddf770c1cc891a5461621052b4a9d2268026e6bb06205e722e3ada6de18f4"
Jan 22 08:09:45 crc kubenswrapper[4933]: I0122 08:09:45.984152 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ceddf770c1cc891a5461621052b4a9d2268026e6bb06205e722e3ada6de18f4"} err="failed to get container status \"9ceddf770c1cc891a5461621052b4a9d2268026e6bb06205e722e3ada6de18f4\": rpc error: code = NotFound desc = could not find container \"9ceddf770c1cc891a5461621052b4a9d2268026e6bb06205e722e3ada6de18f4\": container with ID starting with 9ceddf770c1cc891a5461621052b4a9d2268026e6bb06205e722e3ada6de18f4 not found: ID does not exist"
Jan 22 08:09:46 crc kubenswrapper[4933]: I0122 08:09:46.523738 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e64d6e2b-7db7-426f-a7c3-9ddd3590d507" path="/var/lib/kubelet/pods/e64d6e2b-7db7-426f-a7c3-9ddd3590d507/volumes"
Jan 22 08:10:10 crc kubenswrapper[4933]: I0122 08:10:10.943491 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 08:10:10 crc kubenswrapper[4933]: I0122 08:10:10.944068 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 08:10:34 crc kubenswrapper[4933]: I0122 08:10:34.593190 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-t5jxb"]
Jan 22 08:10:34 crc kubenswrapper[4933]: E0122 08:10:34.594315 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e64d6e2b-7db7-426f-a7c3-9ddd3590d507" containerName="registry-server"
Jan 22 08:10:34 crc kubenswrapper[4933]: I0122 08:10:34.594336 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="e64d6e2b-7db7-426f-a7c3-9ddd3590d507" containerName="registry-server"
Jan 22 08:10:34 crc kubenswrapper[4933]: E0122 08:10:34.594369 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e64d6e2b-7db7-426f-a7c3-9ddd3590d507" containerName="extract-utilities"
Jan 22 08:10:34 crc kubenswrapper[4933]: I0122 08:10:34.594381 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="e64d6e2b-7db7-426f-a7c3-9ddd3590d507" containerName="extract-utilities"
Jan 22 08:10:34 crc kubenswrapper[4933]: E0122 08:10:34.594415 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e64d6e2b-7db7-426f-a7c3-9ddd3590d507" containerName="extract-content"
Jan 22 08:10:34 crc kubenswrapper[4933]: I0122 08:10:34.594428 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="e64d6e2b-7db7-426f-a7c3-9ddd3590d507" containerName="extract-content"
Jan 22 08:10:34 crc kubenswrapper[4933]: I0122 08:10:34.594695 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="e64d6e2b-7db7-426f-a7c3-9ddd3590d507" containerName="registry-server"
Jan 22 08:10:34 crc kubenswrapper[4933]: I0122 08:10:34.596802 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-t5jxb"
Jan 22 08:10:34 crc kubenswrapper[4933]: I0122 08:10:34.605876 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-t5jxb"]
Jan 22 08:10:34 crc kubenswrapper[4933]: I0122 08:10:34.627454 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jjdpv\" (UniqueName: \"kubernetes.io/projected/4995eeb9-6f18-4d7b-99cb-0b95110e3ca6-kube-api-access-jjdpv\") pod \"redhat-marketplace-t5jxb\" (UID: \"4995eeb9-6f18-4d7b-99cb-0b95110e3ca6\") " pod="openshift-marketplace/redhat-marketplace-t5jxb"
Jan 22 08:10:34 crc kubenswrapper[4933]: I0122 08:10:34.627592 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4995eeb9-6f18-4d7b-99cb-0b95110e3ca6-utilities\") pod \"redhat-marketplace-t5jxb\" (UID: \"4995eeb9-6f18-4d7b-99cb-0b95110e3ca6\") " pod="openshift-marketplace/redhat-marketplace-t5jxb"
Jan 22 08:10:34 crc kubenswrapper[4933]: I0122 08:10:34.627819 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4995eeb9-6f18-4d7b-99cb-0b95110e3ca6-catalog-content\") pod \"redhat-marketplace-t5jxb\" (UID: \"4995eeb9-6f18-4d7b-99cb-0b95110e3ca6\") " pod="openshift-marketplace/redhat-marketplace-t5jxb"
Jan 22 08:10:34 crc kubenswrapper[4933]: I0122 08:10:34.729237 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jjdpv\" (UniqueName: \"kubernetes.io/projected/4995eeb9-6f18-4d7b-99cb-0b95110e3ca6-kube-api-access-jjdpv\") pod \"redhat-marketplace-t5jxb\" (UID: \"4995eeb9-6f18-4d7b-99cb-0b95110e3ca6\") " pod="openshift-marketplace/redhat-marketplace-t5jxb"
Jan 22 08:10:34 crc kubenswrapper[4933]: I0122 08:10:34.729339 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4995eeb9-6f18-4d7b-99cb-0b95110e3ca6-utilities\") pod \"redhat-marketplace-t5jxb\" (UID: \"4995eeb9-6f18-4d7b-99cb-0b95110e3ca6\") " pod="openshift-marketplace/redhat-marketplace-t5jxb"
Jan 22 08:10:34 crc kubenswrapper[4933]: I0122 08:10:34.729416 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4995eeb9-6f18-4d7b-99cb-0b95110e3ca6-catalog-content\") pod \"redhat-marketplace-t5jxb\" (UID: \"4995eeb9-6f18-4d7b-99cb-0b95110e3ca6\") " pod="openshift-marketplace/redhat-marketplace-t5jxb"
Jan 22 08:10:34 crc kubenswrapper[4933]: I0122 08:10:34.729892 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4995eeb9-6f18-4d7b-99cb-0b95110e3ca6-utilities\") pod \"redhat-marketplace-t5jxb\" (UID: \"4995eeb9-6f18-4d7b-99cb-0b95110e3ca6\") " pod="openshift-marketplace/redhat-marketplace-t5jxb"
Jan 22 08:10:34 crc kubenswrapper[4933]: I0122 08:10:34.730011 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4995eeb9-6f18-4d7b-99cb-0b95110e3ca6-catalog-content\") pod \"redhat-marketplace-t5jxb\" (UID: \"4995eeb9-6f18-4d7b-99cb-0b95110e3ca6\") " pod="openshift-marketplace/redhat-marketplace-t5jxb"
Jan 22 08:10:34 crc kubenswrapper[4933]: I0122 08:10:34.747842 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jjdpv\" (UniqueName: \"kubernetes.io/projected/4995eeb9-6f18-4d7b-99cb-0b95110e3ca6-kube-api-access-jjdpv\") pod \"redhat-marketplace-t5jxb\" (UID: \"4995eeb9-6f18-4d7b-99cb-0b95110e3ca6\") " pod="openshift-marketplace/redhat-marketplace-t5jxb"
Jan 22 08:10:34 crc kubenswrapper[4933]: I0122 08:10:34.926021 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-t5jxb"
Jan 22 08:10:35 crc kubenswrapper[4933]: I0122 08:10:35.425986 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-t5jxb"]
Jan 22 08:10:35 crc kubenswrapper[4933]: I0122 08:10:35.596631 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-t5jxb" event={"ID":"4995eeb9-6f18-4d7b-99cb-0b95110e3ca6","Type":"ContainerStarted","Data":"2558bcfbe7a5e26875b0545c38e6d2a8fb2761bb23a87005ee90e895f8148cfa"}
Jan 22 08:10:36 crc kubenswrapper[4933]: I0122 08:10:36.606683 4933 generic.go:334] "Generic (PLEG): container finished" podID="4995eeb9-6f18-4d7b-99cb-0b95110e3ca6" containerID="63ae641425c7a373b1021694f0868d5448727304b757de28fc110d5b3d260750" exitCode=0
Jan 22 08:10:36 crc kubenswrapper[4933]: I0122 08:10:36.606728 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-t5jxb" event={"ID":"4995eeb9-6f18-4d7b-99cb-0b95110e3ca6","Type":"ContainerDied","Data":"63ae641425c7a373b1021694f0868d5448727304b757de28fc110d5b3d260750"}
Jan 22 08:10:37 crc kubenswrapper[4933]: I0122 08:10:37.621875 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-t5jxb" event={"ID":"4995eeb9-6f18-4d7b-99cb-0b95110e3ca6","Type":"ContainerStarted","Data":"84cffe640cfad9ecc2225347c7499da53d005fb914351d5aa09b7041a6dd11fd"}
Jan 22 08:10:38 crc kubenswrapper[4933]: I0122 08:10:38.636757 4933 generic.go:334] "Generic (PLEG): container finished" podID="4995eeb9-6f18-4d7b-99cb-0b95110e3ca6" containerID="84cffe640cfad9ecc2225347c7499da53d005fb914351d5aa09b7041a6dd11fd" exitCode=0
Jan 22 08:10:38 crc kubenswrapper[4933]: I0122 08:10:38.636868 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-t5jxb" event={"ID":"4995eeb9-6f18-4d7b-99cb-0b95110e3ca6","Type":"ContainerDied","Data":"84cffe640cfad9ecc2225347c7499da53d005fb914351d5aa09b7041a6dd11fd"}
Jan 22 08:10:39 crc kubenswrapper[4933]: I0122 08:10:39.648313 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-t5jxb" event={"ID":"4995eeb9-6f18-4d7b-99cb-0b95110e3ca6","Type":"ContainerStarted","Data":"2a64c76ac22674446de9cafd9ee813e685da8c2351466ee8ad404a80ebe4c147"}
Jan 22 08:10:39 crc kubenswrapper[4933]: I0122 08:10:39.674123 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-t5jxb" podStartSLOduration=3.27183372 podStartE2EDuration="5.674104267s" podCreationTimestamp="2026-01-22 08:10:34 +0000 UTC" firstStartedPulling="2026-01-22 08:10:36.611160436 +0000 UTC m=+8684.448285799" lastFinishedPulling="2026-01-22 08:10:39.013431003 +0000 UTC m=+8686.850556346" observedRunningTime="2026-01-22 08:10:39.664534894 +0000 UTC m=+8687.501660247" watchObservedRunningTime="2026-01-22 08:10:39.674104267 +0000 UTC m=+8687.511229620"
Jan 22 08:10:40 crc kubenswrapper[4933]: I0122 08:10:40.944134 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 08:10:40 crc kubenswrapper[4933]: I0122 08:10:40.945031 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 08:10:40 crc kubenswrapper[4933]: I0122 08:10:40.945324 4933 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx"
Jan 22 08:10:40 crc kubenswrapper[4933]: I0122 08:10:40.946335 4933 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0eb1b1ad00eba263f6765d2eb1d795c255fb8b5e6c5cd831ddc2b8fc4281ee98"} pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 22 08:10:40 crc kubenswrapper[4933]: I0122 08:10:40.946473 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" containerID="cri-o://0eb1b1ad00eba263f6765d2eb1d795c255fb8b5e6c5cd831ddc2b8fc4281ee98" gracePeriod=600
Jan 22 08:10:41 crc kubenswrapper[4933]: I0122 08:10:41.671577 4933 generic.go:334] "Generic (PLEG): container finished" podID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerID="0eb1b1ad00eba263f6765d2eb1d795c255fb8b5e6c5cd831ddc2b8fc4281ee98" exitCode=0
Jan 22 08:10:41 crc kubenswrapper[4933]: I0122 08:10:41.671679 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerDied","Data":"0eb1b1ad00eba263f6765d2eb1d795c255fb8b5e6c5cd831ddc2b8fc4281ee98"}
Jan 22 08:10:41 crc kubenswrapper[4933]: I0122 08:10:41.672169 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerStarted","Data":"6fae462cde8930b9f721583df9f743ddcf60826d446fdfdfcfc9fe9adf4d01a4"}
Jan 22 08:10:41 crc kubenswrapper[4933]: I0122 08:10:41.672193 4933 scope.go:117] "RemoveContainer" containerID="cfab95e41af998dabe63c89a764948bcf7a828c9de3e04ff4298ff3b6fe7d56b"
Jan 22 08:10:44 crc kubenswrapper[4933]: I0122 08:10:44.926404 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-t5jxb"
Jan 22 08:10:44 crc kubenswrapper[4933]: I0122 08:10:44.928398 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-t5jxb"
Jan 22 08:10:44 crc kubenswrapper[4933]: I0122 08:10:44.983122 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-t5jxb"
pod="openshift-marketplace/redhat-marketplace-t5jxb" Jan 22 08:10:45 crc kubenswrapper[4933]: I0122 08:10:45.850761 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-t5jxb"] Jan 22 08:10:47 crc kubenswrapper[4933]: I0122 08:10:47.752440 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-t5jxb" podUID="4995eeb9-6f18-4d7b-99cb-0b95110e3ca6" containerName="registry-server" containerID="cri-o://2a64c76ac22674446de9cafd9ee813e685da8c2351466ee8ad404a80ebe4c147" gracePeriod=2 Jan 22 08:10:48 crc kubenswrapper[4933]: I0122 08:10:48.213966 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-t5jxb" Jan 22 08:10:48 crc kubenswrapper[4933]: I0122 08:10:48.378687 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4995eeb9-6f18-4d7b-99cb-0b95110e3ca6-utilities\") pod \"4995eeb9-6f18-4d7b-99cb-0b95110e3ca6\" (UID: \"4995eeb9-6f18-4d7b-99cb-0b95110e3ca6\") " Jan 22 08:10:48 crc kubenswrapper[4933]: I0122 08:10:48.378873 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jjdpv\" (UniqueName: \"kubernetes.io/projected/4995eeb9-6f18-4d7b-99cb-0b95110e3ca6-kube-api-access-jjdpv\") pod \"4995eeb9-6f18-4d7b-99cb-0b95110e3ca6\" (UID: \"4995eeb9-6f18-4d7b-99cb-0b95110e3ca6\") " Jan 22 08:10:48 crc kubenswrapper[4933]: I0122 08:10:48.378982 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4995eeb9-6f18-4d7b-99cb-0b95110e3ca6-catalog-content\") pod \"4995eeb9-6f18-4d7b-99cb-0b95110e3ca6\" (UID: \"4995eeb9-6f18-4d7b-99cb-0b95110e3ca6\") " Jan 22 08:10:48 crc kubenswrapper[4933]: I0122 08:10:48.379671 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4995eeb9-6f18-4d7b-99cb-0b95110e3ca6-utilities" (OuterVolumeSpecName: "utilities") pod "4995eeb9-6f18-4d7b-99cb-0b95110e3ca6" (UID: "4995eeb9-6f18-4d7b-99cb-0b95110e3ca6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 08:10:48 crc kubenswrapper[4933]: I0122 08:10:48.380094 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4995eeb9-6f18-4d7b-99cb-0b95110e3ca6-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 08:10:48 crc kubenswrapper[4933]: I0122 08:10:48.384823 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4995eeb9-6f18-4d7b-99cb-0b95110e3ca6-kube-api-access-jjdpv" (OuterVolumeSpecName: "kube-api-access-jjdpv") pod "4995eeb9-6f18-4d7b-99cb-0b95110e3ca6" (UID: "4995eeb9-6f18-4d7b-99cb-0b95110e3ca6"). InnerVolumeSpecName "kube-api-access-jjdpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 08:10:48 crc kubenswrapper[4933]: I0122 08:10:48.402200 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4995eeb9-6f18-4d7b-99cb-0b95110e3ca6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4995eeb9-6f18-4d7b-99cb-0b95110e3ca6" (UID: "4995eeb9-6f18-4d7b-99cb-0b95110e3ca6"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 08:10:48 crc kubenswrapper[4933]: I0122 08:10:48.482251 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jjdpv\" (UniqueName: \"kubernetes.io/projected/4995eeb9-6f18-4d7b-99cb-0b95110e3ca6-kube-api-access-jjdpv\") on node \"crc\" DevicePath \"\"" Jan 22 08:10:48 crc kubenswrapper[4933]: I0122 08:10:48.482299 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4995eeb9-6f18-4d7b-99cb-0b95110e3ca6-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 08:10:48 crc kubenswrapper[4933]: I0122 08:10:48.765006 4933 generic.go:334] "Generic (PLEG): container finished" podID="4995eeb9-6f18-4d7b-99cb-0b95110e3ca6" containerID="2a64c76ac22674446de9cafd9ee813e685da8c2351466ee8ad404a80ebe4c147" exitCode=0 Jan 22 08:10:48 crc kubenswrapper[4933]: I0122 08:10:48.765059 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-t5jxb" event={"ID":"4995eeb9-6f18-4d7b-99cb-0b95110e3ca6","Type":"ContainerDied","Data":"2a64c76ac22674446de9cafd9ee813e685da8c2351466ee8ad404a80ebe4c147"} Jan 22 08:10:48 crc kubenswrapper[4933]: I0122 08:10:48.765137 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-t5jxb" event={"ID":"4995eeb9-6f18-4d7b-99cb-0b95110e3ca6","Type":"ContainerDied","Data":"2558bcfbe7a5e26875b0545c38e6d2a8fb2761bb23a87005ee90e895f8148cfa"} Jan 22 08:10:48 crc kubenswrapper[4933]: I0122 08:10:48.765151 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-t5jxb" Jan 22 08:10:48 crc kubenswrapper[4933]: I0122 08:10:48.765158 4933 scope.go:117] "RemoveContainer" containerID="2a64c76ac22674446de9cafd9ee813e685da8c2351466ee8ad404a80ebe4c147" Jan 22 08:10:48 crc kubenswrapper[4933]: I0122 08:10:48.792241 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-t5jxb"] Jan 22 08:10:48 crc kubenswrapper[4933]: I0122 08:10:48.795653 4933 scope.go:117] "RemoveContainer" containerID="84cffe640cfad9ecc2225347c7499da53d005fb914351d5aa09b7041a6dd11fd" Jan 22 08:10:48 crc kubenswrapper[4933]: I0122 08:10:48.805601 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-t5jxb"] Jan 22 08:10:48 crc kubenswrapper[4933]: I0122 08:10:48.829735 4933 scope.go:117] "RemoveContainer" containerID="63ae641425c7a373b1021694f0868d5448727304b757de28fc110d5b3d260750" Jan 22 08:10:48 crc kubenswrapper[4933]: I0122 08:10:48.900389 4933 scope.go:117] "RemoveContainer" containerID="2a64c76ac22674446de9cafd9ee813e685da8c2351466ee8ad404a80ebe4c147" Jan 22 08:10:48 crc kubenswrapper[4933]: E0122 08:10:48.900862 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2a64c76ac22674446de9cafd9ee813e685da8c2351466ee8ad404a80ebe4c147\": container with ID starting with 2a64c76ac22674446de9cafd9ee813e685da8c2351466ee8ad404a80ebe4c147 not found: ID does not exist" containerID="2a64c76ac22674446de9cafd9ee813e685da8c2351466ee8ad404a80ebe4c147" Jan 22 08:10:48 crc kubenswrapper[4933]: I0122 08:10:48.900961 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2a64c76ac22674446de9cafd9ee813e685da8c2351466ee8ad404a80ebe4c147"} err="failed to get container status 
\"2a64c76ac22674446de9cafd9ee813e685da8c2351466ee8ad404a80ebe4c147\": rpc error: code = NotFound desc = could not find container \"2a64c76ac22674446de9cafd9ee813e685da8c2351466ee8ad404a80ebe4c147\": container with ID starting with 2a64c76ac22674446de9cafd9ee813e685da8c2351466ee8ad404a80ebe4c147 not found: ID does not exist" Jan 22 08:10:48 crc kubenswrapper[4933]: I0122 08:10:48.901036 4933 scope.go:117] "RemoveContainer" containerID="84cffe640cfad9ecc2225347c7499da53d005fb914351d5aa09b7041a6dd11fd" Jan 22 08:10:48 crc kubenswrapper[4933]: E0122 08:10:48.901689 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"84cffe640cfad9ecc2225347c7499da53d005fb914351d5aa09b7041a6dd11fd\": container with ID starting with 84cffe640cfad9ecc2225347c7499da53d005fb914351d5aa09b7041a6dd11fd not found: ID does not exist" containerID="84cffe640cfad9ecc2225347c7499da53d005fb914351d5aa09b7041a6dd11fd" Jan 22 08:10:48 crc kubenswrapper[4933]: I0122 08:10:48.901745 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"84cffe640cfad9ecc2225347c7499da53d005fb914351d5aa09b7041a6dd11fd"} err="failed to get container status \"84cffe640cfad9ecc2225347c7499da53d005fb914351d5aa09b7041a6dd11fd\": rpc error: code = NotFound desc = could not find container \"84cffe640cfad9ecc2225347c7499da53d005fb914351d5aa09b7041a6dd11fd\": container with ID starting with 84cffe640cfad9ecc2225347c7499da53d005fb914351d5aa09b7041a6dd11fd not found: ID does not exist" Jan 22 08:10:48 crc kubenswrapper[4933]: I0122 08:10:48.901779 4933 scope.go:117] "RemoveContainer" containerID="63ae641425c7a373b1021694f0868d5448727304b757de28fc110d5b3d260750" Jan 22 08:10:48 crc kubenswrapper[4933]: E0122 08:10:48.902170 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"63ae641425c7a373b1021694f0868d5448727304b757de28fc110d5b3d260750\": container with ID starting with 63ae641425c7a373b1021694f0868d5448727304b757de28fc110d5b3d260750 not found: ID does not exist" containerID="63ae641425c7a373b1021694f0868d5448727304b757de28fc110d5b3d260750" Jan 22 08:10:48 crc kubenswrapper[4933]: I0122 08:10:48.902202 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"63ae641425c7a373b1021694f0868d5448727304b757de28fc110d5b3d260750"} err="failed to get container status \"63ae641425c7a373b1021694f0868d5448727304b757de28fc110d5b3d260750\": rpc error: code = NotFound desc = could not find container \"63ae641425c7a373b1021694f0868d5448727304b757de28fc110d5b3d260750\": container with ID starting with 63ae641425c7a373b1021694f0868d5448727304b757de28fc110d5b3d260750 not found: ID does not exist" Jan 22 08:10:50 crc kubenswrapper[4933]: I0122 08:10:50.506032 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4995eeb9-6f18-4d7b-99cb-0b95110e3ca6" path="/var/lib/kubelet/pods/4995eeb9-6f18-4d7b-99cb-0b95110e3ca6/volumes" Jan 22 08:11:16 crc kubenswrapper[4933]: I0122 08:11:16.403931 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-ktjlv"] Jan 22 08:11:16 crc kubenswrapper[4933]: E0122 08:11:16.404830 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4995eeb9-6f18-4d7b-99cb-0b95110e3ca6" containerName="registry-server" Jan 22 08:11:16 crc kubenswrapper[4933]: I0122 08:11:16.404845 4933 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="4995eeb9-6f18-4d7b-99cb-0b95110e3ca6" containerName="registry-server" Jan 22 08:11:16 crc kubenswrapper[4933]: E0122 08:11:16.410536 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4995eeb9-6f18-4d7b-99cb-0b95110e3ca6" containerName="extract-content" Jan 22 08:11:16 crc kubenswrapper[4933]: I0122 08:11:16.411604 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="4995eeb9-6f18-4d7b-99cb-0b95110e3ca6" containerName="extract-content" Jan 22 08:11:16 crc kubenswrapper[4933]: E0122 08:11:16.411667 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4995eeb9-6f18-4d7b-99cb-0b95110e3ca6" containerName="extract-utilities" Jan 22 08:11:16 crc kubenswrapper[4933]: I0122 08:11:16.411675 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="4995eeb9-6f18-4d7b-99cb-0b95110e3ca6" containerName="extract-utilities" Jan 22 08:11:16 crc kubenswrapper[4933]: I0122 08:11:16.412029 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="4995eeb9-6f18-4d7b-99cb-0b95110e3ca6" containerName="registry-server" Jan 22 08:11:16 crc kubenswrapper[4933]: I0122 08:11:16.413503 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ktjlv" Jan 22 08:11:16 crc kubenswrapper[4933]: I0122 08:11:16.439570 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ktjlv"] Jan 22 08:11:16 crc kubenswrapper[4933]: I0122 08:11:16.542817 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3121446d-3c4d-4f20-b0b0-807328db53b7-catalog-content\") pod \"redhat-operators-ktjlv\" (UID: \"3121446d-3c4d-4f20-b0b0-807328db53b7\") " pod="openshift-marketplace/redhat-operators-ktjlv" Jan 22 08:11:16 crc kubenswrapper[4933]: I0122 08:11:16.543213 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nwtdv\" (UniqueName: \"kubernetes.io/projected/3121446d-3c4d-4f20-b0b0-807328db53b7-kube-api-access-nwtdv\") pod \"redhat-operators-ktjlv\" (UID: \"3121446d-3c4d-4f20-b0b0-807328db53b7\") " pod="openshift-marketplace/redhat-operators-ktjlv" Jan 22 08:11:16 crc kubenswrapper[4933]: I0122 08:11:16.543353 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3121446d-3c4d-4f20-b0b0-807328db53b7-utilities\") pod \"redhat-operators-ktjlv\" (UID: \"3121446d-3c4d-4f20-b0b0-807328db53b7\") " pod="openshift-marketplace/redhat-operators-ktjlv" Jan 22 08:11:16 crc kubenswrapper[4933]: I0122 08:11:16.645635 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3121446d-3c4d-4f20-b0b0-807328db53b7-catalog-content\") pod \"redhat-operators-ktjlv\" (UID: \"3121446d-3c4d-4f20-b0b0-807328db53b7\") " pod="openshift-marketplace/redhat-operators-ktjlv" Jan 22 08:11:16 crc kubenswrapper[4933]: I0122 08:11:16.645790 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nwtdv\" (UniqueName: \"kubernetes.io/projected/3121446d-3c4d-4f20-b0b0-807328db53b7-kube-api-access-nwtdv\") pod \"redhat-operators-ktjlv\" (UID: \"3121446d-3c4d-4f20-b0b0-807328db53b7\") " pod="openshift-marketplace/redhat-operators-ktjlv" Jan 22 08:11:16 crc kubenswrapper[4933]: I0122 08:11:16.645854 4933 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3121446d-3c4d-4f20-b0b0-807328db53b7-utilities\") pod \"redhat-operators-ktjlv\" (UID: \"3121446d-3c4d-4f20-b0b0-807328db53b7\") " pod="openshift-marketplace/redhat-operators-ktjlv" Jan 22 08:11:16 crc kubenswrapper[4933]: I0122 08:11:16.646548 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3121446d-3c4d-4f20-b0b0-807328db53b7-utilities\") pod \"redhat-operators-ktjlv\" (UID: \"3121446d-3c4d-4f20-b0b0-807328db53b7\") " pod="openshift-marketplace/redhat-operators-ktjlv" Jan 22 08:11:16 crc kubenswrapper[4933]: I0122 08:11:16.646763 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3121446d-3c4d-4f20-b0b0-807328db53b7-catalog-content\") pod \"redhat-operators-ktjlv\" (UID: \"3121446d-3c4d-4f20-b0b0-807328db53b7\") " pod="openshift-marketplace/redhat-operators-ktjlv" Jan 22 08:11:16 crc kubenswrapper[4933]: I0122 08:11:16.666950 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nwtdv\" (UniqueName: \"kubernetes.io/projected/3121446d-3c4d-4f20-b0b0-807328db53b7-kube-api-access-nwtdv\") pod \"redhat-operators-ktjlv\" (UID: \"3121446d-3c4d-4f20-b0b0-807328db53b7\") " pod="openshift-marketplace/redhat-operators-ktjlv" Jan 22 08:11:16 crc kubenswrapper[4933]: I0122 08:11:16.756326 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ktjlv" Jan 22 08:11:17 crc kubenswrapper[4933]: I0122 08:11:17.270837 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ktjlv"] Jan 22 08:11:18 crc kubenswrapper[4933]: I0122 08:11:18.088705 4933 generic.go:334] "Generic (PLEG): container finished" podID="3121446d-3c4d-4f20-b0b0-807328db53b7" containerID="e79d28445d9a238db115e8e9c25d6b2a44e2259aa1d69958135b3073175da232" exitCode=0 Jan 22 08:11:18 crc kubenswrapper[4933]: I0122 08:11:18.088799 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ktjlv" event={"ID":"3121446d-3c4d-4f20-b0b0-807328db53b7","Type":"ContainerDied","Data":"e79d28445d9a238db115e8e9c25d6b2a44e2259aa1d69958135b3073175da232"} Jan 22 08:11:18 crc kubenswrapper[4933]: I0122 08:11:18.088997 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ktjlv" event={"ID":"3121446d-3c4d-4f20-b0b0-807328db53b7","Type":"ContainerStarted","Data":"c1a3ac93cdaa66dff7c1df51affaac0d6f3a149418494b45349fd7867a9cc9de"} Jan 22 08:11:19 crc kubenswrapper[4933]: I0122 08:11:19.100698 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ktjlv" event={"ID":"3121446d-3c4d-4f20-b0b0-807328db53b7","Type":"ContainerStarted","Data":"fb13a031355175789116d66f29b5c0c0816faea7ad45c9f62916734ca6580fb7"} Jan 22 08:11:21 crc kubenswrapper[4933]: I0122 08:11:21.129719 4933 generic.go:334] "Generic (PLEG): container finished" podID="3121446d-3c4d-4f20-b0b0-807328db53b7" containerID="fb13a031355175789116d66f29b5c0c0816faea7ad45c9f62916734ca6580fb7" exitCode=0 Jan 22 08:11:21 crc kubenswrapper[4933]: I0122 08:11:21.129836 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ktjlv" 
event={"ID":"3121446d-3c4d-4f20-b0b0-807328db53b7","Type":"ContainerDied","Data":"fb13a031355175789116d66f29b5c0c0816faea7ad45c9f62916734ca6580fb7"} Jan 22 08:11:22 crc kubenswrapper[4933]: I0122 08:11:22.156426 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ktjlv" event={"ID":"3121446d-3c4d-4f20-b0b0-807328db53b7","Type":"ContainerStarted","Data":"e094959eaec6f52b2015e17d0720bb16db362f360655cb2a840765b4bb8267cd"} Jan 22 08:11:22 crc kubenswrapper[4933]: I0122 08:11:22.178302 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-ktjlv" podStartSLOduration=2.660078678 podStartE2EDuration="6.178282446s" podCreationTimestamp="2026-01-22 08:11:16 +0000 UTC" firstStartedPulling="2026-01-22 08:11:18.091482725 +0000 UTC m=+8725.928608078" lastFinishedPulling="2026-01-22 08:11:21.609686493 +0000 UTC m=+8729.446811846" observedRunningTime="2026-01-22 08:11:22.176970164 +0000 UTC m=+8730.014095517" watchObservedRunningTime="2026-01-22 08:11:22.178282446 +0000 UTC m=+8730.015407819" Jan 22 08:11:26 crc kubenswrapper[4933]: I0122 08:11:26.757271 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-ktjlv" Jan 22 08:11:26 crc kubenswrapper[4933]: I0122 08:11:26.757781 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-ktjlv" Jan 22 08:11:27 crc kubenswrapper[4933]: I0122 08:11:27.814624 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-ktjlv" podUID="3121446d-3c4d-4f20-b0b0-807328db53b7" containerName="registry-server" probeResult="failure" output=< Jan 22 08:11:27 crc kubenswrapper[4933]: timeout: failed to connect service ":50051" within 1s Jan 22 08:11:27 crc kubenswrapper[4933]: > Jan 22 08:11:36 crc kubenswrapper[4933]: I0122 08:11:36.963495 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-ktjlv" Jan 22 08:11:37 crc kubenswrapper[4933]: I0122 08:11:37.030608 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-ktjlv" Jan 22 08:11:37 crc kubenswrapper[4933]: I0122 08:11:37.236804 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ktjlv"] Jan 22 08:11:38 crc kubenswrapper[4933]: I0122 08:11:38.314441 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-ktjlv" podUID="3121446d-3c4d-4f20-b0b0-807328db53b7" containerName="registry-server" containerID="cri-o://e094959eaec6f52b2015e17d0720bb16db362f360655cb2a840765b4bb8267cd" gracePeriod=2 Jan 22 08:11:38 crc kubenswrapper[4933]: I0122 08:11:38.808562 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-ktjlv" Jan 22 08:11:38 crc kubenswrapper[4933]: I0122 08:11:38.879781 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3121446d-3c4d-4f20-b0b0-807328db53b7-catalog-content\") pod \"3121446d-3c4d-4f20-b0b0-807328db53b7\" (UID: \"3121446d-3c4d-4f20-b0b0-807328db53b7\") " Jan 22 08:11:38 crc kubenswrapper[4933]: I0122 08:11:38.879829 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nwtdv\" (UniqueName: \"kubernetes.io/projected/3121446d-3c4d-4f20-b0b0-807328db53b7-kube-api-access-nwtdv\") pod \"3121446d-3c4d-4f20-b0b0-807328db53b7\" (UID: \"3121446d-3c4d-4f20-b0b0-807328db53b7\") " Jan 22 08:11:38 crc kubenswrapper[4933]: I0122 08:11:38.879877 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3121446d-3c4d-4f20-b0b0-807328db53b7-utilities\") pod \"3121446d-3c4d-4f20-b0b0-807328db53b7\" (UID: \"3121446d-3c4d-4f20-b0b0-807328db53b7\") " Jan 22 08:11:38 crc kubenswrapper[4933]: I0122 08:11:38.880760 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3121446d-3c4d-4f20-b0b0-807328db53b7-utilities" (OuterVolumeSpecName: "utilities") pod "3121446d-3c4d-4f20-b0b0-807328db53b7" (UID: "3121446d-3c4d-4f20-b0b0-807328db53b7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 08:11:38 crc kubenswrapper[4933]: I0122 08:11:38.885143 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3121446d-3c4d-4f20-b0b0-807328db53b7-kube-api-access-nwtdv" (OuterVolumeSpecName: "kube-api-access-nwtdv") pod "3121446d-3c4d-4f20-b0b0-807328db53b7" (UID: "3121446d-3c4d-4f20-b0b0-807328db53b7"). InnerVolumeSpecName "kube-api-access-nwtdv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 08:11:38 crc kubenswrapper[4933]: I0122 08:11:38.983035 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nwtdv\" (UniqueName: \"kubernetes.io/projected/3121446d-3c4d-4f20-b0b0-807328db53b7-kube-api-access-nwtdv\") on node \"crc\" DevicePath \"\"" Jan 22 08:11:38 crc kubenswrapper[4933]: I0122 08:11:38.983090 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3121446d-3c4d-4f20-b0b0-807328db53b7-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 08:11:38 crc kubenswrapper[4933]: I0122 08:11:38.999372 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3121446d-3c4d-4f20-b0b0-807328db53b7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3121446d-3c4d-4f20-b0b0-807328db53b7" (UID: "3121446d-3c4d-4f20-b0b0-807328db53b7"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 08:11:39 crc kubenswrapper[4933]: I0122 08:11:39.085208 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3121446d-3c4d-4f20-b0b0-807328db53b7-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 08:11:39 crc kubenswrapper[4933]: I0122 08:11:39.324888 4933 generic.go:334] "Generic (PLEG): container finished" podID="3121446d-3c4d-4f20-b0b0-807328db53b7" containerID="e094959eaec6f52b2015e17d0720bb16db362f360655cb2a840765b4bb8267cd" exitCode=0 Jan 22 08:11:39 crc kubenswrapper[4933]: I0122 08:11:39.324938 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ktjlv" event={"ID":"3121446d-3c4d-4f20-b0b0-807328db53b7","Type":"ContainerDied","Data":"e094959eaec6f52b2015e17d0720bb16db362f360655cb2a840765b4bb8267cd"} Jan 22 08:11:39 crc kubenswrapper[4933]: I0122 08:11:39.324967 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ktjlv" event={"ID":"3121446d-3c4d-4f20-b0b0-807328db53b7","Type":"ContainerDied","Data":"c1a3ac93cdaa66dff7c1df51affaac0d6f3a149418494b45349fd7867a9cc9de"} Jan 22 08:11:39 crc kubenswrapper[4933]: I0122 08:11:39.324983 4933 scope.go:117] "RemoveContainer" containerID="e094959eaec6f52b2015e17d0720bb16db362f360655cb2a840765b4bb8267cd" Jan 22 08:11:39 crc kubenswrapper[4933]: I0122 08:11:39.325125 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ktjlv" Jan 22 08:11:39 crc kubenswrapper[4933]: I0122 08:11:39.362761 4933 scope.go:117] "RemoveContainer" containerID="fb13a031355175789116d66f29b5c0c0816faea7ad45c9f62916734ca6580fb7" Jan 22 08:11:39 crc kubenswrapper[4933]: I0122 08:11:39.370936 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ktjlv"] Jan 22 08:11:39 crc kubenswrapper[4933]: I0122 08:11:39.381952 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-ktjlv"] Jan 22 08:11:39 crc kubenswrapper[4933]: I0122 08:11:39.385226 4933 scope.go:117] "RemoveContainer" containerID="e79d28445d9a238db115e8e9c25d6b2a44e2259aa1d69958135b3073175da232" Jan 22 08:11:39 crc kubenswrapper[4933]: I0122 08:11:39.429738 4933 scope.go:117] "RemoveContainer" containerID="e094959eaec6f52b2015e17d0720bb16db362f360655cb2a840765b4bb8267cd" Jan 22 08:11:39 crc kubenswrapper[4933]: E0122 08:11:39.430132 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e094959eaec6f52b2015e17d0720bb16db362f360655cb2a840765b4bb8267cd\": container with ID starting with e094959eaec6f52b2015e17d0720bb16db362f360655cb2a840765b4bb8267cd not found: ID does not exist" containerID="e094959eaec6f52b2015e17d0720bb16db362f360655cb2a840765b4bb8267cd" Jan 22 08:11:39 crc kubenswrapper[4933]: I0122 08:11:39.430176 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e094959eaec6f52b2015e17d0720bb16db362f360655cb2a840765b4bb8267cd"} err="failed to get container status \"e094959eaec6f52b2015e17d0720bb16db362f360655cb2a840765b4bb8267cd\": rpc error: code = NotFound desc = could not find container \"e094959eaec6f52b2015e17d0720bb16db362f360655cb2a840765b4bb8267cd\": container with ID starting with e094959eaec6f52b2015e17d0720bb16db362f360655cb2a840765b4bb8267cd not found: ID does not exist" Jan 22 08:11:39 crc 
kubenswrapper[4933]: I0122 08:11:39.430203 4933 scope.go:117] "RemoveContainer" containerID="fb13a031355175789116d66f29b5c0c0816faea7ad45c9f62916734ca6580fb7" Jan 22 08:11:39 crc kubenswrapper[4933]: E0122 08:11:39.430438 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fb13a031355175789116d66f29b5c0c0816faea7ad45c9f62916734ca6580fb7\": container with ID starting with fb13a031355175789116d66f29b5c0c0816faea7ad45c9f62916734ca6580fb7 not found: ID does not exist" containerID="fb13a031355175789116d66f29b5c0c0816faea7ad45c9f62916734ca6580fb7" Jan 22 08:11:39 crc kubenswrapper[4933]: I0122 08:11:39.430468 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fb13a031355175789116d66f29b5c0c0816faea7ad45c9f62916734ca6580fb7"} err="failed to get container status \"fb13a031355175789116d66f29b5c0c0816faea7ad45c9f62916734ca6580fb7\": rpc error: code = NotFound desc = could not find container \"fb13a031355175789116d66f29b5c0c0816faea7ad45c9f62916734ca6580fb7\": container with ID starting with fb13a031355175789116d66f29b5c0c0816faea7ad45c9f62916734ca6580fb7 not found: ID does not exist" Jan 22 08:11:39 crc kubenswrapper[4933]: I0122 08:11:39.430514 4933 scope.go:117] "RemoveContainer" containerID="e79d28445d9a238db115e8e9c25d6b2a44e2259aa1d69958135b3073175da232" Jan 22 08:11:39 crc kubenswrapper[4933]: E0122 08:11:39.430728 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e79d28445d9a238db115e8e9c25d6b2a44e2259aa1d69958135b3073175da232\": container with ID starting with e79d28445d9a238db115e8e9c25d6b2a44e2259aa1d69958135b3073175da232 not found: ID does not exist" containerID="e79d28445d9a238db115e8e9c25d6b2a44e2259aa1d69958135b3073175da232" Jan 22 08:11:39 crc kubenswrapper[4933]: I0122 08:11:39.430757 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e79d28445d9a238db115e8e9c25d6b2a44e2259aa1d69958135b3073175da232"} err="failed to get container status \"e79d28445d9a238db115e8e9c25d6b2a44e2259aa1d69958135b3073175da232\": rpc error: code = NotFound desc = could not find container \"e79d28445d9a238db115e8e9c25d6b2a44e2259aa1d69958135b3073175da232\": container with ID starting with e79d28445d9a238db115e8e9c25d6b2a44e2259aa1d69958135b3073175da232 not found: ID does not exist" Jan 22 08:11:40 crc kubenswrapper[4933]: I0122 08:11:40.505585 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3121446d-3c4d-4f20-b0b0-807328db53b7" path="/var/lib/kubelet/pods/3121446d-3c4d-4f20-b0b0-807328db53b7/volumes" Jan 22 08:13:10 crc kubenswrapper[4933]: I0122 08:13:10.946635 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 08:13:10 crc kubenswrapper[4933]: I0122 08:13:10.948434 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 08:13:25 crc kubenswrapper[4933]: I0122 08:13:25.388055 4933 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["openshift-marketplace/community-operators-rjwjw"] Jan 22 08:13:25 crc kubenswrapper[4933]: E0122 08:13:25.395885 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3121446d-3c4d-4f20-b0b0-807328db53b7" containerName="extract-utilities" Jan 22 08:13:25 crc kubenswrapper[4933]: I0122 08:13:25.396246 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="3121446d-3c4d-4f20-b0b0-807328db53b7" containerName="extract-utilities" Jan 22 08:13:25 crc kubenswrapper[4933]: E0122 08:13:25.396389 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3121446d-3c4d-4f20-b0b0-807328db53b7" containerName="registry-server" Jan 22 08:13:25 crc kubenswrapper[4933]: I0122 08:13:25.396489 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="3121446d-3c4d-4f20-b0b0-807328db53b7" containerName="registry-server" Jan 22 08:13:25 crc kubenswrapper[4933]: E0122 08:13:25.396668 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3121446d-3c4d-4f20-b0b0-807328db53b7" containerName="extract-content" Jan 22 08:13:25 crc kubenswrapper[4933]: I0122 08:13:25.396774 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="3121446d-3c4d-4f20-b0b0-807328db53b7" containerName="extract-content" Jan 22 08:13:25 crc kubenswrapper[4933]: I0122 08:13:25.397233 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="3121446d-3c4d-4f20-b0b0-807328db53b7" containerName="registry-server" Jan 22 08:13:25 crc kubenswrapper[4933]: I0122 08:13:25.402003 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rjwjw" Jan 22 08:13:25 crc kubenswrapper[4933]: I0122 08:13:25.406695 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rjwjw"] Jan 22 08:13:25 crc kubenswrapper[4933]: I0122 08:13:25.542343 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92562b1d-d099-4404-8184-5bf5bf0e16e4-utilities\") pod \"community-operators-rjwjw\" (UID: \"92562b1d-d099-4404-8184-5bf5bf0e16e4\") " pod="openshift-marketplace/community-operators-rjwjw" Jan 22 08:13:25 crc kubenswrapper[4933]: I0122 08:13:25.542752 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92562b1d-d099-4404-8184-5bf5bf0e16e4-catalog-content\") pod \"community-operators-rjwjw\" (UID: \"92562b1d-d099-4404-8184-5bf5bf0e16e4\") " pod="openshift-marketplace/community-operators-rjwjw" Jan 22 08:13:25 crc kubenswrapper[4933]: I0122 08:13:25.543214 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mthz5\" (UniqueName: \"kubernetes.io/projected/92562b1d-d099-4404-8184-5bf5bf0e16e4-kube-api-access-mthz5\") pod \"community-operators-rjwjw\" (UID: \"92562b1d-d099-4404-8184-5bf5bf0e16e4\") " pod="openshift-marketplace/community-operators-rjwjw" Jan 22 08:13:25 crc kubenswrapper[4933]: I0122 08:13:25.644859 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mthz5\" (UniqueName: \"kubernetes.io/projected/92562b1d-d099-4404-8184-5bf5bf0e16e4-kube-api-access-mthz5\") pod \"community-operators-rjwjw\" (UID: \"92562b1d-d099-4404-8184-5bf5bf0e16e4\") " pod="openshift-marketplace/community-operators-rjwjw" Jan 22 08:13:25 crc kubenswrapper[4933]: I0122 08:13:25.644955 
4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92562b1d-d099-4404-8184-5bf5bf0e16e4-utilities\") pod \"community-operators-rjwjw\" (UID: \"92562b1d-d099-4404-8184-5bf5bf0e16e4\") " pod="openshift-marketplace/community-operators-rjwjw" Jan 22 08:13:25 crc kubenswrapper[4933]: I0122 08:13:25.645117 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92562b1d-d099-4404-8184-5bf5bf0e16e4-catalog-content\") pod \"community-operators-rjwjw\" (UID: \"92562b1d-d099-4404-8184-5bf5bf0e16e4\") " pod="openshift-marketplace/community-operators-rjwjw" Jan 22 08:13:25 crc kubenswrapper[4933]: I0122 08:13:25.645707 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92562b1d-d099-4404-8184-5bf5bf0e16e4-utilities\") pod \"community-operators-rjwjw\" (UID: \"92562b1d-d099-4404-8184-5bf5bf0e16e4\") " pod="openshift-marketplace/community-operators-rjwjw" Jan 22 08:13:25 crc kubenswrapper[4933]: I0122 08:13:25.645806 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92562b1d-d099-4404-8184-5bf5bf0e16e4-catalog-content\") pod \"community-operators-rjwjw\" (UID: \"92562b1d-d099-4404-8184-5bf5bf0e16e4\") " pod="openshift-marketplace/community-operators-rjwjw" Jan 22 08:13:26 crc kubenswrapper[4933]: I0122 08:13:26.301730 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mthz5\" (UniqueName: \"kubernetes.io/projected/92562b1d-d099-4404-8184-5bf5bf0e16e4-kube-api-access-mthz5\") pod \"community-operators-rjwjw\" (UID: \"92562b1d-d099-4404-8184-5bf5bf0e16e4\") " pod="openshift-marketplace/community-operators-rjwjw" Jan 22 08:13:26 crc kubenswrapper[4933]: I0122 08:13:26.343820 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-rjwjw" Jan 22 08:13:26 crc kubenswrapper[4933]: I0122 08:13:26.887545 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rjwjw"] Jan 22 08:13:26 crc kubenswrapper[4933]: W0122 08:13:26.892027 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod92562b1d_d099_4404_8184_5bf5bf0e16e4.slice/crio-f8748031b8a1db2acf1d167fcbbc9b7d7773eb8fbe7fbe81fb82f1481cfc8776 WatchSource:0}: Error finding container f8748031b8a1db2acf1d167fcbbc9b7d7773eb8fbe7fbe81fb82f1481cfc8776: Status 404 returned error can't find the container with id f8748031b8a1db2acf1d167fcbbc9b7d7773eb8fbe7fbe81fb82f1481cfc8776 Jan 22 08:13:27 crc kubenswrapper[4933]: I0122 08:13:27.464797 4933 generic.go:334] "Generic (PLEG): container finished" podID="92562b1d-d099-4404-8184-5bf5bf0e16e4" containerID="3200863502af2ff7c2760f35b8723ca87ac5b027bdc8c5cb72189dc1482cc138" exitCode=0 Jan 22 08:13:27 crc kubenswrapper[4933]: I0122 08:13:27.464897 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rjwjw" event={"ID":"92562b1d-d099-4404-8184-5bf5bf0e16e4","Type":"ContainerDied","Data":"3200863502af2ff7c2760f35b8723ca87ac5b027bdc8c5cb72189dc1482cc138"} Jan 22 08:13:27 crc kubenswrapper[4933]: I0122 08:13:27.465347 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rjwjw" event={"ID":"92562b1d-d099-4404-8184-5bf5bf0e16e4","Type":"ContainerStarted","Data":"f8748031b8a1db2acf1d167fcbbc9b7d7773eb8fbe7fbe81fb82f1481cfc8776"} Jan 22 08:13:29 crc kubenswrapper[4933]: I0122 08:13:29.505020 4933 generic.go:334] "Generic (PLEG): container finished" podID="92562b1d-d099-4404-8184-5bf5bf0e16e4" containerID="0982a88cb5eaf9e806d4c96cf9ec5ea5a33e3827adfab47e5e90b38f0d85448c" exitCode=0 Jan 22 08:13:29 crc kubenswrapper[4933]: I0122 08:13:29.505137 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rjwjw" event={"ID":"92562b1d-d099-4404-8184-5bf5bf0e16e4","Type":"ContainerDied","Data":"0982a88cb5eaf9e806d4c96cf9ec5ea5a33e3827adfab47e5e90b38f0d85448c"} Jan 22 08:13:30 crc kubenswrapper[4933]: I0122 08:13:30.533512 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rjwjw" event={"ID":"92562b1d-d099-4404-8184-5bf5bf0e16e4","Type":"ContainerStarted","Data":"0c5a526103f31dc6c50498a93ddd00873a9e2ce4d2a30bf8a0a61b0febe6416b"} Jan 22 08:13:36 crc kubenswrapper[4933]: I0122 08:13:36.344357 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-rjwjw" Jan 22 08:13:36 crc kubenswrapper[4933]: I0122 08:13:36.344951 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-rjwjw" Jan 22 08:13:36 crc kubenswrapper[4933]: I0122 08:13:36.468860 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-rjwjw" Jan 22 08:13:36 crc kubenswrapper[4933]: I0122 08:13:36.499988 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-rjwjw" podStartSLOduration=9.04788398 podStartE2EDuration="11.499966179s" podCreationTimestamp="2026-01-22 08:13:25 +0000 UTC" firstStartedPulling="2026-01-22 08:13:27.467516561 +0000 UTC m=+8855.304641924" 
lastFinishedPulling="2026-01-22 08:13:29.91959877 +0000 UTC m=+8857.756724123" observedRunningTime="2026-01-22 08:13:30.557838799 +0000 UTC m=+8858.394964152" watchObservedRunningTime="2026-01-22 08:13:36.499966179 +0000 UTC m=+8864.337091532" Jan 22 08:13:36 crc kubenswrapper[4933]: I0122 08:13:36.654944 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-rjwjw" Jan 22 08:13:36 crc kubenswrapper[4933]: I0122 08:13:36.715599 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rjwjw"] Jan 22 08:13:38 crc kubenswrapper[4933]: I0122 08:13:38.618952 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-rjwjw" podUID="92562b1d-d099-4404-8184-5bf5bf0e16e4" containerName="registry-server" containerID="cri-o://0c5a526103f31dc6c50498a93ddd00873a9e2ce4d2a30bf8a0a61b0febe6416b" gracePeriod=2 Jan 22 08:13:39 crc kubenswrapper[4933]: I0122 08:13:39.122841 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rjwjw" Jan 22 08:13:39 crc kubenswrapper[4933]: I0122 08:13:39.239119 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92562b1d-d099-4404-8184-5bf5bf0e16e4-utilities\") pod \"92562b1d-d099-4404-8184-5bf5bf0e16e4\" (UID: \"92562b1d-d099-4404-8184-5bf5bf0e16e4\") " Jan 22 08:13:39 crc kubenswrapper[4933]: I0122 08:13:39.239830 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92562b1d-d099-4404-8184-5bf5bf0e16e4-catalog-content\") pod \"92562b1d-d099-4404-8184-5bf5bf0e16e4\" (UID: \"92562b1d-d099-4404-8184-5bf5bf0e16e4\") " Jan 22 08:13:39 crc kubenswrapper[4933]: I0122 08:13:39.240266 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mthz5\" (UniqueName: \"kubernetes.io/projected/92562b1d-d099-4404-8184-5bf5bf0e16e4-kube-api-access-mthz5\") pod \"92562b1d-d099-4404-8184-5bf5bf0e16e4\" (UID: \"92562b1d-d099-4404-8184-5bf5bf0e16e4\") " Jan 22 08:13:39 crc kubenswrapper[4933]: I0122 08:13:39.240263 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/92562b1d-d099-4404-8184-5bf5bf0e16e4-utilities" (OuterVolumeSpecName: "utilities") pod "92562b1d-d099-4404-8184-5bf5bf0e16e4" (UID: "92562b1d-d099-4404-8184-5bf5bf0e16e4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 08:13:39 crc kubenswrapper[4933]: I0122 08:13:39.240947 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92562b1d-d099-4404-8184-5bf5bf0e16e4-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 08:13:39 crc kubenswrapper[4933]: I0122 08:13:39.246439 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/92562b1d-d099-4404-8184-5bf5bf0e16e4-kube-api-access-mthz5" (OuterVolumeSpecName: "kube-api-access-mthz5") pod "92562b1d-d099-4404-8184-5bf5bf0e16e4" (UID: "92562b1d-d099-4404-8184-5bf5bf0e16e4"). InnerVolumeSpecName "kube-api-access-mthz5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 08:13:39 crc kubenswrapper[4933]: I0122 08:13:39.343140 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mthz5\" (UniqueName: \"kubernetes.io/projected/92562b1d-d099-4404-8184-5bf5bf0e16e4-kube-api-access-mthz5\") on node \"crc\" DevicePath \"\"" Jan 22 08:13:39 crc kubenswrapper[4933]: I0122 08:13:39.448400 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/92562b1d-d099-4404-8184-5bf5bf0e16e4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "92562b1d-d099-4404-8184-5bf5bf0e16e4" (UID: "92562b1d-d099-4404-8184-5bf5bf0e16e4"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 08:13:39 crc kubenswrapper[4933]: I0122 08:13:39.547716 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92562b1d-d099-4404-8184-5bf5bf0e16e4-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 08:13:39 crc kubenswrapper[4933]: I0122 08:13:39.635442 4933 generic.go:334] "Generic (PLEG): container finished" podID="92562b1d-d099-4404-8184-5bf5bf0e16e4" containerID="0c5a526103f31dc6c50498a93ddd00873a9e2ce4d2a30bf8a0a61b0febe6416b" exitCode=0 Jan 22 08:13:39 crc kubenswrapper[4933]: I0122 08:13:39.635520 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rjwjw" event={"ID":"92562b1d-d099-4404-8184-5bf5bf0e16e4","Type":"ContainerDied","Data":"0c5a526103f31dc6c50498a93ddd00873a9e2ce4d2a30bf8a0a61b0febe6416b"} Jan 22 08:13:39 crc kubenswrapper[4933]: I0122 08:13:39.635577 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rjwjw" Jan 22 08:13:39 crc kubenswrapper[4933]: I0122 08:13:39.635599 4933 scope.go:117] "RemoveContainer" containerID="0c5a526103f31dc6c50498a93ddd00873a9e2ce4d2a30bf8a0a61b0febe6416b" Jan 22 08:13:39 crc kubenswrapper[4933]: I0122 08:13:39.635577 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rjwjw" event={"ID":"92562b1d-d099-4404-8184-5bf5bf0e16e4","Type":"ContainerDied","Data":"f8748031b8a1db2acf1d167fcbbc9b7d7773eb8fbe7fbe81fb82f1481cfc8776"} Jan 22 08:13:39 crc kubenswrapper[4933]: I0122 08:13:39.663240 4933 scope.go:117] "RemoveContainer" containerID="0982a88cb5eaf9e806d4c96cf9ec5ea5a33e3827adfab47e5e90b38f0d85448c" Jan 22 08:13:39 crc kubenswrapper[4933]: I0122 08:13:39.691154 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rjwjw"] Jan 22 08:13:39 crc kubenswrapper[4933]: I0122 08:13:39.705598 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-rjwjw"] Jan 22 08:13:40 crc kubenswrapper[4933]: I0122 08:13:40.119974 4933 scope.go:117] "RemoveContainer" containerID="3200863502af2ff7c2760f35b8723ca87ac5b027bdc8c5cb72189dc1482cc138" Jan 22 08:13:40 crc kubenswrapper[4933]: I0122 08:13:40.182588 4933 scope.go:117] "RemoveContainer" containerID="0c5a526103f31dc6c50498a93ddd00873a9e2ce4d2a30bf8a0a61b0febe6416b" Jan 22 08:13:40 crc kubenswrapper[4933]: E0122 08:13:40.183348 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0c5a526103f31dc6c50498a93ddd00873a9e2ce4d2a30bf8a0a61b0febe6416b\": container with ID starting with 
Jan 22 08:13:40 crc kubenswrapper[4933]: I0122 08:13:40.183390 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c5a526103f31dc6c50498a93ddd00873a9e2ce4d2a30bf8a0a61b0febe6416b"} err="failed to get container status \"0c5a526103f31dc6c50498a93ddd00873a9e2ce4d2a30bf8a0a61b0febe6416b\": rpc error: code = NotFound desc = could not find container \"0c5a526103f31dc6c50498a93ddd00873a9e2ce4d2a30bf8a0a61b0febe6416b\": container with ID starting with 0c5a526103f31dc6c50498a93ddd00873a9e2ce4d2a30bf8a0a61b0febe6416b not found: ID does not exist"
Jan 22 08:13:40 crc kubenswrapper[4933]: I0122 08:13:40.183436 4933 scope.go:117] "RemoveContainer" containerID="0982a88cb5eaf9e806d4c96cf9ec5ea5a33e3827adfab47e5e90b38f0d85448c"
Jan 22 08:13:40 crc kubenswrapper[4933]: E0122 08:13:40.183681 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0982a88cb5eaf9e806d4c96cf9ec5ea5a33e3827adfab47e5e90b38f0d85448c\": container with ID starting with 0982a88cb5eaf9e806d4c96cf9ec5ea5a33e3827adfab47e5e90b38f0d85448c not found: ID does not exist" containerID="0982a88cb5eaf9e806d4c96cf9ec5ea5a33e3827adfab47e5e90b38f0d85448c"
Jan 22 08:13:40 crc kubenswrapper[4933]: I0122 08:13:40.183713 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0982a88cb5eaf9e806d4c96cf9ec5ea5a33e3827adfab47e5e90b38f0d85448c"} err="failed to get container status \"0982a88cb5eaf9e806d4c96cf9ec5ea5a33e3827adfab47e5e90b38f0d85448c\": rpc error: code = NotFound desc = could not find container \"0982a88cb5eaf9e806d4c96cf9ec5ea5a33e3827adfab47e5e90b38f0d85448c\": container with ID starting with 0982a88cb5eaf9e806d4c96cf9ec5ea5a33e3827adfab47e5e90b38f0d85448c not found: ID does not exist"
Jan 22 08:13:40 crc kubenswrapper[4933]: I0122 08:13:40.183737 4933 scope.go:117] "RemoveContainer" containerID="3200863502af2ff7c2760f35b8723ca87ac5b027bdc8c5cb72189dc1482cc138"
Jan 22 08:13:40 crc kubenswrapper[4933]: E0122 08:13:40.183985 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3200863502af2ff7c2760f35b8723ca87ac5b027bdc8c5cb72189dc1482cc138\": container with ID starting with 3200863502af2ff7c2760f35b8723ca87ac5b027bdc8c5cb72189dc1482cc138 not found: ID does not exist" containerID="3200863502af2ff7c2760f35b8723ca87ac5b027bdc8c5cb72189dc1482cc138"
Jan 22 08:13:40 crc kubenswrapper[4933]: I0122 08:13:40.184020 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3200863502af2ff7c2760f35b8723ca87ac5b027bdc8c5cb72189dc1482cc138"} err="failed to get container status \"3200863502af2ff7c2760f35b8723ca87ac5b027bdc8c5cb72189dc1482cc138\": rpc error: code = NotFound desc = could not find container \"3200863502af2ff7c2760f35b8723ca87ac5b027bdc8c5cb72189dc1482cc138\": container with ID starting with 3200863502af2ff7c2760f35b8723ca87ac5b027bdc8c5cb72189dc1482cc138 not found: ID does not exist"
Jan 22 08:13:40 crc kubenswrapper[4933]: I0122 08:13:40.502844 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="92562b1d-d099-4404-8184-5bf5bf0e16e4" path="/var/lib/kubelet/pods/92562b1d-d099-4404-8184-5bf5bf0e16e4/volumes"
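Each RemoveContainer above is immediately followed by a "ContainerStatus from runtime service failed ... NotFound" error: the kubelet asks CRI-O for the status of a container it has just removed, and the runtime reports it gone. The "DeleteContainer returned error" lines show the kubelet logging the error and moving on, because NotFound after a removal means the work is already done. A minimal Go sketch of that tolerance, assuming a CRI-style call that surfaces gRPC status errors (the helper below is illustrative, not the kubelet's actual code):

    package main

    import (
        "fmt"

        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    // containerStatus stands in for the CRI ContainerStatus call; after a
    // successful removal the runtime answers NotFound, as in the log above.
    func containerStatus(id string) error {
        return status.Errorf(codes.NotFound,
            "could not find container %q: ID does not exist", id)
    }

    func main() {
        id := "0c5a526103f31dc6c50498a93ddd00873a9e2ce4d2a30bf8a0a61b0febe6416b"
        if err := containerStatus(id); err != nil {
            // NotFound right after RemoveContainer means the container is
            // already gone, so it is logged and swallowed, not retried.
            if status.Code(err) == codes.NotFound {
                fmt.Println("DeleteContainer returned error (already removed):", err)
                return
            }
            panic(err) // anything else would be a real runtime failure
        }
    }

This is why the NotFound entries appear at error severity yet the teardown still completes and the orphaned volumes dir is cleaned up on the next line.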
Jan 22 08:13:40 crc kubenswrapper[4933]: I0122 08:13:40.942557 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 08:13:40 crc kubenswrapper[4933]: I0122 08:13:40.942614 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 08:14:10 crc kubenswrapper[4933]: I0122 08:14:10.943634 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 08:14:10 crc kubenswrapper[4933]: I0122 08:14:10.944429 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 08:14:10 crc kubenswrapper[4933]: I0122 08:14:10.944500 4933 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx"
Jan 22 08:14:10 crc kubenswrapper[4933]: I0122 08:14:10.946268 4933 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6fae462cde8930b9f721583df9f743ddcf60826d446fdfdfcfc9fe9adf4d01a4"} pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 22 08:14:10 crc kubenswrapper[4933]: I0122 08:14:10.946358 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" containerID="cri-o://6fae462cde8930b9f721583df9f743ddcf60826d446fdfdfcfc9fe9adf4d01a4" gracePeriod=600
Jan 22 08:14:11 crc kubenswrapper[4933]: E0122 08:14:11.091019 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:14:11 crc kubenswrapper[4933]: I0122 08:14:11.951364 4933 generic.go:334] "Generic (PLEG): container finished" podID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerID="6fae462cde8930b9f721583df9f743ddcf60826d446fdfdfcfc9fe9adf4d01a4" exitCode=0
Jan 22 08:14:11 crc kubenswrapper[4933]: I0122 08:14:11.951429 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerDied","Data":"6fae462cde8930b9f721583df9f743ddcf60826d446fdfdfcfc9fe9adf4d01a4"}
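The machine-config-daemon liveness probe is a plain HTTP GET against 127.0.0.1:8798/health, and it fails at the TCP layer (connection refused) because nothing is listening on that port. A minimal Go sketch of an equivalent check; the endpoint comes from the log, while the 1s client timeout is an assumed default:

    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    func main() {
        // Probe budget; 1s mirrors a typical timeoutSeconds (assumption).
        client := &http.Client{Timeout: time.Second}

        // Same endpoint the kubelet probes in the log above.
        resp, err := client.Get("http://127.0.0.1:8798/health")
        if err != nil {
            // With no listener this reports:
            //   Get "http://127.0.0.1:8798/health": dial tcp 127.0.0.1:8798: connect: connection refused
            fmt.Println("Liveness probe status=failure output:", err)
            return
        }
        defer resp.Body.Close()

        // The kubelet treats any 2xx/3xx status as healthy.
        if resp.StatusCode >= 200 && resp.StatusCode < 400 {
            fmt.Println("Liveness probe status=success")
        } else {
            fmt.Println("Liveness probe status=failure, HTTP", resp.StatusCode)
        }
    }

Because the probe is declared as liveness rather than readiness, repeated failures do not just mark the pod unready: the kubelet kills the container ("failed liveness probe, will be restarted") and hands it to the restart backoff, which is what the following entries show.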
Jan 22 08:14:11 crc kubenswrapper[4933]: I0122 08:14:11.951789 4933 scope.go:117] "RemoveContainer" containerID="0eb1b1ad00eba263f6765d2eb1d795c255fb8b5e6c5cd831ddc2b8fc4281ee98"
Jan 22 08:14:11 crc kubenswrapper[4933]: I0122 08:14:11.953113 4933 scope.go:117] "RemoveContainer" containerID="6fae462cde8930b9f721583df9f743ddcf60826d446fdfdfcfc9fe9adf4d01a4"
Jan 22 08:14:11 crc kubenswrapper[4933]: E0122 08:14:11.954385 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:14:25 crc kubenswrapper[4933]: I0122 08:14:25.491559 4933 scope.go:117] "RemoveContainer" containerID="6fae462cde8930b9f721583df9f743ddcf60826d446fdfdfcfc9fe9adf4d01a4"
Jan 22 08:14:25 crc kubenswrapper[4933]: E0122 08:14:25.492628 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:14:37 crc kubenswrapper[4933]: I0122 08:14:37.491098 4933 scope.go:117] "RemoveContainer" containerID="6fae462cde8930b9f721583df9f743ddcf60826d446fdfdfcfc9fe9adf4d01a4"
Jan 22 08:14:37 crc kubenswrapper[4933]: E0122 08:14:37.491852 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:14:48 crc kubenswrapper[4933]: I0122 08:14:48.493757 4933 scope.go:117] "RemoveContainer" containerID="6fae462cde8930b9f721583df9f743ddcf60826d446fdfdfcfc9fe9adf4d01a4"
Jan 22 08:14:48 crc kubenswrapper[4933]: E0122 08:14:48.494496 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:15:00 crc kubenswrapper[4933]: I0122 08:15:00.150767 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484495-m8pxk"]
Jan 22 08:15:00 crc kubenswrapper[4933]: E0122 08:15:00.151840 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92562b1d-d099-4404-8184-5bf5bf0e16e4" containerName="registry-server"
Jan 22 08:15:00 crc kubenswrapper[4933]: I0122 08:15:00.151856 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="92562b1d-d099-4404-8184-5bf5bf0e16e4" containerName="registry-server"
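"back-off 5m0s" means the restart backoff for this container has reached its ceiling: by default the kubelet starts at a 10s delay and doubles it per restart up to a 5-minute cap, and the recurring "Error syncing pod, skipping" lines are sync attempts refused until that window expires. A sketch of the schedule under those default constants (illustrative, not kubelet source):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // Kubelet defaults (assumption): initial 10s delay, doubled per
        // restart, capped at 5m. Once capped, every retry logs "back-off 5m0s".
        delay, maxDelay := 10*time.Second, 5*time.Minute
        for restart := 1; restart <= 8; restart++ {
            fmt.Printf("restart %d: back-off %s\n", restart, delay)
            delay *= 2
            if delay > maxDelay {
                delay = maxDelay
            }
        }
    }

By the sixth restart the printed delay is already 5m0s, matching the message repeated throughout the rest of this log; the container is not retried any faster no matter how often the sync loop visits the pod.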
"RemoveStaleState: removing container" podUID="92562b1d-d099-4404-8184-5bf5bf0e16e4" containerName="extract-content" Jan 22 08:15:00 crc kubenswrapper[4933]: I0122 08:15:00.151894 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="92562b1d-d099-4404-8184-5bf5bf0e16e4" containerName="extract-content" Jan 22 08:15:00 crc kubenswrapper[4933]: E0122 08:15:00.151932 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92562b1d-d099-4404-8184-5bf5bf0e16e4" containerName="extract-utilities" Jan 22 08:15:00 crc kubenswrapper[4933]: I0122 08:15:00.151941 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="92562b1d-d099-4404-8184-5bf5bf0e16e4" containerName="extract-utilities" Jan 22 08:15:00 crc kubenswrapper[4933]: I0122 08:15:00.152213 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="92562b1d-d099-4404-8184-5bf5bf0e16e4" containerName="registry-server" Jan 22 08:15:00 crc kubenswrapper[4933]: I0122 08:15:00.153129 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484495-m8pxk" Jan 22 08:15:00 crc kubenswrapper[4933]: I0122 08:15:00.157414 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 22 08:15:00 crc kubenswrapper[4933]: I0122 08:15:00.158026 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 22 08:15:00 crc kubenswrapper[4933]: I0122 08:15:00.166936 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484495-m8pxk"] Jan 22 08:15:00 crc kubenswrapper[4933]: I0122 08:15:00.178332 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/704aef51-412c-4693-bf6d-9d90d8fa42e7-config-volume\") pod \"collect-profiles-29484495-m8pxk\" (UID: \"704aef51-412c-4693-bf6d-9d90d8fa42e7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484495-m8pxk" Jan 22 08:15:00 crc kubenswrapper[4933]: I0122 08:15:00.178410 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/704aef51-412c-4693-bf6d-9d90d8fa42e7-secret-volume\") pod \"collect-profiles-29484495-m8pxk\" (UID: \"704aef51-412c-4693-bf6d-9d90d8fa42e7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484495-m8pxk" Jan 22 08:15:00 crc kubenswrapper[4933]: I0122 08:15:00.178494 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6tnrl\" (UniqueName: \"kubernetes.io/projected/704aef51-412c-4693-bf6d-9d90d8fa42e7-kube-api-access-6tnrl\") pod \"collect-profiles-29484495-m8pxk\" (UID: \"704aef51-412c-4693-bf6d-9d90d8fa42e7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484495-m8pxk" Jan 22 08:15:00 crc kubenswrapper[4933]: I0122 08:15:00.285587 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/704aef51-412c-4693-bf6d-9d90d8fa42e7-config-volume\") pod \"collect-profiles-29484495-m8pxk\" (UID: \"704aef51-412c-4693-bf6d-9d90d8fa42e7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484495-m8pxk" Jan 22 08:15:00 crc kubenswrapper[4933]: I0122 08:15:00.285776 4933 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/704aef51-412c-4693-bf6d-9d90d8fa42e7-secret-volume\") pod \"collect-profiles-29484495-m8pxk\" (UID: \"704aef51-412c-4693-bf6d-9d90d8fa42e7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484495-m8pxk" Jan 22 08:15:00 crc kubenswrapper[4933]: I0122 08:15:00.285945 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6tnrl\" (UniqueName: \"kubernetes.io/projected/704aef51-412c-4693-bf6d-9d90d8fa42e7-kube-api-access-6tnrl\") pod \"collect-profiles-29484495-m8pxk\" (UID: \"704aef51-412c-4693-bf6d-9d90d8fa42e7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484495-m8pxk" Jan 22 08:15:00 crc kubenswrapper[4933]: I0122 08:15:00.287342 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/704aef51-412c-4693-bf6d-9d90d8fa42e7-config-volume\") pod \"collect-profiles-29484495-m8pxk\" (UID: \"704aef51-412c-4693-bf6d-9d90d8fa42e7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484495-m8pxk" Jan 22 08:15:00 crc kubenswrapper[4933]: I0122 08:15:00.293675 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/704aef51-412c-4693-bf6d-9d90d8fa42e7-secret-volume\") pod \"collect-profiles-29484495-m8pxk\" (UID: \"704aef51-412c-4693-bf6d-9d90d8fa42e7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484495-m8pxk" Jan 22 08:15:00 crc kubenswrapper[4933]: I0122 08:15:00.304384 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6tnrl\" (UniqueName: \"kubernetes.io/projected/704aef51-412c-4693-bf6d-9d90d8fa42e7-kube-api-access-6tnrl\") pod \"collect-profiles-29484495-m8pxk\" (UID: \"704aef51-412c-4693-bf6d-9d90d8fa42e7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484495-m8pxk" Jan 22 08:15:00 crc kubenswrapper[4933]: I0122 08:15:00.483255 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484495-m8pxk" Jan 22 08:15:00 crc kubenswrapper[4933]: I0122 08:15:00.938146 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484495-m8pxk"] Jan 22 08:15:00 crc kubenswrapper[4933]: W0122 08:15:00.946852 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod704aef51_412c_4693_bf6d_9d90d8fa42e7.slice/crio-6653e1b2bfe84e210a0bf704848a00f97a5757f3446bf163425e2c8beaa2aff3 WatchSource:0}: Error finding container 6653e1b2bfe84e210a0bf704848a00f97a5757f3446bf163425e2c8beaa2aff3: Status 404 returned error can't find the container with id 6653e1b2bfe84e210a0bf704848a00f97a5757f3446bf163425e2c8beaa2aff3 Jan 22 08:15:01 crc kubenswrapper[4933]: I0122 08:15:01.450588 4933 generic.go:334] "Generic (PLEG): container finished" podID="704aef51-412c-4693-bf6d-9d90d8fa42e7" containerID="fbacbe288d8c1619f96dc24d171620c2512ac5fae9f7aa969fdedded04a6c873" exitCode=0 Jan 22 08:15:01 crc kubenswrapper[4933]: I0122 08:15:01.450642 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484495-m8pxk" event={"ID":"704aef51-412c-4693-bf6d-9d90d8fa42e7","Type":"ContainerDied","Data":"fbacbe288d8c1619f96dc24d171620c2512ac5fae9f7aa969fdedded04a6c873"} Jan 22 08:15:01 crc kubenswrapper[4933]: I0122 08:15:01.450885 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484495-m8pxk" event={"ID":"704aef51-412c-4693-bf6d-9d90d8fa42e7","Type":"ContainerStarted","Data":"6653e1b2bfe84e210a0bf704848a00f97a5757f3446bf163425e2c8beaa2aff3"} Jan 22 08:15:02 crc kubenswrapper[4933]: I0122 08:15:02.855828 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484495-m8pxk" Jan 22 08:15:03 crc kubenswrapper[4933]: I0122 08:15:03.045488 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6tnrl\" (UniqueName: \"kubernetes.io/projected/704aef51-412c-4693-bf6d-9d90d8fa42e7-kube-api-access-6tnrl\") pod \"704aef51-412c-4693-bf6d-9d90d8fa42e7\" (UID: \"704aef51-412c-4693-bf6d-9d90d8fa42e7\") " Jan 22 08:15:03 crc kubenswrapper[4933]: I0122 08:15:03.045672 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/704aef51-412c-4693-bf6d-9d90d8fa42e7-config-volume\") pod \"704aef51-412c-4693-bf6d-9d90d8fa42e7\" (UID: \"704aef51-412c-4693-bf6d-9d90d8fa42e7\") " Jan 22 08:15:03 crc kubenswrapper[4933]: I0122 08:15:03.045850 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/704aef51-412c-4693-bf6d-9d90d8fa42e7-secret-volume\") pod \"704aef51-412c-4693-bf6d-9d90d8fa42e7\" (UID: \"704aef51-412c-4693-bf6d-9d90d8fa42e7\") " Jan 22 08:15:03 crc kubenswrapper[4933]: I0122 08:15:03.046601 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/704aef51-412c-4693-bf6d-9d90d8fa42e7-config-volume" (OuterVolumeSpecName: "config-volume") pod "704aef51-412c-4693-bf6d-9d90d8fa42e7" (UID: "704aef51-412c-4693-bf6d-9d90d8fa42e7"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 08:15:03 crc kubenswrapper[4933]: I0122 08:15:03.047790 4933 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/704aef51-412c-4693-bf6d-9d90d8fa42e7-config-volume\") on node \"crc\" DevicePath \"\"" Jan 22 08:15:03 crc kubenswrapper[4933]: I0122 08:15:03.052727 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/704aef51-412c-4693-bf6d-9d90d8fa42e7-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "704aef51-412c-4693-bf6d-9d90d8fa42e7" (UID: "704aef51-412c-4693-bf6d-9d90d8fa42e7"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 08:15:03 crc kubenswrapper[4933]: I0122 08:15:03.052807 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/704aef51-412c-4693-bf6d-9d90d8fa42e7-kube-api-access-6tnrl" (OuterVolumeSpecName: "kube-api-access-6tnrl") pod "704aef51-412c-4693-bf6d-9d90d8fa42e7" (UID: "704aef51-412c-4693-bf6d-9d90d8fa42e7"). InnerVolumeSpecName "kube-api-access-6tnrl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 08:15:03 crc kubenswrapper[4933]: I0122 08:15:03.150647 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6tnrl\" (UniqueName: \"kubernetes.io/projected/704aef51-412c-4693-bf6d-9d90d8fa42e7-kube-api-access-6tnrl\") on node \"crc\" DevicePath \"\"" Jan 22 08:15:03 crc kubenswrapper[4933]: I0122 08:15:03.150910 4933 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/704aef51-412c-4693-bf6d-9d90d8fa42e7-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 22 08:15:03 crc kubenswrapper[4933]: I0122 08:15:03.470519 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484495-m8pxk" event={"ID":"704aef51-412c-4693-bf6d-9d90d8fa42e7","Type":"ContainerDied","Data":"6653e1b2bfe84e210a0bf704848a00f97a5757f3446bf163425e2c8beaa2aff3"} Jan 22 08:15:03 crc kubenswrapper[4933]: I0122 08:15:03.470838 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6653e1b2bfe84e210a0bf704848a00f97a5757f3446bf163425e2c8beaa2aff3" Jan 22 08:15:03 crc kubenswrapper[4933]: I0122 08:15:03.470814 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484495-m8pxk" Jan 22 08:15:03 crc kubenswrapper[4933]: I0122 08:15:03.493483 4933 scope.go:117] "RemoveContainer" containerID="6fae462cde8930b9f721583df9f743ddcf60826d446fdfdfcfc9fe9adf4d01a4" Jan 22 08:15:03 crc kubenswrapper[4933]: E0122 08:15:03.493836 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:15:03 crc kubenswrapper[4933]: I0122 08:15:03.942241 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484450-pc2c4"] Jan 22 08:15:03 crc kubenswrapper[4933]: I0122 08:15:03.950894 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484450-pc2c4"] Jan 22 08:15:04 crc kubenswrapper[4933]: I0122 08:15:04.503455 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="58d48b6b-6a4a-4ab6-866c-251ea91606b4" path="/var/lib/kubelet/pods/58d48b6b-6a4a-4ab6-866c-251ea91606b4/volumes" Jan 22 08:15:10 crc kubenswrapper[4933]: I0122 08:15:10.637709 4933 scope.go:117] "RemoveContainer" containerID="d3b501225a3eb635dd64b971b927905a74c467676a62a38ede5d5e5bba6e04ed" Jan 22 08:15:14 crc kubenswrapper[4933]: I0122 08:15:14.491245 4933 scope.go:117] "RemoveContainer" containerID="6fae462cde8930b9f721583df9f743ddcf60826d446fdfdfcfc9fe9adf4d01a4" Jan 22 08:15:14 crc kubenswrapper[4933]: E0122 08:15:14.492143 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:15:25 crc kubenswrapper[4933]: I0122 08:15:25.490535 4933 scope.go:117] "RemoveContainer" containerID="6fae462cde8930b9f721583df9f743ddcf60826d446fdfdfcfc9fe9adf4d01a4" Jan 22 08:15:25 crc kubenswrapper[4933]: E0122 08:15:25.491248 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:15:37 crc kubenswrapper[4933]: I0122 08:15:37.491639 4933 scope.go:117] "RemoveContainer" containerID="6fae462cde8930b9f721583df9f743ddcf60826d446fdfdfcfc9fe9adf4d01a4" Jan 22 08:15:37 crc kubenswrapper[4933]: E0122 08:15:37.493810 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:15:48 crc kubenswrapper[4933]: I0122 08:15:48.491414 4933 scope.go:117] "RemoveContainer" containerID="6fae462cde8930b9f721583df9f743ddcf60826d446fdfdfcfc9fe9adf4d01a4" Jan 22 08:15:48 crc kubenswrapper[4933]: E0122 08:15:48.493461 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:15:59 crc kubenswrapper[4933]: I0122 08:15:59.490937 4933 scope.go:117] "RemoveContainer" containerID="6fae462cde8930b9f721583df9f743ddcf60826d446fdfdfcfc9fe9adf4d01a4" Jan 22 08:15:59 crc kubenswrapper[4933]: E0122 08:15:59.491695 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:16:13 crc kubenswrapper[4933]: I0122 08:16:13.491132 4933 scope.go:117] "RemoveContainer" containerID="6fae462cde8930b9f721583df9f743ddcf60826d446fdfdfcfc9fe9adf4d01a4" Jan 22 08:16:13 crc kubenswrapper[4933]: E0122 08:16:13.492213 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:16:26 crc kubenswrapper[4933]: I0122 08:16:26.491105 4933 scope.go:117] "RemoveContainer" containerID="6fae462cde8930b9f721583df9f743ddcf60826d446fdfdfcfc9fe9adf4d01a4" Jan 22 08:16:26 crc kubenswrapper[4933]: E0122 08:16:26.491919 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:16:38 crc kubenswrapper[4933]: I0122 08:16:38.496215 4933 scope.go:117] "RemoveContainer" containerID="6fae462cde8930b9f721583df9f743ddcf60826d446fdfdfcfc9fe9adf4d01a4" Jan 22 08:16:38 crc kubenswrapper[4933]: E0122 08:16:38.497181 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:16:51 crc kubenswrapper[4933]: I0122 08:16:51.491584 4933 
scope.go:117] "RemoveContainer" containerID="6fae462cde8930b9f721583df9f743ddcf60826d446fdfdfcfc9fe9adf4d01a4" Jan 22 08:16:51 crc kubenswrapper[4933]: E0122 08:16:51.495186 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:17:03 crc kubenswrapper[4933]: I0122 08:17:03.490994 4933 scope.go:117] "RemoveContainer" containerID="6fae462cde8930b9f721583df9f743ddcf60826d446fdfdfcfc9fe9adf4d01a4" Jan 22 08:17:03 crc kubenswrapper[4933]: E0122 08:17:03.492869 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:17:17 crc kubenswrapper[4933]: I0122 08:17:17.491152 4933 scope.go:117] "RemoveContainer" containerID="6fae462cde8930b9f721583df9f743ddcf60826d446fdfdfcfc9fe9adf4d01a4" Jan 22 08:17:17 crc kubenswrapper[4933]: E0122 08:17:17.491982 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:17:30 crc kubenswrapper[4933]: I0122 08:17:30.490968 4933 scope.go:117] "RemoveContainer" containerID="6fae462cde8930b9f721583df9f743ddcf60826d446fdfdfcfc9fe9adf4d01a4" Jan 22 08:17:30 crc kubenswrapper[4933]: E0122 08:17:30.491886 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:17:45 crc kubenswrapper[4933]: I0122 08:17:45.491878 4933 scope.go:117] "RemoveContainer" containerID="6fae462cde8930b9f721583df9f743ddcf60826d446fdfdfcfc9fe9adf4d01a4" Jan 22 08:17:45 crc kubenswrapper[4933]: E0122 08:17:45.492599 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:18:00 crc kubenswrapper[4933]: I0122 08:18:00.491487 4933 scope.go:117] "RemoveContainer" containerID="6fae462cde8930b9f721583df9f743ddcf60826d446fdfdfcfc9fe9adf4d01a4" Jan 22 08:18:00 crc kubenswrapper[4933]: E0122 08:18:00.492312 4933 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:18:15 crc kubenswrapper[4933]: I0122 08:18:15.493010 4933 scope.go:117] "RemoveContainer" containerID="6fae462cde8930b9f721583df9f743ddcf60826d446fdfdfcfc9fe9adf4d01a4" Jan 22 08:18:15 crc kubenswrapper[4933]: E0122 08:18:15.494049 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:18:27 crc kubenswrapper[4933]: I0122 08:18:27.490897 4933 scope.go:117] "RemoveContainer" containerID="6fae462cde8930b9f721583df9f743ddcf60826d446fdfdfcfc9fe9adf4d01a4" Jan 22 08:18:27 crc kubenswrapper[4933]: E0122 08:18:27.492309 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:18:38 crc kubenswrapper[4933]: I0122 08:18:38.502066 4933 scope.go:117] "RemoveContainer" containerID="6fae462cde8930b9f721583df9f743ddcf60826d446fdfdfcfc9fe9adf4d01a4" Jan 22 08:18:38 crc kubenswrapper[4933]: E0122 08:18:38.502989 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:18:50 crc kubenswrapper[4933]: I0122 08:18:50.491904 4933 scope.go:117] "RemoveContainer" containerID="6fae462cde8930b9f721583df9f743ddcf60826d446fdfdfcfc9fe9adf4d01a4" Jan 22 08:18:50 crc kubenswrapper[4933]: E0122 08:18:50.494245 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:19:02 crc kubenswrapper[4933]: I0122 08:19:02.504967 4933 scope.go:117] "RemoveContainer" containerID="6fae462cde8930b9f721583df9f743ddcf60826d446fdfdfcfc9fe9adf4d01a4" Jan 22 08:19:02 crc kubenswrapper[4933]: E0122 08:19:02.506393 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:19:15 crc kubenswrapper[4933]: I0122 08:19:15.491447 4933 scope.go:117] "RemoveContainer" containerID="6fae462cde8930b9f721583df9f743ddcf60826d446fdfdfcfc9fe9adf4d01a4" Jan 22 08:19:16 crc kubenswrapper[4933]: I0122 08:19:16.243509 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerStarted","Data":"c98d04fa56075612dc036d5b10973aa3b26ebc3beeae287b19656f51b500424d"} Jan 22 08:20:38 crc kubenswrapper[4933]: I0122 08:20:38.449132 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-665p4"] Jan 22 08:20:38 crc kubenswrapper[4933]: E0122 08:20:38.450486 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="704aef51-412c-4693-bf6d-9d90d8fa42e7" containerName="collect-profiles" Jan 22 08:20:38 crc kubenswrapper[4933]: I0122 08:20:38.450506 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="704aef51-412c-4693-bf6d-9d90d8fa42e7" containerName="collect-profiles" Jan 22 08:20:38 crc kubenswrapper[4933]: I0122 08:20:38.450743 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="704aef51-412c-4693-bf6d-9d90d8fa42e7" containerName="collect-profiles" Jan 22 08:20:38 crc kubenswrapper[4933]: I0122 08:20:38.452635 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-665p4" Jan 22 08:20:38 crc kubenswrapper[4933]: I0122 08:20:38.475891 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-665p4"] Jan 22 08:20:38 crc kubenswrapper[4933]: I0122 08:20:38.498344 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2ebff0cb-46ab-44c2-adb2-b98885b9cd56-catalog-content\") pod \"certified-operators-665p4\" (UID: \"2ebff0cb-46ab-44c2-adb2-b98885b9cd56\") " pod="openshift-marketplace/certified-operators-665p4" Jan 22 08:20:38 crc kubenswrapper[4933]: I0122 08:20:38.498421 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-djs5s\" (UniqueName: \"kubernetes.io/projected/2ebff0cb-46ab-44c2-adb2-b98885b9cd56-kube-api-access-djs5s\") pod \"certified-operators-665p4\" (UID: \"2ebff0cb-46ab-44c2-adb2-b98885b9cd56\") " pod="openshift-marketplace/certified-operators-665p4" Jan 22 08:20:38 crc kubenswrapper[4933]: I0122 08:20:38.498481 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2ebff0cb-46ab-44c2-adb2-b98885b9cd56-utilities\") pod \"certified-operators-665p4\" (UID: \"2ebff0cb-46ab-44c2-adb2-b98885b9cd56\") " pod="openshift-marketplace/certified-operators-665p4" Jan 22 08:20:38 crc kubenswrapper[4933]: I0122 08:20:38.600531 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2ebff0cb-46ab-44c2-adb2-b98885b9cd56-catalog-content\") pod \"certified-operators-665p4\" (UID: \"2ebff0cb-46ab-44c2-adb2-b98885b9cd56\") " pod="openshift-marketplace/certified-operators-665p4" 
Jan 22 08:20:38 crc kubenswrapper[4933]: I0122 08:20:38.600647 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-djs5s\" (UniqueName: \"kubernetes.io/projected/2ebff0cb-46ab-44c2-adb2-b98885b9cd56-kube-api-access-djs5s\") pod \"certified-operators-665p4\" (UID: \"2ebff0cb-46ab-44c2-adb2-b98885b9cd56\") " pod="openshift-marketplace/certified-operators-665p4" Jan 22 08:20:38 crc kubenswrapper[4933]: I0122 08:20:38.601041 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2ebff0cb-46ab-44c2-adb2-b98885b9cd56-catalog-content\") pod \"certified-operators-665p4\" (UID: \"2ebff0cb-46ab-44c2-adb2-b98885b9cd56\") " pod="openshift-marketplace/certified-operators-665p4" Jan 22 08:20:38 crc kubenswrapper[4933]: I0122 08:20:38.601406 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2ebff0cb-46ab-44c2-adb2-b98885b9cd56-utilities\") pod \"certified-operators-665p4\" (UID: \"2ebff0cb-46ab-44c2-adb2-b98885b9cd56\") " pod="openshift-marketplace/certified-operators-665p4" Jan 22 08:20:38 crc kubenswrapper[4933]: I0122 08:20:38.601067 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2ebff0cb-46ab-44c2-adb2-b98885b9cd56-utilities\") pod \"certified-operators-665p4\" (UID: \"2ebff0cb-46ab-44c2-adb2-b98885b9cd56\") " pod="openshift-marketplace/certified-operators-665p4" Jan 22 08:20:38 crc kubenswrapper[4933]: I0122 08:20:38.619511 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-djs5s\" (UniqueName: \"kubernetes.io/projected/2ebff0cb-46ab-44c2-adb2-b98885b9cd56-kube-api-access-djs5s\") pod \"certified-operators-665p4\" (UID: \"2ebff0cb-46ab-44c2-adb2-b98885b9cd56\") " pod="openshift-marketplace/certified-operators-665p4" Jan 22 08:20:38 crc kubenswrapper[4933]: I0122 08:20:38.789392 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-665p4" Jan 22 08:20:39 crc kubenswrapper[4933]: I0122 08:20:39.339874 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-665p4"] Jan 22 08:20:40 crc kubenswrapper[4933]: I0122 08:20:40.226922 4933 generic.go:334] "Generic (PLEG): container finished" podID="2ebff0cb-46ab-44c2-adb2-b98885b9cd56" containerID="92d7393c97781ce6ca69c8656c298521c840cd14b1eea74e705b81941f00ec12" exitCode=0 Jan 22 08:20:40 crc kubenswrapper[4933]: I0122 08:20:40.227041 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-665p4" event={"ID":"2ebff0cb-46ab-44c2-adb2-b98885b9cd56","Type":"ContainerDied","Data":"92d7393c97781ce6ca69c8656c298521c840cd14b1eea74e705b81941f00ec12"} Jan 22 08:20:40 crc kubenswrapper[4933]: I0122 08:20:40.227543 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-665p4" event={"ID":"2ebff0cb-46ab-44c2-adb2-b98885b9cd56","Type":"ContainerStarted","Data":"147945d03af32013d80e46a8f0fac42051f14289611cce621b15e8effc052669"} Jan 22 08:20:40 crc kubenswrapper[4933]: I0122 08:20:40.231734 4933 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 08:20:41 crc kubenswrapper[4933]: I0122 08:20:41.241997 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-665p4" event={"ID":"2ebff0cb-46ab-44c2-adb2-b98885b9cd56","Type":"ContainerStarted","Data":"d3cb365853c671ca8ebc910138f203464f9eaad28792ae9a097bac7b1eaf51c4"} Jan 22 08:20:42 crc kubenswrapper[4933]: I0122 08:20:42.256217 4933 generic.go:334] "Generic (PLEG): container finished" podID="2ebff0cb-46ab-44c2-adb2-b98885b9cd56" containerID="d3cb365853c671ca8ebc910138f203464f9eaad28792ae9a097bac7b1eaf51c4" exitCode=0 Jan 22 08:20:42 crc kubenswrapper[4933]: I0122 08:20:42.256346 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-665p4" event={"ID":"2ebff0cb-46ab-44c2-adb2-b98885b9cd56","Type":"ContainerDied","Data":"d3cb365853c671ca8ebc910138f203464f9eaad28792ae9a097bac7b1eaf51c4"} Jan 22 08:20:43 crc kubenswrapper[4933]: I0122 08:20:43.274319 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-665p4" event={"ID":"2ebff0cb-46ab-44c2-adb2-b98885b9cd56","Type":"ContainerStarted","Data":"5bde1179df3467ba25c9a335ce0f8129359964ee091d99f3cd4e22580097df24"} Jan 22 08:20:43 crc kubenswrapper[4933]: I0122 08:20:43.310320 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-665p4" podStartSLOduration=2.8415005349999998 podStartE2EDuration="5.310287791s" podCreationTimestamp="2026-01-22 08:20:38 +0000 UTC" firstStartedPulling="2026-01-22 08:20:40.231360921 +0000 UTC m=+9288.068486314" lastFinishedPulling="2026-01-22 08:20:42.700148217 +0000 UTC m=+9290.537273570" observedRunningTime="2026-01-22 08:20:43.296879425 +0000 UTC m=+9291.134004818" watchObservedRunningTime="2026-01-22 08:20:43.310287791 +0000 UTC m=+9291.147413144" Jan 22 08:20:48 crc kubenswrapper[4933]: I0122 08:20:48.790226 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-665p4" Jan 22 08:20:48 crc kubenswrapper[4933]: I0122 08:20:48.790831 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/certified-operators-665p4" Jan 22 08:20:48 crc kubenswrapper[4933]: I0122 08:20:48.861787 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-665p4" Jan 22 08:20:49 crc kubenswrapper[4933]: I0122 08:20:49.423782 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-665p4" Jan 22 08:20:49 crc kubenswrapper[4933]: I0122 08:20:49.488664 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-665p4"] Jan 22 08:20:51 crc kubenswrapper[4933]: I0122 08:20:51.357525 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-665p4" podUID="2ebff0cb-46ab-44c2-adb2-b98885b9cd56" containerName="registry-server" containerID="cri-o://5bde1179df3467ba25c9a335ce0f8129359964ee091d99f3cd4e22580097df24" gracePeriod=2 Jan 22 08:20:52 crc kubenswrapper[4933]: I0122 08:20:52.330323 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-665p4" Jan 22 08:20:52 crc kubenswrapper[4933]: I0122 08:20:52.369580 4933 generic.go:334] "Generic (PLEG): container finished" podID="2ebff0cb-46ab-44c2-adb2-b98885b9cd56" containerID="5bde1179df3467ba25c9a335ce0f8129359964ee091d99f3cd4e22580097df24" exitCode=0 Jan 22 08:20:52 crc kubenswrapper[4933]: I0122 08:20:52.369623 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-665p4" event={"ID":"2ebff0cb-46ab-44c2-adb2-b98885b9cd56","Type":"ContainerDied","Data":"5bde1179df3467ba25c9a335ce0f8129359964ee091d99f3cd4e22580097df24"} Jan 22 08:20:52 crc kubenswrapper[4933]: I0122 08:20:52.369649 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-665p4" event={"ID":"2ebff0cb-46ab-44c2-adb2-b98885b9cd56","Type":"ContainerDied","Data":"147945d03af32013d80e46a8f0fac42051f14289611cce621b15e8effc052669"} Jan 22 08:20:52 crc kubenswrapper[4933]: I0122 08:20:52.369667 4933 scope.go:117] "RemoveContainer" containerID="5bde1179df3467ba25c9a335ce0f8129359964ee091d99f3cd4e22580097df24" Jan 22 08:20:52 crc kubenswrapper[4933]: I0122 08:20:52.369685 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-665p4" Jan 22 08:20:52 crc kubenswrapper[4933]: I0122 08:20:52.400929 4933 scope.go:117] "RemoveContainer" containerID="d3cb365853c671ca8ebc910138f203464f9eaad28792ae9a097bac7b1eaf51c4" Jan 22 08:20:52 crc kubenswrapper[4933]: I0122 08:20:52.424227 4933 scope.go:117] "RemoveContainer" containerID="92d7393c97781ce6ca69c8656c298521c840cd14b1eea74e705b81941f00ec12" Jan 22 08:20:52 crc kubenswrapper[4933]: I0122 08:20:52.470453 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2ebff0cb-46ab-44c2-adb2-b98885b9cd56-catalog-content\") pod \"2ebff0cb-46ab-44c2-adb2-b98885b9cd56\" (UID: \"2ebff0cb-46ab-44c2-adb2-b98885b9cd56\") " Jan 22 08:20:52 crc kubenswrapper[4933]: I0122 08:20:52.470590 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2ebff0cb-46ab-44c2-adb2-b98885b9cd56-utilities\") pod \"2ebff0cb-46ab-44c2-adb2-b98885b9cd56\" (UID: \"2ebff0cb-46ab-44c2-adb2-b98885b9cd56\") " Jan 22 08:20:52 crc kubenswrapper[4933]: I0122 08:20:52.470817 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-djs5s\" (UniqueName: \"kubernetes.io/projected/2ebff0cb-46ab-44c2-adb2-b98885b9cd56-kube-api-access-djs5s\") pod \"2ebff0cb-46ab-44c2-adb2-b98885b9cd56\" (UID: \"2ebff0cb-46ab-44c2-adb2-b98885b9cd56\") " Jan 22 08:20:52 crc kubenswrapper[4933]: I0122 08:20:52.471106 4933 scope.go:117] "RemoveContainer" containerID="5bde1179df3467ba25c9a335ce0f8129359964ee091d99f3cd4e22580097df24" Jan 22 08:20:52 crc kubenswrapper[4933]: I0122 08:20:52.471505 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2ebff0cb-46ab-44c2-adb2-b98885b9cd56-utilities" (OuterVolumeSpecName: "utilities") pod "2ebff0cb-46ab-44c2-adb2-b98885b9cd56" (UID: "2ebff0cb-46ab-44c2-adb2-b98885b9cd56"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 08:20:52 crc kubenswrapper[4933]: E0122 08:20:52.471601 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5bde1179df3467ba25c9a335ce0f8129359964ee091d99f3cd4e22580097df24\": container with ID starting with 5bde1179df3467ba25c9a335ce0f8129359964ee091d99f3cd4e22580097df24 not found: ID does not exist" containerID="5bde1179df3467ba25c9a335ce0f8129359964ee091d99f3cd4e22580097df24" Jan 22 08:20:52 crc kubenswrapper[4933]: I0122 08:20:52.471637 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5bde1179df3467ba25c9a335ce0f8129359964ee091d99f3cd4e22580097df24"} err="failed to get container status \"5bde1179df3467ba25c9a335ce0f8129359964ee091d99f3cd4e22580097df24\": rpc error: code = NotFound desc = could not find container \"5bde1179df3467ba25c9a335ce0f8129359964ee091d99f3cd4e22580097df24\": container with ID starting with 5bde1179df3467ba25c9a335ce0f8129359964ee091d99f3cd4e22580097df24 not found: ID does not exist" Jan 22 08:20:52 crc kubenswrapper[4933]: I0122 08:20:52.471663 4933 scope.go:117] "RemoveContainer" containerID="d3cb365853c671ca8ebc910138f203464f9eaad28792ae9a097bac7b1eaf51c4" Jan 22 08:20:52 crc kubenswrapper[4933]: E0122 08:20:52.471931 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d3cb365853c671ca8ebc910138f203464f9eaad28792ae9a097bac7b1eaf51c4\": container with ID starting with d3cb365853c671ca8ebc910138f203464f9eaad28792ae9a097bac7b1eaf51c4 not found: ID does not exist" containerID="d3cb365853c671ca8ebc910138f203464f9eaad28792ae9a097bac7b1eaf51c4" Jan 22 08:20:52 crc kubenswrapper[4933]: I0122 08:20:52.471952 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d3cb365853c671ca8ebc910138f203464f9eaad28792ae9a097bac7b1eaf51c4"} err="failed to get container status \"d3cb365853c671ca8ebc910138f203464f9eaad28792ae9a097bac7b1eaf51c4\": rpc error: code = NotFound desc = could not find container \"d3cb365853c671ca8ebc910138f203464f9eaad28792ae9a097bac7b1eaf51c4\": container with ID starting with d3cb365853c671ca8ebc910138f203464f9eaad28792ae9a097bac7b1eaf51c4 not found: ID does not exist" Jan 22 08:20:52 crc kubenswrapper[4933]: I0122 08:20:52.471965 4933 scope.go:117] "RemoveContainer" containerID="92d7393c97781ce6ca69c8656c298521c840cd14b1eea74e705b81941f00ec12" Jan 22 08:20:52 crc kubenswrapper[4933]: E0122 08:20:52.472202 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"92d7393c97781ce6ca69c8656c298521c840cd14b1eea74e705b81941f00ec12\": container with ID starting with 92d7393c97781ce6ca69c8656c298521c840cd14b1eea74e705b81941f00ec12 not found: ID does not exist" containerID="92d7393c97781ce6ca69c8656c298521c840cd14b1eea74e705b81941f00ec12" Jan 22 08:20:52 crc kubenswrapper[4933]: I0122 08:20:52.472221 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"92d7393c97781ce6ca69c8656c298521c840cd14b1eea74e705b81941f00ec12"} err="failed to get container status \"92d7393c97781ce6ca69c8656c298521c840cd14b1eea74e705b81941f00ec12\": rpc error: code = NotFound desc = could not find container \"92d7393c97781ce6ca69c8656c298521c840cd14b1eea74e705b81941f00ec12\": container with ID starting with 
92d7393c97781ce6ca69c8656c298521c840cd14b1eea74e705b81941f00ec12 not found: ID does not exist" Jan 22 08:20:52 crc kubenswrapper[4933]: I0122 08:20:52.479382 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ebff0cb-46ab-44c2-adb2-b98885b9cd56-kube-api-access-djs5s" (OuterVolumeSpecName: "kube-api-access-djs5s") pod "2ebff0cb-46ab-44c2-adb2-b98885b9cd56" (UID: "2ebff0cb-46ab-44c2-adb2-b98885b9cd56"). InnerVolumeSpecName "kube-api-access-djs5s". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 08:20:52 crc kubenswrapper[4933]: I0122 08:20:52.529360 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2ebff0cb-46ab-44c2-adb2-b98885b9cd56-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2ebff0cb-46ab-44c2-adb2-b98885b9cd56" (UID: "2ebff0cb-46ab-44c2-adb2-b98885b9cd56"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 08:20:52 crc kubenswrapper[4933]: I0122 08:20:52.573374 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-djs5s\" (UniqueName: \"kubernetes.io/projected/2ebff0cb-46ab-44c2-adb2-b98885b9cd56-kube-api-access-djs5s\") on node \"crc\" DevicePath \"\"" Jan 22 08:20:52 crc kubenswrapper[4933]: I0122 08:20:52.573409 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2ebff0cb-46ab-44c2-adb2-b98885b9cd56-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 08:20:52 crc kubenswrapper[4933]: I0122 08:20:52.573420 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2ebff0cb-46ab-44c2-adb2-b98885b9cd56-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 08:20:52 crc kubenswrapper[4933]: I0122 08:20:52.708122 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-665p4"] Jan 22 08:20:52 crc kubenswrapper[4933]: I0122 08:20:52.723765 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-665p4"] Jan 22 08:20:52 crc kubenswrapper[4933]: E0122 08:20:52.819015 4933 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2ebff0cb_46ab_44c2_adb2_b98885b9cd56.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2ebff0cb_46ab_44c2_adb2_b98885b9cd56.slice/crio-147945d03af32013d80e46a8f0fac42051f14289611cce621b15e8effc052669\": RecentStats: unable to find data in memory cache]" Jan 22 08:20:54 crc kubenswrapper[4933]: I0122 08:20:54.507098 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2ebff0cb-46ab-44c2-adb2-b98885b9cd56" path="/var/lib/kubelet/pods/2ebff0cb-46ab-44c2-adb2-b98885b9cd56/volumes" Jan 22 08:21:09 crc kubenswrapper[4933]: I0122 08:21:09.791001 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-jr2vq"] Jan 22 08:21:09 crc kubenswrapper[4933]: E0122 08:21:09.794542 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ebff0cb-46ab-44c2-adb2-b98885b9cd56" containerName="extract-utilities" Jan 22 08:21:09 crc kubenswrapper[4933]: I0122 08:21:09.794577 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ebff0cb-46ab-44c2-adb2-b98885b9cd56" containerName="extract-utilities" Jan 
22 08:21:09 crc kubenswrapper[4933]: E0122 08:21:09.794595 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ebff0cb-46ab-44c2-adb2-b98885b9cd56" containerName="registry-server" Jan 22 08:21:09 crc kubenswrapper[4933]: I0122 08:21:09.794614 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ebff0cb-46ab-44c2-adb2-b98885b9cd56" containerName="registry-server" Jan 22 08:21:09 crc kubenswrapper[4933]: E0122 08:21:09.794665 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ebff0cb-46ab-44c2-adb2-b98885b9cd56" containerName="extract-content" Jan 22 08:21:09 crc kubenswrapper[4933]: I0122 08:21:09.794675 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ebff0cb-46ab-44c2-adb2-b98885b9cd56" containerName="extract-content" Jan 22 08:21:09 crc kubenswrapper[4933]: I0122 08:21:09.794935 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ebff0cb-46ab-44c2-adb2-b98885b9cd56" containerName="registry-server" Jan 22 08:21:09 crc kubenswrapper[4933]: I0122 08:21:09.798123 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jr2vq" Jan 22 08:21:09 crc kubenswrapper[4933]: I0122 08:21:09.818341 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jr2vq"] Jan 22 08:21:09 crc kubenswrapper[4933]: I0122 08:21:09.909330 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjht8\" (UniqueName: \"kubernetes.io/projected/ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1-kube-api-access-zjht8\") pod \"redhat-marketplace-jr2vq\" (UID: \"ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1\") " pod="openshift-marketplace/redhat-marketplace-jr2vq" Jan 22 08:21:09 crc kubenswrapper[4933]: I0122 08:21:09.909686 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1-catalog-content\") pod \"redhat-marketplace-jr2vq\" (UID: \"ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1\") " pod="openshift-marketplace/redhat-marketplace-jr2vq" Jan 22 08:21:09 crc kubenswrapper[4933]: I0122 08:21:09.909878 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1-utilities\") pod \"redhat-marketplace-jr2vq\" (UID: \"ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1\") " pod="openshift-marketplace/redhat-marketplace-jr2vq" Jan 22 08:21:10 crc kubenswrapper[4933]: I0122 08:21:10.011422 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1-utilities\") pod \"redhat-marketplace-jr2vq\" (UID: \"ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1\") " pod="openshift-marketplace/redhat-marketplace-jr2vq" Jan 22 08:21:10 crc kubenswrapper[4933]: I0122 08:21:10.011561 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjht8\" (UniqueName: \"kubernetes.io/projected/ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1-kube-api-access-zjht8\") pod \"redhat-marketplace-jr2vq\" (UID: \"ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1\") " pod="openshift-marketplace/redhat-marketplace-jr2vq" Jan 22 08:21:10 crc kubenswrapper[4933]: I0122 08:21:10.011617 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" 
(UniqueName: \"kubernetes.io/empty-dir/ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1-catalog-content\") pod \"redhat-marketplace-jr2vq\" (UID: \"ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1\") " pod="openshift-marketplace/redhat-marketplace-jr2vq" Jan 22 08:21:10 crc kubenswrapper[4933]: I0122 08:21:10.011997 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1-catalog-content\") pod \"redhat-marketplace-jr2vq\" (UID: \"ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1\") " pod="openshift-marketplace/redhat-marketplace-jr2vq" Jan 22 08:21:10 crc kubenswrapper[4933]: I0122 08:21:10.012020 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1-utilities\") pod \"redhat-marketplace-jr2vq\" (UID: \"ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1\") " pod="openshift-marketplace/redhat-marketplace-jr2vq" Jan 22 08:21:10 crc kubenswrapper[4933]: I0122 08:21:10.034456 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjht8\" (UniqueName: \"kubernetes.io/projected/ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1-kube-api-access-zjht8\") pod \"redhat-marketplace-jr2vq\" (UID: \"ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1\") " pod="openshift-marketplace/redhat-marketplace-jr2vq" Jan 22 08:21:10 crc kubenswrapper[4933]: I0122 08:21:10.142181 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jr2vq" Jan 22 08:21:10 crc kubenswrapper[4933]: I0122 08:21:10.701833 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jr2vq"] Jan 22 08:21:10 crc kubenswrapper[4933]: W0122 08:21:10.710942 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podad0eab00_0a84_4fa1_a72e_aa3ef67d64f1.slice/crio-70b9cab4289bf711bfd9d6e63c4193d5f9c75975bbc08a95dd46868898fd6209 WatchSource:0}: Error finding container 70b9cab4289bf711bfd9d6e63c4193d5f9c75975bbc08a95dd46868898fd6209: Status 404 returned error can't find the container with id 70b9cab4289bf711bfd9d6e63c4193d5f9c75975bbc08a95dd46868898fd6209 Jan 22 08:21:11 crc kubenswrapper[4933]: I0122 08:21:11.634065 4933 generic.go:334] "Generic (PLEG): container finished" podID="ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1" containerID="7d2afd1a503ed191589929cea40723b0cf297c5f8cafb8c70864c5f7e69bdaa1" exitCode=0 Jan 22 08:21:11 crc kubenswrapper[4933]: I0122 08:21:11.634449 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jr2vq" event={"ID":"ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1","Type":"ContainerDied","Data":"7d2afd1a503ed191589929cea40723b0cf297c5f8cafb8c70864c5f7e69bdaa1"} Jan 22 08:21:11 crc kubenswrapper[4933]: I0122 08:21:11.634488 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jr2vq" event={"ID":"ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1","Type":"ContainerStarted","Data":"70b9cab4289bf711bfd9d6e63c4193d5f9c75975bbc08a95dd46868898fd6209"} Jan 22 08:21:13 crc kubenswrapper[4933]: I0122 08:21:13.681450 4933 generic.go:334] "Generic (PLEG): container finished" podID="ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1" containerID="e09687cc22359038409d7bd27ce930a095bc3fdf00226789e4ee55c18370b0df" exitCode=0 Jan 22 08:21:13 crc kubenswrapper[4933]: I0122 08:21:13.681551 4933 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-marketplace/redhat-marketplace-jr2vq" event={"ID":"ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1","Type":"ContainerDied","Data":"e09687cc22359038409d7bd27ce930a095bc3fdf00226789e4ee55c18370b0df"} Jan 22 08:21:14 crc kubenswrapper[4933]: I0122 08:21:14.694136 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jr2vq" event={"ID":"ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1","Type":"ContainerStarted","Data":"2eee2d3078a9e418a13d5cfd99a019a0b48425640c5ee5cda6967b336e03f6ba"} Jan 22 08:21:14 crc kubenswrapper[4933]: I0122 08:21:14.723736 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-jr2vq" podStartSLOduration=3.25700182 podStartE2EDuration="5.723715225s" podCreationTimestamp="2026-01-22 08:21:09 +0000 UTC" firstStartedPulling="2026-01-22 08:21:11.638759509 +0000 UTC m=+9319.475884902" lastFinishedPulling="2026-01-22 08:21:14.105472954 +0000 UTC m=+9321.942598307" observedRunningTime="2026-01-22 08:21:14.715525825 +0000 UTC m=+9322.552651198" watchObservedRunningTime="2026-01-22 08:21:14.723715225 +0000 UTC m=+9322.560840578" Jan 22 08:21:20 crc kubenswrapper[4933]: I0122 08:21:20.142455 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-jr2vq" Jan 22 08:21:20 crc kubenswrapper[4933]: I0122 08:21:20.143201 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-jr2vq" Jan 22 08:21:20 crc kubenswrapper[4933]: I0122 08:21:20.220222 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-jr2vq" Jan 22 08:21:20 crc kubenswrapper[4933]: I0122 08:21:20.809825 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-jr2vq" Jan 22 08:21:20 crc kubenswrapper[4933]: I0122 08:21:20.864371 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jr2vq"] Jan 22 08:21:22 crc kubenswrapper[4933]: I0122 08:21:22.781665 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-jr2vq" podUID="ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1" containerName="registry-server" containerID="cri-o://2eee2d3078a9e418a13d5cfd99a019a0b48425640c5ee5cda6967b336e03f6ba" gracePeriod=2 Jan 22 08:21:23 crc kubenswrapper[4933]: I0122 08:21:23.281039 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jr2vq" Jan 22 08:21:23 crc kubenswrapper[4933]: I0122 08:21:23.467826 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1-catalog-content\") pod \"ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1\" (UID: \"ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1\") " Jan 22 08:21:23 crc kubenswrapper[4933]: I0122 08:21:23.467883 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zjht8\" (UniqueName: \"kubernetes.io/projected/ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1-kube-api-access-zjht8\") pod \"ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1\" (UID: \"ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1\") " Jan 22 08:21:23 crc kubenswrapper[4933]: I0122 08:21:23.468278 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1-utilities\") pod \"ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1\" (UID: \"ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1\") " Jan 22 08:21:23 crc kubenswrapper[4933]: I0122 08:21:23.470069 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1-utilities" (OuterVolumeSpecName: "utilities") pod "ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1" (UID: "ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 08:21:23 crc kubenswrapper[4933]: I0122 08:21:23.477527 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1-kube-api-access-zjht8" (OuterVolumeSpecName: "kube-api-access-zjht8") pod "ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1" (UID: "ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1"). InnerVolumeSpecName "kube-api-access-zjht8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 08:21:23 crc kubenswrapper[4933]: I0122 08:21:23.492758 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1" (UID: "ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 08:21:23 crc kubenswrapper[4933]: I0122 08:21:23.570893 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 08:21:23 crc kubenswrapper[4933]: I0122 08:21:23.570960 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zjht8\" (UniqueName: \"kubernetes.io/projected/ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1-kube-api-access-zjht8\") on node \"crc\" DevicePath \"\"" Jan 22 08:21:23 crc kubenswrapper[4933]: I0122 08:21:23.570972 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 08:21:23 crc kubenswrapper[4933]: I0122 08:21:23.815519 4933 generic.go:334] "Generic (PLEG): container finished" podID="ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1" containerID="2eee2d3078a9e418a13d5cfd99a019a0b48425640c5ee5cda6967b336e03f6ba" exitCode=0 Jan 22 08:21:23 crc kubenswrapper[4933]: I0122 08:21:23.815595 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jr2vq" Jan 22 08:21:23 crc kubenswrapper[4933]: I0122 08:21:23.815576 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jr2vq" event={"ID":"ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1","Type":"ContainerDied","Data":"2eee2d3078a9e418a13d5cfd99a019a0b48425640c5ee5cda6967b336e03f6ba"} Jan 22 08:21:23 crc kubenswrapper[4933]: I0122 08:21:23.815643 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jr2vq" event={"ID":"ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1","Type":"ContainerDied","Data":"70b9cab4289bf711bfd9d6e63c4193d5f9c75975bbc08a95dd46868898fd6209"} Jan 22 08:21:23 crc kubenswrapper[4933]: I0122 08:21:23.815663 4933 scope.go:117] "RemoveContainer" containerID="2eee2d3078a9e418a13d5cfd99a019a0b48425640c5ee5cda6967b336e03f6ba" Jan 22 08:21:23 crc kubenswrapper[4933]: I0122 08:21:23.867342 4933 scope.go:117] "RemoveContainer" containerID="e09687cc22359038409d7bd27ce930a095bc3fdf00226789e4ee55c18370b0df" Jan 22 08:21:23 crc kubenswrapper[4933]: I0122 08:21:23.868945 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jr2vq"] Jan 22 08:21:23 crc kubenswrapper[4933]: I0122 08:21:23.899796 4933 scope.go:117] "RemoveContainer" containerID="7d2afd1a503ed191589929cea40723b0cf297c5f8cafb8c70864c5f7e69bdaa1" Jan 22 08:21:23 crc kubenswrapper[4933]: I0122 08:21:23.902857 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-jr2vq"] Jan 22 08:21:23 crc kubenswrapper[4933]: I0122 08:21:23.952875 4933 scope.go:117] "RemoveContainer" containerID="2eee2d3078a9e418a13d5cfd99a019a0b48425640c5ee5cda6967b336e03f6ba" Jan 22 08:21:23 crc kubenswrapper[4933]: E0122 08:21:23.953689 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2eee2d3078a9e418a13d5cfd99a019a0b48425640c5ee5cda6967b336e03f6ba\": container with ID starting with 2eee2d3078a9e418a13d5cfd99a019a0b48425640c5ee5cda6967b336e03f6ba not found: ID does not exist" containerID="2eee2d3078a9e418a13d5cfd99a019a0b48425640c5ee5cda6967b336e03f6ba" Jan 22 08:21:23 crc kubenswrapper[4933]: I0122 08:21:23.953727 4933 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2eee2d3078a9e418a13d5cfd99a019a0b48425640c5ee5cda6967b336e03f6ba"} err="failed to get container status \"2eee2d3078a9e418a13d5cfd99a019a0b48425640c5ee5cda6967b336e03f6ba\": rpc error: code = NotFound desc = could not find container \"2eee2d3078a9e418a13d5cfd99a019a0b48425640c5ee5cda6967b336e03f6ba\": container with ID starting with 2eee2d3078a9e418a13d5cfd99a019a0b48425640c5ee5cda6967b336e03f6ba not found: ID does not exist" Jan 22 08:21:23 crc kubenswrapper[4933]: I0122 08:21:23.953749 4933 scope.go:117] "RemoveContainer" containerID="e09687cc22359038409d7bd27ce930a095bc3fdf00226789e4ee55c18370b0df" Jan 22 08:21:23 crc kubenswrapper[4933]: E0122 08:21:23.954982 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e09687cc22359038409d7bd27ce930a095bc3fdf00226789e4ee55c18370b0df\": container with ID starting with e09687cc22359038409d7bd27ce930a095bc3fdf00226789e4ee55c18370b0df not found: ID does not exist" containerID="e09687cc22359038409d7bd27ce930a095bc3fdf00226789e4ee55c18370b0df" Jan 22 08:21:23 crc kubenswrapper[4933]: I0122 08:21:23.955014 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e09687cc22359038409d7bd27ce930a095bc3fdf00226789e4ee55c18370b0df"} err="failed to get container status \"e09687cc22359038409d7bd27ce930a095bc3fdf00226789e4ee55c18370b0df\": rpc error: code = NotFound desc = could not find container \"e09687cc22359038409d7bd27ce930a095bc3fdf00226789e4ee55c18370b0df\": container with ID starting with e09687cc22359038409d7bd27ce930a095bc3fdf00226789e4ee55c18370b0df not found: ID does not exist" Jan 22 08:21:23 crc kubenswrapper[4933]: I0122 08:21:23.955036 4933 scope.go:117] "RemoveContainer" containerID="7d2afd1a503ed191589929cea40723b0cf297c5f8cafb8c70864c5f7e69bdaa1" Jan 22 08:21:23 crc kubenswrapper[4933]: E0122 08:21:23.956115 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7d2afd1a503ed191589929cea40723b0cf297c5f8cafb8c70864c5f7e69bdaa1\": container with ID starting with 7d2afd1a503ed191589929cea40723b0cf297c5f8cafb8c70864c5f7e69bdaa1 not found: ID does not exist" containerID="7d2afd1a503ed191589929cea40723b0cf297c5f8cafb8c70864c5f7e69bdaa1" Jan 22 08:21:23 crc kubenswrapper[4933]: I0122 08:21:23.956155 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d2afd1a503ed191589929cea40723b0cf297c5f8cafb8c70864c5f7e69bdaa1"} err="failed to get container status \"7d2afd1a503ed191589929cea40723b0cf297c5f8cafb8c70864c5f7e69bdaa1\": rpc error: code = NotFound desc = could not find container \"7d2afd1a503ed191589929cea40723b0cf297c5f8cafb8c70864c5f7e69bdaa1\": container with ID starting with 7d2afd1a503ed191589929cea40723b0cf297c5f8cafb8c70864c5f7e69bdaa1 not found: ID does not exist" Jan 22 08:21:24 crc kubenswrapper[4933]: I0122 08:21:24.516174 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1" path="/var/lib/kubelet/pods/ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1/volumes" Jan 22 08:21:40 crc kubenswrapper[4933]: I0122 08:21:40.943486 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 08:21:40 crc kubenswrapper[4933]: I0122 08:21:40.943904 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 08:22:10 crc kubenswrapper[4933]: I0122 08:22:10.943627 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 08:22:10 crc kubenswrapper[4933]: I0122 08:22:10.944177 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 08:22:31 crc kubenswrapper[4933]: I0122 08:22:31.634819 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-hrfjq"] Jan 22 08:22:31 crc kubenswrapper[4933]: E0122 08:22:31.636143 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1" containerName="extract-utilities" Jan 22 08:22:31 crc kubenswrapper[4933]: I0122 08:22:31.636164 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1" containerName="extract-utilities" Jan 22 08:22:31 crc kubenswrapper[4933]: E0122 08:22:31.636180 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1" containerName="extract-content" Jan 22 08:22:31 crc kubenswrapper[4933]: I0122 08:22:31.636189 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1" containerName="extract-content" Jan 22 08:22:31 crc kubenswrapper[4933]: E0122 08:22:31.636224 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1" containerName="registry-server" Jan 22 08:22:31 crc kubenswrapper[4933]: I0122 08:22:31.636233 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1" containerName="registry-server" Jan 22 08:22:31 crc kubenswrapper[4933]: I0122 08:22:31.636528 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad0eab00-0a84-4fa1-a72e-aa3ef67d64f1" containerName="registry-server" Jan 22 08:22:31 crc kubenswrapper[4933]: I0122 08:22:31.638697 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-hrfjq" Jan 22 08:22:31 crc kubenswrapper[4933]: I0122 08:22:31.649213 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hrfjq"] Jan 22 08:22:31 crc kubenswrapper[4933]: I0122 08:22:31.679593 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/864ad04f-b829-48ac-adc5-4a165e5de824-utilities\") pod \"redhat-operators-hrfjq\" (UID: \"864ad04f-b829-48ac-adc5-4a165e5de824\") " pod="openshift-marketplace/redhat-operators-hrfjq" Jan 22 08:22:31 crc kubenswrapper[4933]: I0122 08:22:31.679654 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/864ad04f-b829-48ac-adc5-4a165e5de824-catalog-content\") pod \"redhat-operators-hrfjq\" (UID: \"864ad04f-b829-48ac-adc5-4a165e5de824\") " pod="openshift-marketplace/redhat-operators-hrfjq" Jan 22 08:22:31 crc kubenswrapper[4933]: I0122 08:22:31.679696 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zqj94\" (UniqueName: \"kubernetes.io/projected/864ad04f-b829-48ac-adc5-4a165e5de824-kube-api-access-zqj94\") pod \"redhat-operators-hrfjq\" (UID: \"864ad04f-b829-48ac-adc5-4a165e5de824\") " pod="openshift-marketplace/redhat-operators-hrfjq" Jan 22 08:22:31 crc kubenswrapper[4933]: I0122 08:22:31.781783 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/864ad04f-b829-48ac-adc5-4a165e5de824-utilities\") pod \"redhat-operators-hrfjq\" (UID: \"864ad04f-b829-48ac-adc5-4a165e5de824\") " pod="openshift-marketplace/redhat-operators-hrfjq" Jan 22 08:22:31 crc kubenswrapper[4933]: I0122 08:22:31.781849 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/864ad04f-b829-48ac-adc5-4a165e5de824-catalog-content\") pod \"redhat-operators-hrfjq\" (UID: \"864ad04f-b829-48ac-adc5-4a165e5de824\") " pod="openshift-marketplace/redhat-operators-hrfjq" Jan 22 08:22:31 crc kubenswrapper[4933]: I0122 08:22:31.781892 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zqj94\" (UniqueName: \"kubernetes.io/projected/864ad04f-b829-48ac-adc5-4a165e5de824-kube-api-access-zqj94\") pod \"redhat-operators-hrfjq\" (UID: \"864ad04f-b829-48ac-adc5-4a165e5de824\") " pod="openshift-marketplace/redhat-operators-hrfjq" Jan 22 08:22:31 crc kubenswrapper[4933]: I0122 08:22:31.782488 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/864ad04f-b829-48ac-adc5-4a165e5de824-utilities\") pod \"redhat-operators-hrfjq\" (UID: \"864ad04f-b829-48ac-adc5-4a165e5de824\") " pod="openshift-marketplace/redhat-operators-hrfjq" Jan 22 08:22:31 crc kubenswrapper[4933]: I0122 08:22:31.782616 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/864ad04f-b829-48ac-adc5-4a165e5de824-catalog-content\") pod \"redhat-operators-hrfjq\" (UID: \"864ad04f-b829-48ac-adc5-4a165e5de824\") " pod="openshift-marketplace/redhat-operators-hrfjq" Jan 22 08:22:31 crc kubenswrapper[4933]: I0122 08:22:31.806955 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
Jan 22 08:22:40 crc kubenswrapper[4933]: I0122 08:22:40.943276 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 08:22:40 crc kubenswrapper[4933]: I0122 08:22:40.943620 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 08:22:40 crc kubenswrapper[4933]: I0122 08:22:40.943670 4933 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx"
Jan 22 08:22:40 crc kubenswrapper[4933]: I0122 08:22:40.944553 4933 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c98d04fa56075612dc036d5b10973aa3b26ebc3beeae287b19656f51b500424d"} pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 22 08:22:40 crc kubenswrapper[4933]: I0122 08:22:40.944616 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" containerID="cri-o://c98d04fa56075612dc036d5b10973aa3b26ebc3beeae287b19656f51b500424d" gracePeriod=600
Jan 22 08:22:42 crc kubenswrapper[4933]: I0122 08:22:42.018037 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-hrfjq"
Jan 22 08:22:42 crc kubenswrapper[4933]: I0122 08:22:42.018412 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-hrfjq"
Jan 22 08:22:42 crc kubenswrapper[4933]: I0122 08:22:42.890399 4933 generic.go:334] "Generic (PLEG): container finished" podID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerID="c98d04fa56075612dc036d5b10973aa3b26ebc3beeae287b19656f51b500424d" exitCode=0
Jan 22 08:22:42 crc kubenswrapper[4933]: I0122 08:22:42.890475 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerDied","Data":"c98d04fa56075612dc036d5b10973aa3b26ebc3beeae287b19656f51b500424d"}
Jan 22 08:22:42 crc kubenswrapper[4933]: I0122 08:22:42.890816 4933 scope.go:117] "RemoveContainer" containerID="6fae462cde8930b9f721583df9f743ddcf60826d446fdfdfcfc9fe9adf4d01a4"
Jan 22 08:22:43 crc kubenswrapper[4933]: I0122 08:22:43.267544 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-hrfjq" podUID="864ad04f-b829-48ac-adc5-4a165e5de824" containerName="registry-server" probeResult="failure" output=<
Jan 22 08:22:43 crc kubenswrapper[4933]: timeout: failed to connect service ":50051" within 1s
Jan 22 08:22:43 crc kubenswrapper[4933]: >
Jan 22 08:22:44 crc kubenswrapper[4933]: I0122 08:22:44.931013 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerStarted","Data":"d024738decad5f259fa929605621f730ec76d348d7c6c49f2ca52c4e6c41f29e"}
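The startup-probe failure for registry-server a few entries up is a connect check against gRPC port 50051 with a 1s budget, and its output message is reproduced verbatim in the log. A hedged sketch of an equivalent check in Go; a bare TCP dial stands in for the real gRPC health RPC, which is a simplification:

// startup_check.go — hedged sketch of the ":50051 within 1s" check.
package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	// Address and 1s budget come from the probe output above.
	conn, err := net.DialTimeout("tcp", "127.0.0.1:50051", 1*time.Second)
	if err != nil {
		fmt.Println(`timeout: failed to connect service ":50051" within 1s`)
		return
	}
	conn.Close()
	fmt.Println("startup probe ok")
}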
Jan 22 08:22:52 crc kubenswrapper[4933]: I0122 08:22:52.092970 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-hrfjq"
Jan 22 08:22:52 crc kubenswrapper[4933]: I0122 08:22:52.163749 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-hrfjq"
Jan 22 08:22:52 crc kubenswrapper[4933]: I0122 08:22:52.333484 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-hrfjq"]
Jan 22 08:22:54 crc kubenswrapper[4933]: I0122 08:22:54.027662 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-hrfjq" podUID="864ad04f-b829-48ac-adc5-4a165e5de824" containerName="registry-server" containerID="cri-o://52b81b131272fb29de624347a49cf204c61732842b9bfb01313f5b487dbe5c05" gracePeriod=2
Jan 22 08:22:54 crc kubenswrapper[4933]: I0122 08:22:54.610901 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hrfjq"
Jan 22 08:22:54 crc kubenswrapper[4933]: I0122 08:22:54.746105 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zqj94\" (UniqueName: \"kubernetes.io/projected/864ad04f-b829-48ac-adc5-4a165e5de824-kube-api-access-zqj94\") pod \"864ad04f-b829-48ac-adc5-4a165e5de824\" (UID: \"864ad04f-b829-48ac-adc5-4a165e5de824\") "
Jan 22 08:22:54 crc kubenswrapper[4933]: I0122 08:22:54.746347 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/864ad04f-b829-48ac-adc5-4a165e5de824-catalog-content\") pod \"864ad04f-b829-48ac-adc5-4a165e5de824\" (UID: \"864ad04f-b829-48ac-adc5-4a165e5de824\") "
Jan 22 08:22:54 crc kubenswrapper[4933]: I0122 08:22:54.746409 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/864ad04f-b829-48ac-adc5-4a165e5de824-utilities\") pod \"864ad04f-b829-48ac-adc5-4a165e5de824\" (UID: \"864ad04f-b829-48ac-adc5-4a165e5de824\") "
Jan 22 08:22:54 crc kubenswrapper[4933]: I0122 08:22:54.747629 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/864ad04f-b829-48ac-adc5-4a165e5de824-utilities" (OuterVolumeSpecName: "utilities") pod "864ad04f-b829-48ac-adc5-4a165e5de824" (UID: "864ad04f-b829-48ac-adc5-4a165e5de824"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 08:22:54 crc kubenswrapper[4933]: I0122 08:22:54.756584 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/864ad04f-b829-48ac-adc5-4a165e5de824-kube-api-access-zqj94" (OuterVolumeSpecName: "kube-api-access-zqj94") pod "864ad04f-b829-48ac-adc5-4a165e5de824" (UID: "864ad04f-b829-48ac-adc5-4a165e5de824"). InnerVolumeSpecName "kube-api-access-zqj94". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 08:22:54 crc kubenswrapper[4933]: I0122 08:22:54.849594 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zqj94\" (UniqueName: \"kubernetes.io/projected/864ad04f-b829-48ac-adc5-4a165e5de824-kube-api-access-zqj94\") on node \"crc\" DevicePath \"\""
Jan 22 08:22:54 crc kubenswrapper[4933]: I0122 08:22:54.849626 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/864ad04f-b829-48ac-adc5-4a165e5de824-utilities\") on node \"crc\" DevicePath \"\""
Jan 22 08:22:54 crc kubenswrapper[4933]: I0122 08:22:54.920725 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/864ad04f-b829-48ac-adc5-4a165e5de824-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "864ad04f-b829-48ac-adc5-4a165e5de824" (UID: "864ad04f-b829-48ac-adc5-4a165e5de824"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 08:22:54 crc kubenswrapper[4933]: I0122 08:22:54.951087 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/864ad04f-b829-48ac-adc5-4a165e5de824-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 22 08:22:55 crc kubenswrapper[4933]: I0122 08:22:55.041263 4933 generic.go:334] "Generic (PLEG): container finished" podID="864ad04f-b829-48ac-adc5-4a165e5de824" containerID="52b81b131272fb29de624347a49cf204c61732842b9bfb01313f5b487dbe5c05" exitCode=0
Jan 22 08:22:55 crc kubenswrapper[4933]: I0122 08:22:55.041311 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hrfjq" event={"ID":"864ad04f-b829-48ac-adc5-4a165e5de824","Type":"ContainerDied","Data":"52b81b131272fb29de624347a49cf204c61732842b9bfb01313f5b487dbe5c05"}
Jan 22 08:22:55 crc kubenswrapper[4933]: I0122 08:22:55.041358 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hrfjq" event={"ID":"864ad04f-b829-48ac-adc5-4a165e5de824","Type":"ContainerDied","Data":"8b4554845529e9a7cba40e7b2be093aa4cc5003e0dbfc72789c4e5697b75d9b7"}
Jan 22 08:22:55 crc kubenswrapper[4933]: I0122 08:22:55.041375 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hrfjq"
Jan 22 08:22:55 crc kubenswrapper[4933]: I0122 08:22:55.041388 4933 scope.go:117] "RemoveContainer" containerID="52b81b131272fb29de624347a49cf204c61732842b9bfb01313f5b487dbe5c05"
Jan 22 08:22:55 crc kubenswrapper[4933]: I0122 08:22:55.076926 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-hrfjq"]
Jan 22 08:22:55 crc kubenswrapper[4933]: I0122 08:22:55.078187 4933 scope.go:117] "RemoveContainer" containerID="00552a89f5184c47e26675384173a1814bacd960ac7c059c085c5c0d962abed0"
Jan 22 08:22:55 crc kubenswrapper[4933]: I0122 08:22:55.085185 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-hrfjq"]
Jan 22 08:22:55 crc kubenswrapper[4933]: I0122 08:22:55.107986 4933 scope.go:117] "RemoveContainer" containerID="6bf76a7b73b6b1607f1845a6cd61459b24af797d6244526ffef43f9ca64983a0"
Jan 22 08:22:55 crc kubenswrapper[4933]: I0122 08:22:55.157980 4933 scope.go:117] "RemoveContainer" containerID="52b81b131272fb29de624347a49cf204c61732842b9bfb01313f5b487dbe5c05"
Jan 22 08:22:55 crc kubenswrapper[4933]: E0122 08:22:55.159334 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"52b81b131272fb29de624347a49cf204c61732842b9bfb01313f5b487dbe5c05\": container with ID starting with 52b81b131272fb29de624347a49cf204c61732842b9bfb01313f5b487dbe5c05 not found: ID does not exist" containerID="52b81b131272fb29de624347a49cf204c61732842b9bfb01313f5b487dbe5c05"
Jan 22 08:22:55 crc kubenswrapper[4933]: I0122 08:22:55.159398 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"52b81b131272fb29de624347a49cf204c61732842b9bfb01313f5b487dbe5c05"} err="failed to get container status \"52b81b131272fb29de624347a49cf204c61732842b9bfb01313f5b487dbe5c05\": rpc error: code = NotFound desc = could not find container \"52b81b131272fb29de624347a49cf204c61732842b9bfb01313f5b487dbe5c05\": container with ID starting with 52b81b131272fb29de624347a49cf204c61732842b9bfb01313f5b487dbe5c05 not found: ID does not exist"
Jan 22 08:22:55 crc kubenswrapper[4933]: I0122 08:22:55.159433 4933 scope.go:117] "RemoveContainer" containerID="00552a89f5184c47e26675384173a1814bacd960ac7c059c085c5c0d962abed0"
Jan 22 08:22:55 crc kubenswrapper[4933]: E0122 08:22:55.159907 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"00552a89f5184c47e26675384173a1814bacd960ac7c059c085c5c0d962abed0\": container with ID starting with 00552a89f5184c47e26675384173a1814bacd960ac7c059c085c5c0d962abed0 not found: ID does not exist" containerID="00552a89f5184c47e26675384173a1814bacd960ac7c059c085c5c0d962abed0"
Jan 22 08:22:55 crc kubenswrapper[4933]: I0122 08:22:55.159962 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"00552a89f5184c47e26675384173a1814bacd960ac7c059c085c5c0d962abed0"} err="failed to get container status \"00552a89f5184c47e26675384173a1814bacd960ac7c059c085c5c0d962abed0\": rpc error: code = NotFound desc = could not find container \"00552a89f5184c47e26675384173a1814bacd960ac7c059c085c5c0d962abed0\": container with ID starting with 00552a89f5184c47e26675384173a1814bacd960ac7c059c085c5c0d962abed0 not found: ID does not exist"
Jan 22 08:22:55 crc kubenswrapper[4933]: I0122 08:22:55.159991 4933 scope.go:117] "RemoveContainer" containerID="6bf76a7b73b6b1607f1845a6cd61459b24af797d6244526ffef43f9ca64983a0"
Jan 22 08:22:55 crc kubenswrapper[4933]: E0122 08:22:55.160475 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6bf76a7b73b6b1607f1845a6cd61459b24af797d6244526ffef43f9ca64983a0\": container with ID starting with 6bf76a7b73b6b1607f1845a6cd61459b24af797d6244526ffef43f9ca64983a0 not found: ID does not exist" containerID="6bf76a7b73b6b1607f1845a6cd61459b24af797d6244526ffef43f9ca64983a0"
Jan 22 08:22:55 crc kubenswrapper[4933]: I0122 08:22:55.160517 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6bf76a7b73b6b1607f1845a6cd61459b24af797d6244526ffef43f9ca64983a0"} err="failed to get container status \"6bf76a7b73b6b1607f1845a6cd61459b24af797d6244526ffef43f9ca64983a0\": rpc error: code = NotFound desc = could not find container \"6bf76a7b73b6b1607f1845a6cd61459b24af797d6244526ffef43f9ca64983a0\": container with ID starting with 6bf76a7b73b6b1607f1845a6cd61459b24af797d6244526ffef43f9ca64983a0 not found: ID does not exist"
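The NotFound errors above are a benign race: by the time the kubelet re-queries the runtime for each container's status, CRI-O has already removed it. A sketch of the common pattern of treating NotFound as "already deleted" (removeFn is a hypothetical stand-in for the real CRI call, not the kubelet's actual code path):

// remove_container.go — hedged sketch: NotFound on delete means "already gone".
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// removeIgnoringNotFound runs a delete and swallows NotFound, since a
// container that no longer exists needs no further cleanup.
func removeIgnoringNotFound(removeFn func() error) error {
	if err := removeFn(); err != nil && status.Code(err) != codes.NotFound {
		return err
	}
	return nil
}

func main() {
	alreadyGone := func() error {
		return status.Error(codes.NotFound, "could not find container")
	}
	fmt.Println(removeIgnoringNotFound(alreadyGone)) // <nil>: treated as success
}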
Jan 22 08:22:56 crc kubenswrapper[4933]: I0122 08:22:56.501183 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="864ad04f-b829-48ac-adc5-4a165e5de824" path="/var/lib/kubelet/pods/864ad04f-b829-48ac-adc5-4a165e5de824/volumes"
Jan 22 08:23:58 crc kubenswrapper[4933]: I0122 08:23:58.434123 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-5ljdm"]
Jan 22 08:23:58 crc kubenswrapper[4933]: E0122 08:23:58.435088 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="864ad04f-b829-48ac-adc5-4a165e5de824" containerName="registry-server"
Jan 22 08:23:58 crc kubenswrapper[4933]: I0122 08:23:58.435100 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="864ad04f-b829-48ac-adc5-4a165e5de824" containerName="registry-server"
Jan 22 08:23:58 crc kubenswrapper[4933]: E0122 08:23:58.435131 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="864ad04f-b829-48ac-adc5-4a165e5de824" containerName="extract-utilities"
Jan 22 08:23:58 crc kubenswrapper[4933]: I0122 08:23:58.435138 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="864ad04f-b829-48ac-adc5-4a165e5de824" containerName="extract-utilities"
Jan 22 08:23:58 crc kubenswrapper[4933]: E0122 08:23:58.435152 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="864ad04f-b829-48ac-adc5-4a165e5de824" containerName="extract-content"
Jan 22 08:23:58 crc kubenswrapper[4933]: I0122 08:23:58.435159 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="864ad04f-b829-48ac-adc5-4a165e5de824" containerName="extract-content"
Jan 22 08:23:58 crc kubenswrapper[4933]: I0122 08:23:58.435394 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="864ad04f-b829-48ac-adc5-4a165e5de824" containerName="registry-server"
Jan 22 08:23:58 crc kubenswrapper[4933]: I0122 08:23:58.436849 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5ljdm"
Jan 22 08:23:58 crc kubenswrapper[4933]: I0122 08:23:58.452327 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5ljdm"]
Jan 22 08:23:58 crc kubenswrapper[4933]: I0122 08:23:58.457935 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d12f93c-a191-4a74-b4db-ae7f082eee97-utilities\") pod \"community-operators-5ljdm\" (UID: \"8d12f93c-a191-4a74-b4db-ae7f082eee97\") " pod="openshift-marketplace/community-operators-5ljdm"
Jan 22 08:23:58 crc kubenswrapper[4933]: I0122 08:23:58.458414 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d12f93c-a191-4a74-b4db-ae7f082eee97-catalog-content\") pod \"community-operators-5ljdm\" (UID: \"8d12f93c-a191-4a74-b4db-ae7f082eee97\") " pod="openshift-marketplace/community-operators-5ljdm"
Jan 22 08:23:58 crc kubenswrapper[4933]: I0122 08:23:58.458482 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vdr2z\" (UniqueName: \"kubernetes.io/projected/8d12f93c-a191-4a74-b4db-ae7f082eee97-kube-api-access-vdr2z\") pod \"community-operators-5ljdm\" (UID: \"8d12f93c-a191-4a74-b4db-ae7f082eee97\") " pod="openshift-marketplace/community-operators-5ljdm"
Jan 22 08:23:58 crc kubenswrapper[4933]: I0122 08:23:58.560724 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d12f93c-a191-4a74-b4db-ae7f082eee97-utilities\") pod \"community-operators-5ljdm\" (UID: \"8d12f93c-a191-4a74-b4db-ae7f082eee97\") " pod="openshift-marketplace/community-operators-5ljdm"
Jan 22 08:23:58 crc kubenswrapper[4933]: I0122 08:23:58.560919 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d12f93c-a191-4a74-b4db-ae7f082eee97-catalog-content\") pod \"community-operators-5ljdm\" (UID: \"8d12f93c-a191-4a74-b4db-ae7f082eee97\") " pod="openshift-marketplace/community-operators-5ljdm"
Jan 22 08:23:58 crc kubenswrapper[4933]: I0122 08:23:58.560940 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vdr2z\" (UniqueName: \"kubernetes.io/projected/8d12f93c-a191-4a74-b4db-ae7f082eee97-kube-api-access-vdr2z\") pod \"community-operators-5ljdm\" (UID: \"8d12f93c-a191-4a74-b4db-ae7f082eee97\") " pod="openshift-marketplace/community-operators-5ljdm"
Jan 22 08:23:58 crc kubenswrapper[4933]: I0122 08:23:58.564064 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d12f93c-a191-4a74-b4db-ae7f082eee97-utilities\") pod \"community-operators-5ljdm\" (UID: \"8d12f93c-a191-4a74-b4db-ae7f082eee97\") " pod="openshift-marketplace/community-operators-5ljdm"
Jan 22 08:23:58 crc kubenswrapper[4933]: I0122 08:23:58.564164 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d12f93c-a191-4a74-b4db-ae7f082eee97-catalog-content\") pod \"community-operators-5ljdm\" (UID: \"8d12f93c-a191-4a74-b4db-ae7f082eee97\") " pod="openshift-marketplace/community-operators-5ljdm"
Jan 22 08:23:59 crc kubenswrapper[4933]: I0122 08:23:59.002471 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vdr2z\" (UniqueName: \"kubernetes.io/projected/8d12f93c-a191-4a74-b4db-ae7f082eee97-kube-api-access-vdr2z\") pod \"community-operators-5ljdm\" (UID: \"8d12f93c-a191-4a74-b4db-ae7f082eee97\") " pod="openshift-marketplace/community-operators-5ljdm"
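The mounts above show the standard marketplace catalog-pod volume shape: two emptyDirs (utilities, catalog-content) plus the projected service-account token volume (kube-api-access-vdr2z). A hedged sketch of that layout using the Kubernetes Go types; only the volume names come from the log, and the pod skeleton is illustrative:

// catalog_pod_volumes.go — hedged sketch of the volume layout implied above.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	pod := corev1.Pod{
		Spec: corev1.PodSpec{
			Volumes: []corev1.Volume{
				// Scratch space used by the extract-utilities/extract-content init steps.
				{Name: "utilities", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}},
				{Name: "catalog-content", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}},
				// kube-api-access-* is the projected service-account token volume the
				// API server injects automatically; shown empty here as a placeholder.
				{Name: "kube-api-access-vdr2z", VolumeSource: corev1.VolumeSource{Projected: &corev1.ProjectedVolumeSource{}}},
			},
		},
	}
	fmt.Println(len(pod.Spec.Volumes), "volumes")
}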
Jan 22 08:23:59 crc kubenswrapper[4933]: I0122 08:23:59.063476 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5ljdm"
Jan 22 08:23:59 crc kubenswrapper[4933]: I0122 08:23:59.552594 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5ljdm"]
Jan 22 08:23:59 crc kubenswrapper[4933]: I0122 08:23:59.738443 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5ljdm" event={"ID":"8d12f93c-a191-4a74-b4db-ae7f082eee97","Type":"ContainerStarted","Data":"3358a417fcbd5e27c2035db7e3912757a87f498965ace3180603fc1d82440fc2"}
Jan 22 08:24:00 crc kubenswrapper[4933]: I0122 08:24:00.756167 4933 generic.go:334] "Generic (PLEG): container finished" podID="8d12f93c-a191-4a74-b4db-ae7f082eee97" containerID="5ff418c9e3d0443fd63eb7667163ed3472c6c373fd76342e9f9fd7432204ff94" exitCode=0
Jan 22 08:24:00 crc kubenswrapper[4933]: I0122 08:24:00.756242 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5ljdm" event={"ID":"8d12f93c-a191-4a74-b4db-ae7f082eee97","Type":"ContainerDied","Data":"5ff418c9e3d0443fd63eb7667163ed3472c6c373fd76342e9f9fd7432204ff94"}
Jan 22 08:24:01 crc kubenswrapper[4933]: I0122 08:24:01.768534 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5ljdm" event={"ID":"8d12f93c-a191-4a74-b4db-ae7f082eee97","Type":"ContainerStarted","Data":"c4591b164ce61274fc6636ec609372751dc82d91a24e4c0e9a5cc09d3f0fdcc3"}
Jan 22 08:24:02 crc kubenswrapper[4933]: I0122 08:24:02.788486 4933 generic.go:334] "Generic (PLEG): container finished" podID="8d12f93c-a191-4a74-b4db-ae7f082eee97" containerID="c4591b164ce61274fc6636ec609372751dc82d91a24e4c0e9a5cc09d3f0fdcc3" exitCode=0
Jan 22 08:24:02 crc kubenswrapper[4933]: I0122 08:24:02.788545 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5ljdm" event={"ID":"8d12f93c-a191-4a74-b4db-ae7f082eee97","Type":"ContainerDied","Data":"c4591b164ce61274fc6636ec609372751dc82d91a24e4c0e9a5cc09d3f0fdcc3"}
Jan 22 08:24:03 crc kubenswrapper[4933]: I0122 08:24:03.802570 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5ljdm" event={"ID":"8d12f93c-a191-4a74-b4db-ae7f082eee97","Type":"ContainerStarted","Data":"77bdbf514c8b9ad0c853e8d20b318321b687c41fac781ccb4dfa910cb3e643b5"}
Jan 22 08:24:03 crc kubenswrapper[4933]: I0122 08:24:03.834641 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-5ljdm" podStartSLOduration=3.2735739 podStartE2EDuration="5.834618894s" podCreationTimestamp="2026-01-22 08:23:58 +0000 UTC" firstStartedPulling="2026-01-22 08:24:00.759392786 +0000 UTC m=+9488.596518169" lastFinishedPulling="2026-01-22 08:24:03.32043781 +0000 UTC m=+9491.157563163" observedRunningTime="2026-01-22 08:24:03.821620737 +0000 UTC m=+9491.658746110" watchObservedRunningTime="2026-01-22 08:24:03.834618894 +0000 UTC m=+9491.671744257"
Jan 22 08:24:09 crc kubenswrapper[4933]: I0122 08:24:09.064004 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-5ljdm"
Jan 22 08:24:09 crc kubenswrapper[4933]: I0122 08:24:09.064809 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-5ljdm"
Jan 22 08:24:09 crc kubenswrapper[4933]: I0122 08:24:09.768044 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-5ljdm"
Jan 22 08:24:09 crc kubenswrapper[4933]: I0122 08:24:09.925422 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-5ljdm"
Jan 22 08:24:10 crc kubenswrapper[4933]: I0122 08:24:10.010293 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5ljdm"]
Jan 22 08:24:11 crc kubenswrapper[4933]: I0122 08:24:11.899607 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-5ljdm" podUID="8d12f93c-a191-4a74-b4db-ae7f082eee97" containerName="registry-server" containerID="cri-o://77bdbf514c8b9ad0c853e8d20b318321b687c41fac781ccb4dfa910cb3e643b5" gracePeriod=2
Jan 22 08:24:12 crc kubenswrapper[4933]: I0122 08:24:12.922051 4933 generic.go:334] "Generic (PLEG): container finished" podID="8d12f93c-a191-4a74-b4db-ae7f082eee97" containerID="77bdbf514c8b9ad0c853e8d20b318321b687c41fac781ccb4dfa910cb3e643b5" exitCode=0
Jan 22 08:24:12 crc kubenswrapper[4933]: I0122 08:24:12.922222 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5ljdm" event={"ID":"8d12f93c-a191-4a74-b4db-ae7f082eee97","Type":"ContainerDied","Data":"77bdbf514c8b9ad0c853e8d20b318321b687c41fac781ccb4dfa910cb3e643b5"}
Jan 22 08:24:13 crc kubenswrapper[4933]: I0122 08:24:13.094035 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5ljdm"
Jan 22 08:24:13 crc kubenswrapper[4933]: I0122 08:24:13.144326 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d12f93c-a191-4a74-b4db-ae7f082eee97-catalog-content\") pod \"8d12f93c-a191-4a74-b4db-ae7f082eee97\" (UID: \"8d12f93c-a191-4a74-b4db-ae7f082eee97\") "
Jan 22 08:24:13 crc kubenswrapper[4933]: I0122 08:24:13.144468 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vdr2z\" (UniqueName: \"kubernetes.io/projected/8d12f93c-a191-4a74-b4db-ae7f082eee97-kube-api-access-vdr2z\") pod \"8d12f93c-a191-4a74-b4db-ae7f082eee97\" (UID: \"8d12f93c-a191-4a74-b4db-ae7f082eee97\") "
Jan 22 08:24:13 crc kubenswrapper[4933]: I0122 08:24:13.144597 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d12f93c-a191-4a74-b4db-ae7f082eee97-utilities\") pod \"8d12f93c-a191-4a74-b4db-ae7f082eee97\" (UID: \"8d12f93c-a191-4a74-b4db-ae7f082eee97\") "
Jan 22 08:24:13 crc kubenswrapper[4933]: I0122 08:24:13.151619 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8d12f93c-a191-4a74-b4db-ae7f082eee97-utilities" (OuterVolumeSpecName: "utilities") pod "8d12f93c-a191-4a74-b4db-ae7f082eee97" (UID: "8d12f93c-a191-4a74-b4db-ae7f082eee97"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 08:24:13 crc kubenswrapper[4933]: I0122 08:24:13.155115 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d12f93c-a191-4a74-b4db-ae7f082eee97-kube-api-access-vdr2z" (OuterVolumeSpecName: "kube-api-access-vdr2z") pod "8d12f93c-a191-4a74-b4db-ae7f082eee97" (UID: "8d12f93c-a191-4a74-b4db-ae7f082eee97"). InnerVolumeSpecName "kube-api-access-vdr2z". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 08:24:13 crc kubenswrapper[4933]: I0122 08:24:13.207630 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8d12f93c-a191-4a74-b4db-ae7f082eee97-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8d12f93c-a191-4a74-b4db-ae7f082eee97" (UID: "8d12f93c-a191-4a74-b4db-ae7f082eee97"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 08:24:13 crc kubenswrapper[4933]: I0122 08:24:13.247324 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d12f93c-a191-4a74-b4db-ae7f082eee97-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 22 08:24:13 crc kubenswrapper[4933]: I0122 08:24:13.247357 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vdr2z\" (UniqueName: \"kubernetes.io/projected/8d12f93c-a191-4a74-b4db-ae7f082eee97-kube-api-access-vdr2z\") on node \"crc\" DevicePath \"\""
Jan 22 08:24:13 crc kubenswrapper[4933]: I0122 08:24:13.247370 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d12f93c-a191-4a74-b4db-ae7f082eee97-utilities\") on node \"crc\" DevicePath \"\""
Jan 22 08:24:13 crc kubenswrapper[4933]: I0122 08:24:13.941064 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5ljdm" event={"ID":"8d12f93c-a191-4a74-b4db-ae7f082eee97","Type":"ContainerDied","Data":"3358a417fcbd5e27c2035db7e3912757a87f498965ace3180603fc1d82440fc2"}
Jan 22 08:24:13 crc kubenswrapper[4933]: I0122 08:24:13.941172 4933 scope.go:117] "RemoveContainer" containerID="77bdbf514c8b9ad0c853e8d20b318321b687c41fac781ccb4dfa910cb3e643b5"
Jan 22 08:24:13 crc kubenswrapper[4933]: I0122 08:24:13.941346 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5ljdm"
Jan 22 08:24:13 crc kubenswrapper[4933]: I0122 08:24:13.979319 4933 scope.go:117] "RemoveContainer" containerID="c4591b164ce61274fc6636ec609372751dc82d91a24e4c0e9a5cc09d3f0fdcc3"
Jan 22 08:24:14 crc kubenswrapper[4933]: I0122 08:24:14.017648 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5ljdm"]
Jan 22 08:24:14 crc kubenswrapper[4933]: I0122 08:24:14.023820 4933 scope.go:117] "RemoveContainer" containerID="5ff418c9e3d0443fd63eb7667163ed3472c6c373fd76342e9f9fd7432204ff94"
Jan 22 08:24:14 crc kubenswrapper[4933]: I0122 08:24:14.030771 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-5ljdm"]
Jan 22 08:24:14 crc kubenswrapper[4933]: I0122 08:24:14.514041 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8d12f93c-a191-4a74-b4db-ae7f082eee97" path="/var/lib/kubelet/pods/8d12f93c-a191-4a74-b4db-ae7f082eee97/volumes"
Jan 22 08:24:21 crc kubenswrapper[4933]: I0122 08:24:21.149312 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="hostpath-provisioner/csi-hostpathplugin-48tlb" podUID="beeaabac-5adb-4389-a41d-fcd84b8b7259" containerName="hostpath-provisioner" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 22 08:25:10 crc kubenswrapper[4933]: I0122 08:25:10.943776 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 08:25:10 crc kubenswrapper[4933]: I0122 08:25:10.944258 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 08:25:40 crc kubenswrapper[4933]: I0122 08:25:40.943359 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 08:25:40 crc kubenswrapper[4933]: I0122 08:25:40.944325 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 08:26:10 crc kubenswrapper[4933]: I0122 08:26:10.943032 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 08:26:10 crc kubenswrapper[4933]: I0122 08:26:10.943521 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 08:26:10 crc kubenswrapper[4933]: I0122 08:26:10.943568 4933 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx"
Jan 22 08:26:10 crc kubenswrapper[4933]: I0122 08:26:10.944153 4933 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d024738decad5f259fa929605621f730ec76d348d7c6c49f2ca52c4e6c41f29e"} pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 22 08:26:10 crc kubenswrapper[4933]: I0122 08:26:10.944217 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" containerID="cri-o://d024738decad5f259fa929605621f730ec76d348d7c6c49f2ca52c4e6c41f29e" gracePeriod=600
Jan 22 08:26:11 crc kubenswrapper[4933]: E0122 08:26:11.101018 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:26:11 crc kubenswrapper[4933]: I0122 08:26:11.504226 4933 generic.go:334] "Generic (PLEG): container finished" podID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerID="d024738decad5f259fa929605621f730ec76d348d7c6c49f2ca52c4e6c41f29e" exitCode=0
Jan 22 08:26:11 crc kubenswrapper[4933]: I0122 08:26:11.504297 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerDied","Data":"d024738decad5f259fa929605621f730ec76d348d7c6c49f2ca52c4e6c41f29e"}
Jan 22 08:26:11 crc kubenswrapper[4933]: I0122 08:26:11.504341 4933 scope.go:117] "RemoveContainer" containerID="c98d04fa56075612dc036d5b10973aa3b26ebc3beeae287b19656f51b500424d"
Jan 22 08:26:11 crc kubenswrapper[4933]: I0122 08:26:11.505373 4933 scope.go:117] "RemoveContainer" containerID="d024738decad5f259fa929605621f730ec76d348d7c6c49f2ca52c4e6c41f29e"
Jan 22 08:26:11 crc kubenswrapper[4933]: E0122 08:26:11.505958 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:26:25 crc kubenswrapper[4933]: I0122 08:26:25.491373 4933 scope.go:117] "RemoveContainer" containerID="d024738decad5f259fa929605621f730ec76d348d7c6c49f2ca52c4e6c41f29e"
Jan 22 08:26:25 crc kubenswrapper[4933]: E0122 08:26:25.494775 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
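"back-off 5m0s" is the kubelet's container-restart back-off at its cap: the delay doubles from a 10s base up to a 5m maximum and then stays there, which is why every retry below reports the same 5m0s. A sketch of that schedule; the 10s base and 5m cap are the upstream kubelet defaults, assumed here rather than read from this node's configuration:

// crashloop_backoff.go — hedged sketch of the restart back-off schedule.
package main

import (
	"fmt"
	"time"
)

func main() {
	base, maxDelay := 10*time.Second, 5*time.Minute // assumed upstream defaults
	delay := base
	for i := 1; i <= 7; i++ {
		fmt.Printf("restart %d: wait %v\n", i, delay)
		if delay = delay * 2; delay > maxDelay {
			delay = maxDelay // once capped, every sync logs "back-off 5m0s"
		}
	}
}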
Jan 22 08:26:36 crc kubenswrapper[4933]: I0122 08:26:36.491703 4933 scope.go:117] "RemoveContainer" containerID="d024738decad5f259fa929605621f730ec76d348d7c6c49f2ca52c4e6c41f29e"
Jan 22 08:26:36 crc kubenswrapper[4933]: E0122 08:26:36.492575 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:26:50 crc kubenswrapper[4933]: I0122 08:26:50.490757 4933 scope.go:117] "RemoveContainer" containerID="d024738decad5f259fa929605621f730ec76d348d7c6c49f2ca52c4e6c41f29e"
Jan 22 08:26:50 crc kubenswrapper[4933]: E0122 08:26:50.491534 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:27:04 crc kubenswrapper[4933]: I0122 08:27:04.491992 4933 scope.go:117] "RemoveContainer" containerID="d024738decad5f259fa929605621f730ec76d348d7c6c49f2ca52c4e6c41f29e"
Jan 22 08:27:04 crc kubenswrapper[4933]: E0122 08:27:04.493186 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:27:15 crc kubenswrapper[4933]: I0122 08:27:15.491373 4933 scope.go:117] "RemoveContainer" containerID="d024738decad5f259fa929605621f730ec76d348d7c6c49f2ca52c4e6c41f29e"
Jan 22 08:27:15 crc kubenswrapper[4933]: E0122 08:27:15.493024 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:27:28 crc kubenswrapper[4933]: I0122 08:27:28.491111 4933 scope.go:117] "RemoveContainer" containerID="d024738decad5f259fa929605621f730ec76d348d7c6c49f2ca52c4e6c41f29e"
Jan 22 08:27:28 crc kubenswrapper[4933]: E0122 08:27:28.491887 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:27:43 crc kubenswrapper[4933]: I0122 08:27:43.492256 4933 scope.go:117] "RemoveContainer" containerID="d024738decad5f259fa929605621f730ec76d348d7c6c49f2ca52c4e6c41f29e"
Jan 22 08:27:43 crc kubenswrapper[4933]: E0122 08:27:43.492918 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:27:57 crc kubenswrapper[4933]: I0122 08:27:57.491488 4933 scope.go:117] "RemoveContainer" containerID="d024738decad5f259fa929605621f730ec76d348d7c6c49f2ca52c4e6c41f29e"
Jan 22 08:27:57 crc kubenswrapper[4933]: E0122 08:27:57.492583 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:28:11 crc kubenswrapper[4933]: I0122 08:28:11.492412 4933 scope.go:117] "RemoveContainer" containerID="d024738decad5f259fa929605621f730ec76d348d7c6c49f2ca52c4e6c41f29e"
Jan 22 08:28:11 crc kubenswrapper[4933]: E0122 08:28:11.493718 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:28:22 crc kubenswrapper[4933]: I0122 08:28:22.500890 4933 scope.go:117] "RemoveContainer" containerID="d024738decad5f259fa929605621f730ec76d348d7c6c49f2ca52c4e6c41f29e"
Jan 22 08:28:22 crc kubenswrapper[4933]: E0122 08:28:22.502000 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:28:35 crc kubenswrapper[4933]: I0122 08:28:35.494545 4933 scope.go:117] "RemoveContainer" containerID="d024738decad5f259fa929605621f730ec76d348d7c6c49f2ca52c4e6c41f29e"
Jan 22 08:28:35 crc kubenswrapper[4933]: E0122 08:28:35.495836 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:28:48 crc kubenswrapper[4933]: I0122 08:28:48.491809 4933 scope.go:117] "RemoveContainer" containerID="d024738decad5f259fa929605621f730ec76d348d7c6c49f2ca52c4e6c41f29e"
Jan 22 08:28:48 crc kubenswrapper[4933]: E0122 08:28:48.492862 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:29:01 crc kubenswrapper[4933]: I0122 08:29:01.491998 4933 scope.go:117] "RemoveContainer" containerID="d024738decad5f259fa929605621f730ec76d348d7c6c49f2ca52c4e6c41f29e"
Jan 22 08:29:01 crc kubenswrapper[4933]: E0122 08:29:01.493037 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:29:14 crc kubenswrapper[4933]: I0122 08:29:14.491879 4933 scope.go:117] "RemoveContainer" containerID="d024738decad5f259fa929605621f730ec76d348d7c6c49f2ca52c4e6c41f29e"
Jan 22 08:29:14 crc kubenswrapper[4933]: E0122 08:29:14.493338 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:29:25 crc kubenswrapper[4933]: I0122 08:29:25.492125 4933 scope.go:117] "RemoveContainer" containerID="d024738decad5f259fa929605621f730ec76d348d7c6c49f2ca52c4e6c41f29e"
Jan 22 08:29:25 crc kubenswrapper[4933]: E0122 08:29:25.493584 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:29:38 crc kubenswrapper[4933]: I0122 08:29:38.490865 4933 scope.go:117] "RemoveContainer" containerID="d024738decad5f259fa929605621f730ec76d348d7c6c49f2ca52c4e6c41f29e"
Jan 22 08:29:38 crc kubenswrapper[4933]: E0122 08:29:38.491977 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:29:53 crc kubenswrapper[4933]: I0122 08:29:53.491758 4933 scope.go:117] "RemoveContainer" containerID="d024738decad5f259fa929605621f730ec76d348d7c6c49f2ca52c4e6c41f29e"
Jan 22 08:29:53 crc kubenswrapper[4933]: E0122 08:29:53.492533 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb"
Jan 22 08:30:00 crc kubenswrapper[4933]: I0122 08:30:00.162834 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484510-5xgh8"]
Jan 22 08:30:00 crc kubenswrapper[4933]: E0122 08:30:00.164744 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d12f93c-a191-4a74-b4db-ae7f082eee97" containerName="extract-utilities"
Jan 22 08:30:00 crc kubenswrapper[4933]: I0122 08:30:00.164771 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d12f93c-a191-4a74-b4db-ae7f082eee97" containerName="extract-utilities"
Jan 22 08:30:00 crc kubenswrapper[4933]: E0122 08:30:00.164810 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d12f93c-a191-4a74-b4db-ae7f082eee97" containerName="registry-server"
Jan 22 08:30:00 crc kubenswrapper[4933]: I0122 08:30:00.164822 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d12f93c-a191-4a74-b4db-ae7f082eee97" containerName="registry-server"
Jan 22 08:30:00 crc kubenswrapper[4933]: E0122 08:30:00.164854 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d12f93c-a191-4a74-b4db-ae7f082eee97" containerName="extract-content"
Jan 22 08:30:00 crc kubenswrapper[4933]: I0122 08:30:00.164865 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d12f93c-a191-4a74-b4db-ae7f082eee97" containerName="extract-content"
Jan 22 08:30:00 crc kubenswrapper[4933]: I0122 08:30:00.165224 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d12f93c-a191-4a74-b4db-ae7f082eee97" containerName="registry-server"
Jan 22 08:30:00 crc kubenswrapper[4933]: I0122 08:30:00.166383 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484510-5xgh8"
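collect-profiles-29484510-5xgh8 is a child of the OLM collect-profiles CronJob: the Job controller appends the random pod suffix (5xgh8), while the CronJob controller conventionally names the Job itself after the scheduled time in minutes since the Unix epoch. Decoding the numeric suffix should land exactly on this run's 08:30:00 UTC schedule; the naming convention is assumed from upstream controller behavior, not stated in the log:

// cronjob_suffix.go — hedged sketch decoding the job-name suffix above.
package main

import (
	"fmt"
	"time"
)

func main() {
	const suffixMinutes = 29484510 // from collect-profiles-29484510-5xgh8
	t := time.Unix(suffixMinutes*60, 0).UTC()
	fmt.Println(t) // expected: 2026-01-22 08:30:00 +0000 UTC
}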
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484510-5xgh8" Jan 22 08:30:00 crc kubenswrapper[4933]: I0122 08:30:00.170866 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 22 08:30:00 crc kubenswrapper[4933]: I0122 08:30:00.171181 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 22 08:30:00 crc kubenswrapper[4933]: I0122 08:30:00.185233 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484510-5xgh8"] Jan 22 08:30:00 crc kubenswrapper[4933]: I0122 08:30:00.319728 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/59227e16-6884-4e1e-8e52-258f0dcdddd5-config-volume\") pod \"collect-profiles-29484510-5xgh8\" (UID: \"59227e16-6884-4e1e-8e52-258f0dcdddd5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484510-5xgh8" Jan 22 08:30:00 crc kubenswrapper[4933]: I0122 08:30:00.319805 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/59227e16-6884-4e1e-8e52-258f0dcdddd5-secret-volume\") pod \"collect-profiles-29484510-5xgh8\" (UID: \"59227e16-6884-4e1e-8e52-258f0dcdddd5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484510-5xgh8" Jan 22 08:30:00 crc kubenswrapper[4933]: I0122 08:30:00.319908 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ss7rb\" (UniqueName: \"kubernetes.io/projected/59227e16-6884-4e1e-8e52-258f0dcdddd5-kube-api-access-ss7rb\") pod \"collect-profiles-29484510-5xgh8\" (UID: \"59227e16-6884-4e1e-8e52-258f0dcdddd5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484510-5xgh8" Jan 22 08:30:00 crc kubenswrapper[4933]: I0122 08:30:00.422813 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ss7rb\" (UniqueName: \"kubernetes.io/projected/59227e16-6884-4e1e-8e52-258f0dcdddd5-kube-api-access-ss7rb\") pod \"collect-profiles-29484510-5xgh8\" (UID: \"59227e16-6884-4e1e-8e52-258f0dcdddd5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484510-5xgh8" Jan 22 08:30:00 crc kubenswrapper[4933]: I0122 08:30:00.423126 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/59227e16-6884-4e1e-8e52-258f0dcdddd5-config-volume\") pod \"collect-profiles-29484510-5xgh8\" (UID: \"59227e16-6884-4e1e-8e52-258f0dcdddd5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484510-5xgh8" Jan 22 08:30:00 crc kubenswrapper[4933]: I0122 08:30:00.423318 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/59227e16-6884-4e1e-8e52-258f0dcdddd5-secret-volume\") pod \"collect-profiles-29484510-5xgh8\" (UID: \"59227e16-6884-4e1e-8e52-258f0dcdddd5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484510-5xgh8" Jan 22 08:30:00 crc kubenswrapper[4933]: I0122 08:30:00.424197 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/59227e16-6884-4e1e-8e52-258f0dcdddd5-config-volume\") pod 
\"collect-profiles-29484510-5xgh8\" (UID: \"59227e16-6884-4e1e-8e52-258f0dcdddd5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484510-5xgh8" Jan 22 08:30:00 crc kubenswrapper[4933]: I0122 08:30:00.439608 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/59227e16-6884-4e1e-8e52-258f0dcdddd5-secret-volume\") pod \"collect-profiles-29484510-5xgh8\" (UID: \"59227e16-6884-4e1e-8e52-258f0dcdddd5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484510-5xgh8" Jan 22 08:30:00 crc kubenswrapper[4933]: I0122 08:30:00.446321 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ss7rb\" (UniqueName: \"kubernetes.io/projected/59227e16-6884-4e1e-8e52-258f0dcdddd5-kube-api-access-ss7rb\") pod \"collect-profiles-29484510-5xgh8\" (UID: \"59227e16-6884-4e1e-8e52-258f0dcdddd5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484510-5xgh8" Jan 22 08:30:00 crc kubenswrapper[4933]: I0122 08:30:00.489597 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484510-5xgh8" Jan 22 08:30:00 crc kubenswrapper[4933]: I0122 08:30:00.992927 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484510-5xgh8"] Jan 22 08:30:01 crc kubenswrapper[4933]: I0122 08:30:01.804848 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484510-5xgh8" event={"ID":"59227e16-6884-4e1e-8e52-258f0dcdddd5","Type":"ContainerStarted","Data":"1be43c68bf7cd5b6e6565d355c9bd1a45d476fb094d508caa25897cf2d1b12e4"} Jan 22 08:30:01 crc kubenswrapper[4933]: I0122 08:30:01.805134 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484510-5xgh8" event={"ID":"59227e16-6884-4e1e-8e52-258f0dcdddd5","Type":"ContainerStarted","Data":"69f6caa381691f516916e9fb9275e4ce0734d0f1e84d19d1550caefd4bf94be5"} Jan 22 08:30:01 crc kubenswrapper[4933]: I0122 08:30:01.828164 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29484510-5xgh8" podStartSLOduration=1.828146711 podStartE2EDuration="1.828146711s" podCreationTimestamp="2026-01-22 08:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 08:30:01.820524525 +0000 UTC m=+9849.657649918" watchObservedRunningTime="2026-01-22 08:30:01.828146711 +0000 UTC m=+9849.665272074" Jan 22 08:30:02 crc kubenswrapper[4933]: I0122 08:30:02.818280 4933 generic.go:334] "Generic (PLEG): container finished" podID="59227e16-6884-4e1e-8e52-258f0dcdddd5" containerID="1be43c68bf7cd5b6e6565d355c9bd1a45d476fb094d508caa25897cf2d1b12e4" exitCode=0 Jan 22 08:30:02 crc kubenswrapper[4933]: I0122 08:30:02.818449 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484510-5xgh8" event={"ID":"59227e16-6884-4e1e-8e52-258f0dcdddd5","Type":"ContainerDied","Data":"1be43c68bf7cd5b6e6565d355c9bd1a45d476fb094d508caa25897cf2d1b12e4"} Jan 22 08:30:04 crc kubenswrapper[4933]: I0122 08:30:04.253066 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484510-5xgh8" Jan 22 08:30:04 crc kubenswrapper[4933]: I0122 08:30:04.420481 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/59227e16-6884-4e1e-8e52-258f0dcdddd5-config-volume\") pod \"59227e16-6884-4e1e-8e52-258f0dcdddd5\" (UID: \"59227e16-6884-4e1e-8e52-258f0dcdddd5\") " Jan 22 08:30:04 crc kubenswrapper[4933]: I0122 08:30:04.420557 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ss7rb\" (UniqueName: \"kubernetes.io/projected/59227e16-6884-4e1e-8e52-258f0dcdddd5-kube-api-access-ss7rb\") pod \"59227e16-6884-4e1e-8e52-258f0dcdddd5\" (UID: \"59227e16-6884-4e1e-8e52-258f0dcdddd5\") " Jan 22 08:30:04 crc kubenswrapper[4933]: I0122 08:30:04.420661 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/59227e16-6884-4e1e-8e52-258f0dcdddd5-secret-volume\") pod \"59227e16-6884-4e1e-8e52-258f0dcdddd5\" (UID: \"59227e16-6884-4e1e-8e52-258f0dcdddd5\") " Jan 22 08:30:04 crc kubenswrapper[4933]: I0122 08:30:04.423606 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/59227e16-6884-4e1e-8e52-258f0dcdddd5-config-volume" (OuterVolumeSpecName: "config-volume") pod "59227e16-6884-4e1e-8e52-258f0dcdddd5" (UID: "59227e16-6884-4e1e-8e52-258f0dcdddd5"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 08:30:04 crc kubenswrapper[4933]: I0122 08:30:04.440742 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/59227e16-6884-4e1e-8e52-258f0dcdddd5-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "59227e16-6884-4e1e-8e52-258f0dcdddd5" (UID: "59227e16-6884-4e1e-8e52-258f0dcdddd5"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 08:30:04 crc kubenswrapper[4933]: I0122 08:30:04.445425 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/59227e16-6884-4e1e-8e52-258f0dcdddd5-kube-api-access-ss7rb" (OuterVolumeSpecName: "kube-api-access-ss7rb") pod "59227e16-6884-4e1e-8e52-258f0dcdddd5" (UID: "59227e16-6884-4e1e-8e52-258f0dcdddd5"). InnerVolumeSpecName "kube-api-access-ss7rb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 08:30:04 crc kubenswrapper[4933]: I0122 08:30:04.491551 4933 scope.go:117] "RemoveContainer" containerID="d024738decad5f259fa929605621f730ec76d348d7c6c49f2ca52c4e6c41f29e" Jan 22 08:30:04 crc kubenswrapper[4933]: E0122 08:30:04.491843 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:30:04 crc kubenswrapper[4933]: I0122 08:30:04.541838 4933 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/59227e16-6884-4e1e-8e52-258f0dcdddd5-config-volume\") on node \"crc\" DevicePath \"\"" Jan 22 08:30:04 crc kubenswrapper[4933]: I0122 08:30:04.541886 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ss7rb\" (UniqueName: \"kubernetes.io/projected/59227e16-6884-4e1e-8e52-258f0dcdddd5-kube-api-access-ss7rb\") on node \"crc\" DevicePath \"\"" Jan 22 08:30:04 crc kubenswrapper[4933]: I0122 08:30:04.541901 4933 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/59227e16-6884-4e1e-8e52-258f0dcdddd5-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 22 08:30:04 crc kubenswrapper[4933]: I0122 08:30:04.838642 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484510-5xgh8" event={"ID":"59227e16-6884-4e1e-8e52-258f0dcdddd5","Type":"ContainerDied","Data":"69f6caa381691f516916e9fb9275e4ce0734d0f1e84d19d1550caefd4bf94be5"} Jan 22 08:30:04 crc kubenswrapper[4933]: I0122 08:30:04.838679 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="69f6caa381691f516916e9fb9275e4ce0734d0f1e84d19d1550caefd4bf94be5" Jan 22 08:30:04 crc kubenswrapper[4933]: I0122 08:30:04.838728 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484510-5xgh8" Jan 22 08:30:04 crc kubenswrapper[4933]: I0122 08:30:04.896695 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484465-mhcpp"] Jan 22 08:30:04 crc kubenswrapper[4933]: I0122 08:30:04.905259 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484465-mhcpp"] Jan 22 08:30:06 crc kubenswrapper[4933]: I0122 08:30:06.506350 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4a655516-5e44-40ee-92f7-129dcfb09834" path="/var/lib/kubelet/pods/4a655516-5e44-40ee-92f7-129dcfb09834/volumes" Jan 22 08:30:11 crc kubenswrapper[4933]: I0122 08:30:11.216089 4933 scope.go:117] "RemoveContainer" containerID="ad8a2b6208aa16a08cef5bd57199b5726c6c45b84b3c389519a0c572a985fc87" Jan 22 08:30:17 crc kubenswrapper[4933]: I0122 08:30:17.490932 4933 scope.go:117] "RemoveContainer" containerID="d024738decad5f259fa929605621f730ec76d348d7c6c49f2ca52c4e6c41f29e" Jan 22 08:30:17 crc kubenswrapper[4933]: E0122 08:30:17.491666 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:30:32 crc kubenswrapper[4933]: I0122 08:30:32.507835 4933 scope.go:117] "RemoveContainer" containerID="d024738decad5f259fa929605621f730ec76d348d7c6c49f2ca52c4e6c41f29e" Jan 22 08:30:32 crc kubenswrapper[4933]: E0122 08:30:32.509714 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:30:44 crc kubenswrapper[4933]: I0122 08:30:44.498106 4933 scope.go:117] "RemoveContainer" containerID="d024738decad5f259fa929605621f730ec76d348d7c6c49f2ca52c4e6c41f29e" Jan 22 08:30:44 crc kubenswrapper[4933]: E0122 08:30:44.498919 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:30:57 crc kubenswrapper[4933]: I0122 08:30:57.493195 4933 scope.go:117] "RemoveContainer" containerID="d024738decad5f259fa929605621f730ec76d348d7c6c49f2ca52c4e6c41f29e" Jan 22 08:30:57 crc kubenswrapper[4933]: E0122 08:30:57.495332 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:31:10 crc kubenswrapper[4933]: I0122 08:31:10.493446 4933 scope.go:117] "RemoveContainer" containerID="d024738decad5f259fa929605621f730ec76d348d7c6c49f2ca52c4e6c41f29e" Jan 22 08:31:10 crc kubenswrapper[4933]: E0122 08:31:10.495271 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:31:21 crc kubenswrapper[4933]: I0122 08:31:21.491418 4933 scope.go:117] "RemoveContainer" containerID="d024738decad5f259fa929605621f730ec76d348d7c6c49f2ca52c4e6c41f29e" Jan 22 08:31:21 crc kubenswrapper[4933]: I0122 08:31:21.838677 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-mw69z"] Jan 22 08:31:21 crc kubenswrapper[4933]: E0122 08:31:21.839607 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59227e16-6884-4e1e-8e52-258f0dcdddd5" containerName="collect-profiles" Jan 22 08:31:21 crc kubenswrapper[4933]: I0122 08:31:21.839630 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="59227e16-6884-4e1e-8e52-258f0dcdddd5" containerName="collect-profiles" Jan 22 08:31:21 crc kubenswrapper[4933]: I0122 08:31:21.839935 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="59227e16-6884-4e1e-8e52-258f0dcdddd5" containerName="collect-profiles" Jan 22 08:31:21 crc kubenswrapper[4933]: I0122 08:31:21.841877 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mw69z" Jan 22 08:31:21 crc kubenswrapper[4933]: I0122 08:31:21.887676 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mw69z"] Jan 22 08:31:21 crc kubenswrapper[4933]: I0122 08:31:21.993437 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9386f588-773e-4406-a61e-7012857252fb-utilities\") pod \"redhat-marketplace-mw69z\" (UID: \"9386f588-773e-4406-a61e-7012857252fb\") " pod="openshift-marketplace/redhat-marketplace-mw69z" Jan 22 08:31:21 crc kubenswrapper[4933]: I0122 08:31:21.993964 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h5vgx\" (UniqueName: \"kubernetes.io/projected/9386f588-773e-4406-a61e-7012857252fb-kube-api-access-h5vgx\") pod \"redhat-marketplace-mw69z\" (UID: \"9386f588-773e-4406-a61e-7012857252fb\") " pod="openshift-marketplace/redhat-marketplace-mw69z" Jan 22 08:31:21 crc kubenswrapper[4933]: I0122 08:31:21.994064 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9386f588-773e-4406-a61e-7012857252fb-catalog-content\") pod \"redhat-marketplace-mw69z\" (UID: \"9386f588-773e-4406-a61e-7012857252fb\") " pod="openshift-marketplace/redhat-marketplace-mw69z" Jan 22 08:31:22 crc kubenswrapper[4933]: I0122 08:31:22.096185 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9386f588-773e-4406-a61e-7012857252fb-catalog-content\") pod \"redhat-marketplace-mw69z\" (UID: \"9386f588-773e-4406-a61e-7012857252fb\") " pod="openshift-marketplace/redhat-marketplace-mw69z" Jan 22 08:31:22 crc kubenswrapper[4933]: I0122 08:31:22.096510 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9386f588-773e-4406-a61e-7012857252fb-utilities\") pod \"redhat-marketplace-mw69z\" (UID: \"9386f588-773e-4406-a61e-7012857252fb\") " pod="openshift-marketplace/redhat-marketplace-mw69z" Jan 22 08:31:22 crc kubenswrapper[4933]: I0122 08:31:22.096664 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h5vgx\" (UniqueName: \"kubernetes.io/projected/9386f588-773e-4406-a61e-7012857252fb-kube-api-access-h5vgx\") pod \"redhat-marketplace-mw69z\" (UID: \"9386f588-773e-4406-a61e-7012857252fb\") " pod="openshift-marketplace/redhat-marketplace-mw69z" Jan 22 08:31:22 crc kubenswrapper[4933]: I0122 08:31:22.096783 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9386f588-773e-4406-a61e-7012857252fb-catalog-content\") pod \"redhat-marketplace-mw69z\" (UID: \"9386f588-773e-4406-a61e-7012857252fb\") " pod="openshift-marketplace/redhat-marketplace-mw69z" Jan 22 08:31:22 crc kubenswrapper[4933]: I0122 08:31:22.097123 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9386f588-773e-4406-a61e-7012857252fb-utilities\") pod \"redhat-marketplace-mw69z\" (UID: \"9386f588-773e-4406-a61e-7012857252fb\") " pod="openshift-marketplace/redhat-marketplace-mw69z" Jan 22 08:31:22 crc kubenswrapper[4933]: I0122 08:31:22.130924 4933 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-h5vgx\" (UniqueName: \"kubernetes.io/projected/9386f588-773e-4406-a61e-7012857252fb-kube-api-access-h5vgx\") pod \"redhat-marketplace-mw69z\" (UID: \"9386f588-773e-4406-a61e-7012857252fb\") " pod="openshift-marketplace/redhat-marketplace-mw69z" Jan 22 08:31:22 crc kubenswrapper[4933]: I0122 08:31:22.175984 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mw69z" Jan 22 08:31:22 crc kubenswrapper[4933]: I0122 08:31:22.712204 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerStarted","Data":"cc5483118f56c2d5bcedb4f7bb24619d1dd7c18eb657447fb845206c794d2671"} Jan 22 08:31:22 crc kubenswrapper[4933]: I0122 08:31:22.725541 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mw69z"] Jan 22 08:31:23 crc kubenswrapper[4933]: I0122 08:31:23.733048 4933 generic.go:334] "Generic (PLEG): container finished" podID="9386f588-773e-4406-a61e-7012857252fb" containerID="3626b19b5f17e5c13f5ff2cd684f9bf3607d8bdbe445f6218cf1f0ec054a5987" exitCode=0 Jan 22 08:31:23 crc kubenswrapper[4933]: I0122 08:31:23.733678 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mw69z" event={"ID":"9386f588-773e-4406-a61e-7012857252fb","Type":"ContainerDied","Data":"3626b19b5f17e5c13f5ff2cd684f9bf3607d8bdbe445f6218cf1f0ec054a5987"} Jan 22 08:31:23 crc kubenswrapper[4933]: I0122 08:31:23.733709 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mw69z" event={"ID":"9386f588-773e-4406-a61e-7012857252fb","Type":"ContainerStarted","Data":"74d718b85b8ed605124619ef93418758efe0de1072af251525aad9cd6369ee96"} Jan 22 08:31:23 crc kubenswrapper[4933]: I0122 08:31:23.737981 4933 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 08:31:24 crc kubenswrapper[4933]: I0122 08:31:24.749808 4933 generic.go:334] "Generic (PLEG): container finished" podID="9386f588-773e-4406-a61e-7012857252fb" containerID="abbf0253b6ceb18bb1c706f11e1c238957656de7cf288a8ce38deafec7843c89" exitCode=0 Jan 22 08:31:24 crc kubenswrapper[4933]: I0122 08:31:24.749904 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mw69z" event={"ID":"9386f588-773e-4406-a61e-7012857252fb","Type":"ContainerDied","Data":"abbf0253b6ceb18bb1c706f11e1c238957656de7cf288a8ce38deafec7843c89"} Jan 22 08:31:25 crc kubenswrapper[4933]: I0122 08:31:25.760891 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mw69z" event={"ID":"9386f588-773e-4406-a61e-7012857252fb","Type":"ContainerStarted","Data":"bf7a5a42245cd3aae8b2789391fd63cbd30833ec3020bef2364ea440bbdfd14b"} Jan 22 08:31:25 crc kubenswrapper[4933]: I0122 08:31:25.785541 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-mw69z" podStartSLOduration=3.37231703 podStartE2EDuration="4.785522424s" podCreationTimestamp="2026-01-22 08:31:21 +0000 UTC" firstStartedPulling="2026-01-22 08:31:23.737583226 +0000 UTC m=+9931.574708619" lastFinishedPulling="2026-01-22 08:31:25.15078866 +0000 UTC m=+9932.987914013" observedRunningTime="2026-01-22 08:31:25.777725335 +0000 UTC m=+9933.614850688" watchObservedRunningTime="2026-01-22 08:31:25.785522424 
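[editor's note] The latency entry above carries two numbers: podStartE2EDuration is observedRunningTime minus podCreationTimestamp, and podStartSLOduration is that same interval minus the image-pull window (firstStartedPulling to lastFinishedPulling), which is why the earlier collect-profiles entry, with zero-valued pull timestamps, had the two equal. The mw69z values check out; a small Go sketch redoing the arithmetic from the logged wall-clock timestamps (a reading aid inferred from the values, not the tracker's literal code):

package main

import (
	"fmt"
	"time"
)

func main() {
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	parse := func(s string) time.Time {
		t, err := time.Parse(layout, s)
		if err != nil {
			panic(err)
		}
		return t
	}
	created := parse("2026-01-22 08:31:21 +0000 UTC")
	firstPull := parse("2026-01-22 08:31:23.737583226 +0000 UTC")
	lastPull := parse("2026-01-22 08:31:25.15078866 +0000 UTC")
	running := parse("2026-01-22 08:31:25.785522424 +0000 UTC")

	// e2e reproduces the logged podStartE2EDuration exactly; slo lands
	// within ~40ns of the logged podStartSLOduration because the tracker
	// subtracts the pull window using the monotonic (m=+...) readings.
	e2e := running.Sub(created)
	slo := e2e - lastPull.Sub(firstPull)
	fmt.Println(e2e, slo) // 4.785522424s 3.37231699s
}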
Jan 22 08:31:32 crc kubenswrapper[4933]: I0122 08:31:32.176699 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-mw69z"
Jan 22 08:31:32 crc kubenswrapper[4933]: I0122 08:31:32.177470 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-mw69z"
Jan 22 08:31:32 crc kubenswrapper[4933]: I0122 08:31:32.266941 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-mw69z"
Jan 22 08:31:32 crc kubenswrapper[4933]: I0122 08:31:32.902300 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-mw69z"
Jan 22 08:31:32 crc kubenswrapper[4933]: I0122 08:31:32.989930 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mw69z"]
Jan 22 08:31:34 crc kubenswrapper[4933]: I0122 08:31:34.877877 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-mw69z" podUID="9386f588-773e-4406-a61e-7012857252fb" containerName="registry-server" containerID="cri-o://bf7a5a42245cd3aae8b2789391fd63cbd30833ec3020bef2364ea440bbdfd14b" gracePeriod=2
Jan 22 08:31:35 crc kubenswrapper[4933]: I0122 08:31:35.886653 4933 generic.go:334] "Generic (PLEG): container finished" podID="9386f588-773e-4406-a61e-7012857252fb" containerID="bf7a5a42245cd3aae8b2789391fd63cbd30833ec3020bef2364ea440bbdfd14b" exitCode=0
Jan 22 08:31:35 crc kubenswrapper[4933]: I0122 08:31:35.886923 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mw69z" event={"ID":"9386f588-773e-4406-a61e-7012857252fb","Type":"ContainerDied","Data":"bf7a5a42245cd3aae8b2789391fd63cbd30833ec3020bef2364ea440bbdfd14b"}
Jan 22 08:31:36 crc kubenswrapper[4933]: I0122 08:31:36.458254 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mw69z"
Jan 22 08:31:36 crc kubenswrapper[4933]: I0122 08:31:36.607941 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9386f588-773e-4406-a61e-7012857252fb-utilities\") pod \"9386f588-773e-4406-a61e-7012857252fb\" (UID: \"9386f588-773e-4406-a61e-7012857252fb\") "
Jan 22 08:31:36 crc kubenswrapper[4933]: I0122 08:31:36.609536 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9386f588-773e-4406-a61e-7012857252fb-catalog-content\") pod \"9386f588-773e-4406-a61e-7012857252fb\" (UID: \"9386f588-773e-4406-a61e-7012857252fb\") "
Jan 22 08:31:36 crc kubenswrapper[4933]: I0122 08:31:36.609649 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h5vgx\" (UniqueName: \"kubernetes.io/projected/9386f588-773e-4406-a61e-7012857252fb-kube-api-access-h5vgx\") pod \"9386f588-773e-4406-a61e-7012857252fb\" (UID: \"9386f588-773e-4406-a61e-7012857252fb\") "
Jan 22 08:31:36 crc kubenswrapper[4933]: I0122 08:31:36.612125 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9386f588-773e-4406-a61e-7012857252fb-utilities" (OuterVolumeSpecName: "utilities") pod "9386f588-773e-4406-a61e-7012857252fb" (UID: "9386f588-773e-4406-a61e-7012857252fb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
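[editor's note] "Killing container with a grace period ... gracePeriod=2" means the runtime gets two seconds between SIGTERM and SIGKILL before the registry-server is forced down. A toy Go sketch of that contract, assuming a Unix platform; this stands in for what CRI-O does, it is not the runtime's actual code:

package main

import (
	"fmt"
	"os"
	"os/exec"
	"syscall"
	"time"
)

// stopWithGrace sends SIGTERM, waits up to grace for the process to exit,
// then escalates to SIGKILL, mirroring the gracePeriod=2 behaviour above.
func stopWithGrace(p *os.Process, grace time.Duration, exited <-chan struct{}) {
	_ = p.Signal(syscall.SIGTERM)
	select {
	case <-exited:
		fmt.Println("exited within grace period")
	case <-time.After(grace):
		_ = p.Kill() // SIGKILL
		fmt.Println("escalated to SIGKILL")
	}
}

func main() {
	cmd := exec.Command("sleep", "30") // stand-in for the registry-server process
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	exited := make(chan struct{})
	go func() { _ = cmd.Wait(); close(exited) }()
	stopWithGrace(cmd.Process, 2*time.Second, exited)
}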
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 08:31:36 crc kubenswrapper[4933]: I0122 08:31:36.618618 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9386f588-773e-4406-a61e-7012857252fb-kube-api-access-h5vgx" (OuterVolumeSpecName: "kube-api-access-h5vgx") pod "9386f588-773e-4406-a61e-7012857252fb" (UID: "9386f588-773e-4406-a61e-7012857252fb"). InnerVolumeSpecName "kube-api-access-h5vgx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 08:31:36 crc kubenswrapper[4933]: I0122 08:31:36.645202 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9386f588-773e-4406-a61e-7012857252fb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9386f588-773e-4406-a61e-7012857252fb" (UID: "9386f588-773e-4406-a61e-7012857252fb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 08:31:36 crc kubenswrapper[4933]: I0122 08:31:36.712122 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9386f588-773e-4406-a61e-7012857252fb-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 08:31:36 crc kubenswrapper[4933]: I0122 08:31:36.712356 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h5vgx\" (UniqueName: \"kubernetes.io/projected/9386f588-773e-4406-a61e-7012857252fb-kube-api-access-h5vgx\") on node \"crc\" DevicePath \"\"" Jan 22 08:31:36 crc kubenswrapper[4933]: I0122 08:31:36.712472 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9386f588-773e-4406-a61e-7012857252fb-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 08:31:36 crc kubenswrapper[4933]: I0122 08:31:36.910700 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mw69z" event={"ID":"9386f588-773e-4406-a61e-7012857252fb","Type":"ContainerDied","Data":"74d718b85b8ed605124619ef93418758efe0de1072af251525aad9cd6369ee96"} Jan 22 08:31:36 crc kubenswrapper[4933]: I0122 08:31:36.910787 4933 scope.go:117] "RemoveContainer" containerID="bf7a5a42245cd3aae8b2789391fd63cbd30833ec3020bef2364ea440bbdfd14b" Jan 22 08:31:36 crc kubenswrapper[4933]: I0122 08:31:36.912373 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mw69z" Jan 22 08:31:36 crc kubenswrapper[4933]: I0122 08:31:36.978262 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mw69z"] Jan 22 08:31:36 crc kubenswrapper[4933]: I0122 08:31:36.985558 4933 scope.go:117] "RemoveContainer" containerID="abbf0253b6ceb18bb1c706f11e1c238957656de7cf288a8ce38deafec7843c89" Jan 22 08:31:36 crc kubenswrapper[4933]: I0122 08:31:36.989555 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-mw69z"] Jan 22 08:31:37 crc kubenswrapper[4933]: I0122 08:31:37.012605 4933 scope.go:117] "RemoveContainer" containerID="3626b19b5f17e5c13f5ff2cd684f9bf3607d8bdbe445f6218cf1f0ec054a5987" Jan 22 08:31:38 crc kubenswrapper[4933]: I0122 08:31:38.506586 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9386f588-773e-4406-a61e-7012857252fb" path="/var/lib/kubelet/pods/9386f588-773e-4406-a61e-7012857252fb/volumes" Jan 22 08:31:53 crc kubenswrapper[4933]: I0122 08:31:53.179127 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-shpjb"] Jan 22 08:31:53 crc kubenswrapper[4933]: E0122 08:31:53.187304 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9386f588-773e-4406-a61e-7012857252fb" containerName="extract-utilities" Jan 22 08:31:53 crc kubenswrapper[4933]: I0122 08:31:53.187347 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="9386f588-773e-4406-a61e-7012857252fb" containerName="extract-utilities" Jan 22 08:31:53 crc kubenswrapper[4933]: E0122 08:31:53.194198 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9386f588-773e-4406-a61e-7012857252fb" containerName="registry-server" Jan 22 08:31:53 crc kubenswrapper[4933]: I0122 08:31:53.194244 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="9386f588-773e-4406-a61e-7012857252fb" containerName="registry-server" Jan 22 08:31:53 crc kubenswrapper[4933]: E0122 08:31:53.194289 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9386f588-773e-4406-a61e-7012857252fb" containerName="extract-content" Jan 22 08:31:53 crc kubenswrapper[4933]: I0122 08:31:53.194300 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="9386f588-773e-4406-a61e-7012857252fb" containerName="extract-content" Jan 22 08:31:53 crc kubenswrapper[4933]: I0122 08:31:53.198327 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="9386f588-773e-4406-a61e-7012857252fb" containerName="registry-server" Jan 22 08:31:53 crc kubenswrapper[4933]: I0122 08:31:53.216872 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-shpjb"] Jan 22 08:31:53 crc kubenswrapper[4933]: I0122 08:31:53.217618 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-shpjb" Jan 22 08:31:53 crc kubenswrapper[4933]: I0122 08:31:53.279214 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fca5696-a167-43bb-93ad-75fb4096c81c-utilities\") pod \"certified-operators-shpjb\" (UID: \"6fca5696-a167-43bb-93ad-75fb4096c81c\") " pod="openshift-marketplace/certified-operators-shpjb" Jan 22 08:31:53 crc kubenswrapper[4933]: I0122 08:31:53.279267 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-thstx\" (UniqueName: \"kubernetes.io/projected/6fca5696-a167-43bb-93ad-75fb4096c81c-kube-api-access-thstx\") pod \"certified-operators-shpjb\" (UID: \"6fca5696-a167-43bb-93ad-75fb4096c81c\") " pod="openshift-marketplace/certified-operators-shpjb" Jan 22 08:31:53 crc kubenswrapper[4933]: I0122 08:31:53.279618 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fca5696-a167-43bb-93ad-75fb4096c81c-catalog-content\") pod \"certified-operators-shpjb\" (UID: \"6fca5696-a167-43bb-93ad-75fb4096c81c\") " pod="openshift-marketplace/certified-operators-shpjb" Jan 22 08:31:53 crc kubenswrapper[4933]: I0122 08:31:53.381897 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fca5696-a167-43bb-93ad-75fb4096c81c-catalog-content\") pod \"certified-operators-shpjb\" (UID: \"6fca5696-a167-43bb-93ad-75fb4096c81c\") " pod="openshift-marketplace/certified-operators-shpjb" Jan 22 08:31:53 crc kubenswrapper[4933]: I0122 08:31:53.382220 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fca5696-a167-43bb-93ad-75fb4096c81c-utilities\") pod \"certified-operators-shpjb\" (UID: \"6fca5696-a167-43bb-93ad-75fb4096c81c\") " pod="openshift-marketplace/certified-operators-shpjb" Jan 22 08:31:53 crc kubenswrapper[4933]: I0122 08:31:53.382283 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-thstx\" (UniqueName: \"kubernetes.io/projected/6fca5696-a167-43bb-93ad-75fb4096c81c-kube-api-access-thstx\") pod \"certified-operators-shpjb\" (UID: \"6fca5696-a167-43bb-93ad-75fb4096c81c\") " pod="openshift-marketplace/certified-operators-shpjb" Jan 22 08:31:53 crc kubenswrapper[4933]: I0122 08:31:53.383691 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fca5696-a167-43bb-93ad-75fb4096c81c-catalog-content\") pod \"certified-operators-shpjb\" (UID: \"6fca5696-a167-43bb-93ad-75fb4096c81c\") " pod="openshift-marketplace/certified-operators-shpjb" Jan 22 08:31:53 crc kubenswrapper[4933]: I0122 08:31:53.384142 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fca5696-a167-43bb-93ad-75fb4096c81c-utilities\") pod \"certified-operators-shpjb\" (UID: \"6fca5696-a167-43bb-93ad-75fb4096c81c\") " pod="openshift-marketplace/certified-operators-shpjb" Jan 22 08:31:53 crc kubenswrapper[4933]: I0122 08:31:53.803174 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-thstx\" (UniqueName: \"kubernetes.io/projected/6fca5696-a167-43bb-93ad-75fb4096c81c-kube-api-access-thstx\") pod 
\"certified-operators-shpjb\" (UID: \"6fca5696-a167-43bb-93ad-75fb4096c81c\") " pod="openshift-marketplace/certified-operators-shpjb" Jan 22 08:31:53 crc kubenswrapper[4933]: I0122 08:31:53.846810 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-shpjb" Jan 22 08:31:54 crc kubenswrapper[4933]: I0122 08:31:54.466673 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-shpjb"] Jan 22 08:31:55 crc kubenswrapper[4933]: I0122 08:31:55.212197 4933 generic.go:334] "Generic (PLEG): container finished" podID="6fca5696-a167-43bb-93ad-75fb4096c81c" containerID="8d8c8e75c4fbdd64ba05fdc92e98891ac9dffefac61f433682728ae19467311d" exitCode=0 Jan 22 08:31:55 crc kubenswrapper[4933]: I0122 08:31:55.212249 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-shpjb" event={"ID":"6fca5696-a167-43bb-93ad-75fb4096c81c","Type":"ContainerDied","Data":"8d8c8e75c4fbdd64ba05fdc92e98891ac9dffefac61f433682728ae19467311d"} Jan 22 08:31:55 crc kubenswrapper[4933]: I0122 08:31:55.212557 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-shpjb" event={"ID":"6fca5696-a167-43bb-93ad-75fb4096c81c","Type":"ContainerStarted","Data":"b02b960c3b1b0cd78b55ac94f29442fb5a47db884d82e7b1033206bbb2c348c4"} Jan 22 08:31:57 crc kubenswrapper[4933]: I0122 08:31:57.232664 4933 generic.go:334] "Generic (PLEG): container finished" podID="6fca5696-a167-43bb-93ad-75fb4096c81c" containerID="cf9345f773b4dc42cd51b9621a04769a54277595f832ce62a5e0fb67a16315fb" exitCode=0 Jan 22 08:31:57 crc kubenswrapper[4933]: I0122 08:31:57.232800 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-shpjb" event={"ID":"6fca5696-a167-43bb-93ad-75fb4096c81c","Type":"ContainerDied","Data":"cf9345f773b4dc42cd51b9621a04769a54277595f832ce62a5e0fb67a16315fb"} Jan 22 08:31:58 crc kubenswrapper[4933]: I0122 08:31:58.252855 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-shpjb" event={"ID":"6fca5696-a167-43bb-93ad-75fb4096c81c","Type":"ContainerStarted","Data":"99488013afbb42665d8ec4e5d4c05caa435b63dce3bcd33a621330013e6e606f"} Jan 22 08:31:58 crc kubenswrapper[4933]: I0122 08:31:58.282261 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-shpjb" podStartSLOduration=2.840303669 podStartE2EDuration="5.282111324s" podCreationTimestamp="2026-01-22 08:31:53 +0000 UTC" firstStartedPulling="2026-01-22 08:31:55.214188314 +0000 UTC m=+9963.051313657" lastFinishedPulling="2026-01-22 08:31:57.655995919 +0000 UTC m=+9965.493121312" observedRunningTime="2026-01-22 08:31:58.281351455 +0000 UTC m=+9966.118476828" watchObservedRunningTime="2026-01-22 08:31:58.282111324 +0000 UTC m=+9966.119236697" Jan 22 08:32:03 crc kubenswrapper[4933]: I0122 08:32:03.847872 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-shpjb" Jan 22 08:32:03 crc kubenswrapper[4933]: I0122 08:32:03.848459 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-shpjb" Jan 22 08:32:03 crc kubenswrapper[4933]: I0122 08:32:03.925716 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-shpjb" Jan 22 08:32:04 crc kubenswrapper[4933]: I0122 
Jan 22 08:32:04 crc kubenswrapper[4933]: I0122 08:32:04.458730 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-shpjb"]
Jan 22 08:32:06 crc kubenswrapper[4933]: I0122 08:32:06.339195 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-shpjb" podUID="6fca5696-a167-43bb-93ad-75fb4096c81c" containerName="registry-server" containerID="cri-o://99488013afbb42665d8ec4e5d4c05caa435b63dce3bcd33a621330013e6e606f" gracePeriod=2
Jan 22 08:32:06 crc kubenswrapper[4933]: I0122 08:32:06.964390 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-shpjb"
Jan 22 08:32:07 crc kubenswrapper[4933]: I0122 08:32:07.140585 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fca5696-a167-43bb-93ad-75fb4096c81c-catalog-content\") pod \"6fca5696-a167-43bb-93ad-75fb4096c81c\" (UID: \"6fca5696-a167-43bb-93ad-75fb4096c81c\") "
Jan 22 08:32:07 crc kubenswrapper[4933]: I0122 08:32:07.140900 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-thstx\" (UniqueName: \"kubernetes.io/projected/6fca5696-a167-43bb-93ad-75fb4096c81c-kube-api-access-thstx\") pod \"6fca5696-a167-43bb-93ad-75fb4096c81c\" (UID: \"6fca5696-a167-43bb-93ad-75fb4096c81c\") "
Jan 22 08:32:07 crc kubenswrapper[4933]: I0122 08:32:07.141126 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fca5696-a167-43bb-93ad-75fb4096c81c-utilities\") pod \"6fca5696-a167-43bb-93ad-75fb4096c81c\" (UID: \"6fca5696-a167-43bb-93ad-75fb4096c81c\") "
Jan 22 08:32:07 crc kubenswrapper[4933]: I0122 08:32:07.143966 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6fca5696-a167-43bb-93ad-75fb4096c81c-utilities" (OuterVolumeSpecName: "utilities") pod "6fca5696-a167-43bb-93ad-75fb4096c81c" (UID: "6fca5696-a167-43bb-93ad-75fb4096c81c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 08:32:07 crc kubenswrapper[4933]: I0122 08:32:07.150829 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6fca5696-a167-43bb-93ad-75fb4096c81c-kube-api-access-thstx" (OuterVolumeSpecName: "kube-api-access-thstx") pod "6fca5696-a167-43bb-93ad-75fb4096c81c" (UID: "6fca5696-a167-43bb-93ad-75fb4096c81c"). InnerVolumeSpecName "kube-api-access-thstx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 08:32:07 crc kubenswrapper[4933]: I0122 08:32:07.195982 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6fca5696-a167-43bb-93ad-75fb4096c81c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6fca5696-a167-43bb-93ad-75fb4096c81c" (UID: "6fca5696-a167-43bb-93ad-75fb4096c81c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 08:32:07 crc kubenswrapper[4933]: I0122 08:32:07.244551 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6fca5696-a167-43bb-93ad-75fb4096c81c-utilities\") on node \"crc\" DevicePath \"\""
Jan 22 08:32:07 crc kubenswrapper[4933]: I0122 08:32:07.244586 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6fca5696-a167-43bb-93ad-75fb4096c81c-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 22 08:32:07 crc kubenswrapper[4933]: I0122 08:32:07.244602 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-thstx\" (UniqueName: \"kubernetes.io/projected/6fca5696-a167-43bb-93ad-75fb4096c81c-kube-api-access-thstx\") on node \"crc\" DevicePath \"\""
Jan 22 08:32:07 crc kubenswrapper[4933]: I0122 08:32:07.356006 4933 generic.go:334] "Generic (PLEG): container finished" podID="6fca5696-a167-43bb-93ad-75fb4096c81c" containerID="99488013afbb42665d8ec4e5d4c05caa435b63dce3bcd33a621330013e6e606f" exitCode=0
Jan 22 08:32:07 crc kubenswrapper[4933]: I0122 08:32:07.356151 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-shpjb"
Jan 22 08:32:07 crc kubenswrapper[4933]: I0122 08:32:07.356162 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-shpjb" event={"ID":"6fca5696-a167-43bb-93ad-75fb4096c81c","Type":"ContainerDied","Data":"99488013afbb42665d8ec4e5d4c05caa435b63dce3bcd33a621330013e6e606f"}
Jan 22 08:32:07 crc kubenswrapper[4933]: I0122 08:32:07.356668 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-shpjb" event={"ID":"6fca5696-a167-43bb-93ad-75fb4096c81c","Type":"ContainerDied","Data":"b02b960c3b1b0cd78b55ac94f29442fb5a47db884d82e7b1033206bbb2c348c4"}
Jan 22 08:32:07 crc kubenswrapper[4933]: I0122 08:32:07.356716 4933 scope.go:117] "RemoveContainer" containerID="99488013afbb42665d8ec4e5d4c05caa435b63dce3bcd33a621330013e6e606f"
Jan 22 08:32:07 crc kubenswrapper[4933]: I0122 08:32:07.398168 4933 scope.go:117] "RemoveContainer" containerID="cf9345f773b4dc42cd51b9621a04769a54277595f832ce62a5e0fb67a16315fb"
Jan 22 08:32:07 crc kubenswrapper[4933]: I0122 08:32:07.425583 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-shpjb"]
Jan 22 08:32:07 crc kubenswrapper[4933]: I0122 08:32:07.446690 4933 scope.go:117] "RemoveContainer" containerID="8d8c8e75c4fbdd64ba05fdc92e98891ac9dffefac61f433682728ae19467311d"
Jan 22 08:32:07 crc kubenswrapper[4933]: I0122 08:32:07.449880 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-shpjb"]
Jan 22 08:32:07 crc kubenswrapper[4933]: I0122 08:32:07.491680 4933 scope.go:117] "RemoveContainer" containerID="99488013afbb42665d8ec4e5d4c05caa435b63dce3bcd33a621330013e6e606f"
Jan 22 08:32:07 crc kubenswrapper[4933]: E0122 08:32:07.492029 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"99488013afbb42665d8ec4e5d4c05caa435b63dce3bcd33a621330013e6e606f\": container with ID starting with 99488013afbb42665d8ec4e5d4c05caa435b63dce3bcd33a621330013e6e606f not found: ID does not exist" containerID="99488013afbb42665d8ec4e5d4c05caa435b63dce3bcd33a621330013e6e606f"
Jan 22 08:32:07 crc kubenswrapper[4933]: I0122 08:32:07.492062 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"99488013afbb42665d8ec4e5d4c05caa435b63dce3bcd33a621330013e6e606f"} err="failed to get container status \"99488013afbb42665d8ec4e5d4c05caa435b63dce3bcd33a621330013e6e606f\": rpc error: code = NotFound desc = could not find container \"99488013afbb42665d8ec4e5d4c05caa435b63dce3bcd33a621330013e6e606f\": container with ID starting with 99488013afbb42665d8ec4e5d4c05caa435b63dce3bcd33a621330013e6e606f not found: ID does not exist"
Jan 22 08:32:07 crc kubenswrapper[4933]: I0122 08:32:07.492081 4933 scope.go:117] "RemoveContainer" containerID="cf9345f773b4dc42cd51b9621a04769a54277595f832ce62a5e0fb67a16315fb"
Jan 22 08:32:07 crc kubenswrapper[4933]: E0122 08:32:07.492288 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cf9345f773b4dc42cd51b9621a04769a54277595f832ce62a5e0fb67a16315fb\": container with ID starting with cf9345f773b4dc42cd51b9621a04769a54277595f832ce62a5e0fb67a16315fb not found: ID does not exist" containerID="cf9345f773b4dc42cd51b9621a04769a54277595f832ce62a5e0fb67a16315fb"
Jan 22 08:32:07 crc kubenswrapper[4933]: I0122 08:32:07.492303 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf9345f773b4dc42cd51b9621a04769a54277595f832ce62a5e0fb67a16315fb"} err="failed to get container status \"cf9345f773b4dc42cd51b9621a04769a54277595f832ce62a5e0fb67a16315fb\": rpc error: code = NotFound desc = could not find container \"cf9345f773b4dc42cd51b9621a04769a54277595f832ce62a5e0fb67a16315fb\": container with ID starting with cf9345f773b4dc42cd51b9621a04769a54277595f832ce62a5e0fb67a16315fb not found: ID does not exist"
Jan 22 08:32:07 crc kubenswrapper[4933]: I0122 08:32:07.492317 4933 scope.go:117] "RemoveContainer" containerID="8d8c8e75c4fbdd64ba05fdc92e98891ac9dffefac61f433682728ae19467311d"
Jan 22 08:32:07 crc kubenswrapper[4933]: E0122 08:32:07.492486 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8d8c8e75c4fbdd64ba05fdc92e98891ac9dffefac61f433682728ae19467311d\": container with ID starting with 8d8c8e75c4fbdd64ba05fdc92e98891ac9dffefac61f433682728ae19467311d not found: ID does not exist" containerID="8d8c8e75c4fbdd64ba05fdc92e98891ac9dffefac61f433682728ae19467311d"
Jan 22 08:32:07 crc kubenswrapper[4933]: I0122 08:32:07.492520 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d8c8e75c4fbdd64ba05fdc92e98891ac9dffefac61f433682728ae19467311d"} err="failed to get container status \"8d8c8e75c4fbdd64ba05fdc92e98891ac9dffefac61f433682728ae19467311d\": rpc error: code = NotFound desc = could not find container \"8d8c8e75c4fbdd64ba05fdc92e98891ac9dffefac61f433682728ae19467311d\": container with ID starting with 8d8c8e75c4fbdd64ba05fdc92e98891ac9dffefac61f433682728ae19467311d not found: ID does not exist"
Jan 22 08:32:08 crc kubenswrapper[4933]: I0122 08:32:08.504036 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6fca5696-a167-43bb-93ad-75fb4096c81c" path="/var/lib/kubelet/pods/6fca5696-a167-43bb-93ad-75fb4096c81c/volumes"
Jan 22 08:33:13 crc kubenswrapper[4933]: I0122 08:33:13.241562 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-2mvx2"]
Jan 22 08:33:13 crc kubenswrapper[4933]: E0122 08:33:13.242676 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fca5696-a167-43bb-93ad-75fb4096c81c" containerName="extract-content"
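[editor's note] The error triples above ("RemoveContainer" / "ContainerStatus from runtime service failed" / "DeleteContainer returned error") are a benign race: the API-side REMOVE already deleted the containers, so the follow-up status lookup gets gRPC NotFound from CRI-O and the deletor logs it and moves on. A sketch of the tolerant pattern using gRPC status codes; this is the shape of the handling, not the kubelet's actual code path:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// removeContainer treats gRPC NotFound from the runtime as success:
// if the container is already gone there is nothing left to delete,
// which is exactly how the log above shrugs off these errors.
func removeContainer(id string, containerStatus func(string) error) error {
	if err := containerStatus(id); err != nil {
		if status.Code(err) == codes.NotFound {
			fmt.Printf("container %s already removed; ignoring\n", id)
			return nil
		}
		return fmt.Errorf("ContainerStatus failed: %w", err)
	}
	// Real code would issue the actual RemoveContainer CRI call here.
	return nil
}

func main() {
	notFound := func(id string) error {
		return status.Error(codes.NotFound, "could not find container "+id)
	}
	fmt.Println(removeContainer("99488013afbb", notFound)) // prints <nil>
}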
podUID="6fca5696-a167-43bb-93ad-75fb4096c81c" containerName="extract-content" Jan 22 08:33:13 crc kubenswrapper[4933]: I0122 08:33:13.242692 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fca5696-a167-43bb-93ad-75fb4096c81c" containerName="extract-content" Jan 22 08:33:13 crc kubenswrapper[4933]: E0122 08:33:13.242710 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fca5696-a167-43bb-93ad-75fb4096c81c" containerName="extract-utilities" Jan 22 08:33:13 crc kubenswrapper[4933]: I0122 08:33:13.242719 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fca5696-a167-43bb-93ad-75fb4096c81c" containerName="extract-utilities" Jan 22 08:33:13 crc kubenswrapper[4933]: E0122 08:33:13.242744 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fca5696-a167-43bb-93ad-75fb4096c81c" containerName="registry-server" Jan 22 08:33:13 crc kubenswrapper[4933]: I0122 08:33:13.242753 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fca5696-a167-43bb-93ad-75fb4096c81c" containerName="registry-server" Jan 22 08:33:13 crc kubenswrapper[4933]: I0122 08:33:13.243027 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fca5696-a167-43bb-93ad-75fb4096c81c" containerName="registry-server" Jan 22 08:33:13 crc kubenswrapper[4933]: I0122 08:33:13.244812 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2mvx2" Jan 22 08:33:13 crc kubenswrapper[4933]: I0122 08:33:13.278441 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2mvx2"] Jan 22 08:33:13 crc kubenswrapper[4933]: I0122 08:33:13.442786 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d99f47c4-f7e0-498a-89f6-1775c3ca14fa-utilities\") pod \"redhat-operators-2mvx2\" (UID: \"d99f47c4-f7e0-498a-89f6-1775c3ca14fa\") " pod="openshift-marketplace/redhat-operators-2mvx2" Jan 22 08:33:13 crc kubenswrapper[4933]: I0122 08:33:13.443610 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d99f47c4-f7e0-498a-89f6-1775c3ca14fa-catalog-content\") pod \"redhat-operators-2mvx2\" (UID: \"d99f47c4-f7e0-498a-89f6-1775c3ca14fa\") " pod="openshift-marketplace/redhat-operators-2mvx2" Jan 22 08:33:13 crc kubenswrapper[4933]: I0122 08:33:13.443711 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gsc45\" (UniqueName: \"kubernetes.io/projected/d99f47c4-f7e0-498a-89f6-1775c3ca14fa-kube-api-access-gsc45\") pod \"redhat-operators-2mvx2\" (UID: \"d99f47c4-f7e0-498a-89f6-1775c3ca14fa\") " pod="openshift-marketplace/redhat-operators-2mvx2" Jan 22 08:33:13 crc kubenswrapper[4933]: I0122 08:33:13.546262 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d99f47c4-f7e0-498a-89f6-1775c3ca14fa-utilities\") pod \"redhat-operators-2mvx2\" (UID: \"d99f47c4-f7e0-498a-89f6-1775c3ca14fa\") " pod="openshift-marketplace/redhat-operators-2mvx2" Jan 22 08:33:13 crc kubenswrapper[4933]: I0122 08:33:13.546484 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d99f47c4-f7e0-498a-89f6-1775c3ca14fa-catalog-content\") pod \"redhat-operators-2mvx2\" (UID: 
\"d99f47c4-f7e0-498a-89f6-1775c3ca14fa\") " pod="openshift-marketplace/redhat-operators-2mvx2" Jan 22 08:33:13 crc kubenswrapper[4933]: I0122 08:33:13.546554 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gsc45\" (UniqueName: \"kubernetes.io/projected/d99f47c4-f7e0-498a-89f6-1775c3ca14fa-kube-api-access-gsc45\") pod \"redhat-operators-2mvx2\" (UID: \"d99f47c4-f7e0-498a-89f6-1775c3ca14fa\") " pod="openshift-marketplace/redhat-operators-2mvx2" Jan 22 08:33:13 crc kubenswrapper[4933]: I0122 08:33:13.547112 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d99f47c4-f7e0-498a-89f6-1775c3ca14fa-utilities\") pod \"redhat-operators-2mvx2\" (UID: \"d99f47c4-f7e0-498a-89f6-1775c3ca14fa\") " pod="openshift-marketplace/redhat-operators-2mvx2" Jan 22 08:33:13 crc kubenswrapper[4933]: I0122 08:33:13.547645 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d99f47c4-f7e0-498a-89f6-1775c3ca14fa-catalog-content\") pod \"redhat-operators-2mvx2\" (UID: \"d99f47c4-f7e0-498a-89f6-1775c3ca14fa\") " pod="openshift-marketplace/redhat-operators-2mvx2" Jan 22 08:33:13 crc kubenswrapper[4933]: I0122 08:33:13.567923 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gsc45\" (UniqueName: \"kubernetes.io/projected/d99f47c4-f7e0-498a-89f6-1775c3ca14fa-kube-api-access-gsc45\") pod \"redhat-operators-2mvx2\" (UID: \"d99f47c4-f7e0-498a-89f6-1775c3ca14fa\") " pod="openshift-marketplace/redhat-operators-2mvx2" Jan 22 08:33:13 crc kubenswrapper[4933]: I0122 08:33:13.590581 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2mvx2" Jan 22 08:33:14 crc kubenswrapper[4933]: I0122 08:33:14.120778 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2mvx2"] Jan 22 08:33:14 crc kubenswrapper[4933]: W0122 08:33:14.121940 4933 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd99f47c4_f7e0_498a_89f6_1775c3ca14fa.slice/crio-3fa9154c9ec99bbb087a20cb74a0d7e3023ee288513b46942bf210d9b79003a6 WatchSource:0}: Error finding container 3fa9154c9ec99bbb087a20cb74a0d7e3023ee288513b46942bf210d9b79003a6: Status 404 returned error can't find the container with id 3fa9154c9ec99bbb087a20cb74a0d7e3023ee288513b46942bf210d9b79003a6 Jan 22 08:33:14 crc kubenswrapper[4933]: I0122 08:33:14.189787 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2mvx2" event={"ID":"d99f47c4-f7e0-498a-89f6-1775c3ca14fa","Type":"ContainerStarted","Data":"3fa9154c9ec99bbb087a20cb74a0d7e3023ee288513b46942bf210d9b79003a6"} Jan 22 08:33:15 crc kubenswrapper[4933]: I0122 08:33:15.201390 4933 generic.go:334] "Generic (PLEG): container finished" podID="d99f47c4-f7e0-498a-89f6-1775c3ca14fa" containerID="b3e12944c03e5f1f6117aa288144e81510032125826b178c4c1bb3027f3c1f67" exitCode=0 Jan 22 08:33:15 crc kubenswrapper[4933]: I0122 08:33:15.201474 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2mvx2" event={"ID":"d99f47c4-f7e0-498a-89f6-1775c3ca14fa","Type":"ContainerDied","Data":"b3e12944c03e5f1f6117aa288144e81510032125826b178c4c1bb3027f3c1f67"} Jan 22 08:33:21 crc kubenswrapper[4933]: I0122 08:33:21.322096 4933 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openshift-marketplace/redhat-operators-2mvx2" event={"ID":"d99f47c4-f7e0-498a-89f6-1775c3ca14fa","Type":"ContainerStarted","Data":"9ee5f6587cecbd3171cc7364010b10052821b307fde0ca32b5ef0767410dcc25"} Jan 22 08:33:24 crc kubenswrapper[4933]: E0122 08:33:24.228318 4933 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd99f47c4_f7e0_498a_89f6_1775c3ca14fa.slice/crio-9ee5f6587cecbd3171cc7364010b10052821b307fde0ca32b5ef0767410dcc25.scope\": RecentStats: unable to find data in memory cache]" Jan 22 08:33:25 crc kubenswrapper[4933]: I0122 08:33:25.374526 4933 generic.go:334] "Generic (PLEG): container finished" podID="d99f47c4-f7e0-498a-89f6-1775c3ca14fa" containerID="9ee5f6587cecbd3171cc7364010b10052821b307fde0ca32b5ef0767410dcc25" exitCode=0 Jan 22 08:33:25 crc kubenswrapper[4933]: I0122 08:33:25.374611 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2mvx2" event={"ID":"d99f47c4-f7e0-498a-89f6-1775c3ca14fa","Type":"ContainerDied","Data":"9ee5f6587cecbd3171cc7364010b10052821b307fde0ca32b5ef0767410dcc25"} Jan 22 08:33:28 crc kubenswrapper[4933]: I0122 08:33:28.439241 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2mvx2" event={"ID":"d99f47c4-f7e0-498a-89f6-1775c3ca14fa","Type":"ContainerStarted","Data":"1d4ec38f3dcb292eab3e2723758375b5abd6409e53a2dbf8d49ee0cf2a643f9f"} Jan 22 08:33:28 crc kubenswrapper[4933]: I0122 08:33:28.493326 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-2mvx2" podStartSLOduration=3.452549942 podStartE2EDuration="15.493307896s" podCreationTimestamp="2026-01-22 08:33:13 +0000 UTC" firstStartedPulling="2026-01-22 08:33:15.20348065 +0000 UTC m=+10043.040606003" lastFinishedPulling="2026-01-22 08:33:27.244238594 +0000 UTC m=+10055.081363957" observedRunningTime="2026-01-22 08:33:28.479388588 +0000 UTC m=+10056.316514011" watchObservedRunningTime="2026-01-22 08:33:28.493307896 +0000 UTC m=+10056.330433249" Jan 22 08:33:33 crc kubenswrapper[4933]: I0122 08:33:33.591045 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-2mvx2" Jan 22 08:33:33 crc kubenswrapper[4933]: I0122 08:33:33.591439 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-2mvx2" Jan 22 08:33:34 crc kubenswrapper[4933]: I0122 08:33:34.663901 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-2mvx2" podUID="d99f47c4-f7e0-498a-89f6-1775c3ca14fa" containerName="registry-server" probeResult="failure" output=< Jan 22 08:33:34 crc kubenswrapper[4933]: timeout: failed to connect service ":50051" within 1s Jan 22 08:33:34 crc kubenswrapper[4933]: > Jan 22 08:33:40 crc kubenswrapper[4933]: I0122 08:33:40.943048 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 08:33:40 crc kubenswrapper[4933]: I0122 08:33:40.943608 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" 
containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 08:33:43 crc kubenswrapper[4933]: I0122 08:33:43.724826 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-2mvx2" Jan 22 08:33:43 crc kubenswrapper[4933]: I0122 08:33:43.778608 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-2mvx2" Jan 22 08:33:44 crc kubenswrapper[4933]: I0122 08:33:44.460140 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2mvx2"] Jan 22 08:33:45 crc kubenswrapper[4933]: I0122 08:33:45.680727 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-2mvx2" podUID="d99f47c4-f7e0-498a-89f6-1775c3ca14fa" containerName="registry-server" containerID="cri-o://1d4ec38f3dcb292eab3e2723758375b5abd6409e53a2dbf8d49ee0cf2a643f9f" gracePeriod=2 Jan 22 08:33:46 crc kubenswrapper[4933]: I0122 08:33:46.455224 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2mvx2" Jan 22 08:33:46 crc kubenswrapper[4933]: I0122 08:33:46.628438 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gsc45\" (UniqueName: \"kubernetes.io/projected/d99f47c4-f7e0-498a-89f6-1775c3ca14fa-kube-api-access-gsc45\") pod \"d99f47c4-f7e0-498a-89f6-1775c3ca14fa\" (UID: \"d99f47c4-f7e0-498a-89f6-1775c3ca14fa\") " Jan 22 08:33:46 crc kubenswrapper[4933]: I0122 08:33:46.628908 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d99f47c4-f7e0-498a-89f6-1775c3ca14fa-catalog-content\") pod \"d99f47c4-f7e0-498a-89f6-1775c3ca14fa\" (UID: \"d99f47c4-f7e0-498a-89f6-1775c3ca14fa\") " Jan 22 08:33:46 crc kubenswrapper[4933]: I0122 08:33:46.629069 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d99f47c4-f7e0-498a-89f6-1775c3ca14fa-utilities\") pod \"d99f47c4-f7e0-498a-89f6-1775c3ca14fa\" (UID: \"d99f47c4-f7e0-498a-89f6-1775c3ca14fa\") " Jan 22 08:33:46 crc kubenswrapper[4933]: I0122 08:33:46.629850 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d99f47c4-f7e0-498a-89f6-1775c3ca14fa-utilities" (OuterVolumeSpecName: "utilities") pod "d99f47c4-f7e0-498a-89f6-1775c3ca14fa" (UID: "d99f47c4-f7e0-498a-89f6-1775c3ca14fa"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 08:33:46 crc kubenswrapper[4933]: I0122 08:33:46.630190 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d99f47c4-f7e0-498a-89f6-1775c3ca14fa-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 08:33:46 crc kubenswrapper[4933]: I0122 08:33:46.637051 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d99f47c4-f7e0-498a-89f6-1775c3ca14fa-kube-api-access-gsc45" (OuterVolumeSpecName: "kube-api-access-gsc45") pod "d99f47c4-f7e0-498a-89f6-1775c3ca14fa" (UID: "d99f47c4-f7e0-498a-89f6-1775c3ca14fa"). InnerVolumeSpecName "kube-api-access-gsc45". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 08:33:46 crc kubenswrapper[4933]: I0122 08:33:46.693814 4933 generic.go:334] "Generic (PLEG): container finished" podID="d99f47c4-f7e0-498a-89f6-1775c3ca14fa" containerID="1d4ec38f3dcb292eab3e2723758375b5abd6409e53a2dbf8d49ee0cf2a643f9f" exitCode=0 Jan 22 08:33:46 crc kubenswrapper[4933]: I0122 08:33:46.693886 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2mvx2" event={"ID":"d99f47c4-f7e0-498a-89f6-1775c3ca14fa","Type":"ContainerDied","Data":"1d4ec38f3dcb292eab3e2723758375b5abd6409e53a2dbf8d49ee0cf2a643f9f"} Jan 22 08:33:46 crc kubenswrapper[4933]: I0122 08:33:46.693898 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2mvx2" Jan 22 08:33:46 crc kubenswrapper[4933]: I0122 08:33:46.693937 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2mvx2" event={"ID":"d99f47c4-f7e0-498a-89f6-1775c3ca14fa","Type":"ContainerDied","Data":"3fa9154c9ec99bbb087a20cb74a0d7e3023ee288513b46942bf210d9b79003a6"} Jan 22 08:33:46 crc kubenswrapper[4933]: I0122 08:33:46.693973 4933 scope.go:117] "RemoveContainer" containerID="1d4ec38f3dcb292eab3e2723758375b5abd6409e53a2dbf8d49ee0cf2a643f9f" Jan 22 08:33:46 crc kubenswrapper[4933]: I0122 08:33:46.727927 4933 scope.go:117] "RemoveContainer" containerID="9ee5f6587cecbd3171cc7364010b10052821b307fde0ca32b5ef0767410dcc25" Jan 22 08:33:46 crc kubenswrapper[4933]: I0122 08:33:46.731975 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gsc45\" (UniqueName: \"kubernetes.io/projected/d99f47c4-f7e0-498a-89f6-1775c3ca14fa-kube-api-access-gsc45\") on node \"crc\" DevicePath \"\"" Jan 22 08:33:46 crc kubenswrapper[4933]: I0122 08:33:46.748849 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d99f47c4-f7e0-498a-89f6-1775c3ca14fa-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d99f47c4-f7e0-498a-89f6-1775c3ca14fa" (UID: "d99f47c4-f7e0-498a-89f6-1775c3ca14fa"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 08:33:46 crc kubenswrapper[4933]: I0122 08:33:46.765708 4933 scope.go:117] "RemoveContainer" containerID="b3e12944c03e5f1f6117aa288144e81510032125826b178c4c1bb3027f3c1f67" Jan 22 08:33:46 crc kubenswrapper[4933]: I0122 08:33:46.820795 4933 scope.go:117] "RemoveContainer" containerID="1d4ec38f3dcb292eab3e2723758375b5abd6409e53a2dbf8d49ee0cf2a643f9f" Jan 22 08:33:46 crc kubenswrapper[4933]: E0122 08:33:46.821668 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1d4ec38f3dcb292eab3e2723758375b5abd6409e53a2dbf8d49ee0cf2a643f9f\": container with ID starting with 1d4ec38f3dcb292eab3e2723758375b5abd6409e53a2dbf8d49ee0cf2a643f9f not found: ID does not exist" containerID="1d4ec38f3dcb292eab3e2723758375b5abd6409e53a2dbf8d49ee0cf2a643f9f" Jan 22 08:33:46 crc kubenswrapper[4933]: I0122 08:33:46.821725 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1d4ec38f3dcb292eab3e2723758375b5abd6409e53a2dbf8d49ee0cf2a643f9f"} err="failed to get container status \"1d4ec38f3dcb292eab3e2723758375b5abd6409e53a2dbf8d49ee0cf2a643f9f\": rpc error: code = NotFound desc = could not find container \"1d4ec38f3dcb292eab3e2723758375b5abd6409e53a2dbf8d49ee0cf2a643f9f\": container with ID starting with 1d4ec38f3dcb292eab3e2723758375b5abd6409e53a2dbf8d49ee0cf2a643f9f not found: ID does not exist" Jan 22 08:33:46 crc kubenswrapper[4933]: I0122 08:33:46.821759 4933 scope.go:117] "RemoveContainer" containerID="9ee5f6587cecbd3171cc7364010b10052821b307fde0ca32b5ef0767410dcc25" Jan 22 08:33:46 crc kubenswrapper[4933]: E0122 08:33:46.822190 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ee5f6587cecbd3171cc7364010b10052821b307fde0ca32b5ef0767410dcc25\": container with ID starting with 9ee5f6587cecbd3171cc7364010b10052821b307fde0ca32b5ef0767410dcc25 not found: ID does not exist" containerID="9ee5f6587cecbd3171cc7364010b10052821b307fde0ca32b5ef0767410dcc25" Jan 22 08:33:46 crc kubenswrapper[4933]: I0122 08:33:46.822256 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ee5f6587cecbd3171cc7364010b10052821b307fde0ca32b5ef0767410dcc25"} err="failed to get container status \"9ee5f6587cecbd3171cc7364010b10052821b307fde0ca32b5ef0767410dcc25\": rpc error: code = NotFound desc = could not find container \"9ee5f6587cecbd3171cc7364010b10052821b307fde0ca32b5ef0767410dcc25\": container with ID starting with 9ee5f6587cecbd3171cc7364010b10052821b307fde0ca32b5ef0767410dcc25 not found: ID does not exist" Jan 22 08:33:46 crc kubenswrapper[4933]: I0122 08:33:46.822283 4933 scope.go:117] "RemoveContainer" containerID="b3e12944c03e5f1f6117aa288144e81510032125826b178c4c1bb3027f3c1f67" Jan 22 08:33:46 crc kubenswrapper[4933]: E0122 08:33:46.823029 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b3e12944c03e5f1f6117aa288144e81510032125826b178c4c1bb3027f3c1f67\": container with ID starting with b3e12944c03e5f1f6117aa288144e81510032125826b178c4c1bb3027f3c1f67 not found: ID does not exist" containerID="b3e12944c03e5f1f6117aa288144e81510032125826b178c4c1bb3027f3c1f67" Jan 22 08:33:46 crc kubenswrapper[4933]: I0122 08:33:46.823162 4933 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"b3e12944c03e5f1f6117aa288144e81510032125826b178c4c1bb3027f3c1f67"} err="failed to get container status \"b3e12944c03e5f1f6117aa288144e81510032125826b178c4c1bb3027f3c1f67\": rpc error: code = NotFound desc = could not find container \"b3e12944c03e5f1f6117aa288144e81510032125826b178c4c1bb3027f3c1f67\": container with ID starting with b3e12944c03e5f1f6117aa288144e81510032125826b178c4c1bb3027f3c1f67 not found: ID does not exist" Jan 22 08:33:46 crc kubenswrapper[4933]: I0122 08:33:46.834857 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d99f47c4-f7e0-498a-89f6-1775c3ca14fa-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 08:33:47 crc kubenswrapper[4933]: I0122 08:33:47.109513 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2mvx2"] Jan 22 08:33:47 crc kubenswrapper[4933]: I0122 08:33:47.125999 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-2mvx2"] Jan 22 08:33:48 crc kubenswrapper[4933]: I0122 08:33:48.503140 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d99f47c4-f7e0-498a-89f6-1775c3ca14fa" path="/var/lib/kubelet/pods/d99f47c4-f7e0-498a-89f6-1775c3ca14fa/volumes" Jan 22 08:34:10 crc kubenswrapper[4933]: I0122 08:34:10.946650 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 08:34:10 crc kubenswrapper[4933]: I0122 08:34:10.947125 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 08:34:40 crc kubenswrapper[4933]: I0122 08:34:40.943247 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 08:34:40 crc kubenswrapper[4933]: I0122 08:34:40.943658 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 08:34:40 crc kubenswrapper[4933]: I0122 08:34:40.943700 4933 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" Jan 22 08:34:40 crc kubenswrapper[4933]: I0122 08:34:40.944423 4933 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"cc5483118f56c2d5bcedb4f7bb24619d1dd7c18eb657447fb845206c794d2671"} pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 08:34:40 crc kubenswrapper[4933]: I0122 08:34:40.944468 4933 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" containerID="cri-o://cc5483118f56c2d5bcedb4f7bb24619d1dd7c18eb657447fb845206c794d2671" gracePeriod=600 Jan 22 08:34:41 crc kubenswrapper[4933]: I0122 08:34:41.402164 4933 generic.go:334] "Generic (PLEG): container finished" podID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerID="cc5483118f56c2d5bcedb4f7bb24619d1dd7c18eb657447fb845206c794d2671" exitCode=0 Jan 22 08:34:41 crc kubenswrapper[4933]: I0122 08:34:41.402261 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerDied","Data":"cc5483118f56c2d5bcedb4f7bb24619d1dd7c18eb657447fb845206c794d2671"} Jan 22 08:34:41 crc kubenswrapper[4933]: I0122 08:34:41.402741 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerStarted","Data":"63cf97f444dc53d17e48deb38656af6f61c8a7bcf242bdaf977d40fe1c5162f9"} Jan 22 08:34:41 crc kubenswrapper[4933]: I0122 08:34:41.402772 4933 scope.go:117] "RemoveContainer" containerID="d024738decad5f259fa929605621f730ec76d348d7c6c49f2ca52c4e6c41f29e" Jan 22 08:37:10 crc kubenswrapper[4933]: I0122 08:37:10.943698 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 08:37:10 crc kubenswrapper[4933]: I0122 08:37:10.944269 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 08:37:40 crc kubenswrapper[4933]: I0122 08:37:40.942613 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 08:37:40 crc kubenswrapper[4933]: I0122 08:37:40.943171 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 08:38:10 crc kubenswrapper[4933]: I0122 08:38:10.943204 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 08:38:10 crc kubenswrapper[4933]: I0122 08:38:10.943614 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" 
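The machine-config-daemon sequence above is the standard liveness-failure path: three consecutive HTTP probe failures spaced 30s apart (08:33:40, 08:34:10, 08:34:40), then the kubelet logs "failed liveness probe, will be restarted" and kills the container with the pod's 600s grace period. A probe consistent with those records, again a hedged reconstruction and not the actual manifest:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	liveness := corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{
			HTTPGet: &corev1.HTTPGetAction{
				Host: "127.0.0.1", // the GET target in the failure output
				Path: "/health",
				Port: intstr.FromInt32(8798),
			},
		},
		PeriodSeconds:    30, // failures above land 30s apart
		FailureThreshold: 3,  // third consecutive failure triggers the restart
	}
	fmt.Printf("%+v\n", liveness)
}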
probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 08:38:10 crc kubenswrapper[4933]: I0122 08:38:10.943649 4933 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" Jan 22 08:38:10 crc kubenswrapper[4933]: I0122 08:38:10.944561 4933 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"63cf97f444dc53d17e48deb38656af6f61c8a7bcf242bdaf977d40fe1c5162f9"} pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 08:38:10 crc kubenswrapper[4933]: I0122 08:38:10.944602 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" containerID="cri-o://63cf97f444dc53d17e48deb38656af6f61c8a7bcf242bdaf977d40fe1c5162f9" gracePeriod=600 Jan 22 08:38:11 crc kubenswrapper[4933]: E0122 08:38:11.700981 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:38:11 crc kubenswrapper[4933]: I0122 08:38:11.928445 4933 generic.go:334] "Generic (PLEG): container finished" podID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerID="63cf97f444dc53d17e48deb38656af6f61c8a7bcf242bdaf977d40fe1c5162f9" exitCode=0 Jan 22 08:38:11 crc kubenswrapper[4933]: I0122 08:38:11.928514 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerDied","Data":"63cf97f444dc53d17e48deb38656af6f61c8a7bcf242bdaf977d40fe1c5162f9"} Jan 22 08:38:11 crc kubenswrapper[4933]: I0122 08:38:11.928571 4933 scope.go:117] "RemoveContainer" containerID="cc5483118f56c2d5bcedb4f7bb24619d1dd7c18eb657447fb845206c794d2671" Jan 22 08:38:11 crc kubenswrapper[4933]: I0122 08:38:11.929459 4933 scope.go:117] "RemoveContainer" containerID="63cf97f444dc53d17e48deb38656af6f61c8a7bcf242bdaf977d40fe1c5162f9" Jan 22 08:38:11 crc kubenswrapper[4933]: E0122 08:38:11.929930 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:38:25 crc kubenswrapper[4933]: I0122 08:38:25.494627 4933 scope.go:117] "RemoveContainer" containerID="63cf97f444dc53d17e48deb38656af6f61c8a7bcf242bdaf977d40fe1c5162f9" Jan 22 08:38:25 crc kubenswrapper[4933]: E0122 08:38:25.495714 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:38:37 crc kubenswrapper[4933]: I0122 08:38:37.491490 4933 scope.go:117] "RemoveContainer" containerID="63cf97f444dc53d17e48deb38656af6f61c8a7bcf242bdaf977d40fe1c5162f9" Jan 22 08:38:37 crc kubenswrapper[4933]: E0122 08:38:37.492224 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:38:48 crc kubenswrapper[4933]: I0122 08:38:48.495693 4933 scope.go:117] "RemoveContainer" containerID="63cf97f444dc53d17e48deb38656af6f61c8a7bcf242bdaf977d40fe1c5162f9" Jan 22 08:38:48 crc kubenswrapper[4933]: E0122 08:38:48.496420 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:39:00 crc kubenswrapper[4933]: I0122 08:39:00.493619 4933 scope.go:117] "RemoveContainer" containerID="63cf97f444dc53d17e48deb38656af6f61c8a7bcf242bdaf977d40fe1c5162f9" Jan 22 08:39:00 crc kubenswrapper[4933]: E0122 08:39:00.494783 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:39:13 crc kubenswrapper[4933]: I0122 08:39:13.491573 4933 scope.go:117] "RemoveContainer" containerID="63cf97f444dc53d17e48deb38656af6f61c8a7bcf242bdaf977d40fe1c5162f9" Jan 22 08:39:13 crc kubenswrapper[4933]: E0122 08:39:13.492270 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:39:28 crc kubenswrapper[4933]: I0122 08:39:28.492500 4933 scope.go:117] "RemoveContainer" containerID="63cf97f444dc53d17e48deb38656af6f61c8a7bcf242bdaf977d40fe1c5162f9" Jan 22 08:39:28 crc kubenswrapper[4933]: E0122 08:39:28.493283 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" 
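From here the pod is in CrashLoopBackOff: each "RemoveContainer"/"Error syncing pod" pair is the kubelet declining to restart the container until the back-off expires. The kubelet's back-off starts at 10s and doubles after each restart up to a 5m cap, which is why every message above already reads "back-off 5m0s". A sketch of that growth:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Kubelet crash-loop back-off: 10s initial delay, doubled after each
	// restart, capped at 5m0s -- the value reported in the errors above.
	const maxDelay = 5 * time.Minute
	delay := 10 * time.Second
	for restart := 1; restart <= 8; restart++ {
		fmt.Printf("restart %d: back-off %v\n", restart, delay)
		if delay *= 2; delay > maxDelay {
			delay = maxDelay
		}
	}
}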
podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:39:39 crc kubenswrapper[4933]: I0122 08:39:39.492008 4933 scope.go:117] "RemoveContainer" containerID="63cf97f444dc53d17e48deb38656af6f61c8a7bcf242bdaf977d40fe1c5162f9" Jan 22 08:39:39 crc kubenswrapper[4933]: E0122 08:39:39.493041 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:39:53 crc kubenswrapper[4933]: I0122 08:39:53.490991 4933 scope.go:117] "RemoveContainer" containerID="63cf97f444dc53d17e48deb38656af6f61c8a7bcf242bdaf977d40fe1c5162f9" Jan 22 08:39:53 crc kubenswrapper[4933]: E0122 08:39:53.491644 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:40:08 crc kubenswrapper[4933]: I0122 08:40:08.491392 4933 scope.go:117] "RemoveContainer" containerID="63cf97f444dc53d17e48deb38656af6f61c8a7bcf242bdaf977d40fe1c5162f9" Jan 22 08:40:08 crc kubenswrapper[4933]: E0122 08:40:08.492139 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:40:23 crc kubenswrapper[4933]: I0122 08:40:23.491633 4933 scope.go:117] "RemoveContainer" containerID="63cf97f444dc53d17e48deb38656af6f61c8a7bcf242bdaf977d40fe1c5162f9" Jan 22 08:40:23 crc kubenswrapper[4933]: E0122 08:40:23.492973 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:40:35 crc kubenswrapper[4933]: I0122 08:40:35.490794 4933 scope.go:117] "RemoveContainer" containerID="63cf97f444dc53d17e48deb38656af6f61c8a7bcf242bdaf977d40fe1c5162f9" Jan 22 08:40:35 crc kubenswrapper[4933]: E0122 08:40:35.491967 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:40:49 crc kubenswrapper[4933]: I0122 08:40:49.490378 4933 scope.go:117] "RemoveContainer" 
containerID="63cf97f444dc53d17e48deb38656af6f61c8a7bcf242bdaf977d40fe1c5162f9" Jan 22 08:40:49 crc kubenswrapper[4933]: E0122 08:40:49.491187 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:41:02 crc kubenswrapper[4933]: I0122 08:41:02.517711 4933 scope.go:117] "RemoveContainer" containerID="63cf97f444dc53d17e48deb38656af6f61c8a7bcf242bdaf977d40fe1c5162f9" Jan 22 08:41:02 crc kubenswrapper[4933]: E0122 08:41:02.520457 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:41:17 crc kubenswrapper[4933]: I0122 08:41:17.492236 4933 scope.go:117] "RemoveContainer" containerID="63cf97f444dc53d17e48deb38656af6f61c8a7bcf242bdaf977d40fe1c5162f9" Jan 22 08:41:17 crc kubenswrapper[4933]: E0122 08:41:17.494005 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:41:24 crc kubenswrapper[4933]: I0122 08:41:24.511172 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-vmkjs"] Jan 22 08:41:24 crc kubenswrapper[4933]: E0122 08:41:24.512146 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d99f47c4-f7e0-498a-89f6-1775c3ca14fa" containerName="extract-utilities" Jan 22 08:41:24 crc kubenswrapper[4933]: I0122 08:41:24.512165 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="d99f47c4-f7e0-498a-89f6-1775c3ca14fa" containerName="extract-utilities" Jan 22 08:41:24 crc kubenswrapper[4933]: E0122 08:41:24.512206 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d99f47c4-f7e0-498a-89f6-1775c3ca14fa" containerName="extract-content" Jan 22 08:41:24 crc kubenswrapper[4933]: I0122 08:41:24.512215 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="d99f47c4-f7e0-498a-89f6-1775c3ca14fa" containerName="extract-content" Jan 22 08:41:24 crc kubenswrapper[4933]: E0122 08:41:24.512237 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d99f47c4-f7e0-498a-89f6-1775c3ca14fa" containerName="registry-server" Jan 22 08:41:24 crc kubenswrapper[4933]: I0122 08:41:24.512244 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="d99f47c4-f7e0-498a-89f6-1775c3ca14fa" containerName="registry-server" Jan 22 08:41:24 crc kubenswrapper[4933]: I0122 08:41:24.512498 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="d99f47c4-f7e0-498a-89f6-1775c3ca14fa" containerName="registry-server" Jan 22 08:41:24 crc kubenswrapper[4933]: I0122 08:41:24.514336 4933 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vmkjs" Jan 22 08:41:24 crc kubenswrapper[4933]: I0122 08:41:24.532433 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vmkjs"] Jan 22 08:41:24 crc kubenswrapper[4933]: I0122 08:41:24.570414 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pw77d\" (UniqueName: \"kubernetes.io/projected/bef1f1e0-7f15-4a32-ac4e-838c658ab78c-kube-api-access-pw77d\") pod \"redhat-marketplace-vmkjs\" (UID: \"bef1f1e0-7f15-4a32-ac4e-838c658ab78c\") " pod="openshift-marketplace/redhat-marketplace-vmkjs" Jan 22 08:41:24 crc kubenswrapper[4933]: I0122 08:41:24.571047 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bef1f1e0-7f15-4a32-ac4e-838c658ab78c-utilities\") pod \"redhat-marketplace-vmkjs\" (UID: \"bef1f1e0-7f15-4a32-ac4e-838c658ab78c\") " pod="openshift-marketplace/redhat-marketplace-vmkjs" Jan 22 08:41:24 crc kubenswrapper[4933]: I0122 08:41:24.571261 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bef1f1e0-7f15-4a32-ac4e-838c658ab78c-catalog-content\") pod \"redhat-marketplace-vmkjs\" (UID: \"bef1f1e0-7f15-4a32-ac4e-838c658ab78c\") " pod="openshift-marketplace/redhat-marketplace-vmkjs" Jan 22 08:41:24 crc kubenswrapper[4933]: I0122 08:41:24.672507 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bef1f1e0-7f15-4a32-ac4e-838c658ab78c-utilities\") pod \"redhat-marketplace-vmkjs\" (UID: \"bef1f1e0-7f15-4a32-ac4e-838c658ab78c\") " pod="openshift-marketplace/redhat-marketplace-vmkjs" Jan 22 08:41:24 crc kubenswrapper[4933]: I0122 08:41:24.672654 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bef1f1e0-7f15-4a32-ac4e-838c658ab78c-catalog-content\") pod \"redhat-marketplace-vmkjs\" (UID: \"bef1f1e0-7f15-4a32-ac4e-838c658ab78c\") " pod="openshift-marketplace/redhat-marketplace-vmkjs" Jan 22 08:41:24 crc kubenswrapper[4933]: I0122 08:41:24.672708 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pw77d\" (UniqueName: \"kubernetes.io/projected/bef1f1e0-7f15-4a32-ac4e-838c658ab78c-kube-api-access-pw77d\") pod \"redhat-marketplace-vmkjs\" (UID: \"bef1f1e0-7f15-4a32-ac4e-838c658ab78c\") " pod="openshift-marketplace/redhat-marketplace-vmkjs" Jan 22 08:41:24 crc kubenswrapper[4933]: I0122 08:41:24.673193 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bef1f1e0-7f15-4a32-ac4e-838c658ab78c-utilities\") pod \"redhat-marketplace-vmkjs\" (UID: \"bef1f1e0-7f15-4a32-ac4e-838c658ab78c\") " pod="openshift-marketplace/redhat-marketplace-vmkjs" Jan 22 08:41:24 crc kubenswrapper[4933]: I0122 08:41:24.673212 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bef1f1e0-7f15-4a32-ac4e-838c658ab78c-catalog-content\") pod \"redhat-marketplace-vmkjs\" (UID: \"bef1f1e0-7f15-4a32-ac4e-838c658ab78c\") " pod="openshift-marketplace/redhat-marketplace-vmkjs" Jan 22 08:41:24 crc kubenswrapper[4933]: I0122 08:41:24.700703 4933 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pw77d\" (UniqueName: \"kubernetes.io/projected/bef1f1e0-7f15-4a32-ac4e-838c658ab78c-kube-api-access-pw77d\") pod \"redhat-marketplace-vmkjs\" (UID: \"bef1f1e0-7f15-4a32-ac4e-838c658ab78c\") " pod="openshift-marketplace/redhat-marketplace-vmkjs" Jan 22 08:41:24 crc kubenswrapper[4933]: I0122 08:41:24.850106 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vmkjs" Jan 22 08:41:25 crc kubenswrapper[4933]: I0122 08:41:25.734747 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vmkjs"] Jan 22 08:41:25 crc kubenswrapper[4933]: I0122 08:41:25.915580 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-j62k2"] Jan 22 08:41:25 crc kubenswrapper[4933]: I0122 08:41:25.919229 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-j62k2" Jan 22 08:41:25 crc kubenswrapper[4933]: I0122 08:41:25.927308 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-j62k2"] Jan 22 08:41:26 crc kubenswrapper[4933]: I0122 08:41:26.005321 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b2b220e3-c399-42ab-8c7a-11ff4b494f6d-utilities\") pod \"community-operators-j62k2\" (UID: \"b2b220e3-c399-42ab-8c7a-11ff4b494f6d\") " pod="openshift-marketplace/community-operators-j62k2" Jan 22 08:41:26 crc kubenswrapper[4933]: I0122 08:41:26.005607 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b2b220e3-c399-42ab-8c7a-11ff4b494f6d-catalog-content\") pod \"community-operators-j62k2\" (UID: \"b2b220e3-c399-42ab-8c7a-11ff4b494f6d\") " pod="openshift-marketplace/community-operators-j62k2" Jan 22 08:41:26 crc kubenswrapper[4933]: I0122 08:41:26.005809 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m2sch\" (UniqueName: \"kubernetes.io/projected/b2b220e3-c399-42ab-8c7a-11ff4b494f6d-kube-api-access-m2sch\") pod \"community-operators-j62k2\" (UID: \"b2b220e3-c399-42ab-8c7a-11ff4b494f6d\") " pod="openshift-marketplace/community-operators-j62k2" Jan 22 08:41:26 crc kubenswrapper[4933]: I0122 08:41:26.108605 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m2sch\" (UniqueName: \"kubernetes.io/projected/b2b220e3-c399-42ab-8c7a-11ff4b494f6d-kube-api-access-m2sch\") pod \"community-operators-j62k2\" (UID: \"b2b220e3-c399-42ab-8c7a-11ff4b494f6d\") " pod="openshift-marketplace/community-operators-j62k2" Jan 22 08:41:26 crc kubenswrapper[4933]: I0122 08:41:26.109049 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b2b220e3-c399-42ab-8c7a-11ff4b494f6d-utilities\") pod \"community-operators-j62k2\" (UID: \"b2b220e3-c399-42ab-8c7a-11ff4b494f6d\") " pod="openshift-marketplace/community-operators-j62k2" Jan 22 08:41:26 crc kubenswrapper[4933]: I0122 08:41:26.109183 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b2b220e3-c399-42ab-8c7a-11ff4b494f6d-catalog-content\") pod 
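Each catalog pod mounts two emptyDir volumes ("utilities" and "catalog-content", shared between the extract-* init containers and registry-server) plus the auto-generated projected service-account token ("kube-api-access-..."). The declared part of that volume stanza, sketched in Go types as an assumption reconstructed from the mount records above:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	volumes := []corev1.Volume{
		{Name: "utilities",
			VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}},
		{Name: "catalog-content",
			VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}},
		// "kube-api-access-pw77d" is the projected token volume the kubelet
		// injects automatically; it is not declared in the pod manifest.
	}
	fmt.Printf("%+v\n", volumes)
}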
\"community-operators-j62k2\" (UID: \"b2b220e3-c399-42ab-8c7a-11ff4b494f6d\") " pod="openshift-marketplace/community-operators-j62k2" Jan 22 08:41:26 crc kubenswrapper[4933]: I0122 08:41:26.109646 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b2b220e3-c399-42ab-8c7a-11ff4b494f6d-utilities\") pod \"community-operators-j62k2\" (UID: \"b2b220e3-c399-42ab-8c7a-11ff4b494f6d\") " pod="openshift-marketplace/community-operators-j62k2" Jan 22 08:41:26 crc kubenswrapper[4933]: I0122 08:41:26.109831 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b2b220e3-c399-42ab-8c7a-11ff4b494f6d-catalog-content\") pod \"community-operators-j62k2\" (UID: \"b2b220e3-c399-42ab-8c7a-11ff4b494f6d\") " pod="openshift-marketplace/community-operators-j62k2" Jan 22 08:41:26 crc kubenswrapper[4933]: I0122 08:41:26.140326 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m2sch\" (UniqueName: \"kubernetes.io/projected/b2b220e3-c399-42ab-8c7a-11ff4b494f6d-kube-api-access-m2sch\") pod \"community-operators-j62k2\" (UID: \"b2b220e3-c399-42ab-8c7a-11ff4b494f6d\") " pod="openshift-marketplace/community-operators-j62k2" Jan 22 08:41:26 crc kubenswrapper[4933]: I0122 08:41:26.239578 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-j62k2" Jan 22 08:41:26 crc kubenswrapper[4933]: I0122 08:41:26.345946 4933 generic.go:334] "Generic (PLEG): container finished" podID="bef1f1e0-7f15-4a32-ac4e-838c658ab78c" containerID="7809750b280f5c65a249e9b94d201bd1d867093bc4ed044847734a6927529e04" exitCode=0 Jan 22 08:41:26 crc kubenswrapper[4933]: I0122 08:41:26.345989 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vmkjs" event={"ID":"bef1f1e0-7f15-4a32-ac4e-838c658ab78c","Type":"ContainerDied","Data":"7809750b280f5c65a249e9b94d201bd1d867093bc4ed044847734a6927529e04"} Jan 22 08:41:26 crc kubenswrapper[4933]: I0122 08:41:26.346015 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vmkjs" event={"ID":"bef1f1e0-7f15-4a32-ac4e-838c658ab78c","Type":"ContainerStarted","Data":"e1c92c2cfcd0df48897d63d295d935c2fcf515fe3840aeee2194c5f3271f72d7"} Jan 22 08:41:26 crc kubenswrapper[4933]: I0122 08:41:26.366582 4933 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 08:41:26 crc kubenswrapper[4933]: I0122 08:41:26.765312 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-j62k2"] Jan 22 08:41:27 crc kubenswrapper[4933]: I0122 08:41:27.365673 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vmkjs" event={"ID":"bef1f1e0-7f15-4a32-ac4e-838c658ab78c","Type":"ContainerStarted","Data":"b8ade3e8f1cf3ae1e9bd6959e4931b28b9716f9bded0550a712845c443a9eae9"} Jan 22 08:41:27 crc kubenswrapper[4933]: I0122 08:41:27.368839 4933 generic.go:334] "Generic (PLEG): container finished" podID="b2b220e3-c399-42ab-8c7a-11ff4b494f6d" containerID="e06409c7289130a2bcbfdec210ec8e306bdb34780a9b86e94d7c96520c7d65c7" exitCode=0 Jan 22 08:41:27 crc kubenswrapper[4933]: I0122 08:41:27.368886 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j62k2" 
event={"ID":"b2b220e3-c399-42ab-8c7a-11ff4b494f6d","Type":"ContainerDied","Data":"e06409c7289130a2bcbfdec210ec8e306bdb34780a9b86e94d7c96520c7d65c7"} Jan 22 08:41:27 crc kubenswrapper[4933]: I0122 08:41:27.368912 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j62k2" event={"ID":"b2b220e3-c399-42ab-8c7a-11ff4b494f6d","Type":"ContainerStarted","Data":"028f8824eb210ab7370e89c024098d6ba526bc6f3ed1a6d732707354c288c861"} Jan 22 08:41:28 crc kubenswrapper[4933]: I0122 08:41:28.389000 4933 generic.go:334] "Generic (PLEG): container finished" podID="bef1f1e0-7f15-4a32-ac4e-838c658ab78c" containerID="b8ade3e8f1cf3ae1e9bd6959e4931b28b9716f9bded0550a712845c443a9eae9" exitCode=0 Jan 22 08:41:28 crc kubenswrapper[4933]: I0122 08:41:28.389144 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vmkjs" event={"ID":"bef1f1e0-7f15-4a32-ac4e-838c658ab78c","Type":"ContainerDied","Data":"b8ade3e8f1cf3ae1e9bd6959e4931b28b9716f9bded0550a712845c443a9eae9"} Jan 22 08:41:28 crc kubenswrapper[4933]: I0122 08:41:28.393500 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j62k2" event={"ID":"b2b220e3-c399-42ab-8c7a-11ff4b494f6d","Type":"ContainerStarted","Data":"4e7f07ad3a73c80003e6ee3249d078a59d21d665770500673e37cd5b75be6669"} Jan 22 08:41:29 crc kubenswrapper[4933]: I0122 08:41:29.405648 4933 generic.go:334] "Generic (PLEG): container finished" podID="b2b220e3-c399-42ab-8c7a-11ff4b494f6d" containerID="4e7f07ad3a73c80003e6ee3249d078a59d21d665770500673e37cd5b75be6669" exitCode=0 Jan 22 08:41:29 crc kubenswrapper[4933]: I0122 08:41:29.405869 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j62k2" event={"ID":"b2b220e3-c399-42ab-8c7a-11ff4b494f6d","Type":"ContainerDied","Data":"4e7f07ad3a73c80003e6ee3249d078a59d21d665770500673e37cd5b75be6669"} Jan 22 08:41:30 crc kubenswrapper[4933]: I0122 08:41:30.418206 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vmkjs" event={"ID":"bef1f1e0-7f15-4a32-ac4e-838c658ab78c","Type":"ContainerStarted","Data":"e1cf229eb9d12f0598a72b2903f9c42a206dad7bc52dd1be099069eada36895c"} Jan 22 08:41:30 crc kubenswrapper[4933]: I0122 08:41:30.424189 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j62k2" event={"ID":"b2b220e3-c399-42ab-8c7a-11ff4b494f6d","Type":"ContainerStarted","Data":"437c27aecff1562e503549cfc31a649f87fc8ebc10491a3a8e0f506c89e4054a"} Jan 22 08:41:30 crc kubenswrapper[4933]: I0122 08:41:30.450935 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-vmkjs" podStartSLOduration=3.199465889 podStartE2EDuration="6.450918291s" podCreationTimestamp="2026-01-22 08:41:24 +0000 UTC" firstStartedPulling="2026-01-22 08:41:26.366272459 +0000 UTC m=+10534.203397812" lastFinishedPulling="2026-01-22 08:41:29.617724851 +0000 UTC m=+10537.454850214" observedRunningTime="2026-01-22 08:41:30.445293905 +0000 UTC m=+10538.282419268" watchObservedRunningTime="2026-01-22 08:41:30.450918291 +0000 UTC m=+10538.288043644" Jan 22 08:41:30 crc kubenswrapper[4933]: I0122 08:41:30.463986 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-j62k2" podStartSLOduration=2.9545175710000002 podStartE2EDuration="5.463962898s" podCreationTimestamp="2026-01-22 08:41:25 +0000 UTC" 
firstStartedPulling="2026-01-22 08:41:27.370110521 +0000 UTC m=+10535.207235864" lastFinishedPulling="2026-01-22 08:41:29.879555838 +0000 UTC m=+10537.716681191" observedRunningTime="2026-01-22 08:41:30.461691412 +0000 UTC m=+10538.298816765" watchObservedRunningTime="2026-01-22 08:41:30.463962898 +0000 UTC m=+10538.301088271" Jan 22 08:41:30 crc kubenswrapper[4933]: I0122 08:41:30.492162 4933 scope.go:117] "RemoveContainer" containerID="63cf97f444dc53d17e48deb38656af6f61c8a7bcf242bdaf977d40fe1c5162f9" Jan 22 08:41:30 crc kubenswrapper[4933]: E0122 08:41:30.492419 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:41:34 crc kubenswrapper[4933]: I0122 08:41:34.850461 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-vmkjs" Jan 22 08:41:34 crc kubenswrapper[4933]: I0122 08:41:34.850976 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-vmkjs" Jan 22 08:41:34 crc kubenswrapper[4933]: I0122 08:41:34.907678 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-vmkjs" Jan 22 08:41:35 crc kubenswrapper[4933]: I0122 08:41:35.534020 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-vmkjs" Jan 22 08:41:36 crc kubenswrapper[4933]: I0122 08:41:36.240152 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-j62k2" Jan 22 08:41:36 crc kubenswrapper[4933]: I0122 08:41:36.241341 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-j62k2" Jan 22 08:41:36 crc kubenswrapper[4933]: I0122 08:41:36.452756 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-j62k2" Jan 22 08:41:36 crc kubenswrapper[4933]: I0122 08:41:36.563262 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-j62k2" Jan 22 08:41:36 crc kubenswrapper[4933]: I0122 08:41:36.900370 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vmkjs"] Jan 22 08:41:37 crc kubenswrapper[4933]: I0122 08:41:37.506138 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-vmkjs" podUID="bef1f1e0-7f15-4a32-ac4e-838c658ab78c" containerName="registry-server" containerID="cri-o://e1cf229eb9d12f0598a72b2903f9c42a206dad7bc52dd1be099069eada36895c" gracePeriod=2 Jan 22 08:41:38 crc kubenswrapper[4933]: I0122 08:41:38.052340 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vmkjs" Jan 22 08:41:38 crc kubenswrapper[4933]: I0122 08:41:38.204442 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bef1f1e0-7f15-4a32-ac4e-838c658ab78c-catalog-content\") pod \"bef1f1e0-7f15-4a32-ac4e-838c658ab78c\" (UID: \"bef1f1e0-7f15-4a32-ac4e-838c658ab78c\") " Jan 22 08:41:38 crc kubenswrapper[4933]: I0122 08:41:38.204972 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bef1f1e0-7f15-4a32-ac4e-838c658ab78c-utilities\") pod \"bef1f1e0-7f15-4a32-ac4e-838c658ab78c\" (UID: \"bef1f1e0-7f15-4a32-ac4e-838c658ab78c\") " Jan 22 08:41:38 crc kubenswrapper[4933]: I0122 08:41:38.205045 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pw77d\" (UniqueName: \"kubernetes.io/projected/bef1f1e0-7f15-4a32-ac4e-838c658ab78c-kube-api-access-pw77d\") pod \"bef1f1e0-7f15-4a32-ac4e-838c658ab78c\" (UID: \"bef1f1e0-7f15-4a32-ac4e-838c658ab78c\") " Jan 22 08:41:38 crc kubenswrapper[4933]: I0122 08:41:38.206242 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bef1f1e0-7f15-4a32-ac4e-838c658ab78c-utilities" (OuterVolumeSpecName: "utilities") pod "bef1f1e0-7f15-4a32-ac4e-838c658ab78c" (UID: "bef1f1e0-7f15-4a32-ac4e-838c658ab78c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 08:41:38 crc kubenswrapper[4933]: I0122 08:41:38.215307 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bef1f1e0-7f15-4a32-ac4e-838c658ab78c-kube-api-access-pw77d" (OuterVolumeSpecName: "kube-api-access-pw77d") pod "bef1f1e0-7f15-4a32-ac4e-838c658ab78c" (UID: "bef1f1e0-7f15-4a32-ac4e-838c658ab78c"). InnerVolumeSpecName "kube-api-access-pw77d". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 08:41:38 crc kubenswrapper[4933]: I0122 08:41:38.224141 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bef1f1e0-7f15-4a32-ac4e-838c658ab78c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bef1f1e0-7f15-4a32-ac4e-838c658ab78c" (UID: "bef1f1e0-7f15-4a32-ac4e-838c658ab78c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 08:41:38 crc kubenswrapper[4933]: I0122 08:41:38.307147 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bef1f1e0-7f15-4a32-ac4e-838c658ab78c-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 08:41:38 crc kubenswrapper[4933]: I0122 08:41:38.307184 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bef1f1e0-7f15-4a32-ac4e-838c658ab78c-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 08:41:38 crc kubenswrapper[4933]: I0122 08:41:38.307201 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pw77d\" (UniqueName: \"kubernetes.io/projected/bef1f1e0-7f15-4a32-ac4e-838c658ab78c-kube-api-access-pw77d\") on node \"crc\" DevicePath \"\"" Jan 22 08:41:38 crc kubenswrapper[4933]: I0122 08:41:38.518740 4933 generic.go:334] "Generic (PLEG): container finished" podID="bef1f1e0-7f15-4a32-ac4e-838c658ab78c" containerID="e1cf229eb9d12f0598a72b2903f9c42a206dad7bc52dd1be099069eada36895c" exitCode=0 Jan 22 08:41:38 crc kubenswrapper[4933]: I0122 08:41:38.518782 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vmkjs" event={"ID":"bef1f1e0-7f15-4a32-ac4e-838c658ab78c","Type":"ContainerDied","Data":"e1cf229eb9d12f0598a72b2903f9c42a206dad7bc52dd1be099069eada36895c"} Jan 22 08:41:38 crc kubenswrapper[4933]: I0122 08:41:38.518833 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vmkjs" Jan 22 08:41:38 crc kubenswrapper[4933]: I0122 08:41:38.518842 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vmkjs" event={"ID":"bef1f1e0-7f15-4a32-ac4e-838c658ab78c","Type":"ContainerDied","Data":"e1c92c2cfcd0df48897d63d295d935c2fcf515fe3840aeee2194c5f3271f72d7"} Jan 22 08:41:38 crc kubenswrapper[4933]: I0122 08:41:38.518857 4933 scope.go:117] "RemoveContainer" containerID="e1cf229eb9d12f0598a72b2903f9c42a206dad7bc52dd1be099069eada36895c" Jan 22 08:41:38 crc kubenswrapper[4933]: I0122 08:41:38.548614 4933 scope.go:117] "RemoveContainer" containerID="b8ade3e8f1cf3ae1e9bd6959e4931b28b9716f9bded0550a712845c443a9eae9" Jan 22 08:41:38 crc kubenswrapper[4933]: I0122 08:41:38.551754 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vmkjs"] Jan 22 08:41:38 crc kubenswrapper[4933]: I0122 08:41:38.578504 4933 scope.go:117] "RemoveContainer" containerID="7809750b280f5c65a249e9b94d201bd1d867093bc4ed044847734a6927529e04" Jan 22 08:41:38 crc kubenswrapper[4933]: I0122 08:41:38.580193 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-vmkjs"] Jan 22 08:41:38 crc kubenswrapper[4933]: I0122 08:41:38.622538 4933 scope.go:117] "RemoveContainer" containerID="e1cf229eb9d12f0598a72b2903f9c42a206dad7bc52dd1be099069eada36895c" Jan 22 08:41:38 crc kubenswrapper[4933]: E0122 08:41:38.623209 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e1cf229eb9d12f0598a72b2903f9c42a206dad7bc52dd1be099069eada36895c\": container with ID starting with e1cf229eb9d12f0598a72b2903f9c42a206dad7bc52dd1be099069eada36895c not found: ID does not exist" containerID="e1cf229eb9d12f0598a72b2903f9c42a206dad7bc52dd1be099069eada36895c" Jan 22 08:41:38 crc kubenswrapper[4933]: I0122 08:41:38.623252 4933 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1cf229eb9d12f0598a72b2903f9c42a206dad7bc52dd1be099069eada36895c"} err="failed to get container status \"e1cf229eb9d12f0598a72b2903f9c42a206dad7bc52dd1be099069eada36895c\": rpc error: code = NotFound desc = could not find container \"e1cf229eb9d12f0598a72b2903f9c42a206dad7bc52dd1be099069eada36895c\": container with ID starting with e1cf229eb9d12f0598a72b2903f9c42a206dad7bc52dd1be099069eada36895c not found: ID does not exist" Jan 22 08:41:38 crc kubenswrapper[4933]: I0122 08:41:38.623278 4933 scope.go:117] "RemoveContainer" containerID="b8ade3e8f1cf3ae1e9bd6959e4931b28b9716f9bded0550a712845c443a9eae9" Jan 22 08:41:38 crc kubenswrapper[4933]: E0122 08:41:38.623747 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b8ade3e8f1cf3ae1e9bd6959e4931b28b9716f9bded0550a712845c443a9eae9\": container with ID starting with b8ade3e8f1cf3ae1e9bd6959e4931b28b9716f9bded0550a712845c443a9eae9 not found: ID does not exist" containerID="b8ade3e8f1cf3ae1e9bd6959e4931b28b9716f9bded0550a712845c443a9eae9" Jan 22 08:41:38 crc kubenswrapper[4933]: I0122 08:41:38.623809 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b8ade3e8f1cf3ae1e9bd6959e4931b28b9716f9bded0550a712845c443a9eae9"} err="failed to get container status \"b8ade3e8f1cf3ae1e9bd6959e4931b28b9716f9bded0550a712845c443a9eae9\": rpc error: code = NotFound desc = could not find container \"b8ade3e8f1cf3ae1e9bd6959e4931b28b9716f9bded0550a712845c443a9eae9\": container with ID starting with b8ade3e8f1cf3ae1e9bd6959e4931b28b9716f9bded0550a712845c443a9eae9 not found: ID does not exist" Jan 22 08:41:38 crc kubenswrapper[4933]: I0122 08:41:38.623853 4933 scope.go:117] "RemoveContainer" containerID="7809750b280f5c65a249e9b94d201bd1d867093bc4ed044847734a6927529e04" Jan 22 08:41:38 crc kubenswrapper[4933]: E0122 08:41:38.624285 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7809750b280f5c65a249e9b94d201bd1d867093bc4ed044847734a6927529e04\": container with ID starting with 7809750b280f5c65a249e9b94d201bd1d867093bc4ed044847734a6927529e04 not found: ID does not exist" containerID="7809750b280f5c65a249e9b94d201bd1d867093bc4ed044847734a6927529e04" Jan 22 08:41:38 crc kubenswrapper[4933]: I0122 08:41:38.624343 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7809750b280f5c65a249e9b94d201bd1d867093bc4ed044847734a6927529e04"} err="failed to get container status \"7809750b280f5c65a249e9b94d201bd1d867093bc4ed044847734a6927529e04\": rpc error: code = NotFound desc = could not find container \"7809750b280f5c65a249e9b94d201bd1d867093bc4ed044847734a6927529e04\": container with ID starting with 7809750b280f5c65a249e9b94d201bd1d867093bc4ed044847734a6927529e04 not found: ID does not exist" Jan 22 08:41:38 crc kubenswrapper[4933]: I0122 08:41:38.696804 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-j62k2"] Jan 22 08:41:39 crc kubenswrapper[4933]: I0122 08:41:39.534612 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-j62k2" podUID="b2b220e3-c399-42ab-8c7a-11ff4b494f6d" containerName="registry-server" containerID="cri-o://437c27aecff1562e503549cfc31a649f87fc8ebc10491a3a8e0f506c89e4054a" gracePeriod=2 Jan 22 
08:41:40 crc kubenswrapper[4933]: I0122 08:41:40.111343 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-j62k2" Jan 22 08:41:40 crc kubenswrapper[4933]: I0122 08:41:40.148276 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m2sch\" (UniqueName: \"kubernetes.io/projected/b2b220e3-c399-42ab-8c7a-11ff4b494f6d-kube-api-access-m2sch\") pod \"b2b220e3-c399-42ab-8c7a-11ff4b494f6d\" (UID: \"b2b220e3-c399-42ab-8c7a-11ff4b494f6d\") " Jan 22 08:41:40 crc kubenswrapper[4933]: I0122 08:41:40.148691 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b2b220e3-c399-42ab-8c7a-11ff4b494f6d-catalog-content\") pod \"b2b220e3-c399-42ab-8c7a-11ff4b494f6d\" (UID: \"b2b220e3-c399-42ab-8c7a-11ff4b494f6d\") " Jan 22 08:41:40 crc kubenswrapper[4933]: I0122 08:41:40.162298 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2b220e3-c399-42ab-8c7a-11ff4b494f6d-kube-api-access-m2sch" (OuterVolumeSpecName: "kube-api-access-m2sch") pod "b2b220e3-c399-42ab-8c7a-11ff4b494f6d" (UID: "b2b220e3-c399-42ab-8c7a-11ff4b494f6d"). InnerVolumeSpecName "kube-api-access-m2sch". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 08:41:40 crc kubenswrapper[4933]: I0122 08:41:40.227172 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b2b220e3-c399-42ab-8c7a-11ff4b494f6d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b2b220e3-c399-42ab-8c7a-11ff4b494f6d" (UID: "b2b220e3-c399-42ab-8c7a-11ff4b494f6d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 08:41:40 crc kubenswrapper[4933]: I0122 08:41:40.250842 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b2b220e3-c399-42ab-8c7a-11ff4b494f6d-utilities\") pod \"b2b220e3-c399-42ab-8c7a-11ff4b494f6d\" (UID: \"b2b220e3-c399-42ab-8c7a-11ff4b494f6d\") " Jan 22 08:41:40 crc kubenswrapper[4933]: I0122 08:41:40.251594 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b2b220e3-c399-42ab-8c7a-11ff4b494f6d-utilities" (OuterVolumeSpecName: "utilities") pod "b2b220e3-c399-42ab-8c7a-11ff4b494f6d" (UID: "b2b220e3-c399-42ab-8c7a-11ff4b494f6d"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 08:41:40 crc kubenswrapper[4933]: I0122 08:41:40.251994 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b2b220e3-c399-42ab-8c7a-11ff4b494f6d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 08:41:40 crc kubenswrapper[4933]: I0122 08:41:40.252111 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b2b220e3-c399-42ab-8c7a-11ff4b494f6d-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 08:41:40 crc kubenswrapper[4933]: I0122 08:41:40.252206 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m2sch\" (UniqueName: \"kubernetes.io/projected/b2b220e3-c399-42ab-8c7a-11ff4b494f6d-kube-api-access-m2sch\") on node \"crc\" DevicePath \"\"" Jan 22 08:41:40 crc kubenswrapper[4933]: I0122 08:41:40.502941 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bef1f1e0-7f15-4a32-ac4e-838c658ab78c" path="/var/lib/kubelet/pods/bef1f1e0-7f15-4a32-ac4e-838c658ab78c/volumes" Jan 22 08:41:40 crc kubenswrapper[4933]: I0122 08:41:40.551730 4933 generic.go:334] "Generic (PLEG): container finished" podID="b2b220e3-c399-42ab-8c7a-11ff4b494f6d" containerID="437c27aecff1562e503549cfc31a649f87fc8ebc10491a3a8e0f506c89e4054a" exitCode=0 Jan 22 08:41:40 crc kubenswrapper[4933]: I0122 08:41:40.551801 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-j62k2" Jan 22 08:41:40 crc kubenswrapper[4933]: I0122 08:41:40.551823 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j62k2" event={"ID":"b2b220e3-c399-42ab-8c7a-11ff4b494f6d","Type":"ContainerDied","Data":"437c27aecff1562e503549cfc31a649f87fc8ebc10491a3a8e0f506c89e4054a"} Jan 22 08:41:40 crc kubenswrapper[4933]: I0122 08:41:40.553293 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j62k2" event={"ID":"b2b220e3-c399-42ab-8c7a-11ff4b494f6d","Type":"ContainerDied","Data":"028f8824eb210ab7370e89c024098d6ba526bc6f3ed1a6d732707354c288c861"} Jan 22 08:41:40 crc kubenswrapper[4933]: I0122 08:41:40.553318 4933 scope.go:117] "RemoveContainer" containerID="437c27aecff1562e503549cfc31a649f87fc8ebc10491a3a8e0f506c89e4054a" Jan 22 08:41:40 crc kubenswrapper[4933]: I0122 08:41:40.591297 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-j62k2"] Jan 22 08:41:40 crc kubenswrapper[4933]: I0122 08:41:40.596086 4933 scope.go:117] "RemoveContainer" containerID="4e7f07ad3a73c80003e6ee3249d078a59d21d665770500673e37cd5b75be6669" Jan 22 08:41:40 crc kubenswrapper[4933]: I0122 08:41:40.600729 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-j62k2"] Jan 22 08:41:40 crc kubenswrapper[4933]: I0122 08:41:40.622738 4933 scope.go:117] "RemoveContainer" containerID="e06409c7289130a2bcbfdec210ec8e306bdb34780a9b86e94d7c96520c7d65c7" Jan 22 08:41:40 crc kubenswrapper[4933]: I0122 08:41:40.713051 4933 scope.go:117] "RemoveContainer" containerID="437c27aecff1562e503549cfc31a649f87fc8ebc10491a3a8e0f506c89e4054a" Jan 22 08:41:40 crc kubenswrapper[4933]: E0122 08:41:40.713919 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"437c27aecff1562e503549cfc31a649f87fc8ebc10491a3a8e0f506c89e4054a\": container with ID 
starting with 437c27aecff1562e503549cfc31a649f87fc8ebc10491a3a8e0f506c89e4054a not found: ID does not exist" containerID="437c27aecff1562e503549cfc31a649f87fc8ebc10491a3a8e0f506c89e4054a" Jan 22 08:41:40 crc kubenswrapper[4933]: I0122 08:41:40.713968 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"437c27aecff1562e503549cfc31a649f87fc8ebc10491a3a8e0f506c89e4054a"} err="failed to get container status \"437c27aecff1562e503549cfc31a649f87fc8ebc10491a3a8e0f506c89e4054a\": rpc error: code = NotFound desc = could not find container \"437c27aecff1562e503549cfc31a649f87fc8ebc10491a3a8e0f506c89e4054a\": container with ID starting with 437c27aecff1562e503549cfc31a649f87fc8ebc10491a3a8e0f506c89e4054a not found: ID does not exist" Jan 22 08:41:40 crc kubenswrapper[4933]: I0122 08:41:40.713996 4933 scope.go:117] "RemoveContainer" containerID="4e7f07ad3a73c80003e6ee3249d078a59d21d665770500673e37cd5b75be6669" Jan 22 08:41:40 crc kubenswrapper[4933]: E0122 08:41:40.714565 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4e7f07ad3a73c80003e6ee3249d078a59d21d665770500673e37cd5b75be6669\": container with ID starting with 4e7f07ad3a73c80003e6ee3249d078a59d21d665770500673e37cd5b75be6669 not found: ID does not exist" containerID="4e7f07ad3a73c80003e6ee3249d078a59d21d665770500673e37cd5b75be6669" Jan 22 08:41:40 crc kubenswrapper[4933]: I0122 08:41:40.714688 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e7f07ad3a73c80003e6ee3249d078a59d21d665770500673e37cd5b75be6669"} err="failed to get container status \"4e7f07ad3a73c80003e6ee3249d078a59d21d665770500673e37cd5b75be6669\": rpc error: code = NotFound desc = could not find container \"4e7f07ad3a73c80003e6ee3249d078a59d21d665770500673e37cd5b75be6669\": container with ID starting with 4e7f07ad3a73c80003e6ee3249d078a59d21d665770500673e37cd5b75be6669 not found: ID does not exist" Jan 22 08:41:40 crc kubenswrapper[4933]: I0122 08:41:40.714787 4933 scope.go:117] "RemoveContainer" containerID="e06409c7289130a2bcbfdec210ec8e306bdb34780a9b86e94d7c96520c7d65c7" Jan 22 08:41:40 crc kubenswrapper[4933]: E0122 08:41:40.715205 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e06409c7289130a2bcbfdec210ec8e306bdb34780a9b86e94d7c96520c7d65c7\": container with ID starting with e06409c7289130a2bcbfdec210ec8e306bdb34780a9b86e94d7c96520c7d65c7 not found: ID does not exist" containerID="e06409c7289130a2bcbfdec210ec8e306bdb34780a9b86e94d7c96520c7d65c7" Jan 22 08:41:40 crc kubenswrapper[4933]: I0122 08:41:40.715251 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e06409c7289130a2bcbfdec210ec8e306bdb34780a9b86e94d7c96520c7d65c7"} err="failed to get container status \"e06409c7289130a2bcbfdec210ec8e306bdb34780a9b86e94d7c96520c7d65c7\": rpc error: code = NotFound desc = could not find container \"e06409c7289130a2bcbfdec210ec8e306bdb34780a9b86e94d7c96520c7d65c7\": container with ID starting with e06409c7289130a2bcbfdec210ec8e306bdb34780a9b86e94d7c96520c7d65c7 not found: ID does not exist" Jan 22 08:41:41 crc kubenswrapper[4933]: I0122 08:41:41.491613 4933 scope.go:117] "RemoveContainer" containerID="63cf97f444dc53d17e48deb38656af6f61c8a7bcf242bdaf977d40fe1c5162f9" Jan 22 08:41:41 crc kubenswrapper[4933]: E0122 08:41:41.492012 4933 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:41:42 crc kubenswrapper[4933]: I0122 08:41:42.507920 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b2b220e3-c399-42ab-8c7a-11ff4b494f6d" path="/var/lib/kubelet/pods/b2b220e3-c399-42ab-8c7a-11ff4b494f6d/volumes" Jan 22 08:41:53 crc kubenswrapper[4933]: I0122 08:41:53.491141 4933 scope.go:117] "RemoveContainer" containerID="63cf97f444dc53d17e48deb38656af6f61c8a7bcf242bdaf977d40fe1c5162f9" Jan 22 08:41:53 crc kubenswrapper[4933]: E0122 08:41:53.491962 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:42:05 crc kubenswrapper[4933]: I0122 08:42:05.491678 4933 scope.go:117] "RemoveContainer" containerID="63cf97f444dc53d17e48deb38656af6f61c8a7bcf242bdaf977d40fe1c5162f9" Jan 22 08:42:05 crc kubenswrapper[4933]: E0122 08:42:05.493443 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:42:06 crc kubenswrapper[4933]: I0122 08:42:06.344175 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-wj7kc"] Jan 22 08:42:06 crc kubenswrapper[4933]: E0122 08:42:06.345375 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bef1f1e0-7f15-4a32-ac4e-838c658ab78c" containerName="extract-content" Jan 22 08:42:06 crc kubenswrapper[4933]: I0122 08:42:06.345406 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="bef1f1e0-7f15-4a32-ac4e-838c658ab78c" containerName="extract-content" Jan 22 08:42:06 crc kubenswrapper[4933]: E0122 08:42:06.345453 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bef1f1e0-7f15-4a32-ac4e-838c658ab78c" containerName="extract-utilities" Jan 22 08:42:06 crc kubenswrapper[4933]: I0122 08:42:06.345466 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="bef1f1e0-7f15-4a32-ac4e-838c658ab78c" containerName="extract-utilities" Jan 22 08:42:06 crc kubenswrapper[4933]: E0122 08:42:06.345487 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2b220e3-c399-42ab-8c7a-11ff4b494f6d" containerName="registry-server" Jan 22 08:42:06 crc kubenswrapper[4933]: I0122 08:42:06.345501 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2b220e3-c399-42ab-8c7a-11ff4b494f6d" containerName="registry-server" Jan 22 08:42:06 crc kubenswrapper[4933]: E0122 08:42:06.345535 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2b220e3-c399-42ab-8c7a-11ff4b494f6d" containerName="extract-utilities" Jan 22 
08:42:06 crc kubenswrapper[4933]: I0122 08:42:06.345547 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2b220e3-c399-42ab-8c7a-11ff4b494f6d" containerName="extract-utilities" Jan 22 08:42:06 crc kubenswrapper[4933]: E0122 08:42:06.345581 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2b220e3-c399-42ab-8c7a-11ff4b494f6d" containerName="extract-content" Jan 22 08:42:06 crc kubenswrapper[4933]: I0122 08:42:06.345592 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2b220e3-c399-42ab-8c7a-11ff4b494f6d" containerName="extract-content" Jan 22 08:42:06 crc kubenswrapper[4933]: E0122 08:42:06.345626 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bef1f1e0-7f15-4a32-ac4e-838c658ab78c" containerName="registry-server" Jan 22 08:42:06 crc kubenswrapper[4933]: I0122 08:42:06.345638 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="bef1f1e0-7f15-4a32-ac4e-838c658ab78c" containerName="registry-server" Jan 22 08:42:06 crc kubenswrapper[4933]: I0122 08:42:06.346060 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="bef1f1e0-7f15-4a32-ac4e-838c658ab78c" containerName="registry-server" Jan 22 08:42:06 crc kubenswrapper[4933]: I0122 08:42:06.346106 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2b220e3-c399-42ab-8c7a-11ff4b494f6d" containerName="registry-server" Jan 22 08:42:06 crc kubenswrapper[4933]: I0122 08:42:06.355282 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wj7kc" Jan 22 08:42:06 crc kubenswrapper[4933]: I0122 08:42:06.376402 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wj7kc"] Jan 22 08:42:06 crc kubenswrapper[4933]: I0122 08:42:06.510855 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e-catalog-content\") pod \"certified-operators-wj7kc\" (UID: \"28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e\") " pod="openshift-marketplace/certified-operators-wj7kc" Jan 22 08:42:06 crc kubenswrapper[4933]: I0122 08:42:06.511401 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e-utilities\") pod \"certified-operators-wj7kc\" (UID: \"28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e\") " pod="openshift-marketplace/certified-operators-wj7kc" Jan 22 08:42:06 crc kubenswrapper[4933]: I0122 08:42:06.511607 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k8bdg\" (UniqueName: \"kubernetes.io/projected/28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e-kube-api-access-k8bdg\") pod \"certified-operators-wj7kc\" (UID: \"28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e\") " pod="openshift-marketplace/certified-operators-wj7kc" Jan 22 08:42:06 crc kubenswrapper[4933]: I0122 08:42:06.613350 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e-catalog-content\") pod \"certified-operators-wj7kc\" (UID: \"28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e\") " pod="openshift-marketplace/certified-operators-wj7kc" Jan 22 08:42:06 crc kubenswrapper[4933]: I0122 08:42:06.613457 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e-utilities\") pod \"certified-operators-wj7kc\" (UID: \"28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e\") " pod="openshift-marketplace/certified-operators-wj7kc" Jan 22 08:42:06 crc kubenswrapper[4933]: I0122 08:42:06.613679 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k8bdg\" (UniqueName: \"kubernetes.io/projected/28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e-kube-api-access-k8bdg\") pod \"certified-operators-wj7kc\" (UID: \"28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e\") " pod="openshift-marketplace/certified-operators-wj7kc" Jan 22 08:42:06 crc kubenswrapper[4933]: I0122 08:42:06.614386 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e-utilities\") pod \"certified-operators-wj7kc\" (UID: \"28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e\") " pod="openshift-marketplace/certified-operators-wj7kc" Jan 22 08:42:06 crc kubenswrapper[4933]: I0122 08:42:06.614713 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e-catalog-content\") pod \"certified-operators-wj7kc\" (UID: \"28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e\") " pod="openshift-marketplace/certified-operators-wj7kc" Jan 22 08:42:06 crc kubenswrapper[4933]: I0122 08:42:06.632776 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k8bdg\" (UniqueName: \"kubernetes.io/projected/28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e-kube-api-access-k8bdg\") pod \"certified-operators-wj7kc\" (UID: \"28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e\") " pod="openshift-marketplace/certified-operators-wj7kc" Jan 22 08:42:06 crc kubenswrapper[4933]: I0122 08:42:06.740803 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-wj7kc" Jan 22 08:42:07 crc kubenswrapper[4933]: I0122 08:42:07.283247 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wj7kc"] Jan 22 08:42:07 crc kubenswrapper[4933]: I0122 08:42:07.887860 4933 generic.go:334] "Generic (PLEG): container finished" podID="28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e" containerID="396095ea7cb867e836887a8a8a4b929f197b3e1cbe99244b12b80d6a51a4af16" exitCode=0 Jan 22 08:42:07 crc kubenswrapper[4933]: I0122 08:42:07.887965 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wj7kc" event={"ID":"28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e","Type":"ContainerDied","Data":"396095ea7cb867e836887a8a8a4b929f197b3e1cbe99244b12b80d6a51a4af16"} Jan 22 08:42:07 crc kubenswrapper[4933]: I0122 08:42:07.888258 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wj7kc" event={"ID":"28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e","Type":"ContainerStarted","Data":"8c93c949a7e9395641072fee135c3779d1bb3ccbb8de2aab1b3cee8eb1a8b864"} Jan 22 08:42:09 crc kubenswrapper[4933]: I0122 08:42:09.919090 4933 generic.go:334] "Generic (PLEG): container finished" podID="28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e" containerID="481b8d2627861adc1dcc6be71486901a8fd6302911b6333b5be7c415946ce1b6" exitCode=0 Jan 22 08:42:09 crc kubenswrapper[4933]: I0122 08:42:09.919602 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wj7kc" event={"ID":"28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e","Type":"ContainerDied","Data":"481b8d2627861adc1dcc6be71486901a8fd6302911b6333b5be7c415946ce1b6"} Jan 22 08:42:10 crc kubenswrapper[4933]: I0122 08:42:10.929057 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wj7kc" event={"ID":"28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e","Type":"ContainerStarted","Data":"50336b1c3901735238196048df78366f9269039211a8835516a0ab89ec637380"} Jan 22 08:42:10 crc kubenswrapper[4933]: I0122 08:42:10.953941 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-wj7kc" podStartSLOduration=2.5127145889999998 podStartE2EDuration="4.95392664s" podCreationTimestamp="2026-01-22 08:42:06 +0000 UTC" firstStartedPulling="2026-01-22 08:42:07.891954827 +0000 UTC m=+10575.729080180" lastFinishedPulling="2026-01-22 08:42:10.333166868 +0000 UTC m=+10578.170292231" observedRunningTime="2026-01-22 08:42:10.94610698 +0000 UTC m=+10578.783232333" watchObservedRunningTime="2026-01-22 08:42:10.95392664 +0000 UTC m=+10578.791051993" Jan 22 08:42:16 crc kubenswrapper[4933]: I0122 08:42:16.741537 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-wj7kc" Jan 22 08:42:16 crc kubenswrapper[4933]: I0122 08:42:16.742360 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-wj7kc" Jan 22 08:42:16 crc kubenswrapper[4933]: I0122 08:42:16.833311 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-wj7kc" Jan 22 08:42:17 crc kubenswrapper[4933]: I0122 08:42:17.064978 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-wj7kc" Jan 22 08:42:20 crc kubenswrapper[4933]: I0122 08:42:20.310842 4933 kubelet.go:2437] "SyncLoop 
DELETE" source="api" pods=["openshift-marketplace/certified-operators-wj7kc"] Jan 22 08:42:20 crc kubenswrapper[4933]: I0122 08:42:20.311414 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-wj7kc" podUID="28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e" containerName="registry-server" containerID="cri-o://50336b1c3901735238196048df78366f9269039211a8835516a0ab89ec637380" gracePeriod=2 Jan 22 08:42:20 crc kubenswrapper[4933]: I0122 08:42:20.491362 4933 scope.go:117] "RemoveContainer" containerID="63cf97f444dc53d17e48deb38656af6f61c8a7bcf242bdaf977d40fe1c5162f9" Jan 22 08:42:20 crc kubenswrapper[4933]: E0122 08:42:20.491676 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:42:21 crc kubenswrapper[4933]: I0122 08:42:21.050390 4933 generic.go:334] "Generic (PLEG): container finished" podID="28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e" containerID="50336b1c3901735238196048df78366f9269039211a8835516a0ab89ec637380" exitCode=0 Jan 22 08:42:21 crc kubenswrapper[4933]: I0122 08:42:21.050473 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wj7kc" event={"ID":"28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e","Type":"ContainerDied","Data":"50336b1c3901735238196048df78366f9269039211a8835516a0ab89ec637380"} Jan 22 08:42:21 crc kubenswrapper[4933]: I0122 08:42:21.338805 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wj7kc" Jan 22 08:42:21 crc kubenswrapper[4933]: I0122 08:42:21.390697 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e-catalog-content\") pod \"28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e\" (UID: \"28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e\") " Jan 22 08:42:21 crc kubenswrapper[4933]: I0122 08:42:21.390831 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e-utilities\") pod \"28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e\" (UID: \"28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e\") " Jan 22 08:42:21 crc kubenswrapper[4933]: I0122 08:42:21.391615 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e-utilities" (OuterVolumeSpecName: "utilities") pod "28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e" (UID: "28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 08:42:21 crc kubenswrapper[4933]: I0122 08:42:21.391957 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k8bdg\" (UniqueName: \"kubernetes.io/projected/28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e-kube-api-access-k8bdg\") pod \"28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e\" (UID: \"28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e\") " Jan 22 08:42:21 crc kubenswrapper[4933]: I0122 08:42:21.393582 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 08:42:21 crc kubenswrapper[4933]: I0122 08:42:21.398659 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e-kube-api-access-k8bdg" (OuterVolumeSpecName: "kube-api-access-k8bdg") pod "28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e" (UID: "28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e"). InnerVolumeSpecName "kube-api-access-k8bdg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 08:42:21 crc kubenswrapper[4933]: I0122 08:42:21.445960 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e" (UID: "28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 08:42:21 crc kubenswrapper[4933]: I0122 08:42:21.495492 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k8bdg\" (UniqueName: \"kubernetes.io/projected/28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e-kube-api-access-k8bdg\") on node \"crc\" DevicePath \"\"" Jan 22 08:42:21 crc kubenswrapper[4933]: I0122 08:42:21.495528 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 08:42:22 crc kubenswrapper[4933]: I0122 08:42:22.064662 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wj7kc" event={"ID":"28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e","Type":"ContainerDied","Data":"8c93c949a7e9395641072fee135c3779d1bb3ccbb8de2aab1b3cee8eb1a8b864"} Jan 22 08:42:22 crc kubenswrapper[4933]: I0122 08:42:22.064709 4933 scope.go:117] "RemoveContainer" containerID="50336b1c3901735238196048df78366f9269039211a8835516a0ab89ec637380" Jan 22 08:42:22 crc kubenswrapper[4933]: I0122 08:42:22.064866 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-wj7kc" Jan 22 08:42:22 crc kubenswrapper[4933]: I0122 08:42:22.110052 4933 scope.go:117] "RemoveContainer" containerID="481b8d2627861adc1dcc6be71486901a8fd6302911b6333b5be7c415946ce1b6" Jan 22 08:42:22 crc kubenswrapper[4933]: I0122 08:42:22.117914 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wj7kc"] Jan 22 08:42:22 crc kubenswrapper[4933]: I0122 08:42:22.132178 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-wj7kc"] Jan 22 08:42:22 crc kubenswrapper[4933]: I0122 08:42:22.150892 4933 scope.go:117] "RemoveContainer" containerID="396095ea7cb867e836887a8a8a4b929f197b3e1cbe99244b12b80d6a51a4af16" Jan 22 08:42:22 crc kubenswrapper[4933]: I0122 08:42:22.526790 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e" path="/var/lib/kubelet/pods/28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e/volumes" Jan 22 08:42:31 crc kubenswrapper[4933]: I0122 08:42:31.491571 4933 scope.go:117] "RemoveContainer" containerID="63cf97f444dc53d17e48deb38656af6f61c8a7bcf242bdaf977d40fe1c5162f9" Jan 22 08:42:31 crc kubenswrapper[4933]: E0122 08:42:31.492625 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:42:44 crc kubenswrapper[4933]: I0122 08:42:44.491152 4933 scope.go:117] "RemoveContainer" containerID="63cf97f444dc53d17e48deb38656af6f61c8a7bcf242bdaf977d40fe1c5162f9" Jan 22 08:42:44 crc kubenswrapper[4933]: E0122 08:42:44.492259 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:42:55 crc kubenswrapper[4933]: I0122 08:42:55.491055 4933 scope.go:117] "RemoveContainer" containerID="63cf97f444dc53d17e48deb38656af6f61c8a7bcf242bdaf977d40fe1c5162f9" Jan 22 08:42:55 crc kubenswrapper[4933]: E0122 08:42:55.492296 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:43:09 crc kubenswrapper[4933]: I0122 08:43:09.492253 4933 scope.go:117] "RemoveContainer" containerID="63cf97f444dc53d17e48deb38656af6f61c8a7bcf242bdaf977d40fe1c5162f9" Jan 22 08:43:09 crc kubenswrapper[4933]: E0122 08:43:09.493403 4933 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-zfnsx_openshift-machine-config-operator(70f2db1d-40cb-4864-917b-3b99f69cdafb)\"" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" Jan 22 08:43:24 crc kubenswrapper[4933]: I0122 08:43:24.494900 4933 scope.go:117] "RemoveContainer" containerID="63cf97f444dc53d17e48deb38656af6f61c8a7bcf242bdaf977d40fe1c5162f9" Jan 22 08:43:24 crc kubenswrapper[4933]: I0122 08:43:24.859419 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" event={"ID":"70f2db1d-40cb-4864-917b-3b99f69cdafb","Type":"ContainerStarted","Data":"3c3933dcd2546793605716e4261c0d4efbb730280e02b20566d5d9fb39cea2d2"} Jan 22 08:44:28 crc kubenswrapper[4933]: I0122 08:44:28.429782 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-474ch"] Jan 22 08:44:28 crc kubenswrapper[4933]: E0122 08:44:28.431134 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e" containerName="extract-content" Jan 22 08:44:28 crc kubenswrapper[4933]: I0122 08:44:28.431159 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e" containerName="extract-content" Jan 22 08:44:28 crc kubenswrapper[4933]: E0122 08:44:28.431509 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e" containerName="extract-utilities" Jan 22 08:44:28 crc kubenswrapper[4933]: I0122 08:44:28.431529 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e" containerName="extract-utilities" Jan 22 08:44:28 crc kubenswrapper[4933]: E0122 08:44:28.431552 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e" containerName="registry-server" Jan 22 08:44:28 crc kubenswrapper[4933]: I0122 08:44:28.431567 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e" containerName="registry-server" Jan 22 08:44:28 crc kubenswrapper[4933]: I0122 08:44:28.431896 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="28a4fa3b-5fea-46fd-9f4a-a53b81a59f9e" containerName="registry-server" Jan 22 08:44:28 crc kubenswrapper[4933]: I0122 08:44:28.434692 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-474ch" Jan 22 08:44:28 crc kubenswrapper[4933]: I0122 08:44:28.443163 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-474ch"] Jan 22 08:44:28 crc kubenswrapper[4933]: I0122 08:44:28.500398 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l8bsx\" (UniqueName: \"kubernetes.io/projected/5a795294-6f52-4322-b4b2-0dfe02d08715-kube-api-access-l8bsx\") pod \"redhat-operators-474ch\" (UID: \"5a795294-6f52-4322-b4b2-0dfe02d08715\") " pod="openshift-marketplace/redhat-operators-474ch" Jan 22 08:44:28 crc kubenswrapper[4933]: I0122 08:44:28.500595 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a795294-6f52-4322-b4b2-0dfe02d08715-utilities\") pod \"redhat-operators-474ch\" (UID: \"5a795294-6f52-4322-b4b2-0dfe02d08715\") " pod="openshift-marketplace/redhat-operators-474ch" Jan 22 08:44:28 crc kubenswrapper[4933]: I0122 08:44:28.500665 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a795294-6f52-4322-b4b2-0dfe02d08715-catalog-content\") pod \"redhat-operators-474ch\" (UID: \"5a795294-6f52-4322-b4b2-0dfe02d08715\") " pod="openshift-marketplace/redhat-operators-474ch" Jan 22 08:44:28 crc kubenswrapper[4933]: I0122 08:44:28.603157 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a795294-6f52-4322-b4b2-0dfe02d08715-utilities\") pod \"redhat-operators-474ch\" (UID: \"5a795294-6f52-4322-b4b2-0dfe02d08715\") " pod="openshift-marketplace/redhat-operators-474ch" Jan 22 08:44:28 crc kubenswrapper[4933]: I0122 08:44:28.603265 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a795294-6f52-4322-b4b2-0dfe02d08715-catalog-content\") pod \"redhat-operators-474ch\" (UID: \"5a795294-6f52-4322-b4b2-0dfe02d08715\") " pod="openshift-marketplace/redhat-operators-474ch" Jan 22 08:44:28 crc kubenswrapper[4933]: I0122 08:44:28.603410 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l8bsx\" (UniqueName: \"kubernetes.io/projected/5a795294-6f52-4322-b4b2-0dfe02d08715-kube-api-access-l8bsx\") pod \"redhat-operators-474ch\" (UID: \"5a795294-6f52-4322-b4b2-0dfe02d08715\") " pod="openshift-marketplace/redhat-operators-474ch" Jan 22 08:44:28 crc kubenswrapper[4933]: I0122 08:44:28.603680 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a795294-6f52-4322-b4b2-0dfe02d08715-utilities\") pod \"redhat-operators-474ch\" (UID: \"5a795294-6f52-4322-b4b2-0dfe02d08715\") " pod="openshift-marketplace/redhat-operators-474ch" Jan 22 08:44:28 crc kubenswrapper[4933]: I0122 08:44:28.604267 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a795294-6f52-4322-b4b2-0dfe02d08715-catalog-content\") pod \"redhat-operators-474ch\" (UID: \"5a795294-6f52-4322-b4b2-0dfe02d08715\") " pod="openshift-marketplace/redhat-operators-474ch" Jan 22 08:44:28 crc kubenswrapper[4933]: I0122 08:44:28.636616 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-l8bsx\" (UniqueName: \"kubernetes.io/projected/5a795294-6f52-4322-b4b2-0dfe02d08715-kube-api-access-l8bsx\") pod \"redhat-operators-474ch\" (UID: \"5a795294-6f52-4322-b4b2-0dfe02d08715\") " pod="openshift-marketplace/redhat-operators-474ch" Jan 22 08:44:28 crc kubenswrapper[4933]: I0122 08:44:28.766177 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-474ch" Jan 22 08:44:29 crc kubenswrapper[4933]: I0122 08:44:29.325164 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-474ch"] Jan 22 08:44:29 crc kubenswrapper[4933]: I0122 08:44:29.748725 4933 generic.go:334] "Generic (PLEG): container finished" podID="5a795294-6f52-4322-b4b2-0dfe02d08715" containerID="ecc016669637ff522a13526f469fca4a87e7257d94a929e38b9f4f8d3011d8ae" exitCode=0 Jan 22 08:44:29 crc kubenswrapper[4933]: I0122 08:44:29.751171 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-474ch" event={"ID":"5a795294-6f52-4322-b4b2-0dfe02d08715","Type":"ContainerDied","Data":"ecc016669637ff522a13526f469fca4a87e7257d94a929e38b9f4f8d3011d8ae"} Jan 22 08:44:29 crc kubenswrapper[4933]: I0122 08:44:29.751221 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-474ch" event={"ID":"5a795294-6f52-4322-b4b2-0dfe02d08715","Type":"ContainerStarted","Data":"d33531303b458dc6b96ebb447d209481d68a76ce74ab237c113949608ed7acc7"} Jan 22 08:44:30 crc kubenswrapper[4933]: I0122 08:44:30.763032 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-474ch" event={"ID":"5a795294-6f52-4322-b4b2-0dfe02d08715","Type":"ContainerStarted","Data":"51472c182d7b1e55f291378279ace0f07178674f4aa540de2400bedee28571aa"} Jan 22 08:44:33 crc kubenswrapper[4933]: I0122 08:44:33.799361 4933 generic.go:334] "Generic (PLEG): container finished" podID="5a795294-6f52-4322-b4b2-0dfe02d08715" containerID="51472c182d7b1e55f291378279ace0f07178674f4aa540de2400bedee28571aa" exitCode=0 Jan 22 08:44:33 crc kubenswrapper[4933]: I0122 08:44:33.800230 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-474ch" event={"ID":"5a795294-6f52-4322-b4b2-0dfe02d08715","Type":"ContainerDied","Data":"51472c182d7b1e55f291378279ace0f07178674f4aa540de2400bedee28571aa"} Jan 22 08:44:34 crc kubenswrapper[4933]: I0122 08:44:34.814854 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-474ch" event={"ID":"5a795294-6f52-4322-b4b2-0dfe02d08715","Type":"ContainerStarted","Data":"24d18eb5599e0cbaf9b47515b57ef639a985f2cfa2a0acf9dc1fc48f239267af"} Jan 22 08:44:34 crc kubenswrapper[4933]: I0122 08:44:34.846713 4933 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-474ch" podStartSLOduration=2.384793164 podStartE2EDuration="6.846687365s" podCreationTimestamp="2026-01-22 08:44:28 +0000 UTC" firstStartedPulling="2026-01-22 08:44:29.753978928 +0000 UTC m=+10717.591104281" lastFinishedPulling="2026-01-22 08:44:34.215873129 +0000 UTC m=+10722.052998482" observedRunningTime="2026-01-22 08:44:34.836299972 +0000 UTC m=+10722.673425365" watchObservedRunningTime="2026-01-22 08:44:34.846687365 +0000 UTC m=+10722.683812748" Jan 22 08:44:38 crc kubenswrapper[4933]: I0122 08:44:38.767229 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/redhat-operators-474ch" Jan 22 08:44:38 crc kubenswrapper[4933]: I0122 08:44:38.767774 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-474ch" Jan 22 08:44:39 crc kubenswrapper[4933]: I0122 08:44:39.838918 4933 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-474ch" podUID="5a795294-6f52-4322-b4b2-0dfe02d08715" containerName="registry-server" probeResult="failure" output=< Jan 22 08:44:39 crc kubenswrapper[4933]: timeout: failed to connect service ":50051" within 1s Jan 22 08:44:39 crc kubenswrapper[4933]: > Jan 22 08:44:48 crc kubenswrapper[4933]: I0122 08:44:48.840048 4933 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-474ch" Jan 22 08:44:48 crc kubenswrapper[4933]: I0122 08:44:48.928966 4933 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-474ch" Jan 22 08:44:49 crc kubenswrapper[4933]: I0122 08:44:49.094243 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-474ch"] Jan 22 08:44:49 crc kubenswrapper[4933]: I0122 08:44:49.985671 4933 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-474ch" podUID="5a795294-6f52-4322-b4b2-0dfe02d08715" containerName="registry-server" containerID="cri-o://24d18eb5599e0cbaf9b47515b57ef639a985f2cfa2a0acf9dc1fc48f239267af" gracePeriod=2 Jan 22 08:44:50 crc kubenswrapper[4933]: I0122 08:44:50.531183 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-474ch" Jan 22 08:44:50 crc kubenswrapper[4933]: I0122 08:44:50.623837 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a795294-6f52-4322-b4b2-0dfe02d08715-utilities\") pod \"5a795294-6f52-4322-b4b2-0dfe02d08715\" (UID: \"5a795294-6f52-4322-b4b2-0dfe02d08715\") " Jan 22 08:44:50 crc kubenswrapper[4933]: I0122 08:44:50.624028 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a795294-6f52-4322-b4b2-0dfe02d08715-catalog-content\") pod \"5a795294-6f52-4322-b4b2-0dfe02d08715\" (UID: \"5a795294-6f52-4322-b4b2-0dfe02d08715\") " Jan 22 08:44:50 crc kubenswrapper[4933]: I0122 08:44:50.624188 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l8bsx\" (UniqueName: \"kubernetes.io/projected/5a795294-6f52-4322-b4b2-0dfe02d08715-kube-api-access-l8bsx\") pod \"5a795294-6f52-4322-b4b2-0dfe02d08715\" (UID: \"5a795294-6f52-4322-b4b2-0dfe02d08715\") " Jan 22 08:44:50 crc kubenswrapper[4933]: I0122 08:44:50.624624 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5a795294-6f52-4322-b4b2-0dfe02d08715-utilities" (OuterVolumeSpecName: "utilities") pod "5a795294-6f52-4322-b4b2-0dfe02d08715" (UID: "5a795294-6f52-4322-b4b2-0dfe02d08715"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 08:44:50 crc kubenswrapper[4933]: I0122 08:44:50.626018 4933 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a795294-6f52-4322-b4b2-0dfe02d08715-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 08:44:50 crc kubenswrapper[4933]: I0122 08:44:50.629873 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a795294-6f52-4322-b4b2-0dfe02d08715-kube-api-access-l8bsx" (OuterVolumeSpecName: "kube-api-access-l8bsx") pod "5a795294-6f52-4322-b4b2-0dfe02d08715" (UID: "5a795294-6f52-4322-b4b2-0dfe02d08715"). InnerVolumeSpecName "kube-api-access-l8bsx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 08:44:50 crc kubenswrapper[4933]: I0122 08:44:50.728415 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l8bsx\" (UniqueName: \"kubernetes.io/projected/5a795294-6f52-4322-b4b2-0dfe02d08715-kube-api-access-l8bsx\") on node \"crc\" DevicePath \"\"" Jan 22 08:44:50 crc kubenswrapper[4933]: I0122 08:44:50.762427 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5a795294-6f52-4322-b4b2-0dfe02d08715-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5a795294-6f52-4322-b4b2-0dfe02d08715" (UID: "5a795294-6f52-4322-b4b2-0dfe02d08715"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 08:44:50 crc kubenswrapper[4933]: I0122 08:44:50.830121 4933 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a795294-6f52-4322-b4b2-0dfe02d08715-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 08:44:51 crc kubenswrapper[4933]: I0122 08:44:51.000844 4933 generic.go:334] "Generic (PLEG): container finished" podID="5a795294-6f52-4322-b4b2-0dfe02d08715" containerID="24d18eb5599e0cbaf9b47515b57ef639a985f2cfa2a0acf9dc1fc48f239267af" exitCode=0 Jan 22 08:44:51 crc kubenswrapper[4933]: I0122 08:44:51.000883 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-474ch" event={"ID":"5a795294-6f52-4322-b4b2-0dfe02d08715","Type":"ContainerDied","Data":"24d18eb5599e0cbaf9b47515b57ef639a985f2cfa2a0acf9dc1fc48f239267af"} Jan 22 08:44:51 crc kubenswrapper[4933]: I0122 08:44:51.000908 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-474ch" event={"ID":"5a795294-6f52-4322-b4b2-0dfe02d08715","Type":"ContainerDied","Data":"d33531303b458dc6b96ebb447d209481d68a76ce74ab237c113949608ed7acc7"} Jan 22 08:44:51 crc kubenswrapper[4933]: I0122 08:44:51.000924 4933 scope.go:117] "RemoveContainer" containerID="24d18eb5599e0cbaf9b47515b57ef639a985f2cfa2a0acf9dc1fc48f239267af" Jan 22 08:44:51 crc kubenswrapper[4933]: I0122 08:44:51.001051 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-474ch" Jan 22 08:44:51 crc kubenswrapper[4933]: I0122 08:44:51.024207 4933 scope.go:117] "RemoveContainer" containerID="51472c182d7b1e55f291378279ace0f07178674f4aa540de2400bedee28571aa" Jan 22 08:44:51 crc kubenswrapper[4933]: I0122 08:44:51.040660 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-474ch"] Jan 22 08:44:51 crc kubenswrapper[4933]: I0122 08:44:51.059276 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-474ch"] Jan 22 08:44:51 crc kubenswrapper[4933]: I0122 08:44:51.063410 4933 scope.go:117] "RemoveContainer" containerID="ecc016669637ff522a13526f469fca4a87e7257d94a929e38b9f4f8d3011d8ae" Jan 22 08:44:51 crc kubenswrapper[4933]: I0122 08:44:51.133803 4933 scope.go:117] "RemoveContainer" containerID="24d18eb5599e0cbaf9b47515b57ef639a985f2cfa2a0acf9dc1fc48f239267af" Jan 22 08:44:51 crc kubenswrapper[4933]: E0122 08:44:51.134644 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"24d18eb5599e0cbaf9b47515b57ef639a985f2cfa2a0acf9dc1fc48f239267af\": container with ID starting with 24d18eb5599e0cbaf9b47515b57ef639a985f2cfa2a0acf9dc1fc48f239267af not found: ID does not exist" containerID="24d18eb5599e0cbaf9b47515b57ef639a985f2cfa2a0acf9dc1fc48f239267af" Jan 22 08:44:51 crc kubenswrapper[4933]: I0122 08:44:51.134711 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"24d18eb5599e0cbaf9b47515b57ef639a985f2cfa2a0acf9dc1fc48f239267af"} err="failed to get container status \"24d18eb5599e0cbaf9b47515b57ef639a985f2cfa2a0acf9dc1fc48f239267af\": rpc error: code = NotFound desc = could not find container \"24d18eb5599e0cbaf9b47515b57ef639a985f2cfa2a0acf9dc1fc48f239267af\": container with ID starting with 24d18eb5599e0cbaf9b47515b57ef639a985f2cfa2a0acf9dc1fc48f239267af not found: ID does not exist" Jan 22 08:44:51 crc kubenswrapper[4933]: I0122 08:44:51.134757 4933 scope.go:117] "RemoveContainer" containerID="51472c182d7b1e55f291378279ace0f07178674f4aa540de2400bedee28571aa" Jan 22 08:44:51 crc kubenswrapper[4933]: E0122 08:44:51.135263 4933 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"51472c182d7b1e55f291378279ace0f07178674f4aa540de2400bedee28571aa\": container with ID starting with 51472c182d7b1e55f291378279ace0f07178674f4aa540de2400bedee28571aa not found: ID does not exist" containerID="51472c182d7b1e55f291378279ace0f07178674f4aa540de2400bedee28571aa" Jan 22 08:44:51 crc kubenswrapper[4933]: I0122 08:44:51.135352 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"51472c182d7b1e55f291378279ace0f07178674f4aa540de2400bedee28571aa"} err="failed to get container status \"51472c182d7b1e55f291378279ace0f07178674f4aa540de2400bedee28571aa\": rpc error: code = NotFound desc = could not find container \"51472c182d7b1e55f291378279ace0f07178674f4aa540de2400bedee28571aa\": container with ID starting with 51472c182d7b1e55f291378279ace0f07178674f4aa540de2400bedee28571aa not found: ID does not exist" Jan 22 08:44:51 crc kubenswrapper[4933]: I0122 08:44:51.135381 4933 scope.go:117] "RemoveContainer" containerID="ecc016669637ff522a13526f469fca4a87e7257d94a929e38b9f4f8d3011d8ae" Jan 22 08:44:51 crc kubenswrapper[4933]: E0122 08:44:51.136190 4933 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"ecc016669637ff522a13526f469fca4a87e7257d94a929e38b9f4f8d3011d8ae\": container with ID starting with ecc016669637ff522a13526f469fca4a87e7257d94a929e38b9f4f8d3011d8ae not found: ID does not exist" containerID="ecc016669637ff522a13526f469fca4a87e7257d94a929e38b9f4f8d3011d8ae" Jan 22 08:44:51 crc kubenswrapper[4933]: I0122 08:44:51.136256 4933 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ecc016669637ff522a13526f469fca4a87e7257d94a929e38b9f4f8d3011d8ae"} err="failed to get container status \"ecc016669637ff522a13526f469fca4a87e7257d94a929e38b9f4f8d3011d8ae\": rpc error: code = NotFound desc = could not find container \"ecc016669637ff522a13526f469fca4a87e7257d94a929e38b9f4f8d3011d8ae\": container with ID starting with ecc016669637ff522a13526f469fca4a87e7257d94a929e38b9f4f8d3011d8ae not found: ID does not exist" Jan 22 08:44:52 crc kubenswrapper[4933]: I0122 08:44:52.505838 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5a795294-6f52-4322-b4b2-0dfe02d08715" path="/var/lib/kubelet/pods/5a795294-6f52-4322-b4b2-0dfe02d08715/volumes" Jan 22 08:45:00 crc kubenswrapper[4933]: I0122 08:45:00.185733 4933 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484525-kqn8c"] Jan 22 08:45:00 crc kubenswrapper[4933]: E0122 08:45:00.186878 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a795294-6f52-4322-b4b2-0dfe02d08715" containerName="extract-content" Jan 22 08:45:00 crc kubenswrapper[4933]: I0122 08:45:00.186896 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a795294-6f52-4322-b4b2-0dfe02d08715" containerName="extract-content" Jan 22 08:45:00 crc kubenswrapper[4933]: E0122 08:45:00.186919 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a795294-6f52-4322-b4b2-0dfe02d08715" containerName="extract-utilities" Jan 22 08:45:00 crc kubenswrapper[4933]: I0122 08:45:00.186930 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a795294-6f52-4322-b4b2-0dfe02d08715" containerName="extract-utilities" Jan 22 08:45:00 crc kubenswrapper[4933]: E0122 08:45:00.186960 4933 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a795294-6f52-4322-b4b2-0dfe02d08715" containerName="registry-server" Jan 22 08:45:00 crc kubenswrapper[4933]: I0122 08:45:00.186970 4933 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a795294-6f52-4322-b4b2-0dfe02d08715" containerName="registry-server" Jan 22 08:45:00 crc kubenswrapper[4933]: I0122 08:45:00.187235 4933 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a795294-6f52-4322-b4b2-0dfe02d08715" containerName="registry-server" Jan 22 08:45:00 crc kubenswrapper[4933]: I0122 08:45:00.188256 4933 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484525-kqn8c" Jan 22 08:45:00 crc kubenswrapper[4933]: I0122 08:45:00.192525 4933 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 22 08:45:00 crc kubenswrapper[4933]: I0122 08:45:00.192809 4933 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 22 08:45:00 crc kubenswrapper[4933]: I0122 08:45:00.207168 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484525-kqn8c"] Jan 22 08:45:00 crc kubenswrapper[4933]: I0122 08:45:00.251477 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/57511d22-34f1-40c6-a880-d1b2950a5799-config-volume\") pod \"collect-profiles-29484525-kqn8c\" (UID: \"57511d22-34f1-40c6-a880-d1b2950a5799\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484525-kqn8c" Jan 22 08:45:00 crc kubenswrapper[4933]: I0122 08:45:00.251848 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/57511d22-34f1-40c6-a880-d1b2950a5799-secret-volume\") pod \"collect-profiles-29484525-kqn8c\" (UID: \"57511d22-34f1-40c6-a880-d1b2950a5799\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484525-kqn8c" Jan 22 08:45:00 crc kubenswrapper[4933]: I0122 08:45:00.251917 4933 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2xw65\" (UniqueName: \"kubernetes.io/projected/57511d22-34f1-40c6-a880-d1b2950a5799-kube-api-access-2xw65\") pod \"collect-profiles-29484525-kqn8c\" (UID: \"57511d22-34f1-40c6-a880-d1b2950a5799\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484525-kqn8c" Jan 22 08:45:00 crc kubenswrapper[4933]: I0122 08:45:00.354691 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/57511d22-34f1-40c6-a880-d1b2950a5799-secret-volume\") pod \"collect-profiles-29484525-kqn8c\" (UID: \"57511d22-34f1-40c6-a880-d1b2950a5799\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484525-kqn8c" Jan 22 08:45:00 crc kubenswrapper[4933]: I0122 08:45:00.354744 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2xw65\" (UniqueName: \"kubernetes.io/projected/57511d22-34f1-40c6-a880-d1b2950a5799-kube-api-access-2xw65\") pod \"collect-profiles-29484525-kqn8c\" (UID: \"57511d22-34f1-40c6-a880-d1b2950a5799\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484525-kqn8c" Jan 22 08:45:00 crc kubenswrapper[4933]: I0122 08:45:00.354903 4933 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/57511d22-34f1-40c6-a880-d1b2950a5799-config-volume\") pod \"collect-profiles-29484525-kqn8c\" (UID: \"57511d22-34f1-40c6-a880-d1b2950a5799\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484525-kqn8c" Jan 22 08:45:00 crc kubenswrapper[4933]: I0122 08:45:00.355935 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/57511d22-34f1-40c6-a880-d1b2950a5799-config-volume\") pod 
\"collect-profiles-29484525-kqn8c\" (UID: \"57511d22-34f1-40c6-a880-d1b2950a5799\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484525-kqn8c" Jan 22 08:45:00 crc kubenswrapper[4933]: I0122 08:45:00.360538 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/57511d22-34f1-40c6-a880-d1b2950a5799-secret-volume\") pod \"collect-profiles-29484525-kqn8c\" (UID: \"57511d22-34f1-40c6-a880-d1b2950a5799\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484525-kqn8c" Jan 22 08:45:00 crc kubenswrapper[4933]: I0122 08:45:00.375561 4933 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2xw65\" (UniqueName: \"kubernetes.io/projected/57511d22-34f1-40c6-a880-d1b2950a5799-kube-api-access-2xw65\") pod \"collect-profiles-29484525-kqn8c\" (UID: \"57511d22-34f1-40c6-a880-d1b2950a5799\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484525-kqn8c" Jan 22 08:45:00 crc kubenswrapper[4933]: I0122 08:45:00.525958 4933 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484525-kqn8c" Jan 22 08:45:01 crc kubenswrapper[4933]: I0122 08:45:01.045211 4933 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484525-kqn8c"] Jan 22 08:45:01 crc kubenswrapper[4933]: I0122 08:45:01.125812 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484525-kqn8c" event={"ID":"57511d22-34f1-40c6-a880-d1b2950a5799","Type":"ContainerStarted","Data":"a9696af8330e4a34d1d3b5c6f7f3f8354250eb8908a4ea9945f4a4bc89dfaa56"} Jan 22 08:45:02 crc kubenswrapper[4933]: I0122 08:45:02.139106 4933 generic.go:334] "Generic (PLEG): container finished" podID="57511d22-34f1-40c6-a880-d1b2950a5799" containerID="96838631ed992562605e16c070398e5aa581b5ccf5a119374a7280340cedd1f4" exitCode=0 Jan 22 08:45:02 crc kubenswrapper[4933]: I0122 08:45:02.139865 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484525-kqn8c" event={"ID":"57511d22-34f1-40c6-a880-d1b2950a5799","Type":"ContainerDied","Data":"96838631ed992562605e16c070398e5aa581b5ccf5a119374a7280340cedd1f4"} Jan 22 08:45:03 crc kubenswrapper[4933]: I0122 08:45:03.567069 4933 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484525-kqn8c"
Jan 22 08:45:03 crc kubenswrapper[4933]: I0122 08:45:03.637501 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2xw65\" (UniqueName: \"kubernetes.io/projected/57511d22-34f1-40c6-a880-d1b2950a5799-kube-api-access-2xw65\") pod \"57511d22-34f1-40c6-a880-d1b2950a5799\" (UID: \"57511d22-34f1-40c6-a880-d1b2950a5799\") "
Jan 22 08:45:03 crc kubenswrapper[4933]: I0122 08:45:03.637645 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/57511d22-34f1-40c6-a880-d1b2950a5799-secret-volume\") pod \"57511d22-34f1-40c6-a880-d1b2950a5799\" (UID: \"57511d22-34f1-40c6-a880-d1b2950a5799\") "
Jan 22 08:45:03 crc kubenswrapper[4933]: I0122 08:45:03.637686 4933 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/57511d22-34f1-40c6-a880-d1b2950a5799-config-volume\") pod \"57511d22-34f1-40c6-a880-d1b2950a5799\" (UID: \"57511d22-34f1-40c6-a880-d1b2950a5799\") "
Jan 22 08:45:03 crc kubenswrapper[4933]: I0122 08:45:03.638538 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/57511d22-34f1-40c6-a880-d1b2950a5799-config-volume" (OuterVolumeSpecName: "config-volume") pod "57511d22-34f1-40c6-a880-d1b2950a5799" (UID: "57511d22-34f1-40c6-a880-d1b2950a5799"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 08:45:03 crc kubenswrapper[4933]: I0122 08:45:03.642920 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57511d22-34f1-40c6-a880-d1b2950a5799-kube-api-access-2xw65" (OuterVolumeSpecName: "kube-api-access-2xw65") pod "57511d22-34f1-40c6-a880-d1b2950a5799" (UID: "57511d22-34f1-40c6-a880-d1b2950a5799"). InnerVolumeSpecName "kube-api-access-2xw65". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 08:45:03 crc kubenswrapper[4933]: I0122 08:45:03.649637 4933 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57511d22-34f1-40c6-a880-d1b2950a5799-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "57511d22-34f1-40c6-a880-d1b2950a5799" (UID: "57511d22-34f1-40c6-a880-d1b2950a5799"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 08:45:03 crc kubenswrapper[4933]: I0122 08:45:03.740154 4933 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2xw65\" (UniqueName: \"kubernetes.io/projected/57511d22-34f1-40c6-a880-d1b2950a5799-kube-api-access-2xw65\") on node \"crc\" DevicePath \"\""
Jan 22 08:45:03 crc kubenswrapper[4933]: I0122 08:45:03.740191 4933 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/57511d22-34f1-40c6-a880-d1b2950a5799-secret-volume\") on node \"crc\" DevicePath \"\""
Jan 22 08:45:03 crc kubenswrapper[4933]: I0122 08:45:03.740201 4933 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/57511d22-34f1-40c6-a880-d1b2950a5799-config-volume\") on node \"crc\" DevicePath \"\""
Jan 22 08:45:04 crc kubenswrapper[4933]: I0122 08:45:04.177726 4933 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484525-kqn8c" event={"ID":"57511d22-34f1-40c6-a880-d1b2950a5799","Type":"ContainerDied","Data":"a9696af8330e4a34d1d3b5c6f7f3f8354250eb8908a4ea9945f4a4bc89dfaa56"}
Jan 22 08:45:04 crc kubenswrapper[4933]: I0122 08:45:04.178379 4933 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a9696af8330e4a34d1d3b5c6f7f3f8354250eb8908a4ea9945f4a4bc89dfaa56"
Jan 22 08:45:04 crc kubenswrapper[4933]: I0122 08:45:04.178906 4933 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484525-kqn8c"
Jan 22 08:45:04 crc kubenswrapper[4933]: I0122 08:45:04.663791 4933 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484480-rvn9h"]
Jan 22 08:45:04 crc kubenswrapper[4933]: I0122 08:45:04.675783 4933 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484480-rvn9h"]
Jan 22 08:45:06 crc kubenswrapper[4933]: I0122 08:45:06.505946 4933 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="52cc129c-befa-4407-9cda-1b6773ce71d0" path="/var/lib/kubelet/pods/52cc129c-befa-4407-9cda-1b6773ce71d0/volumes"
Jan 22 08:45:11 crc kubenswrapper[4933]: I0122 08:45:11.722044 4933 scope.go:117] "RemoveContainer" containerID="481c0cea3203437e4c89157235c523a5a1f197629a5048d28dcbbbb19ae90ba4"
Jan 22 08:45:40 crc kubenswrapper[4933]: I0122 08:45:40.942857 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 08:45:40 crc kubenswrapper[4933]: I0122 08:45:40.943296 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 08:46:10 crc kubenswrapper[4933]: I0122 08:46:10.942781 4933 patch_prober.go:28] interesting pod/machine-config-daemon-zfnsx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 08:46:10 crc kubenswrapper[4933]: I0122 08:46:10.943487 4933 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zfnsx" podUID="70f2db1d-40cb-4864-917b-3b99f69cdafb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
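The entries above trace one pod's teardown for UID 57511d22-34f1-40c6-a880-d1b2950a5799: UnmountVolume started, TearDown succeeded, then Volume detached, followed by the ContainerDied PLEG event. A minimal sketch of following a single pod through a capture like this one, assuming the log has been saved as a plain text file named kubelet.log (the file name is an assumption; the UID is taken from the excerpt):

package main

import (
	"bufio"
	"fmt"
	"log"
	"os"
	"strings"
)

func main() {
	// UID copied from the log excerpt above; filtering on it surfaces the
	// full unmount/detach/ContainerDied sequence for that one pod.
	const podUID = "57511d22-34f1-40c6-a880-d1b2950a5799"
	f, err := os.Open("kubelet.log") // assumed file name for the capture
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	sc := bufio.NewScanner(f)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // klog lines can be long
	for sc.Scan() {
		if strings.Contains(sc.Text(), podUID) {
			fmt.Println(sc.Text())
		}
	}
	if err := sc.Err(); err != nil {
		log.Fatal(err)
	}
}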
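The recurring prober.go failures are plain HTTP liveness probes against the machine-config-daemon's health endpoint; "connection refused" means nothing was listening on 127.0.0.1:8798. A minimal sketch of the equivalent manual check, with the URL taken from the log output and the 1-second timeout an assumed value:

package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{Timeout: 1 * time.Second} // timeout is an assumption
	resp, err := client.Get("http://127.0.0.1:8798/health")
	if err != nil {
		// A refused connection here matches the "connect: connection refused"
		// failure that prober.go records in the log above.
		fmt.Println("probe failed:", err)
		return
	}
	defer resp.Body.Close()
	// HTTP probes count 2xx/3xx status codes as success.
	fmt.Println("probe status:", resp.Status)
}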
sp1F#Q}:\=3Տ ln׮`QCx͵:|?셝<|Zsq߃#VPkZV~$ =usu -B7nM&pwA5$g֮18,4E}&\/XDv#.iG1D|mO2Xy =zj],!@_x% NGi&#nM}`ˬj.sMBwv١N"2g*_--!ֵHTMt-kv)`otvct*GWebyL8*[F6Dr[րL!$o*T @:Lyw{-QvR?eLMe++VDZVl%ĩHK75';΋ u$`)`R<GrDO g-gb0u[Uӌ5'hCc8'hݘoInf`d$SW6w6R}sAZ#QqɬA꿹LvS>Mr/_)\ΛDTktчzK1̔ 㚃[4bkX-حrzѥq9 =8lIb :m3YAQ&ztH`ġͰ2hR l's@pSU}yj2<(5:T=F`Y͘hl1 #| ˥$^BFC,v{WͽkaTEiUsML E26j8ZS0kȧtUʲϽNNY+!Vk;3@kpxk";cfa6#2vf`rmC U|E/ goN,oa RU{# J,ׇl6+v`֖  6no2GѬ1@IՓۙa4z:| q֞nju7ueQU|h6IXul&zXᩑ]:!)yK Be?F4C'|J$?4z(egnC>r%sI&ɞ4WS"R9B%4vE eG3 u|=앟ְGуЃ{(_be70 >nd ͪ&:s uvM`r *P9,F L=#Z =c:+F}):( w^r2{~==kn,_&(OϟV"9o{ݢ^7"']}0%DԒKQ9e+x::ez!X(e1'1łŴ9:-Hhu1րꅘ󩏍cHꉵ= Ul ه} ,;;X1s&1@1ؤ(ط9U)xXNUpk|Xj`Fwt.g yf9VO$'}YaPk E=iFC#ΐ$M*6|1!zbH:}KH_wEx. ӥ~y-&\/uݺn]W?6AVb\Ŕeahu2 ?/.;: 4&--)**ߺ%Oտx^$z/]7}ӳo.Ň*Mt..yv}_^I*~E ԓf^8v^XB;!v/;gs/;grYok%+QʁkU>%ߊZziU߇?XtYkeuOd:$/L!KFD !#Im¶D[U*F/H;1mjj#ӊ;E˪UD>s&V4O%6 ^r =Cb}=]/:L+b]v& z: vAa5f ɘV.)fL6&Ɓ.F#Ǥ\TיV%GeEۂD2˾唡UާCl{"FCi:tsdv/"9(k-fy;;b`vNĢ;_<+or $ؐ_<|-z^~7"7kmJ_K~sq_?~-̳nw`sLu\W+|۷r}G]u*Ago^ ~Bb59rsʯo 7KtY{C`,(Q``X=i-0W,G`DmC`z?\`qK`c|8Q`.J ji|`9qF;y=IDx|񎛢F+)0T)0D,e$}߽1;?JR;yvmx}o =EF72-dI}" {6,;,mqU@o_B r+;1+*ogg"rq[cq3+I&t!WDJًQ&!bi~iK l8v{%: B9b^>c@TIRzIٖ 1f~i]ɴI|DSE^P' @2,n-︩Ĝ֖yDٝh%d:/+Ѹ`1Ӆ 靼%́:T`>xM=$Z^`v2+Es}rxLQ]eK ;nc+ "8btWZ}(J\~ikF(Br% a{rvH,+bd 7\L9N-Z:}DfS%λU$="i4ezۏX6jG/@Īzy$V?X4$cUoj1탬u~$ ?@zG=G6v&l~jgcsH9|?ս|z|a6~K{҉a{&oyݦYn3Fy>W;ޝ?||&οD$pĠkFb3@,K:ϩ]3R)5,6#KG[]ڭx57s/0Z(@1У7xqqTs:=y[VǟN||ߣ3׸>})=O䆱o6z(Jş_Ԉ Uq3WӦS nރt&Wcw*~<1 ֙-VB#UiS9uq t3飉99Lx '0sbs fystSŚϘ?2h! f A]Gbɽ9lX PG?#Zcr֑5eaHӮT>*L~k4k o ΚY=2nY?@z-1ŝƛpJv\q^u'O}-LTcU"f?~:oDV w6=Պvw~@^KeCOG|P3OϟU(寄]Ys9+ LP6ڞv=/is-1 TQRQDUl1n(_& R3BŔCQB+Vr)AZcjԂSDFXxĺW D{U+S [-zT:91N L iHĭPqI(יZNT vZ:'@06 =j9k/$ZY{Z8:0P:/X8H 0j͂o0őc9hUK. Pd^6߳[/TT8)FbN`4aO r&,;PRH)$ZXgTZ )۹wW!R\V%[~ر`R܆gQ\1or@&'x!|iaEyu@]F9֔x_ 遶 肈cHuIc5.6yA/nv8EYLkT"Sji`rw2A*7IYZoXQ$ALv#䄋 %7@ELН.NM_On|@2*?ݝ36WipcWFpT.kP/cOfp4tѽV.~;>Ǥlj$jI+1kޙ2!'CGƼ]k]'%e#/VvuUA x0E2ظj1<8㠦iB NFd'<{;@ ʷMOeԺӣ t *L@q7݅9 oR0C[vr;^x S\E6خ u@~nӄ0Z[-MPIElQ{A{y*F@zǑdRF̭`R9rّ0hzv&d" _%%Ay`樲fcQpmX|aMw˯B\<8i͛t$ Ϋjzy=֗/̓{OsA-Z2$!$Ttc6w ӉzB- ݏnԭP)nu{p.OÜI`3Nƹ,|։ߒ+[M!"yv|FP"w44« lqbw^3=JIUqPP`.?\O.E$n['|.wkywݏ *yNk2M$zbr/Ocym SW պX]?~:'W,~dC4`-&.÷4N3'mxH3D80 ?d0 Z0Ĕ 4NlLxKJO .*hrLƜA( QI71\\X f79qg2 `ttrKwPA1ztɐ*yInq^ 9X@`ڑ]$цq&:EW?A饱$x-'Z(ݶnрpeM1vFT7-Q9 WџU:ǭƥ֒ڿL >Ea񮰣}lz2 :WCW"\8X޼ufb9 j !"@(&XXޖ3{WibZ-p;Wh+xM?Aldrz&gʧE_Kj+$[Ky h 0^M2m-EZXy.1ga;kIh³s+]v-lDDIrr8GHfkPD(R g~ǭfa@RlmD<1Y3̃Tn_!'G 9'(yiwhͯr'_u!T_ϽDvkϵ@lE'Фu]YH{lܺ׎ڜIW,LyY&=v_%w}=n&M9J{^-}yn& (٭wpip)$ֻz.)/D0ؾi7%sBQ]4#tȴVh[6r>$o4{V}˘DAr7Cީd.kr>N GT{z݆Fr$犹ӕ{FӵjCvcQTlZtB5˥ v5KY]jcT!'e*mj=aj0B˩i `rO$?vݣ~!#;%zSŦ4:8(8NitA݄g(0@7=mPKCgT̠ ɳ\7CWs|3A y"{VvQ{Irb|ڲMaLbɖ1N9[7@>Ȁ7&9y3M{ЯCMxI Z @&j2ZN!oc҉Zaؙ CUi>쇧%*|v$< D ˥t`DP cO5TJuL')Z`sV(Q\%JFURϥNr e*h!A1I筕Bq1 xe^S8,9?7wW& 0 Brɻ ;ES&u9xLB0 ml %a}"kM8sXcH * n@-WrpQe mHn1TrK1ߧ/M#ſ`ܽ %- j?{eJ|? $`3A2 Cĸʝߗʪ7)UJݶK.Ϲ{֬('Dog;i1白zfrdg*<^clFp6o:C@S~U vpKcD^}[vĢP8B}= w#p^PXأg_΄`", Z%19Z(]Wdž*n4fd@zI\dՋ闤P-s}a0EЁg?QD}r徳SqRp62>Gg,GD2b4LRFH!0úo[QXڴPlP=͋8[0]IW^2}I^,oV(BgP+py;(P%݋;df9؎+JlT[*=ƃh][JGCW&+ 8&`/O/?/Eo0pEFՊY_NgKWʟe2;)t~|%_Ž<cGcrK; }ˇ#}wY_|||}oj遽JHLX9<g)r E;9-lFh#"3fʮnl h>W9O5Tn>f))SعIҌT J͘}(Б1zinz^J")sC2i!2ץeE|Hiy3lA;Kʎ&i %%{8i:˳ͥ!6 Pq CXI48ۯ ;  {blrvb*tjʧD-b9qtRr8'ӳI6ֳ:OIHfAEO7kY\BᛋbCߎc @gvB}%H6Qb+poԥIA6[ z)F/.hI 2Q& ,XwoCG0-Z)bjy,;CQ8&ڜ[ض}3ǡdT̵!#݈Q㢰(n+'O#VO19Y7^׊:EdYh) RFK.lSp@W2R vL_k&6,n]LKcxg`<օ;ty_C L74f;Qw2W ö}j /aKyݲi!)=V TЍA0U0nmوQu76ez!qp1f&d 0;9cP0P)9Pt9+2A6v#(ADaId"&_Wd|E[x̂~lx741-q1@ϳU>7/1+x[sLp6Vae/| w r;~3:Cjϓk兓l.oןӗ뇙^>^KgG4!htKSLF.[QO& ?C`4>ev`'T4Ⱪyeak῏V2Uwa1 Uw4bpUw3@%nrI*t,q!Od4_A. 
[^5QQӤ^buw9{]]B( v 0HPFIitC &Pikk["AQzoNV.w'TϊmgDNX䄐lˁ+dgDȔ083dXSnۿAXFLeHA"@ϵX,xpx ^vo/Gtj ܩŮT`)IX/QqM#=zJ[AE-+GU7anoCTjQx+t,yw M_fXJiD$D1+ў-[P`& {J^( E^JhTbK%FRuYn=ykZ&GF2k;Z=xD(:YɊ;Y1D'ÀPBFF^u{|$QFh*ITY&((4&XhE&pffT)ƄИg)G9KGXdJW-Յ l~ 5z^TPEz]_WNû c]m5䭋?~f߿u>*ߍy_gb4:SOW,WJaou7m[er^-{SQY$iC޹pNt 1DX |\'!m)㔊ޢ[tC}[]OS0C̔b^5 3=E*A+aoI>:Zߒmy:FWZ:~j@-3CwF+xpD ?-G;mcᑈ6+0FL1BȎ0J9Dv\11b̸pR!60M]~o}7uUf+QR}liB: 氝11Smj2tM 5ԖCزqՊm x\6ҿ퓴voL{ J k=éFL;ńN;ECMLX;EA4Sw (=WHNykW luݺƉ7 9n[,HCqg#ܾUk[}Zn Oy74nĈm 44QTZfTa媢8mꑃǰ60Ax¨ݠ!!Z:-ःb16c 4zc%5ux3x6_Y1 H{MluzX1Dt ; D (C0(A˄Ԏ=0G c3voG~/a)DGE5>gV쏕 /~̸cbBpu·AQ;4>qR|8@ UByg Lj%΅ QmGucTڈL 4)6P) \hcfDrբNѽ:솁{ SI)YߚrR<{~M'Z }>Zg7*~nQo8r2y2fBAa}{}w5{o]Rw3/~+~fUU_yz e f Z%D0HUImQ_|ֽXb^jG ғeDverKDQp O5L{HgNK6&Ȱp :5Us^JԶ%y_b>!Dm} %I2}[|:~;we/+߹ms%u+VdJ-*zV9Fʒ[Ti킢W:?gn㳽 ƫbt^lOaRpSCZ,Y%P߿zg>, X0_) rFݴ>zt㞛  Gps,ܶZ[&r31?l[IfsLC=ShVH/=fm!0udRPh;P?{۶_1(yyb&{ n{wm &jdّ"}gDY(JQ68DsΜ99r)WnQ,8j]thmbT}S51Y|:[p "C.iV1CV-NE`. g<_/eD_/_0wD,X_z3]v/7Ofa:ITd 8Mb# rv gb=x0eϧx5eO~.\yz,{\zrMxz=ٷїnl9FGo+8V\9^\.񵎣qH |w֑|k<Ycq>wUXyEYʒX;S"r!$e2"XjAYtPDb} HPck85O !Sg ڀ>'?41A\Ӱ j]BA) A8w*py>8F쩻4 "DKl:|uLw^Gba: g<y $B-czHޗm7Bs`g*ȼ s_yƾ~aՃWf< %~}&z_zܐI94 - # z1F ͇C)(~˟vSnA<RFb c),N,F[j%2ıNL4)"nKC5ؗjPO5KAd2.eChJJXnT !VA.0#0SmV!% % k8ks2%@O(- !DI$RአK^poWlIbύoyZS%RdZEOlwQz#j:yD|(-l{ w8Nvd/~Yq7),l8cMWjaD1܈}>qu9( FQDR\%&L"$åT$ 9+d1$Q#BV)Φ0Qϖ #6#rw|P;㳯 YSsW+dkl5b0oMшb5@&L$i?@ P)l9Y`p" K 9KK.-?M`h1ؽ_Ϝ}9sDg%`K%5aSnu`1j(J9V*BATwW{c@uPTgֺR};7㭖uQb}Aεw]- -! L&lK,oqFȑqΪq<Uwk\PѨL["=D1G{΍9۳ѯ6DϏ ;@[a8УFW?MˏMߏw{vW[>6Oݓ5(':>L.{)Xt,EXi6EO6{ɸT^VP\*tfAy}e]5wu?rjvoG>rjGocS5dpI(=8 #6WiNn{סenzۺx_盈ڼ]M ps0B#4<@T|xq'p 4ԗ|`K`M׀ZO,҆F 4i",‡j@PZ67c)9 "raN)a7Xq)V(e\zE ^6@%j`ɥ~[P.H. "ygS<[2+Ǭn?$ɪ<=m,OJ'`z [CƉzJfn髇0Yl`~ed C ,.ύ0>̺< "ԂK/K!KkIr԰T8 gHX$ܙ#S-kSD]# P@1x lԊuٲEDa#o(oeɱ.d+zϊ:rwYެzUb}ZHD2>¦dO|y<_;?1$RJ2.D"S^FcZ&TnLDp秺 TwYۛpȉ]nU!b:T'[d@Y`i*bejNjKhuWlWh5i]~6A.mF4UI Yˬ1"pNN(DXS@rK NjRҗʺ*sp -{@ &{^{t{:*݊„*:H1N)^=zǒbCp1U@N#D2ArSF}*(97{8#nYQ)lhWvvP1ޖg#%5ϓ&(wˊj7|p t439)h(H\C{5L>inAٳGQx[܃ !&GS2N%I@T~F)V, "@ xPO <0C;K:8@D\鄁`g v",,]:k6.]1e_ 񌓚߃vٷ#-퓑n5u j2ΔI\ NP/2+-|: 8H}F*gU4ȒflHDowo%RpRBu+_dRC(qDu쬙f"JRM#˄Sg8QG2si.6ܗVj8Ci1 KhȔG2a KpzZIָ$:Aܿ?ڡ=,Df< #NʗC%吚Tʰc'(Ag,: VQUy~T!^ rt8û/N (XL+H9YGZ8qPsF.@IT=W/UȢ1ƍƘ4:$cPAO] -a1k( $bV NVKc9 pϐwgKCjAw6;=x'GOI gWx޵y(PggݰG~Kh 6GqpolkqdsUϢyҰZ*ⵓ$i%[SzQ Wb{H%yڴM6}zMZ Wթ[CzJdKF,|这rá18\%d--q1/H0#-;~.+#D"h+68u.W_g8>i()k] /mH>B@im 뻊y ;Kjt8Z^%I5:(je W»Q2V H)c U6=ja|\?cB[94v TP#.oD qgf\`[׀ 5W8}#J ڢͣ@SpX1xRVLQU ÈqpNwm_K^fZ`˃8LJu@iqCQZvEW=<4%##q]:&\[N!pRd9V=i2w U-囝JLf89>Yز!!1WG{vzG1M5Iy4 5H0yQ`k~0rQl.*-_+X߲o x IOfn5sJ 0/QVHE4-|01a8 5I?";)";)S䞎ݻcj^I`)F{ S= /b9W(״wz0ݾ_hb52KÌ!Nh ;g xը3݄ҤO( E(QTkKE?/ճNJ NJ H: ˃xJU,h!\(@&Ig qQg. 
U]NLNTK grX**?"1EtRDI'EtRDIQRc*#g4g *DG So04]"!hV5㦓o-JKdE:9`Q ݻ+ŖUִ~7J{͜JEE3)G1`,8+b`S B|w06' bax`)&f}T1eV#Gb%4Z`GT0jDABG>m QN %` N>26haCUa>JO>M8C H >b\0+k#b3m,+:g*8C[S8 ]5.E4x>'GLS%⊚2/bpldwx&Jygp̂K1ǭH`*=v Gw·+Co[ŭZ խOW#<%V\[&)D oz8w;2+Ci?m̏'/:~x3$"A@?&ׯgCe>{{x# ^" GSz3?B?pTU><\G[ pooq 5gK7nP'\$z\P"K>m#zd>bG Lebv]񙣸nL!-hSz;Dd!,ls5dH 11&eIෂHՖLDE1Z"UQ9M,";S^ßLr_H~1o@; %wy`ϩGa7T4 a-Q{?kH+f@NzbxAYz{2ךxљ˕1W >vTXe)\vYY2ɧF\I#VOCBdEU f=-qvޕXrI1B0?FLm7̓rpG"tiu5ؗG2[3F?uq$8Rǖlm}bDG ;.Z/V.+:|gy lhɋ}ކ V-^nC"~؞_Jvm1=b?÷Q{*r\:3!CIiG/a,d\c%(jn ZTEKB{jWuFj1-=0@ӆxa9'XFeԎs/}dF*D"GAZH*%?mDlINh<1o¹<24*,%8%Ǡ!|BUXz ¥fG|p}tʰ1$@ @KZi[ÿpT=@p\;(IFbIz8&= -~RRj-)9qE9rfTJ'0WKYʣU\WcdqDd)hEP'b /@F*GiR[@dw )JibUC=gɆw᧳T[, x?+|pRM٭q7k8@ŧ^ &qCob,.ƌߢa6KL'#xvk?B"nq/=5BDx!gsX9ZG~~Aj1~.+Lxj=J !\!ۙzZZ,?[͗$jQu \^ 3v \d >q#B\Y&r)X(\-Za,%5 Ttˋ JdWU"|'5jntSEp%[8V쩊AkktUV.\6a+.-b*9ZRChDzmxϠnY>V5SE< s^T_gM gX _b_U{%?f #{y&cE1m=fP)ᭁ1QV*>y; (ݧLf< h1"^]ϳe:6q?tϬAl2w]9* 9Hwodc{1oGƼȵ{glrѻdjM?&#cd72mޙ5Lt 3u|Drwə|>v]xkF76CW4 b8ShNv+pE+^\`0{0rVT {+g@ cT^2}{'|.4%F2d$c$Z(#LPZzQLXmO$ o,dR6TgtCAk5N^1dsIS# 坉EDOd0^I=Cf;qv\b:EJb:e 9œG=W7IRhw*FĚtbU!PO/SF81P c>lj[h!&xCp%Īn,rh;`ݶLhE9|xdZIM?O'- 9,1 ǽ${.lCspvV.v nMAkcG[7G'gӽ|F_j\nť`4m^DWM-nnY?Zj/EDO֨~T s3䜶E½1"dJU6bec mIf[PMᶐǍpŇXji,\7> 7_Sp5<_r3p=; ѯQ:G'"Q;#'H얅#,\y,\a,s};;O6x:ƳEϹNr-E̱ɉڞjɱ/oB%CvB!ӵ/;oDDI\ rx7$U aԤ`r-|mvg%H@YID rS&ۦxp$S}SKt6W1/I^UHbr(0F 9oy1 5lX2y/7 ն|r Dx Lm ]P!J*a,oٷz I&}]NDKR޸5̛$oN:eo-5 /8ybr"EVTk'E=3Aܻ״wlR"5D[t]Q.-c))~54a݄T]xCHEK @>JV')NRZ*G1B&Hjc  n$8g(hI *+GӉGɷp ý]NؤW8ViOH?l5DzXFJ^r\3Ql_;Q 8!T6N)vܓd ^$*&H[(Bw  Y3A_xӧ3?Յ+xÛٗpAB2iҘOgzȑ_)6L2xE~nϢӃޙy򢐼j5==H]%aö* ~`BWWM?0u׿xJ^nr?V=j߹'|Y*L~糀d:}rL +৳<>#Y7~-El:H$`DJ<H%j&yeV *sxVH ͓/u_È\~xϊ"ۼ%uXAw b3{@4㜛*"h#;h8-eZt8 vWe`:mƣk j.h h6НPl;r\|y, |`(zxr#~p^E3G<*fxfObD%FSFgOaW1F%ċ[r3v% LNs3cPpߥl;;e<~0pOs)dWe;PC8`<ĕ{ץsV㷱3l;QAms:u^֍w xSsUHdOzRM>r}(,\IrmFAݼg=WOF(Kbvԕ*ҼWd1MQ-_ҺGOq!Y^*{/oȷôjJqG+ΘpwG?M^O  Udd[QRgRqyZ<%,޼Z><˛}<}9!xUE{p 3ߣWk+@9+GU%|8OR@1V҉KIz>hPV_;ȅVqc<<ϛcI4CÒ}eN;4\ӦHkbʿ ux}`VcjG5aGCa_fJ=$)"!_\@@M4}rN%x<;Ͱ_MK~u ]`S TQ?iN Z8.u<mNB킥9Hc=Gz>v g: X&G"6lմ$1/9UnH ޲fEh$gDDhaMV+vÙvg0z)g@]Le,3I+TQk^E8iQ0q4\;^8' V >ouFsB Kͷ1J+T!PPhVrC'P4LKD c)Ҽq.0b8+o*g%^T~J|O_" H.E>uF((^z Oq,ȚZL>C&D,_X]R-Mb,(3L5&b6w7O~|>p7-dWki9Bs!9T:_C,"NHnIjܘzCF~7,ͦ9ypzɧ[i5=MOKOS7郇*ԄH.N'wWID] ҧ3 oWz:vWg9zFdUjYQs8D{M|ߛy>V袾 y9hId,Esy;<^A̷3(3{ǺoIb,Хշχ@MDweGa2V?Ob GnEj|]!0ʖ*V@hUE%-p$H6"ГUo?\HuEz):hrrEEY*Hdؼ.R˛ c`OK ׁoޗ)bcahSz9Pz0hJ$D`s΢Bƺ!8/ |=̈́i7jctnWV3PF b(-ݫS4*`jPU2q5X };Kʖ~P[&.>B #Pͫ jȋ %pr ܔk:Jm'^+j#E*S\עnj[3n2N>K~zz '[%$~|fmm3%=d ewvbͦN|5=>d/?ӓ]eF Z3q|MFl?/$>z&Ǹ_kݔWY?үY+%3'skqQݒav3N`*c3z= S9LJ_ Zkt5*X&"h h0qG7` a.|JViɸ(I֕e #d1h#Q CFY;" j pҢRIa'Ss2t ZRV^AyZ KF A p\:x4pK㍣s s<GՌ|(5ߠh͍FXO=:kJNҴ4W}=򏜴PO7̱ϗҐ}g ޟOЖ¦tmc&y&NQJ4uڸ*o=Iv؟M(g\=ƿFŽiͬ'=f} U6zWnloٻOޡwAtmw]6u859wRևrdSf4NлuŠtƻrƌuhwB^nmSekN˖_9-yúS[y{&縱k'O6XKBL h\ItX/"QݢB$ݧx7Sl3;1g g??{WƑ LL3/=܁dŁPX\ߪ&E5fW6#YuNmyw=,?^^r7xO?^)s~ά LçBM ]o,:3~Dd%罀<A2`()cWMpu THDu ӏcӗO%+椇Sӗb^Q"FL)[V[.y2$L4ɛ<yCA)kONSǧS*ǽD q(=oEԄa9nr;}}OȝA,)8-8ut3L#S75,o++9 6y(<ɹraŠ .шY%NCl-uP =0$i # cfS'RK&rANzSGDXi1?bw;Rs0M1HI,״:O7 EN V}%1aUNq0mgvPgℳ[S RTiufVB65?$nmh Wѣuw #qҬ"qRZ#^$/sG-njB",Zj,$Di4"J! 
3yHM Ԗ!-5KEwNPKn[j.Ng*œҐBKIn[j$ /ZzZV.>73)&%qhú۷UI6sZݪq٧U찄% ہy<<^ߩ "GSP…PFtPaT Ƹ sw|k7?!l|>4ˋ@]*0"mRqT*thF 9C=nN~1v#<lj ڤf廸3eݍ(YGGϯyzJfS8ξfˬ"()$쑒_a`ednB$\: ;SOZ*fV;/Q̩Ŀ}-2)31kh*<4~nQmMNɠKi ̎C+ǫ2bǻceԴWUnkV㨶߬r/nvމmy~s[?D bRoQ̽; @Oc 3; CFST@ -)K43 GW Sm *o?@<'>,5 f:]MCv쑷{4D+)F  2E#kRih]Y֌@( c$uc3Q./"Ì)Ixp@M JnV7z1|)>3 &e# O "0ۣq0 dW.aߨJCObyS#K(10DK@9 `h8H!+Uﮚ54]x("WxO2 HY8&oh0 zn=DM^PqIΗI˜΍("םCE%d+h%e}3?ʹYCLw޶uEٴ 'aF7YXpF$YfjT̆}~ ̔8>2&z%3"z=R;'QRh a9S'=B sQ?yhyA) %1UG$#ƀc HW'^R}U ]D/YO/|Ũ )Z{OKb*ggskq/^ ~ըSzhe'Z`P_&jeM~v9JIֱOԕ`4t)x۫Qw, lTU^q;Hc!.xSI:^Gux]TǢ8s(bGbHj*T)AT9ӚAeÏOoQ#OӌcLىerL$sz\*1_[|\XuT(ㅍ޵@Co@c^3'H 2À 7r]F&VR#P߉V_glˎAtY<1N ?x;DbU_ofj~9{`qUٽ#zA]5 lMOtu7QZ.pf dra-gz7C}w@BԄNY'h~%i31eOL$ڄLh9ΝRP$!. lp0W۠8(P(XpDg^/q^F8 v2 B8-JSZkabКf!`30]mlGkL ;3[gez[} ;{J}}Į>X1Ly各]n)iʃ2X;I4F Of`![5cH\raCN-/\H`ٖTPEkfOU'ӅMmLDT]QNLSAa]Z${,*vq;"D7^R AY8,l}ƕr11n{[)T[[B٠݉xs>+B,)ᲯB2耈$fΊA3G91xKʷ3qZ I29%}=Q QIEΆVeMc}֙e:NЂ$^EWy/vZDN(}I}2:,G/^k:pBd^ը N.V]LL,K=`B Gp" d\uIǐ0aŢ!}q{L/^lDWDJN%tO.X@&bp]w#Gc@'rHOӻ''V*~e¬ O< `޳1"Πz\/ uw7IZAKRq3\V$|Z1Di;÷V0A!z(IX[?`9uf?6cVD?.lgDRB:1msߎGNe'xty W-[} O6b5nJ{~w֞>e+F jS{gq]25ǯ[7嫽EuY4?G].v~ `8eή!u$4<)մz7j Qgϙ.뻎V]<_*dncYvG`kܧT{.-~ԖK$Gç.ﴪիgHkuuoZRҪ4*PZy9jyZ{zp[9!zБiy޵zJZ4%\Uw՞?ш7S|`͇PQzP+c0pZȭ|M%P4=^;h:]gv-d" VnT_߄n{1m>K(;{ս.H1+%PiUHP8$gD RF i͎6åfa1!h)˿pbRc$Z2FrELe)q ӑ*smՍ2-9uR+D8r/7$nYW?.ꉋ#ljj>{37۸e6d^3}6}͗I76ވIė?}sz煻y>-l0_Yr?u޻ ήGbKE|?),giJnT,*\擴d!DlJ (b11gx!YÈ޼[z6,䕛hͦMs,1G0$SRfG+JAs7ڙd:{.Vͫ '+%07A/ߞgono7Gq7g^z2R8W)ܣv ܓ"Gjsw"ĔP+i=vd흪\g AE?6nK!η4M id4'ij2+_ƸEiPQY_5U\r)He-GrO)9IOf ! xKIMV0 0 $ fԪ5]T樘=@ܯ[\Ӟzve$:*۰^WJ=k(>'I=1{E:/ȋ%[ĶF>W|#3ZN:iֆ cĖW;x 3f)cuq#1-Q<i K-F8:4]izcf~w3 }~0o0Fhχٮ.,* #"ֳNc|EQ󣇈"8W]wE޵]*4*)F jOs~DiKJ>0~6`Bteڻ8zz6,䕛hͦMsUês0FĤBR2'RRIy!)vZf9w@Bf  KmHe:- eҙdnCd{O?<صXrM0?K/ 8BP% W:$C˽ odН:ͽLjݭ<*&ܺwVG{i,̗ Yl,ٛJQkh; 1ggZr*{4crk6c D 's 5HQS>h穻՝{#t |C 5r 46KL\ 3ۇ;D˸f q" 0]|SEC- n+2>_i=k"%4zt$HWqlK΁qnjy&!z$($0%Cqa Ug !~|N /z]Jt_\LJGRJ;7Y6I0͟$ŜW.r1U5\DHi2i(DRA2jbR#B\&$iN&߹X~^LɈ\%}.&+ V_V%*^VrG?`Nvh}VUfLBgid:pfMi,TJ-87ϥQM0?C~>fhMar}\9* T#G[ n.ڡ jʒg `-)-pPʋLRKTR*<7f!Pк_ڒz`4Q=R7F$"j2$Ź.2+l dֹ$vlqVFQŦ'@sb^ZD߅ F>"oB>B;~1c`=c1:AYTH qo-xC]kPO `-V*x*T;g?|ri]VXC5I= p5]5ʤ]h){ C(8N!aCBrp)$>z Gx# E쫶"d?*3dy}^ۅ~(r!1"xlShvjorˆv\H@N(_BnHOR'I),ӳmɵ>\C췫L e>N9}fWWOǥ^To~-)d%|Yyyw.q~;@Iu3`L׻+|x{{s% E;zz/;F|; (cv<ߞܽ+ B\pْwM8n)f +?'ZVchZP-A) βNi 8vJw,zJdLtK:vQ/hvěڰSN;=9_2!Ys:[l1?^RF^l%Kc gP$)Gz' aHCDHz}Y g !zM#:\PDrdADhiي*|h5r3FWMbzyIJz7us)T/ďq"d'#_tޜ6v#/׀=zP/>V$m_Q^r3F?1+q{;7/%-3j봭?6 )܍8ŤSoU!iÜ̝sUi)ԆOKUgدwJh|i[޽'}\0]uԙy+y)Y/0,I_/JU;Z %an_Z8Ed5%'RvJ2.AbˎN(MTt{1!UR~b$!.{J!w55Tʺ.Jm޲iÊKűP\oCۛX|Qk0OӘrcʍ+7fcvuM`s3f h5*rQPVМR ,\Oj۾yp"I/"A~i`|.f=I2Gs1ni4<|!yП3Yn2k:k $u:KB^|0`;d&>o fy:]'v!#V1R>Y'1uL d5^J&R0bXR.RQ\^5ژܝ^朢:ʒ5SNptk:=>OO*}H1J_{,5|J<Ņ;s.eykII)Ř(ꕀYI'SJ34&μD9äSģ̌i;8vԊ RH CV"aCZXͲuNwTV*㥊`Ua,Bo Qb< c:q5滴 ~$DʦBq;̖ MˉLt0LXAV\O;W{^h(.%J⠢d(ok߫,9"N|zm>_67odexR|G0jSNcff#_BhfIe TzUH Bx/՛.lHafI ! Ǭal%((?>bĺ4>@6pЯpKy]i"5 Y[JUmph:UU hqzl<N.|]5B%D?  } 9B47w+cG2*EfDkWhuxW){1Ԧ^-bFFkF 5FA b\L@) f&eBb"-Kq*',IS+o亜T s4 P]0Xb0, _F*"C>'}6N=Sgp᧩|<{ߢ&X~!f$՚rX6X.R:FXh!ZeAI&s"))ÕĜ#L`g'<c C( 5r+ՉW\!(ъR*BRr*bRUL'E`*L)720R. 
32c5b^]BwZ NMFDia50R8`a]SQNAd XˆXՠʫՆt>)Vk}mX̓At "pERKb4%PaQ1aR ƒn 5 c ܙv-ps!CI MtaT.%tMclTF`8V]n P,C&(su;^džHSD!2KRl{TRY_O΋w zۛ1+5Jm?(icu"wQԂx`4umy_ W4+J*%YnvcPݺib&i´DhovΟ5L?y3y{vwЧ Py?ɢ{ڽ.>.{ar1L>Wgvrm?ҟxOQ$P]Z KUX:h)=Ь0g[<^ם+k1=aӸ6=eQXVwUGГ 4W۾.ͷB౿_F0k_5KGVwTp`.gU'^*qdN_u;ӄ"SXfxy4R 22Q2AtvXu!-ҭOO .?/A㦇K VG"T]-U[D-20^ÄF|(8<FNLʹvUXK?>RKE'.lP?VxrLs{az3&\8O&L|0IGp39ӠtFY$tdD ׂC$; "2(ڛ[]`!]H5҅x#$W3:\\Fy`0x,bJlV6Wlm^jmod +EQ(UK1JnUB!h۸h֭r;LRV"ɯq>q2W3:${9(Z]7NsPy|b[b5M?yhM%m֪u&M#Y<ݹwFǪooc($l<׹7hjDF19DBQC#$8W"!ADAm{f:,C/at5U!Է=k`\kR)Sp |Ԛr|/.fi݈ˊTJ6Q #lL 9Z+PL-{* WjsP}yÐ'ϒN~t7P4BGf~k svӕt*@%`Q D K)ݝ*v_5xkZu TKPM[N(sUl;WK9i 驛g&}xe1U5L6] 2={Cw=Y"Z"n,`Xƾ)b=FZS"ڎԲpchHܦh z,SC(O!dwhϮTe+*oXU%akK9`y-U5j)%-:ڸ$铆= kv2tB-ۿxFt-FC U/5(7='֣G[2t<(TΫ1Qgε0ǶJ~WU4Q6>8-+u-C61QLZ<o\mP4V#4C&ķ%BB#A8HH1,`Jϸg)kK e"TjHM$9zT>^`.Ts"KMK Ac χ `=єXvθ~EZ2ےΨܚE~6XӃ]8'&ᓵ3` M!g_E`o0|ta`Oۛ:Fzâ;/"Gvщ7L{(ڛ 1N |#LQtb[-#98Y{?&ߊţ*gRB3:Rȱݑ5coMe1:{ \R>'os?~ËhvtgC$g(ZRjъ~B\~czI-Z%]jHPt\Ť]v5`̎ ,Xf:֌k!ڧoّS 0otwr7eTIW*>rffM0bfBsuRa񠤍Y\\w`(z{W;ߙ[ = U/wDHczD;%AD__ j(R{".=e+NpF|=0Kk ?J}q b%UUqxgw'Tdn׶PI%L_Q6B)_W}=[p{`mnqN _"d0ȯ&iݸ%s k.AN}5aXߓ5Pa8_O|sxJ*etPsCJF)/@X_ ra$_]/.Pa^m!;¸X? 41+f^Ye`Q.|Lw,$+Q2g!MQ2:k=CQXʙd@0ʨLf' ȱ>C-2dz! %y!|J]50?6ٵ߬ϸGe_Rw'+@᫟F0g "ٟ`OoN`LgK0aoo l+ O\po2 xpRؕJ09Yb<™L=b4rIh(f{ieh[1mu$v%!0Lt%a\,h_ehJ̬ א .9,V؀Ǽ x# /ck:NuyHFJxVJ PC~r Uu>z^eH% [EbjP%+ԳTbo`5}c};"#R[^"T>.D$-/P K4)5Ggl !,;)cJ M{aŽa5 )>5XG#)."vI@3^­^bl(R0.4r)NjeJ -(.-7'%(MJ7y/0S,*Le(^g/L`e0r&$W|͌Vd(.Ӟ[K3;;=[tCޛsj3ڴ$hQ2 ' ˅pឯOf^~^Wx:,t{fWꫳo>}]hKyd GPz|A6KN @d$cdl¶=x49;l" /C{r^>! ȱ R6ce4isӉ3mn=:eCA)x_: wx49;lHN!j n=- RgOtq9'5S% pڐF.'=ǿk'XѼ证pp8=q:S c{TPE@!y󘘰CLz2<*84ä 91cԦssM1&ZR+8 8<5 A.p륱fU"۞P0kez-sjuaduϽ?X#Q9 `FʔH0acjM*R 3+Qt=4^®{5)dUtWyuW6R۬Iwdß>I*Ix۴9^'.fbtT󓵢 l"jؑ^m%/'3j˧ۯoAȈE2(ino >'\N(Y9a/}Ld=ڍo΍(KsdՏսY\hL#xʥYrFC1e *N)V0h@Q \6Xʼ.oR`kLHqB|_f3~J(N;TL$?X@5CNLԹ80 DN ף{JrL&PE >Z><Sˋ9~@#3) S\7#~Y5ׁ>8QR:%*e6>x%ՙ<'& ;=,4* eɔvl%2 <nTV`2|X};@`Gi^l/8߾ИdGR7h-Zx_?醏R k;p?Vo0K]!e*YMCDIEGEuЭ@TAM+,E*UW~i?^}[u>y7P#dW4xŲ,~S7"dRplN$ҁfȭp;8( ےRD}.˥UjMUpOZl@M#ʼ$;f't(Ѡ#tW: 3=)!Tez]i*8K{2\պX͕5t+U3U '$MEx-`z %%Ҳ㴖[ᇎ f͞L fTUX,kʄVK34k4mbPW52@^Q2P6f8Al eҪ80jV7L5Uu5RZUvk޴˖iI]j -Vj1s,GjF8~6^9:][E mP$1jܫϷ* ӥ_5%+wTK"ekv2zF4SMEDOaR@GCѸ0R6s[<~߅ 0.2Ȓi< 5JQ#FtSmTEmu[|/ͧM:z1 8Vo4-#qQQNNzv{ϞWî4nJJU)¸hEc(KZ |e~#;k{ {B[ Êލן %JO> 5WyNjOA[i-uI`3o\1ߺ*Sx~ľ$GG+pK,a`v\Mc.kn676C&x>u0&;J'8;ˠA%F\|۽Lp.a ǹ= p bv e3$JJ\@^G.MաYi.'# sZ#q*7,.d2+bjZt( 8a6iQT3.*Ljʮ Rv Fp (rF,b4eD¸e$݋ܵ78u !56;3͚V V-]*ىW67O7,)opTR44%URۮ KBeSjIK5kHO3BT"LݑM#D:!~`<@$BSȦKw5%M1N(cKEYi2 g Ч2 >M&8-9P![Ri{G̯iM62SmݕPD]fGr zڸg*IVMfeJ}mLC&p  2]e 4tIۙk %l9gB.e\4+T,' a/ QxMnpEYSttOetfDa=\gFO'HϨs]+NO+TL rBH ?8 o6N_'8c/^Ϩ `+=.rk71wh'w"mǎCQڢŸ=~㨽uh}n"~ZV!ne߶q)Cy^.|_J&[*w`e8M1)]oDiGT^ 2{'[zme,7o4|h w6>?, i|XogYIB"4-H֬ZEUZQ-t78LcZF0kA&'&NI 91\d0銚|F{2"$Ud sSx'_Dg7= &8^Q+K>;2;<F(տG:l0ͳ#4gF`C_rInDe@ kfț+p:n^ v-TIZJa~FlA9 ji%TteZʰllXۚEkR BJ :d jiU:5r:Pqܕ@s*awuj؜S3D l:S2#Ԧ*z$PqKU'8vI ڰ,^پ] xhUYBKͨb-.C=WZ@tPmCWzjh-mv'e௷;xBNa\g>AJGȤqoP ɥ qvr 7 Ӧ yrգax#Gl)mrDS,uc LS'l]ϾZ9yH/V.Z2b_J@' 0q8|:9͈8Ũ& UVr{%rǣɉaCAi6D,(<<=myx1 2m|_<~K  4q^+=&[}`O桇ٚK$㎢6 ˖ukê.iQEVUU 趠"mTGDo \{7.s7cBLt#|!V!߉SdHб/J_{r"E 9q\$ eO1;%vD1s":ቕcqx)kK.ꄹ@;_U$RQ˖b⌫q\ܥxÙ$3eH{.+dQCF Њ2!҆&m`;Ѽ%FkGkhgR"&Fxeڃhg+#@)NNm-bЇH\r$/8sNb/;Z>ChSN^;V6e=8yN\1n3߻3HԎk[Vqu{Ƃ> ЭAmם\ҝ3{Q5ĉC+Iq! 
;C:QGj; ".Q?~~.N |T\S{p2DP*h 9%~/J䣩V Kf][С&ƺE> UYĘe%sC(U 3B|w3"{S\!Y 1M;swmHW,z{e~)*b0=[ꙗnL&ֶ,uqm`})YNJ)M-7 㱕JOA2xB ~ 3͍>XqiE`HeG̽/1Iód q"&ץ ߡAnQ/|2n(q@UOF+}_ׇׇoinıfV;Ǥs Ҏe'jΥ:VLQNrJSc߻lvL x|b =5[m7f0N?،%EȮf+8Q%DٹT[v/z'w%HGqmH+ݪ(o/<~Z@Kw_:\1A\T K|+.h.@aV}fo/1BxTY#|}?UlXO܀.)31O݁ܭNy/ټ&$e3*=KQB*|cK78y)!HqÈY"DrTVb< <)5<Ƚz)Qe祰Bs~P; jgc=^,eֳyOc=KG{3誒 =/>9փ!@ZrBBT≹ޛG<1aߣFm\ }Ĕsm"B!6yE5k#ҚD o-`Ͼ:3pm?i{gWy Hnp {_ݶM >M%=`7%<-k7y]Vי;]=d7_@ o&jny]بRf xT=dž_RҪ5kHjZCu|*t /KܠRF fX>AP.<ں"$T0D§ {z=5޶dÊЊ_&#3]YJ }{Dbrd۹TŽbv՞S ;zaWH[au~vwn#uݳGBVSq8AISqSX_Nũ4AX=/%Eg[׆"j\ދ޷sd( GZ( N`v]FGVaB ut8\)Ɗ¯%9UA>;t9"z^J-@KK񢁿 J]]gpvoW7-~{~wI p})>5_v㽿|XmAP0Cl+}wnxͪon$JYk'ܡӿ׏y |[lpU.Rd!'nQ6%egMŒסw tbۨ:@[r&cS#N??|h-,|&,E˫h08f/[;x]:[uWvuGnM:x/0|>޺ (iѱ"VQдCU˯Y P}/rGO{ܛt%rxKq*Vs{[Mc.bElP 9 Iژ|%a^-1zJŬӍml1ڑ~Lv4QE:mr#]z,&&g)xh`6b1`9R.գIATi&r5P\3H1!J'TTd>^sTǤ!Uwpń驤?8}ת2.ڬ|] ^xT d'iUKkhzQ|)S.=5 T޷̈́(Ru]vMP:͙۞@euYgOp"V.*YBN+YZ%G})|]#.oa~5`AI5O/)M.&0)d@RR}bx*'" F+i8^fJUQm V3 ګcGe?"aU˽TU'+? ^IOWIX\alFH iKų'+'AplIDQmP6U<b2j^s;8op\ĽY\ |2NM{ 8_D3,V ^xpN 7TBUN_ Spk=a` ^CeO{! C@%G@ݻGl1}>BK_M@-߮# |;ՠak[)QE=P2=E_2(eLW0ֵ_}T(Ǭ;UMvSYv*hm>ir,魩r,T( _*<KJKmi9>L"dStRb&45fFӌ~fHQL,ic̔NN `G>"(FjLz{"Q+j=63_T|K:2 , LH j-KSX"ly 0_!l>Nq{,h{g!&T\\aAEjn7 -d)8dQrMFA aS[YhUC &7Ӂ5"֯Gph/wl@[1p"3[ۓ"\NJKZMU/?+dUGvܕk]ޘ+n{m{̩5΍{8+ɬnt# ĖXmq֐[[L;ڰl"|m4qR#LDr&r D9e(i6̉B5zAU҆i+G {M/J$}o H5ͣ 9@_"?& 6GF7̃4ǯ!:]gAjygh)CDW-Z< X(Poj'3:kHxKǓ8m_VЋDu=xR@!Ng"ap YL5"$&sJ9](d+)#)O(˩DeP |LӮ/OWfD>c2w5A\Ox|+.h.Pg.AC!<*/_x6_5i9Cߙ5#jw`}^Npl^k,zG. 9RJ,1>祊΅XBz8r?_ yC' 2<ʜ. QԴF.:|u=66}WOkL{Pou](Iy @TY;PhOXy0;V޼ XTWݫJɫ5NFCpR?Gd[MCEI8O21BݸVJ8a͸H :`U$()(T$TIB 2!ekFGI]9v9"]j #֒ S煌2i0BYưA )"*e4Bji!DiдH! "&Aabt)Ԕ0'X Jdվ87yJXNo,nGU&;.9כʭIT1"\DŽ)B5%ɴ)4g: (ݑ=J4(ͱYT#y`4) ԻJBRQrZt IzN.CB~LjuIKv-\5dƓ t1_Gf2띮{yӛ)(i` BH%$VIp3[3-'ƺKob: G[{I]I,Ïͷ@+Db*b</t@q:\( ,;-ɫ[n NI{^U+҂7 C)E(\&R%m:*1n?V^ozF 唴q-*w>%`Mۏ[9\0a ^Kd~#Si!k~+}ue3^#S*#|kEѦH+h5Mh[*fH쿊8uk $n>m61XV#?KґfR kmHKp0r_x_Y7YRDʹ,߷zHJˈ= I[ptWz 5=SʚNrW'ڨ!ϡRNc8sIʏcLRT3 u;,p:QS]]H+<R7F_TM:o?g\^R+5'&FfAQۦxU/L[ o Z}B_֫6j=?\nN[!K6+-2y^ΰb%--܉@#hZl~eyG$cxP1"0K,4<\U8jJ!|%ӃgljF{Gr@0Zko'hO9 l(LɽM[.y:)x^/@T_f>4B2rїL(=DV:dRR^^=eB@("y!_ڕpZҖ2oV:[3hj URf _3V%+" db2z%Ӭ_fTI+5WgЃQe_W0'VH9$?mʘ _J?'/"pɲ~[[+yM&Ԭڸ%ɯfSucal"Zؼ^7$I%M* o% 0)spk,lLXji|Ԫ4Dk)@DB4O˯u}Cվb"izi8ᾛWϜ?M괩mXQٱϲWsCk H^IZ6;9/9̡]sul8>z{5#`iJ慏^%EB1cuX!;ZDLs:"BS9T:ɏFLTRX [yf(WR)!Bc1M@io8ta\J8`pWѬXr]zPԐf"vgiI;Ō|+-b+10(485)`(-KAEIe$ݠM:XJ=>h†L3Vq)1[ThARXS:a9@/,+Dnx3-eDEp8h m%q:xq,x NSMO A%[lPv!bCͲMt~?D[$ՊZY^=Ywj}V{'* ?MYuӭ2|~AY>Ng"&5._տs[YΣgQWj!RI&H=O51M;n7P28rH蝛fWc5@$HK"*Vp-H(buLQW0@g9v7vuz(=jЏ-e%9ǖ0)LM$U+ p.:p$<=[H/QƼ3v\2Gi|@cO莻g)hNa4'r$z<|C3GKTpҜuq#K=4S!i:UW.Uu)Q趃cBrppAz57WwڼFy/~pdktqN|k#;QSSw`}3b`ڧ*ə.bgno+7>axy~Pdzea{yt' #_ 4P}si; k'70Σ;xpX4BebѤĒIY,*%S*G#AtKVa8FYjc4cRLX-1 P6mD+#͊ix`;8kgg47H< Hk]hF }]߮  "͒> Pkjx:ŹN& ƺTBHdRLP giYa&qv*w睗=̞nQKͼC5W ɢ>Q p qtXZ{75X%v\Yui럇cűmNmwmTW&m4lU5b%f՝%F)]ܐK2հu~Ir{r~#С4:1mA@ P&ץyT)' Aq VڡiN#s`zIxIMKmzJ$ξ|ӣfw*5iV)w yS`&ml.*ޝt`W쐂Ŏ*02@{ԦPLct'i)yCպGdynuh ac0tf d3i^xSDhSl7\~)Bj{g"~l27E`M Fwy nsbF(/~dqZzY,,@OiB!B>@?$]`||ͦчCE=Yq|9MQ;*¡?^b^VAp3ߨNKc:!5}XAlfⷹMpO alaXԘae'Aa{ \?);dEuv^m=\H:mJ)C.&5su#,SBG B.Ţv#I;;HnS >K",dZҊ2h03v2z/)yIJՈ,_f~XYgg>o(q}ZYP9M۞o?!g >cNB_X+܆~U+zĘl_xG AgCoIgnz) h:OމDPH=NgߋE.3g>Q ˶ȩ 7FdbR?M&t1{Ux`}4NL` ĘZ;,x]FŴ#l[$HZ[//VBԸH(KV*¤E;#%eDuhNd Rṕ;#LY-2)%”D?)yJ)E88tQ ^[!*EzRCY 8BI'!Yfb;ƶ3xGuѸpw$@]?]6^ öIy%5HJ8kǦT75\V1i7Jq!$s36J,:kG$,Dv(1ẘD7A nMIÀ 9^swg$WvW2@pT!-,n 92[ B0 >s[A@>Dq=%B̀b%`V+u]q!T^Hr: NPj0@Os Ev=C뱶$"@ahi#k5/]2f]Z=5B!'tm 3g#ikNЖyl 3:~1ОS}Gn6[hV\im׌Ǟi#OK[+Jۺےad&rѝ|lP(QTrl`fPLXw8`(Y.jB uyt]'>v," }JHƸ.~%askߔEl8--N?l!sƔȀsןa?A fkFe/i#(V{>" H+!Wr!]x<<۩K8Nl%`+Z+ 8D 
!MN@xd4+oSyT%7ԫ@&{vQjoMݣ6d~NY*پyeoFdS@K~*P3n kY7m8 a L]qJ⌝*AN^1s\-dKvE@*JOk'WCAmgmRȓh~)m+aWR:?r$aXBpfF#u=ǿ4TɎ Fɾ{_rL*:'ѧ [L/ѽ~- ȿŏ|34A|%!-fVǫ LJRSLJj/+ h8GٽBHL!*,U!s B "OwS^?~pД02-XƞPsN C7`TGEmPJ0 C䱌0Y;mdl-S+pÆ~.0-6jR) \aǭJLI׃#f@]7ZYm-'"'&i[?oH4JZ*k #t(hN*ֆڭ7'`Y_} RROhh)`9D%jl?VC{GH>'NXQ pqi%K$@J Q2kQk1tsyfW`)@)B(E^8L&+gnq|V]rY6q˝"Ƞ( ݉m<rkV,tX)OV#Fkg~_' Ar8UW/<%6YvWYWwBӰss C/fhn1Ĕ|Owm=nr1,q6'A}I j<iI(ibݒ3si5ɯŏb5oB$!'  L‰ o0JcGW@^ݾ˶mo&S%Nilɧ40߆,ew p' miWi{}}7eE0oi!ׂˎely¡Ups+PۻZwPoX 祇4nʌO̹q!=/*x i s;.8tRoe\´duzS P[X ;*lguZ?@ {IjBqBDdZZ+o",$<0Q ]#jvY}s);EnMMyz"0$ 48+x"B!['^E=H=OuP]t9[ʪq{A׃|3*̊?\Oj ~9X3+7LPƢlE~y鏐x''2mJl-1]g 9mTc9~tw̴gJyz' Q(pGUJ =yf4kѺbP:mn\guv[֭z֭ y*ZS<[^{^s*86݄fn.wxDևY$rh 4ƭ?phe%5hR!&KIIx'6h+F3N{2Hff7H mGIyJ׸!_ V ).E"qʐKO/I[3 ̊O3 8C=| -'p,O\K?-ipH=z/H޾>hN캀S+0T@!$uf'Bq+:6ik5nT2:JmbL\8H%b#jkCĨHkp ]{hS,2!Ԗlm5'?߮ڰЪŠ6|G(SyzfRxׅ'/ z}&J^ &nĔAݠ%>qއL6 mu lq3mfƯp^ωf$ 8 PbhvCI`[yg㇫Ɍrd!eY0^& ϩ%{hӰ}bwDa9(|1|ż7/@b^6/-eP4lt"!/s\QXVplM2eWVWQHZSF,*j\:ZO4 jnݤ2=؉|f35M]-syuղ5euYY.zs"Vb㥜jXY-=Y!/5ћ-Qasf[4q[+BrW^A{ J )q43q\:myr*J!X E7l,^sN&Z R@1s/8D(&8-%,b$Y(@i pTA 3Ȗ<4A[,QLѝUgAn^)}?9Sʏ~#p h3p2دDѸ sEC^X;U6Wu͗3n!b?RlB0aɒ F#9Jw7c AvEn*nݟmR8-?WKtAX"KWWû?ջLգ7|b];.>v.T߿@&f΁TC qRsMmcr5YFؑ,gΚp<}TrcGv^G̢5?qу;˻8֙޽^\tR %~")9KY6dv^ 抵zE?jk; {oF.Cu~P5B}F,;JzI" MpQpQ5iHB;*ƎHׄ$ES"W" $ʀMi^Oچ&^m{+1͌8aJò))xģ'e?'n* (XA9.Wy0B!D $>jPJ\P6|aƍrfJFx>.Qv}t7@K..h; J_QVD4gXkj. S:M-9ru{EՙpesIH N98RD'"@4Jh M:(A\ J)e-OFvBz%`VQg"r I JVQ6r87sVZP+~>SHLڴ^V"vU˫Js %O9g Һ,%/͜BN#Ad)y)!G.VU!@뎑Lҗ+(Va'̶Upe0K"J%$U`jR/`[~VM~1׹3lHy ~Ǜ3dO\&?!7sU9l?>󹃛e™1\*+$Ͷߗsw?"dmF@fyq@]Xʞ@mO|9n@QtS䈐qK=)7g{{Ӹ;S(('Kw;.hxΝΨh5"N'O"hq#l4{*0U6PhZr|Y^G IeASq7Ĥ5\,:@YoAPT~ֽ|+sPᚰ*ApӏHrFr}U9/U3ev33d}+n}mp\[JEeUJH$FI%X`́1!9Ӓk\dhh Lb":R%``lYw! BuC_+cMQLb!q/+/69: '4ٻ6ndWXzI쎂Ƶ*?lVXuV"eǛ_DΕGѠ}t740;H 6L4\ZSc_;rG>cui>pJ@}.Uns|Q 7ްyv9Sy<htp@!n.$.n>ɛWfo=;d# ?.ͪw׾\qdB9oO'Fۋ`8dd Ui16\/s~AUFDBebI߸[_]|cMfQŊ}K,>ĿALp)8πrjFcKgJ'/}3jY-{פsr#g wQߙjV)0ͻ+ZjiȱCv=BtnA_m "/|dC cm«۽itNi)$]UIjƽk9$B2dNy½PyFH-AՀy|Gfg~N7%U>VR(}0 yѿ[e"od^Iɥ97nÒ߄%+I饿\eO_'b|5e\5Fҡ5t &]5 =]쌱f׌( +k(VgT͔RT~vhQ m\/[Ų⃯4|ar7`HզraS)OY&#h3R2PKD["$i0[0ǩiBj Ë15zNfƤcUPJ:8>qݮ6P+Ҹ }+Oǵ/g.3ʂ@Q0D?$ 7oo/.$~ τ\⇻ʹsu?ng|plJ8? pʘ[L/˽#{E>,ಕYՂsgcmZ0Bp5xe9`z5ET}QQkuCkB}elNLpU]tX\wĞLlDMJt~ovcb3t9oJ啓`omj2AXp%$; \/F+1JQ͹j0&N]őOgH{=g ki(IVnN{[Bb"XI{Y:G))ݓns5])BGL>Ye|y?YnN:IZ{ʲA@!x%8sr\2Ri.@GtRn1qU,K±m[ dH(Ԡ ro.,\X ] uGw 9"Y 4pA3G ?EŕS =ś׃sh\HamZ\Ɩ:S8Q=j <+{*+Bv| :}~^He{C#XcMu(ЄPpm]CJ4KXe̓ mTЌ+s%ڴiwc[-y\\"Ի2eFd'HaB"Mf<`xyL[=.>zF/XgsXת,X]]nC>qgO?-s{O8kW 9 m?QrוM^ꨠr <SA_aǹ͊tBpC Fq&[3M|g'=i M"$ΑI4n&~ZGY,ۜ,>J&q;O,$ UԽ\VqkEX#6,- H*<~St ;.Ugf֑Q|g2ߕk /cY:nD"a.* )AUĶsOȊfz]dQaorњ$=a zV0weQ3IqWʻ#C1$EjQBPd4!߇סBփ~ 6e)0|8K^z겣> 3V6KS(WGgS8i8iV(<MKHJo=hr(U*ΤvjͩuXFqB)Oӆ4m|+8D!LFtă 8RFi}D4EMSD4EMey"8m02hV:[NKD `5 &`̞Mj460VD=̊2M^+&`i UAF}D5ܖ;hKtzR6-zA4lY@w Rbrֆ<̹")RIHERTHr"sbrUHq#H"KtINg<`%-? 
i|k>-xb[cTPnV~.nޕ$BeK9ÀzИѶ{^z dasV4$ic}#Y$,֡%!&"\ 1@ꖍˍ葻[9Xu9~ywsE`(E7.a¸E7sqz *\DIOߌA#5C]]ecO#^翋߹2Ot%;7k~`|$6`T]+920c.0= l@4i'L0eʕ'*8I魠+dSwk!% \ IhഠꄰR1&=Eƀ!vG/qTt>pXu>tUXyL@neTBs,de|X!aa2BxNh g͙`-/g( ^_.Iq?+dqzò!gcY UV}4S1f):muP^N\!s#% &CDԶ4׷o7z%Ev4-:DlzֵB(>z3{Ye [un]$5& ʹ?~_~l){7su#G+4)[K?錛hvu4@S0k so2ue]Kf~]vxH7+JPTK\i6MT)%C{튟֔ap[tw1O{ڭ$f(~y2?̓0M@x̻wZOd$'VYRj=ZVh XQ_WA=x\s^HT jT_|έ[LB՚1\Q 2 yUqt>Uh)j:fSܸ`q/,]9XjO[{>~>3@4}NJ8/XƪB>ZLPvzYE sCmV0q?U&Un5B'l }+W-I3F2U9}@)"[S RD;h'MZݚG [ELтv";Z(ϿYr**E' WȔǭƑTGG qJi,fm s* ';7ZhJ *'.YyWFϸћ>6ϖO8r]F){21CBե&bX59 U 0"(g%|۲-GaPNvG4C`W*PIK 6g4sÄBSpڛH{,@]d:C~+dzpjl|9%t:[\M_m0%" w-O, ߖ(zOWnAuO9XBBExhBX`ޅNy@zi&KË!.;DlDQ`Ce)I@WcȐ+~x)4|{ ?U^]-ABvb) ]l@zB~i8/>8"ep%|yD`&*_q"RQ#ԇP\WE #{71[}q,}MB' Wxs&:jwZ0v};ipjumUPUg^n@uaqz}z5X[=aU}<: c&UG ?O7$$ uwBV ۨ=\>c5/B0]^]Cz Ύ:ń?ZQW *_~-w&d!D_@HAq6jZ6?#IPXbNdU%QBiUţKwK!HzyeE Gՠ:@DE`|+]6"[}}{]MmYrR%+I5ޮXbtya%h6aZ"ıΜDC {s+VU&Z!gu%?>PT|]$v=D~ė?j$TTN,y%#bhj,z0cC%eNe`=lՏB.s^ B==X`[, rr1~q6U:9š8^ؾUʛX]vt3;re=p׬?tk_ǯijWl"ƫLH)Z|ڽ&BBM9'[*_tʔƕ}AﰗQ҈ʟ\1/R[ ÄiiО^`P5d")c"8BMZ P3v48 d+󃘴*Vr Se%A{ 7CV9G<&A@zI3M ( 5r_t5/0fv+X~&mѿ~YWk4-\G%qZ$ e@.rY@OD900:HW8ؠ3㍱,'^s;ۍ!q]"Iy 򛣦/z=Zn7i]viJ/OCa)ϓHP#% {cb(lhG,``9}V`3O)y3ȂShFTS3TP(ȒQ,kI l;"^@6:hFp撰g65N0'Xl2xOIh)JAafPf3CAYnU!e灟L04zD1&RVnϲL `i:q(%( gK]-4h'9?=:wO0{̨kba*r:yw{0gr =E]+葻[u=҇_!A/vA.-'Spԗ|LlOOߌAz% B\I_f3 ,M筮qͷG?(Ϥ xeTK~=Le Sx!/>cٹxؐ9fRPC^iLc*A eU T{BPt敤UӠ\qYxg[)aoij`T T5 Ab;ĉW& i,tosxp!gLJQu?_K % *v6*+gU<^̓*Fr2Z*XRDZ)#;*WJKGiŬ-F5#qn=zj^>Ec|NQJA5ֻĄ#z^фO5ڢ'v*2<w\ģˬrZ#kUf!H)`>PRl$i4/i=Y5Ӝ1~hdc9B=Aы# ȔG?vgѮDžGvz/n&' SJ\)IW;)bx0)2Bz zyVGT~N )e~^(&QTɞJSqOfA4TeG?SP\ISuj!;-o2 ރ!b*Tڿs:I WcȐK)J̓a3UqOi)uT@j6_%u:J^p);^$gFr2QVcdW|uXHbQ|}2ltn1Cg-jZOw) >]_SL49 ֜guIWr `@2{B ʼl V88IAc1썰fg$$Z؉BN ήĺ$)q$IX,QpHnbݛ@\3X23@`uF$F0FkPO"rs0~++I/~-3<7֏5*-VBe܄'5H K9ヮn>-s&7_R1&=EƀcC3bY 1ahG~dąw ϧ>9EM{>`sI+JO#hP\yә BF*1űK,E9/@w2*mb9(ϋn9w#;&Y#$L2Y6X&$( jJ6YFx9aa Y#`)I)!#IDJk-0H_uxzxG ,^:7Gh U}CHѝeVYHv ;}`JpuPwk[{c>4r~hZdb,8JtT|reTaa8ή]*Z 8a{`H]W{p g r׎R} q=Dl h;sfۘ})?\]\e⦴Kc+:\#"[TcW;mw N}uu{X {wEpW-|hlowX [+];,{5'|}f]8ACuICDZ]Y9ETKEX^FH{vFHXoQe0G&:U鎑E4Ba@dL-L]hkNr,y!,I9(S c"GL FR bE!)UFژ/ )G=4 ׬؎![C AOnyZӋB+QO(8W)ی7U1! Wכ-~:|:5ދߵ{4#!oK(>y#3Z7c c1$䕋hLz$jcw=nN3|vd-~qw!!\DdR20iM8:7٧N7ɱ%nf|F+  Cd,,"#s>VHf4:-պ>ܵF gRtI4G"%}/e~ g ׍ Ã≞v/p j n ڷ0T>=FJG~kM>ߞ=y8bk7W,ͺz5G,f,,8g 7MiATSN4U]57VIZ+YfҺbm;Ӈ,n<, Uwqj,(&l9Rg>2 BrKz>)}Va܈<ct9k1X7{6{G~ `N<89mjR'xc_v_MS'q)`Ғ8T.3^n/p,s9yΊmlY) [{Ӳ!n>}%cZJ|}xMXYX J, #;\bks!N)D$1 ,H D NUf8BB̫~(Fg-0X"F´EVof{A/ZRdN_;gAV%|SsũHkPls$Ki}w&ՏMW?-Z癹=>/4-FOӲT}<FPRl/[kAq~xON%pl'fAbkxfSYw0 ~|_&&4 r!(443}>Oܲ{E5g9sFzNw̬f0b~_+r[[pkn ;db0TȌdR9`(Db**M2MX$Ҟz}%7跛{< }hy,!s@JGnp?|f΄Ln9A^$M < ltB` ;9Xu0kѥe6}2 Š!;xLwg &OHaF2YZ_SYeq'O.kg8sVbs,X GѢ 4[ݱB^hdA^tȠLeW'gyݛdU' ? 
àAfT c' z9gL_via2M^0h؃J yI#WwεՃiqu}0o5kιGъ ys7{f޼aCV72mqZ>&Mܱ9 h=*4NC#Yq:D~{EnwkpP&qt61PL??JR$MR$m=IBQҠ`!,X$O SeId *2)49~rD;xy[y)'.w&4z=}.Q>zdv}7M2sr峯^*""Tc/r E.(F4 /l $IBL+5F2UNP $Fkk^X՛+Y*%9 #"\'< ?AX v0yȾ.f[}TvQ\͎\hIDȇ?>]P=xڏ:#c?!7FFG |&~x.ϭOgMO;w|ݲ;Kd~|~x-hsneo+N& BSyU;߻t㡛ryP|y X;iQBTRhPa\΄Ұ3'4aTQA30 $@5c~^{Pol%wF?1*#:dB>[Or&D:եf )& Hd>,8R47i:2+3F<6}0;$Z/$aRC=Lb D)h)Onhiv.0(/j"(h,esxL7iq=n]l܎hzsISl6}Z͈ ؇`C1tZ+LLI44RXHq`nbф`A͇Srvrd/ϟf͉ܽ9 xxwN q `3Qi/v קFwԈ /(.15*Y(o+V +,-{Sb7|.9bHUvbhׇhbe[[[:s'V)tjr-0)*2rJA0Ȓ"Z&,+hP"J0r :e|-Jt_!adž\N FpZF:+\#" i{$wr`0"&AqoA y4(zaX3HI#jQ<21 %Ut 4ф*BV^ebKV8)ErޫqPSeb&Z5BFD.m,J/C(0U4í=Fq'1.9&r%mv+!y=I [ug-{ rEex*OܧzDZvtصOsEF>~է"B9z*hdYz1&U/Ci)š)bk:T]s #Fkaޥ=nY*zNm2NSoe;mbmP`hNR_G!osJk?㢊QKo@fhx87lˤ dYX;-M0"d]+Y|jDTIj~l8rVQl~Qn#^s[:zѬEZj惮Jў|d]Z|ٵKw\re-|f-]o-$nZ>F]~TnyRvI)O=ׯ蠉}twן~khrڊp٦e3vo+ IW?YN ,䕛hM/ly7Fs̎Gn1wTnG\R p{-wB^TJ{%_qԀ` mr1=4yǣC0Za|1A m ҟ|ǵ~]nsmL7ގfn岘jPP8GtZ T-mʿC9QhƲ那@](UuE."i]sY)&_w0-;YPmp.h|h#h.ω?|k2ءbfw/9Fu~`ۣ7nt8PRllEܮx#e$dDEoMaxop&ۨ\,Lߗ^\>uEå%_Rw!\pǝ_Yzd#bPfQ2U͏Z;P{Ja!Dlm8w Š鄾w;8KF!Ϟt&Va;xUpʖggsog6Q;g[^ yt|uPX 7z8 25 JWdJZ3\H3?9X 8me8j+y6=Aqs[^Z6JN;_a|u0 L;؄O|F3h) t"`'`-qJP38 /&(M;6u_Ŕe^S7:TWYKix avJ:A hs\(!J'fF"̐ C<,)S3;uz$EshM5δRNfb C-0jR?[#Ɯ|*s;ʧu|RqMmjMK3[J8%@;H^c昵d-Lv\ ܵv!Wk*q /iUA+kO#z=*/"b)nFNg6uڟ&rحwcލz7vwc'$8R, rc))'QJJASƓb `a5UJ8/ߞ +;O lPB_t[耛mRx7_Kٝ}J20/?h%EKJTk|hѝ6]O% D2"q5 b]ԑ& #j(qM]W4zTŠ+~BX)1<4q.}Np_ZqWM֩V$V0,+ J`zBX ͥb 8%B yea?T13sHbD,LYEF,O7+3+#3B:+m\jEI EBh`JS $}b& qn!L5cycm9Ē3 zsũ20`id}O$禄" X) AI&69|&)NBS2seí?|6J&eX<3Oڳ.xw|wp?=M ],VG޽}Ei7w",^; w1ĂV6;*\p܃:\<353Õ+N*5^2r?5J6^3T;jc@6Xo.z Q .hr//J5+gye1 &͋IJI8'>?l߹F +0eP?YB._ii3=ˠ'SMob̧M$v6;A3$_Wa@?>D1]ILWY2c3 0LX@\r)!cU'p+cI$v=Kh,s>E4TreP0U%#Wh~+n&&T$TR.%j>SR/&V 'Pd5MM^= ifJ b7}<3r?#.Q¤RD_n+`"#mP^ח¾2j uij9h;8x?\2e@Fh9Y!Ta"6ia/R;5dA@#TSFC8yE k;=}Up|w6jҝ^;cH/SRNVO=me0pjZv0j3aEPcE=`,Zb=O%kEKx0|<Wi( T/xulsՂ=V." ϥ &.B(4y㕢:D֕Dn'oǟG1s 4u76Y#ϒr78=}`Tċo{/00ù3@=`VONP_n]ا*jcw7ÚB%i]uSآ5k+mzmж S-O*A2CeL^-P:R%:`s<\j$W*Ł#ǣ9/p05+[s!o.NG.R:;!:  ɎPe I>|2ء:w)M jZI*)SH~;Y]sg:d|[| V|v2<$~QƅK׽g{UXnx>4.Z\/#Mc.MǷ {.c!oЕiv~Lg:Wي'tfz\0Ȓo.]< ^YXO* kU.r)kWnA6 !*?L6YZ??.0>M6g\++z\n9)&Lde>pTAr9=Vt{\Tta\ 2N#UƵf.³KkuX+7r&Ekj$%eər4xZrIJMmdTa"JJzcz\U<,Z4a/Efj`$ĊNWeߤ;6 'IOe) i%*47JZ~qw'"9rX$)sȲЯ0>xfY2_k0}6C.s@\!֘1Ì<Ф)fsL2Ǽn8)DsLu3읽#R] h_zx@JL T/8⭀@2 ʐBLcu1˥Y u14Bq.h]R'O3  ,*I0)+6\[ Ժrf]=P^1[q;@B.jBp*W\79M*EV(\*s`!ZXe3WUyERjHԐJ,WO$ukMWIZ*\D~loVηFd"빎竤?HJ `Io JKm 'މcGFFI =JeINB`Gdf$]QX3 & 5H)Wy"E)Jj{o,W 5 VNݩ]E oxC.9ev;"Уxص{S7$z'}<9hHpB||z^LӚ++zF#],(Am%t6ryט @Kvft3-g`Wɞwd?;LQqh̛[nzzni7,*i{gk*vYN__8+[8{ҵBom7tTς>Z;7-n&zj;_-l[ dA1t"z EMJr`sJ"K27 .4 j8Bp>k4NwzN5$t؀L;- n<$ 瞷T =:f-s,(DRE"-\̴MTsR3FX-%eWھIe=J#䴖weBae.VWD[)~GǪt ꞵŠd}GGu `ҭDK[++_KkQ/S0!xAeׂ>KF [ffċ_ >{wap=}O̿}=GS?_MwY`xA/4U&qZ\0fW YQO/rw1mկWigW3I'm6'l(܂Ju!Q I@2qdJ(ܾ' QkB@d*?L-)JPyEvlۀ@vޓ;JרrQ҃ AJ(GNuP˞mDu RLQ&3:re;!NY`\ [DXL5RIͯ;ty}_~~/:3_2mך^uЊuI ։g@WYxwVƣi+֭C"]}Z?~@B-Hք]+EFb!ZtGw6**^ԦmE}Bg']뤖Bw2NUH U]"i=,NTmB VTXm~S^:ky$%oY[D4yZͧc0\ΗGyԣ᳹YlcdZexF>:&_kHe<q|~v1?a'<1SJY?I29bX.|YJxgNTY@GSi^GLXl}H .zIUO%O5I;4>Tk}##7WKk:~K":E3."t{>H )(vWpF)J"箎b|wߠxH K6x4}\ນzLe;f˄F0ḠzɁnmZ X=!\Gq$]U'Mp =|_#gV}ː 3C ]JCXBF|kĮ]OMFh\HpcNs>Nt ax< lzhç~w3BLK1av8^͕,ߝ?PhAkj-IdR_q6*MVjvDҒ{sb ĉ9g·`/h!/h.U{ YŧpSܯl&rLh1 Q+WF qpTd.'ՒI F;2IJ6Qx+1)/Vf9ZLRE"c3kfH$MHi֒XD kҁ,M+:&zpKY+i/zcB",j[䚽d Q">ٮRh5iF6CNF B;gc,fKԨ]R+j %KP5zOBؑ+-NIhɜz/Vh IV)8&i)oJ|K$X\5 PqoNҞ yVz (N D;Bw6X@kmޤZ8R#Ame9<`„z 脧2hɸ  %( >|$ i||`d#i@:`Xb*5t]/Rh5 1i{l4qQ.Bqi%c*PK'mvYW:{52cCWі_Mn; d" %-yf3(vB[,Lui$~q͠vBlxt( nˋË;5qB$Ń;Αcϱv')גK"_1ify?+$]PvH~$/:~JfM _ƝQ( :CQbқ9Q'=h4P (t+FHkC;$CuHXqG* @`P rAA#ARWuQ9gpS{ԡMeD B RhlfkMbw>y! 
Dc㱒,6Իvɐ-5k7GT s˥&,=;zyHP0e .|}'+˼\v`|mE!8"3"?[#.m=V5rY8{k";NN&n5LꩋWHD E h^K߅\ߦIYh90 )s~dҬƚIO m &ܼ$yV?| iL&7I^}*kXTfE Zc1JCKF؄t3\uqk8°;ZIs;YAB^-f::sB5?$XʢA-`d"$2a}WxHb3CE1朻²6Ƽq ڝjx(ídx' xJbgxvk-s$i}Ԩl)hGMsC%QW9q-dd7洝;Qk?ε4G b1[I0B2}8ǸtP`9Gؖ$e_j{#蕓w#y)E^\V;iǘ'?V z#>Фᥡ#u^͌CcI6u]SwLe-Z};@چm{$4W9a&D2MCwJ Nra@n[NJ3b6.LlJ>z|󋝸m*9)\_˫ɵmY@H+v1f[a#t(q9ߐ Rx㥅 嬠@ $ tV/(vƀF&jgF6@#kqeM821KN Cܚ͡ |rrh6{7Q~,BΘz}\/F[Ze&~;}5Y3+Fxfa&1>hVŃ0Jكf*y&)Ll w8,`ƙJwvOF[G)ٙ=8 - Gh-+3tC$o;I〇mg8s.*u\`C"Zp) x!D.st¬`ŤՂMz| ^wr$ ⃙gSWk>>9IhG+0>T J\ Jj=C[_zp~Y=;LO6 ~~ͺ @{r{SByz'=/\L#d 0J{ORM'ĿD7 F1w`(DTѐ'ۘ ]V6d-+w9rzOoUrew?y럚EAWE<|lӵW𐠯َq&cyү9E|uR^9}S^zxR'*~y*ۯM@}za]sսK0L<Pmm[@XmFQn2*9&#F)oؼ E_hiy!/n}bjV X$,\`C)~ 8-H?,?5X} |y#oU[׺ g확0W{Izt;߁ف!ō.^1O;-[?vznÅ T>Y8 ʻvKe}sE?PnX旁.!rE_6Lwo-M&itC͛iԑּU^gj"j;e. [(^Zx:4onZBL%["~.%q3p[u>Æ{wg+p#U3f~9qpvb_/G*zY#s4̭+){Zu^"cu)",c;mS$6!62Alp|pb3¹bC'*86wʺ[cbV-&aiy! ߚ܂J 4' `sr̵7yVxJ#cV)2Ʌh.Cw 2[Y);+ uDQ9hF $'[2$A Ѧl#EW#:"S1E`Oe{)Uye燤-<#aiF +]lSfQ'n^ƉzxSm˷Uy8itWm-1*"ƣ ޮlU ^dݓJ4_&O qHJ CK7+ 3(-9Ϝ.La/]N!QAVyCrkAYs̀hj.O=9JќGO͋_(j.Y -JF(t d0Q_߲k; eTPSzt>mR9Kޠf2hq:Gs9)]c.)]Pz.cH9`gW%ysRWB]'lyl%g'l9ya |x^wLJ: xpbCp~r ͵9]4FE.ݠ0vԮ"WSrȃxY9pR h#FV"u!5K`V0IΨkZ8Z9ے$A@1'x(vC6E&qW)xE3 iA!e nuJ 3s@CW!d8Bz.ٽ^`]s܁t $o8JX/'K)Ŧhg)D"OlyQR)Rf.]9\ƙ-`dj"Z1Fx'#B{/ ,31ʞsSc)iDRJJ (>[ TRW 0ZYw |d&?1R}@NK_]!OOz1ZJ([OҮ3YeQ)p.$){׬sd'y "ŀ~򧬍`H#R,4JSYBȖzma=1-la<|*@Jc~ψ., nSqS}>NJ6-^ݺ'gw-YC5S Hd31d* iٺ1vV CN2+剣uha/Y_f~'ܣ3@#Ԧ,5;"Ӗ21E)խw<08k$ #߉qOC ђ.i6т!H7een s}̏ n@h%c$3[e7f(a9w.na%W}Aa- 1%gWCzvKSvuT ] &hF1$eQ a BdZC#^"v4)%xg(czog{QL֨b2٪=Q *QUav#Uh#B6C W6+v-!1K3Pn-/RRk9|2 rB<')TUmV$&}.EDNl"YuۼGYsR^K\Wk{prS b1[MA|e`35Z_h`ଖtY2KTݤ ! "*$?Es1h7΍ߦ`T$Ɂ&ͧ;KK5HՏc'E@v:tԉ]KYQ"$m;Eh j4AoR IG~lؠsoܖx)<|=ƩlFV+`j1 ǰg/3H!"BT`"jw? yla]ve}Bd0s'!ZsFq &rLbL%8,4}W0.jDs FkmaN*e Jj0F!!4jJR>`X>lj [X[![x.FZ)ir)δ%)I4CB8I(4 1R #-ɑ)Q*9i:lp$fx*d8keDٞal6IHN/vtf #HN/XQE(72{+B_NSeV# A ` I+2:_/˙cF>/R-ǧeS, ?}xsRA!\Iy3W}w< Y}-kqVn!Yd_vfNlr%%R3t{YE/-/RIc2iRb]Ⱥ ~퐔l~ρ+ /)?ޔ?\o%;7 NHmKZ TO\_|QLY`KOCMq; ekR#F՞A>w5|fX!-Ųkk!n+jHfc, whӆN~WXm?y|ޓ.@rY(iF[cvnUNv5W*%[{cL%b{߇~1廐@?w.6$X<~uG#k73O@ZkmgJ$n"Qר* Р:yqDvti;8PLPy*SB_~'JZ֕2'O=3o_i#g(RkHx#,~ RucѲ.eK?e(~Q.W͖1Lkj(*njIVXeN *lj-|U_O\z PhXd`X!ZJk0t`R-j F (k]YI,|3ơ6ٰ] _Їk+:M[i}0/0BpĄ22^ ǥ i\U`sɱ 7 x c$^雺K1)K]Q&lwʍE5[27s!˄% Z Urf䜠cA 6L؄'xn9J 2Qh]oGf#5P2[>Iy ¥v_vGfgp4$k6W-X!02ȥȞqcbJ؜ra> U8{OܠxyMЦWl$8+&.c>Xvc[ G(:|~)ҍЈD^XљVkgh`Iد ~Ӎd3"jag]jyyBOx60. @ yViŠ+gcO?<I6TbO| hiOذuA\[D8ɊD*q c͌dys)V YJ=d=;BQL-! 
Ӟ$D6&")qD n"A`MCW jӑG̺PG΄0ZqP t8*jw$$V<3y.a]MKFEע)uhGyā{]_sѓ,ƦKaq)XY vMT#5Ao?|3;BJFw0|4K_uH|СƯ Сĝs3I 'OkRH95~._ʚηKǕ3Nt&NgJ89OX 0؜-͒1fE7|fWmzOrO|xlaLM'ạ)(v ym4܍@`>ZBCvCAmBSj+ ͟zg* >,n_nC̃41Ct hߌ?NO~zxji2]E _atY It۸T蜪>Bnm\zmptZ[I; [p)W ^)2 ΂܃c7Ɨ^]NxL?ӯ~oO {.A1QOQ9`}0,P3vhq{uHgV{rk\K&ԙ HXƙ((InzNhҸI ,lbkXVd(Z{d" @[;~=;Uz䁣8*GL˙V&16gRzhFrikYklvXp#~NH>ro0/aVڼhJY]/f%7bdJ.X@"kҋ ?G,Ohʥ3zsfӄ@Pp],淋0【1~Y(O`V(n!?XEde7E++Ӎ嬉Nq_  .voqhVq?,FsrHm]UVVn;s-5mxQu&zht_ 6dfQ6Չ&LGiR =ٛbiҸW2AB e Rd/ F>4V1Bȕ32\8KJXE8^wؗz5\UTQ.V2 Z Z WRtw>VZԂtcAf|mؙKeL?8| Pgx!i}@Ne -r4+dۓ}$fp{pdqqbl/!+w=SPC<$qdgA8bM?4Zޱ訝Qn2_E[3Vbzh%kOm=ȃ,{7xD19wo 3lu2QHTsn#`3#?ԡƤ: `vL {1._#FXĈW93DosږFv0u:J{+ik`jNvAfLɓ:+?=lli>yK۠ mgzZyݍK(mzgPfWrr*Xm#BSQ򲑕B$$+n_QC?&߾}~^ _hsBcZd!h<1ˋ8X g{AY0i d\6K,Q'[z:/4أ]/:Eoŵ'wKDs-C랬|f“übBVr:?N7$k4+ERNi超e1cTJYڸ5I^VA.ݚ&ARlKN -tɩ)].e}9L9 eb=&hMfyO%>+)̃\o̊cR:XC3Ϳ^qY~f 1Lz`kK4~ .8TZ?^^pzŲg[_'с &p+|o%oir7szp1O0~<߹tJGkh+9*AߒlF .?$Țlb (ߓ.mͧQvM_Nqt[G:Q$1fBw(a/2\ƽ˸sY˩Jp_@p& UZgBV(]Dι50[r:>UQ~ۇ$1кՊ^QB.> drdߜ L΋2'sǢ0J_ %lImrI65v!ҁYcĬP4W`&vӽFn2\cx^,: ƊVrR]0Fg]8L&nXbڢ0j{1Jc0 7"3#lYIZa6\7heC ୻)y1uG(q@cŒ`GqvZL"fL=$%Vn3h5Ozwc&&BgZͰ&eE01"Ha^x%@KYyȃR*-<"zV1 #,}9'/5wEhpӄ !Pe%ʖ{;6dňx6 s)Đ=\ 9, u:;(\weI(a.6IovM J[5rKD=,V UT]:6n6T7Mul $"H'!ݬwW)gF> _{w紾1޴,aa1w˧r%W:cmߌ?NO_}xji2]~彇 aޓq$W}yxo1t߇lxKlvd!)+({EVIix )EaY⨧,xN^˜}M8L=KlUK[ 8뿝VT0(ͼ;V]|>8.^[gUFzLjTj>38/ZS!ّ)uc0nGI kLtuȄBsMBeS 햜W|; !.%@Yţ@#gK\&j>ǿBhvCh!,z8)RDNdNfD0@|uQG/*֠VʟEmQfK'7W'󬳓 v8O΢E8O΢+<%2yJx*]kz> 5Eb`ߞLn?O'.c%Э~|w泙_7WIqW7ΒQjSD$k&ҙʥPn|V/(qAYt 0!IKM)2Υvo}¦0S1TQTol j:uWs)G5 3GJTLWjevNcB5` m{fuSϪzat#fR}9+le,%BrQI4Tq9+Ϭ8) Q79TF;WԛLM24=Wj4g9nlj"VB\츔gr)%ەK@Bpj-U81@)i`\`]CY%C@g w1 #p~"[8DEK77q5'S' M1ʃ:LwXROo1 BɈYkC[F{68y+)8*QJ-VnFsMfNەuGE @F@!N^ѫȌj L b k,b2+'X)J X2%+}B(l-ljkmk ^8#{ѤuP^}~Y!p^h;|$SZBtP L { Y^QQYEyYG)A} 5UHg-H!zaܾqH<.0N:x@֮Wo&8œ4T Έk-Z_zh^'?YU/\ҴN-V! ;@azz8 wOcj aԞH ǖ8f5ka/xO)!BX)QqUS1K &3.Yժ\-h('%eʠq+T3 d eZ7iK@DYzj!mX}gID@訴3ݡfXk^ @Mo4>`QW)r*fJѥph*pE]kfQW'gLL^n7 C JVgWZ8O΢;`+tK. 1￯V7x3XH⊟L푠w+g}2 4[ٖBKN h%݇[ɜpLc›Ԫ2^@_ؚh*?#g?I٬4>U*}wJU3O/ i'w9E~[ot*I<'W"0&#J MGD'GDI-GX!?__Ri_B2*d'U)w^Qu:S?f/E5aXc)W-̆LR3(.ǜO]{̹ DR-ǜa wתؓ717d!|A:C%c͹ H(;3\sG{fN8UJ }'w¡& ҭx)P^c"xX `WV6[pU#Q>p3ky5Og䙡Y5Yͪy|uOsj:QKQAG "ʻe1ݱsq}ACZO~70>`(0AL\Q\}np*00bR\;Ԓ2ŠU&AB,׉5%jk0\8eD)Z\8N{՟'Yac~es @;c̄ $1bWJ0F%1i̔Dj5&xjUN'  HtzA[Ƙq|* G5~wSԋ쨬`-E ؀Q^s,} \h-T)k#*"j1Žw;gPf'[wǥ[~,Qp)Ay\ZCMUK%L.U\RSs7o<6il ml>,axN_)rڈ!`\IobY(=8YV#ڇ#[@9IYD`H"!I[zdhc~*t[UHT2_QnJnDM. ז涽g;7!R3VsK@۵V5 'W 7\>}P sCpc/څʇh m\qD%(%݈%}f =[*fRiv 㛋##\xęu)ᬐR11# pۃ^yէ5wH 1RV k N$,Dڴ"{͡id ccӨ9Rpt!-%-Y#^wϽ֧(0ͮmV}Dhio[~KIZl $TD'eiI 4HWk/\ꂍ׈OwweHz-5wb31i dݲՒVGx,YyTfWh2 E2/QX3) A?WVgkHb]#3 .Ӑ CuK=Zlp&F-:k2K+9rt29QDHIc:N,QDjOv2 }5>k}>YIP^`5ƜZbAjmNGJ|ڈe UȰfT65&w^r.zK#k3:LB+>w6Sbqb; Gwwyp˅ba -aN1>Qpa,A9yK,ҭT;JTwyUٺm[-3miyѡ֗: R&@^ Pj5mZzNæm,j5l$l6Z|-NmpGϖt~Sqy^yaFf"ZT:WU',&z@:JW~UYD;vl% %)%7H2б*Ii+HܓUjh: 5AZ#_#WbXJk+qQ`]gCJf!6}_[/wFʦ/{aT\Q\$W-ƈJVF;Bku hkd(5Lo6z8B͂:B$!@f}ś=ΖDŤ-"6B'CNW4י'K"~_S*ag7KT(cͮTn.֊OS҄nE1u媓9H/'ײ=\ YQ:Ϛ7\э.%7?̍ـӶD@jGo=$!S=C8(0vtw, M1Ȩvο}\:?i ܦQ_5gاpI)7ZW1/ z!;?=}tL~XX@kQE.xgiU3UdoMP#ś˄tsd= ". tXp#Z66/18UK#;^c#? Yɦk!CGN k:N~#S({Xidhy]0V)tTG7= PW'] ƲzcSzd/+9 Bʱ;=X@8<aqM23L6YNFsw9sf7KF;q ;nP|@ ⦬*4gZh]+߶fzUNDCEhS69MYHp+ JTqUζjU㯯pY?(|tMn~AlvI6 3@No9BL|Mc,c^89-5do#@r[cTmI&Ў[$g l?Tۇr-P{j}jЖ=ZcHTAK`TA+UZͶ/NW "W}HrLJȢP#g Uh̡-eU d{W<_orEFnB[~pH08> HP/v05_%xƘ!ێ˷_^?J^~2+P~[d?~Ǜy0Oxn=YNzu٧pL?O.z"8=._=e{O=<:z,czd%Y'_G_?P#[L;5ܲ+[BJG|o!{}uru]}/Bv hBJ lm.N}[¸7q(wEVs~zpz4 *jtɕ6){&'5CP˖vǧVckN r5dt Gٞ֜eZV<:8o+08b1ږ>3|x,¸` 1k)\:sף wm+piC' 72yZ>Ni^ K &-Ψ v=M+8=8CÆM:KgzDYhÐ٢P1iҞL}.\g-Asg)na8*&c W\g\ l@-ZL ǘ6p"*. `B9}ҍ&YΐJ'BN4ߔZ@. 
C`v&3et0 OyV WIRw;tB!.L72(,2$f eiz㺧-}`?ODzh=d.>\}"-_B@d4,5d1WQD3Qh6e"jKz 뽤i׷׹+RHMeK[Z1NNDq>FZ W?5{Jmq6]9ׂVw.cxN9.'h@7r|g?S'ɵ[ʳ=},ې>gVA;6z>6#R ַZ u~>2k h+rx?lw@idDv b l(U2vLsizu'Ζ֎ vF؊ٙTFOΔ|fB(Z͹QJ}e3<8A 0_9k Pe_"f^WW.J0;;(bcE Sv@59"b60C!j_H͊D qa"b),;͹h>U[eHRZLN: > ut&ۈhPޫJ"H3JΔafa}cjV* doF5R.e[m5UƏĞ?&vckgw:Y wPQEv5!7#&PkI14d7UDFojJDf=c¿!݄Gv;u]7EI1ǤfLv 1xہ%sAVcLؙs<#Є|B4؄> dYogS)zSDǔjƶsSOScz->6 m CNk {MsfSscQms pK2p}J JDQw~7ܙCc$lv,LDIUSRdXRo6\(]锒3DnbJQ*(@{RUyZɀu%` !c!'_|/װ[U _\^-{#ٵ.E#4VXB-ۋ޼iq^?̧V6(k)֒mDŴI dq{guh0$?G2rtr=촵 00l\nա|*dSFy ­%3Ab-KS>G]7U#ΜxB==}R=< |xzpϿ֜QBS\x020ҞwW|w?-E@}(?#N~ӓtq{wL~x̪1g:_?nybZ -OO._..>nyE V+G=Vpzr?]y^ޗWle |z݇U/,7ѼElme\{}I2+h}`jl>tr\=#WFRȁΥI-Mv%Er"B5&ra+ Sts{FwKsv;.MxznY!wr76BNEV;IH1ܑU T@H ,[2d$'Ԣ4b S D-%,VX*slpCRo` Wr’4dת%;7Y1FnޓV2|l,o讔7*z[lIΎr_%ǩ .?#Y#.%K+y>=xR]4t?=}ݾo>&OG囧o.L-'_4fK~^#ZWɑk@{RwG3y{{Z<ǻ,RR.RJL'S}jb A{*<#)sG+HB0-]Y_\׻*S$1If 0ɫODJDa7zaf1vq%&_@I.pTLa:XBE9,@2h rqiI2T%0aYMV̻Cz'F (% .TEk<A1#'QN[9O_Re\2L]"8-='mnL*9B!(>sk>c-*0a@S>{؟gew"9*']Y &J?{W۸_6}02b] d? M[Y6t̑EWM$RjIh0rUwuUuXW0W]XPn7u z3WE\LL8?|q5!["޺zpJXǮȋ^ղ8F6s$ f},DFT$9068183u}&tpiT`*}@`d h NTx=jMTbn׽{6# _E]6kWoގ)^}7X~p#V '4J9U0spaMs6 KY0,p-4,z ĝ~?E]>Cy, ',>JpN}|t7 CU<Nlzվ~:u?" Jvޙg= Xv8:;N;W-8DIq'+ w0Y)yk*z5m{LZcuuǯm5Ŵg|K_%ۆ_soʹ5O ŤqSol67mJNpUD;b 1yA(aj',U MQB0&) qb \Ic#Q,V^:B8Ũ^ 7YΕч c^V34$<1P$Ml(RTRXÜHs #}V*X^E|W 0J nIC^6^v먀cs$D (oITԥdI_o~= Ra.[(+!MtWBd\4Oqߧqܠ 8 utbԞҒ{*3Xڄa9HtrXZdUj=@p$Odf 54i,HavS$ v>k\PrP]Lf ]. MI;̑ެ)ETRM(d[MC »j5?ˉ/iPob)*dIȀMHM#1EE&@MlalBZfMp [ie/vjONy Glw`|!*$9jTWr"[ rND_ S ZgAlǣtש.<8^Usx:]aU";^mq«nt("8ǦIP#q|ׄ" i%Q-]NV:epf*Gya*\ԗJSך Aֶ,GwvhAJ0:k=Q]:kgl)O bXwXb\3~5'_Oy W~oh+f+m-!/u@"ư}<6^ȓo/$4*O(@}|JHOPGMT.r5|gm9m\-b8e61!Z4^DӔp[x8-R[}n|DjT'`JBBQ`d-|+sDgd< ><ya%^Vx{ '2la >|7?sBSIֽ%"xTc+"jl{4/-~x;d6Od~Yj{y7 s|cE4+bE>ޮVO ?6pqB6@ʾ~,ȕ|cA? v.N`lH_6srV&dLH(Ig7 ?NB{2Y0,N1{2YP+vPt4r5HSMV%$ʛ˿}bޣIy=xaޯf:J$| pw &\#IlFqD""KcjP( wcJ@̅% `1ywvb̡sqG όZ.YQˑAE[U/+IĜ GCTpb^ϹXP-l ӃxQ!đίBZF_@opK,ꔤJ+.[Ո@q0rߙ/Q%oES71@%ݲތ/Ң-/ъyeo#̨jǙQe E7w%~8Br/sv<"U#j~=D!LF v2I)lq`s+*dn Y؝!e8癃dy1SF1k=L$!eprHIg36W.y]{t ފJǠ,.*@6`/vg@b=DeA:}z LX!vh^fL/w) $mkOffqUq5z,J!o_ @l[rz܀e_S8޸=Tp}&B#ǺQ*wԾ{^i.1Wc d}*հbX1sƣl~ "$&7/M?;Y*&wydQ86D13aQsMRp,(5gryw- ky{WtU?vEQvK4ُ=Ic80B}p4xv 7Zbl(%|{{f*nl?\Mcn PIc,fˈ,I}qz|z|E,,Rr>4 EiR^ZUO uHfa4B&%Ti(l{@n\{0w+AΠ˔#m=^+u^O݁đwS$duv d>FsxNUG)SH<助u-9H˖/ҤyEF⧎!傔U n@u'6##ne 9%c?I)11Qyc*bMDJJ&aLR&ōc hn s3+qF0oٿ7W$̗\-&xfjTKPm/k uȥYbqrqC8%x R&X9>_֢5p(xI_p-Hp~]b2wk_QR֛VwKHVYX,ɧb؇nz4Zt$nc7jfyE)Թ ,Nd:4wFk(pZy5WJZYXH?{}Dy\6I߲(/B\yw1v뛟#%š<ߜbiFɛ&i'O{x8]u\u^C-@N*zrvש!UbNpT6Rez6V 6&j%X#*%{D;,- 3:Ht'+dY:QcݚX OS&тDII(.^3KN2C gcmN'ZV0PܳL : QPIxbQ%p^6l6:Svͻwnm(23 s`3tB hJ|B @0\Xek&_h )_qKTߴ Zu>7 &ä1%d(IЪ6\6;|ћ"`¬ӊ; Wdm@HliVAF BԾiwKR MPN(YEpb3%kswX,_\"as-N$̟^D6x+ѢK s1J$4@TsZ7x% C'yr4jB#%;{4~h sw8pL'Jw}F)sn|@@Z4񐌕>:Y0$ iaS"3b}hbʪNqK38j𠤰6H 2N(8StTJ RØP-8Ojd VsCӬ .͕DqMD,U:PHx ׌ KtGfL޵\>q33bK":ܹA\Ͼ=X<8 7r{@J%*љ1gM"bit/.wh2g/Kph{j wY#}I3MWBX1E,PonsmXR~):v0wf5 ^[ y(;>+GGF1}m~w39ZujJUo.FCQ!oua&&Jh#Sg6)_3㨉IW>fj׮2XW[X"ނvm_ zHD;l`?u*4*)>j QBm[8t>$L΄*/k)ro:W3I9t8xLq^즮L]zfa~ 0p%Vj̧J#%ѱ L2>$0j48=!0;!# 6%s= ʥ*ɼrHJ4.]vp*!2mm߯'񳥍5{*މ&g Җ6<f۱2@)GlM!6`4 7M4jO(.] 
F$ؿUX3;Ւ t择fhs TvԎ)|'ˤf0hy2Dϩ>5@M08 [*twdo&`LRq*BoՀӁҧs]A3=Y3@8.mǙ}.zKMpxKIāR.;Km Dlև29秙ߑ{7x?+Fmz7iFFfO -ZCOYu5&n|>L 1\)1YᶡL[Xʔ=&eNF*5(ϫ!ZZtD#t T2fs@ UZ, $pG%Fj!ѬJEef_f R@fZA/Ȣqrr-="yB @i6*\0[z,Dffl}EDNR70Nc 4΁UY~KH>ƸƓ\嬼9ɩ( 7gNk3Bepsa3n|)pÔ -,N'}YV$<yr:}`a0jLǔwZ~piח]]:}_엋w#-^@(TwgLPqTaD̜?5yi%~O!0jUjjǯ7!'mT@zm5ХH]VjUXJtaHB_b^>e(855MRsgB >M^P/T8l?ް&3FޡS.~ܻ4i鞯7)d=*LApMv2dUrk#zsP *OA e@z_I&M%ũV$R p"Lcڙvй!"pD>]P' ݕ# D§6m.UUBrq1AM]J.yɥslK vv(v$+DSvePƦp _'Q$E)pn[dAS`^e;> yj4NcJFOL 0ƙF`%@$EoVTIS2VIʈfLr)j*/W@Gؔbs}R+:8(BݲQ2 V)gKCHS[P25{ӹ" mNf?HTRG]AKQBn0h ?NsjC>ZOP'CzaFtdZsMɀp&|%--D#J3o;h(EA̒|#PLS==w`%w2Ex8LeR!t]r*K3wЌaLPZ!fbJj:!3﫜zncDy<@@gL1N7&I3O4fԀrmm|o+>ŰW91Ua1쒛ӑr.gnN0&Jn5&oPE  w$`,r7ײvne{9*pPc$p)Q,pĚhOS`;^ExDbHu-F!^W\O 'l[KE5Q)_ Ȋט5Fޖ%|n:Rٱ7k&Z^YCqFkzA 2@(ANS@%oc (q k-:¥neyqh B?ͶCFv"兏ׅ~xrW/ Rq@UǯGnm[١k_nC~ݙ2 5yvd`(af0$:4)QN^uW<+~e 0@#k%iѳ!·YNi!sHSvL9Ctl4za6@Wp !3;Ca3Ji_ߓ.Ҋϖ6?4#+>ma@x xk#~r`t،%R lW=%LHAKdkWuMU5;ER=Xr$Ck={V>Mz~1Uڞx0zv;NxVZL_pf趆n͊dbh3}uљ*J Xp۞Z>'j6\BƷK:[^9! bGSF./t0QHgI9nx34qj+oPL-vBZM&m0A27GHĘՙU3J LB<u1*{i"i4o*P_貉-2 ~?'_1=̬="wanisl,ugLk9@ʴtTIW!DgKE@~,9 PӳzRdS]~46TCl]EFC?>,=6l翁VzzW҃ŅopM ?Yy좴nQ\P᫟&aAD=\sO\/ks}qW忹175d?O>x6bA_n.`"b5mE`:xQMØPY_K#1?bhKiD:4 }IN{SqR1V)yޠU#DMۆ@hb7Iy Ѧ1F`i0'@KiJɜ3ҷAL@^&]j8+S() hmI72rt0IEI!!$q>$·rgk48Il;lguJ>ڨ\Kh18c$&~tFK'޼ l'Z1 ~V-Կ??ſ݈#xr\["rLikW F|08Hl9jU!ޫ""./ↇ4/7y ˟gz]C JPy{vŪf`xdض;nW]:q9zwsq2}\sG}7q672 ӯR9zyBQ*"1} @#DTѺZ;0 n6Mgs 7}8'ӇGP.4V/' XK - i)ι%CnI%bג)P(G5L0e6M=1G<\-+!-,L4= "l@R=hl|xYHg4Ȱ"eꫂj;k?F-*h*TTӮ\FV;FUg!Rvj]rB.:TJ IkgDƝmQ:N~(Ip[Ä1MpG4ڮ_ЊgZ Sy&QnK3ajecVXF[㐖T{%6]G4qiL '4cBР4g &ϬW\*ayB寒&KDtTh]FǰLY`"^6cHc8NDŽHX9j[)DeZyCʽ%ZC*SDی!җ$>kR ГrCMJJdJ "I)[G$ xgl0՚T %̫oN";f:l*q$6 `%ަq81^* mvmc%Z{Kn-{vےS.W32G(7So08F3`BU rECif|:9rp͉r< o Q%qę^.{r̪C'1iI!FL !HCR9RIRD#:b} )]pp~jDرF͂ +A#Cn+0kRU}G\\dL9 Wo]xAT ) j/Dq`%@huex 1A%G K,P 7-w>q_>;g 1ѿfoC1r~aw;qlIOūݾ_c->HNJ@/ ],co e\~V uQ0wO3pQ /'=YIO4%VⓆ$䙋hLݔ ޒnnDJTkT5NkAܨ- ) d5,f,8z<.vݚEy/0,wWWW],/%hcʁTd33wW~fq2}y].0-h"E}rd6y~OG$24+c @F9ˆy T&96x-7yd ;0:TT. 
5S8QN^j| )ekcpe^I/dhY ҉\uG,K \iMmzQdۇ罭'r?x^%ۂba387֏6c15ΰ[ʘibʪl3Gωs[Gһ3Di؝\)O$Is+']-Ua+JpxacbL ,]hbD(ټG奖 `Pado]?O?OCs3q"3 dx]H;>r2+hW'N)\qwNG~1@[};aOOٿ$AR‡AR=AJg@n`9؞mYd\YwsΪ΃JXJnۻcb1R*܎[W] M 7wQhY-5wq("' \uа ܺ~X,O\G=.>h^R8ު4D/Z*%-)\%=Qw}gk{*v+%QTdg[rPgK-^jPS%;MJj #^35 DZ;l pY(b橙%Adԏ鶇@v24/I q){ٜ{Ւ,T:jXj¼y¼2QeQlTfLY\7T٣3>کT b SRQvM K x|2^Mv{ ]^Jޘƴ_eKVX#0J(IEʥ*^9~voLJ]]2lLf"aP?؊;uɊ6S9svmMo`E%cLT{+VVz Tm(3PC1Pm@ {4N@Z{C: \xd(.yFc&aO?AMdo)eK4QVo>eÂW HZGUbCIAo,#k/vS(E6&owBaֿ~_wg].$;9= 1ٻѤ KS۹oܛ]I<}3beϕpXx{e#Y)Se#$(X`h ҌJYX)oIZr;ԴM0s꘵%]a,|w]ú|^^U58~@%uRqλ$0'xw\dţ=$>K~ ױj|ŜeMs烣5fshgw=Ѿ /nS%A ԍ/a絳 dJmr)ԟovU7d0Ueٌp<@Evlv W gYβ2ՕaI p+LPʀ"sV%tJ0I;c˔Qo r Eܜ߬NrEhE項 W|] ֆ:%Vy/EQ$ׄ"Y#G$sG""}84h}Ђhؿ`Sb+u bsC}+P+\|RJDhNE:(mN@lt64c}M1ZO|0#]o]M.wS=8m LV&"scɥZ~WQzfU4@%e.@:f"NlDwi|H SBK02Q*c=XSPd M)PKT;m0HOf$04_6QHi1DZaaO-zQ_ɹ2nmׂy zoɱ!=e [o%k/6/s^Eo^׫u*ߌIiv7c{I/1{Y F':6(=Pr2p5}Nq-iǰhV曢X=0EwjlHPT)U&?d#JXXz~6_ƏصB|Cn/|<((:=gA Bc$ GA$)FiI G# ) Wf`tDe)C`FpQ{Ou2NJj結ޙ߸_%rN]@nK;+#N'?'Gv]J|Qm['>9 ,趧VxLVjo ~_O=_)@+JSPho֕*?qr:Kn'C;ib@u P1&S $Fo(jj?oGk8ڤVh=潢b704kRKzؚ5qk9-v{aχ{M;N@VTqaU&D}?/ 5\-stwERJREc1W:B<XPmt+NUoKZ#"i`vJۣ=k*Ծ-mWV LL6[Ab].&<" ^;+Rja@Oʯe6Zݺ2db`1ܚicxu"=:aq|ƝI#ruݙv$/PdqSF8[-j1ٖ}JEƶv|n/mА֛d6mD-FX;A]NXo $>Gٞ?LKO9:TzeBp V&~ eY`ݟֳ|zZgiAx,$j#RsL&JˤRd%QH ANkV>|{k/b.GGR%xG9:NA72 12ؕQ]- bƒꪒ a](]B[3'BDV$ŏ LZִM;iJ.R.Z M8 7Nx 91;|q%[ O$d!#*?n, $V|Bh^Ƥ2?FEֹk` M,Q4=- rP'>qIb nMmw2UKv:ro[hYXtA#Ă6ʥ e6aх+"=2j$s+r )ɐaM2(?>PWnfbe6Flfp}{s>9o8,ړ"Q8H[ioEq$Kvr=]O3ڧ%+Ο}y c#qh"I *$#KVX|dHΆ\(nKEG^qFbktb1 )jc8<f9+ .!(s^T )ieFRKRPGナ&2l^Q#Ra⧝``r0HMzC#&41-,Uň $iVfJ@K?d0$@liwۓ7€S¾\zVpL͒EhDM0&%DTh0u_[ɉ^]4DHIQaA=1j&#SL͖ ÉU:`t$Ly[1mutv>QJE㯻|qu4i=wN$\b*um&nObyK ' @Ъ~o ~结AO:n4|<'D>~xpo15 lN#;}>zr h|zy=nJ R2 x5ʩ!2J'SqMOŶĥ~Y{ĒزI4e$3X͌t[ﲉ>DЁ3If,FlM\d0IP8 yf@E ej!n !ys@D[Tq;lDP`\*Li6`JO&"(sYg P۸F4i!hyq4&"uU^8?فu .0; U@'kNNtpJTh>ޓaQN(tbj=KgY,A.V3.DH cH9.DrJ%^YO!di4 $QECSH$q:(&o*m\Tp$A vm# '"%H/j~sM ^sYO'&`]mh]z K< ηbW8\,75Bp\DdY_YoTœ ]P=6Xc(fۋHV h4%"G*󎮀&!>:`h d)Foa{rj g SLmu0+)kCfQ;=nlQPv7As0.ua{hJ!n\YW?"xdgwrB=;ƃM(|܈s|0 hŹF+cSӍQІ2:ucJs޶q՟6A*aSW+9*Y}[Bv(P;+πҗqbuDŽjVxxKᇑЙʡ+.YѴKE(iU]W 9Njy{fْ<9= _2T>^~tk1k;uLגC\d;5ѾHӣ%s%eMKV}Ҍnzњ/]&ъ7Z,z 4q"5~3)g98^qI$+F z]~В@R:ATa ERjrfk@rSwWursJ ^jm7z]cŎmM'%ʓvsQ&r)eL3G zMb [[ޑjCD$Q(Cx/+ (}zzGj;v3yXY:J@ӭRv( 6$R֢E=[!}˚>D]V O8 ]S4l<]IMfǞANqLs9;6_k8KC¥=Ѻ6|JMhѕM28btâ26TN3_q0 *UY`j諀Ik[ɋFnY~Pz94{[kbÞ{MczS{`1GWY>wGt6I>sevE@ XWޢBvkxtez?P]uK*Cu ITKocm-#n(mhJWU!F7Y0mv6rQ],UG!W'&VKȼh)\+B u? 
a;o +)養TFF]qt*]iճ[~:G>x;-EY>1q/[3O\fҺ0TEZMT|[N d!da(?̪FE΋lFA[ W>xRἻmB" y6nSeJv3m5 e{RqLoSs t#FO/ |j9~c/d"P:jۮxg}&{$t[H[\3ڋ{Q '-J32doȹlRͽD0ʃx6><'+EE>W8$^q#<-/*T[v!}vHCGP`ڋZ 5ެx .f;` iiڎVVY$cZ|m>M1Ȝ_=3gF8/_ n4;nngo&UCD˚ՉSe~~widIfAQF F2O)Ӓ}je~:;( vR8r:9hn{նI'U[Nd\o~xQМH<"sk\e/8~`L]'!ӦcS!Jc>`c81 3x49q4"gv^q|[=0v<qOwoo3Wȵt1oSZy=[Ng\r;Kx~3h?|yQaȕv5bgc`2ZpϦҰy0 < _s4Suv~zzH;m'``p˲Yb`J ~ߎńebuao?ɠ~ ͧ['ok.=qo%!xEljF񎳆?@rVڛß3Кӣ]-Dd+s泵[M0_xsnl_B *?dw/f-/ p77Ep~xT0XQ㓇s3nYmhv2sqX Y(ȮY,F-[hx $e!sbTO+BQ:n&ɖAHMeno7a-oզ7„`19i)898':٣丑BtĐ2*j{'c*׮-Q*19EY J?jk2i^$ȴcۯh&87;?U:s:~2 &<):crDd^ĽIGF֛P"EvJYvaϡO>Ҭd'gwcI Zo'LzmcOJdN>kNVo{9Qָq@h+ HUv k%/ys{|w(N)bNmg<}\ śYl)6]׫6H~np&^ PgmPH1l)W'R¹ \ʳK ׃j!v )mUҲ Vn\Iu,Y"Yod6&a:\ I"!V'   l6}Qͦ/?X ԨK.&+s^BJ3F(k# 8S(8 (KdDK CSF-ꌉIS <(n$ץ4,:j1:'J_hCXL!fhf, KyX@XL)R69ʲ !JT\\"1¾"1~<%QxIpyJE$Lj,SG'GC7K|~cqh8J^xCUѤ4G^pl4 T:ABBS -a>IjGC` OـhLxx]QA 3򻭗^OɅKO3Btǽm-5cD 0]΃V%)1rxڃbA)=]5mkKiicd,8 \ Eв*}|bN |S5 0 ."m(9?jN5Z@nS )*[ܦ4mR@v>Hbݒ\}X]倜 3`jm%Q?x9Rz?ٛ)fg~u.Q" 7km&S^c'f@{_=[`LL5oj+qVe:4qʤ_tFkR5]kKcd Iܔ:ŗg/hta~ Cߑ;)^]n6m@%0*ryTrY#쨶$RJt-ɔ71`Q"<+똭"9avcՐSGֆS2+qZ7![>氈O~s~DE̯ 彚E=9แ k?戀Os$g~i` ~Kbi߿-ڰgf[Րtnf]m5DuKZC8?6>y@vOj6ҕa~LF}JFWve(d jEm{KjoUYP@@˨ X)m8{1% }VQuXb dMjͦ>7^Nv3=Мjsq: > GC*&8hNR{PI=&3XRfO}Q{cJSڸU6xjMߙM)2N k I{6@ 5F@șzx7䁅x71gb_ƨXL $@/>E/6 tsG![mc,Gfp)=Il<߇9^?fN;:a,S0DV$DT$c0)`l>L㦄F6º*(ɤStװ=x1C.)ծ 8jJ,̶~d;oa^Å]X}j.LO8a 85\Jx4>4BEG- 4VrΉOyK?57(]}{W>S0:2J#tŧ;\166HyH޽A7L2'n=k^dcguTIQ#TxwTn 穽mr=1m< V!, l<yz9%b@nG I4 ]i[FEx:}h,:ѡގR!?h2h:{ϳQAI#UC+~:\Xž4<$خ<88EisGͥ+jѬo&|l ruOW/jՃJG[^YS>.vݻ+}y KS SSdUӾ9 OfvY ÁJ O>)f)SA3CTe(NP gحzrKu(#FR|$\ DU]ߟ/%!SD'0Qh\:fS{3ͧߒpXUqx` #ؤTױO8 tApSjZ&M%DiƠLXp 8Lѣˆ.5G5cp E- ,b՛_>-q+r&可_w$զXI xVws5;ocf"UGdcE2-J[ofw-!WRo&;98Q^(9Hm"091d^uH#(!К/BXk喻c!l9'oEV0%%H}L)4g4 ^.1Å @喰DkEA3OW~3hdyxB EI/.vlF!:O0_z8m9Gm!maf &_??DW^ /z cdeG_ }v:O5~YH͑C(nT!RH2֙)ō 23VVSS8O)bk #5+/G-P~Q͏i:8s3޶4,"΃&IXI %$2Cm>KmțKݸDF VVI7OBhw,܆-Ne%Ltb4B#$}Ѣ:u=ꥍ֘EN;$dS "gKN+!WVᩲ7c?ӧQȾ=U/tygChSNB̎,0:}cLO :SYRD ;c]Ti9H޿}c_y.ba|_c(Jzgir|q_g-y=k }XKݒ9[|}gE|~]$5|X7B:'sZbc8bZsϧmql+9 4LIdC_/:|gܜ=rrAA &WRJWM\ nhO?!碇qJ0TY=Dv|cAȽ˚½W;%0 (җE1ċjz1L./g1K)SC c d0?sDA6 (%dfJ|hu451%)&\{|pjGCr. +; QCJAEm|Kf E!d9⹔F} ]^. 7  g㑓΃qc[_{;|E;|&&3nD1Ą@j%@aƔ2.0: H H))ӏ]?t:ɍY/ˁi8@rg6ӊ85ݬ?2/?X)&.ܝ~Շggڛ5ɍb⒭?!.aa"fQk0{4a =fhž nf:`xCuZ=VX6 s@ >\0nڬ``>[M|%sr1v1JÌGdըҫ!]/F7˭>[=LJdpכ~=GC؄*3J:fX:UrK~esv/Bp|w,\nfaUw > g.`%M &G6o8v~s[M_]t e| C1֍![%laj-6iV%V6baJqjˌJ3[k )$=$!&[a)_FFI-n]@ Y T9$0k˜Ȳb4T#f1Rb>p p#=;#i3"-( v*Xa" `u 8Ny;\(w֟tz>]zkҍG'lVo kB_qxRPPP51wnNykJ{l&{5Ҫ8kpdPB SLTya_7萔A"!GR8y쁔"S\þMPp%۠1+:1$.ꬳm +ۖ-wA wB îOM2#n"d P$)8#,3ZC ? 
1]8y]id* _lV#;wpQ)O?#$$jeomZ~B^OcvٳbS~IATԈ߱1xDH#%C0Iߥ='"ߙABϱ̀?G[PWk O\4}<{Y +ǪN 9Υ|VYI(=11Ng#}j$M=}ݎWۃˏw.2rKǸc>)f0y),hkg'{%uģHV%TRj;ÄBVJ}`"Y 뻒T;2iKNH#*7jli\}?JAa=>e71BR jA@P^jCĐ$u BpSfcV4|6qS-*cBa wmmHy,GxyM=E?5^o;OQ#Kxt.:r;VUX$>:ԏs;F,QLŔ ^%r-{YM:kxW9LޣYT.^ "p(FUyd'E{b ϩHH%M&`ev` lrMk*l-C3F&$}ua قBK rB22Ja?,g #à 9$"mY3) +l< #h @y2Yl hFy+<#PޯV ߾/I4%c׻T_{ݦ'^okJH`tͷb% YlG?N^MP4M*O_&tx>kis?ëEYo.//>x~˛coV߹t7?je }/AhFo/>+O%=;v6j%BWyXsxP1RQDdž]6F-pنPf)WH3&J]L(I45d vAx@1yE {#:u^(=4g$2JHHtE㕍M!5- yEl kD\N n~- yCs($h$ Vޔn(*Kd:0tkZy^ydFn=ZOYrD1A })~kh;Հ*ZU;%Hv@kv%5/u Ajj.χx Gf4fP,`fg & o + rM$XG"+BM .ġ;υ>R&׀Tq@EH:;_B#E|t:.>]JVD.9 8)<6yF=]V8C{>uOuU9drl=DG7#zpMHX XV=pK a8HIȸNGy*[UXUY17:P5ӓb DIee;F5zskT.%MzkGH)!9:ԃs_O 0I7FbZ) ˒[s>ٲyjP.T_-.>/D@"8# 28섡jaٽ{׫sw*{K~,[;i/N⡎r%fPЎ䇟Յ>λ봆BJylȈAu;~ok(#άC$[ߐ׷ MT=֖;ח|{Ɉboʦv:/%7>GpGFru7-jG6yVw[ˇ\ʾ v @r&^Tk xj?1@s&d[ѻNHgIz-bC]/Ӑ Y EO|URS}ؑj|׀z4&̗` f$_oŷр#oAŻ< A30/36P.Za[IL̉;7mF|UU 2k&zB ڌ\/W7ΊrV(obe¬""O7)Vnw6bcoغr_WQ.,9OLIbfvY^t$ieysWaY_/z(߮"oW*.FF;7S륿_|\\/?x4mjfNE!rVZL/"SvJ*G}Wws{Ftꋇ椫'Z8kBh*4|VӨ΄@mi!XGYˍC)6x4 J3V!j< ,cV I1NZGf V3Gp`6\]4o$7nu{ZaEXC29Xh%#V ꌍL/CE ;yu|mOU7íޞ{OT6a'=BV&%9ʂ$9 . %+Gţ ,URnLs^srx74:&iU@1fGHs*fYdj!bx" c[O¹3+Zw^':pr518Zea2Ӆ^@hb(\K(S$6gF1ũOw>Қy4LOwt9G`mj; N9EMq񈴥nfzx+D W:' DW h=7jZۧ'Wn1Q3g0e:~frյ_>祭Jzےab .O3ʢ GkѻX1`2E>* 'Ǚ4 U6Pܶчt X+;h ޽9skSb%I\@l1 Hb#TpJU{#Mfɀy6@Da*w&'Ͳa UNq9lf0Ay4 Rk$5H\3Ȟs9 oL;9ĈK*CE."/59NjnupG 3F"|Doxbi0*1 UV[)M=6d8@~3#{1Zͳ$0HTe ȜKZzy!Y-:@4\J ].l`d:yh.8IuaBKѺ䡛@A@䫍WLd(zq.(ì. }u2@f)HӍ[ " )3ƍFGast>M@)J&㥦pc__wgjh/atZuR7I\Lg(tQ*h|/MpQ:G!Gi8Ago8)f*Fc7蠵4{ޱQ#hT4.+ aUx [Q%点Fл GD+%oLϧcyP <ɮlP;uPJ:,LV%<+6Q)x>' dSO\jmIv`copV'gE 1pB㢬 Y V2itaY->i4(EIef8Wr پӍj͵A*nbThf- t֩J3wNz D=zd4Z4UpoMrXFpT9ًO!6 X=fqW.bJ7D?` G @,7K$4, uVj*}\}Xl_]4_޼~]ғʑk%KP>rTR5f.S6&fmկLS?7on="ST9B(S=9&!-תP4ܠ>bY3(J{P-i+( (8\D[L0{P#'M@SOj_n.EXŐO,GQ(\ +l;%}H[2hHiA{1R55 o:u%KrIEڇ#x+y C7u81G=d(!t0eD{?z@LTUJ .2Z@Z)IfhVQ+U4 IԒ&jU&@رLoHZW\n ?QRYUՒ ߳U/M5ώYDmiNƜ9 gSRxStKr%UIZ.[ҡTm:Nܽ>RV=Xb 쑉}c65x-40gE5󎻳6Z'Iuot\3igB:U ,1fFTHUl#Ur=v3כMs+;kO:sLs6t^b6^P 3V{p:5 =AC)nt܃/=o\{F5#yz/5v(فaM.o>~fErɀA$_fɭ?|夾gzrz%ԞR(vt+,lj n{ٟr-QA&cLY0Hȍ#~F"Ba=#OkOaٵh :ɉ{lcO@N|YGt% zɔQNT^A, *ȡIJRAu;Vw[2)3(1߰V}J$;U_[RR>^ڛ[8UaZ{ER]AW5~5t} Zd3pY;z)u#iF7Oܷ2SkiC2%AZo.RjyyZ۰e(*cHil82v`R#p -daܛt2 2ܡR>׆TNVaY4 Mjٙo?b }b߹9g4>Ⅎ I+˫犸t?CϨ;z}za['Y]q'4aAz_"Xcv 0ks ZgM"zD2גַ.%zvZyV *'$T9Puii=aj2F%GO&qfq%sZ`R거}Bƞ]B$Dw{-CѿocT_0ĚLu{j_a'ZY-&hFf%[ \xi"!T:^ ~KHmRrŏv T\݄kX̫Z]^_^t&e֛i$_][o:+F^aY,lv_fqsf/(^6$v&m\[lM(:3@eYUvﳓ}Ƒ1E.ȎkZƩhm4%C -kv pz/; oؿ\jcƾ/Uwy(]ܼ|7@u#0/[{lJ^#Rjn|_^`e(V͎w<%j+z|m9hmh6pTE1jQkT)fOa2fx;r̳$nFh!Gv*/#1E>_Nϑn뷻/mεoo_oaAV<Y@fhLW@Q|<6`7fW]כR j?+z$g<[On}w')N|:U3_]\ݡK+O<)4Bka 17(E-//k *;s))ϬgF4R}l*T0ny Ef\ǟ'\> BNԶ+BJ SPzi}W3Oų*G7)a7{PW 0^k6Bc c: N[b7ݢ# :9tݩJjSaBJTN4p*guJkYP$L~1V -*= pBȹTq! +{!4cf}ZRZKB2j-C4E:mD+մv% 5im2[klZZּZPXEiږ,U'Y`! 
)")6YR!Flp!9.^ԅƸ $[[&UpB)PdB58X^yDJZQ嵕$ \՜;e{j;eP턍BNvf/,n9 [,+2ߩ۵zɾ%+$'-TxZ BNoߠܴQXbbI[I7'&4m4L߮?ȧu %VTX(5:^4Vz{H RE3uc-7B#P%KadZ3OY4gs (I1dsm5PDO/{Ꝇ|b2_OE3XdEa0+ p Y r*j_dQUDՈ4]=wB{^ kP\Ol!zbj $ꀄ ) {O ҄$pvoV yUPd 33%,Q%޾A L EE ͦH3?rR2kT.PYNK|Yb˿2,1\ r@ *VS￾T!@U#@PT+*ePJR,b%Mq)5$fh 8/[/|,?*aׁNRM8@)s:[v9 4t6 *x=߇ =qc6-Rċ~1O{ᓣ<eRFȧ-CL]pvQ*1aD9 9*>V%L ]c䳑ru"!7fjRCD܏OܑolwE"~|>?O=L[ddƮK\8AYճ<>15x|1׉u@SQ?]M/ާۉV֐wqӳ+_J-/h9No!G%Mw֓-ޑV.fnT`ϣkGU(MaO[sg\xJp,ϒM ³$sσpI1A6f~9"kX#|8_9{|>O022ٚFbX_k тZ1n\c"^im-Pd)*ӻ2iѹ^F i)p XޚaQEvwGE#oC粭$d/w;db3꤆#]ҥ?δj=̤23G_fu+(RF [GNGȏ%m&9O#$c$ ;RRct=޵JZ=c/CҌGb6B7jlqY Wd^:F@2۴k)B#kwzYi6D 4)A:83]> Fؑ Ec%/1Q9 \3!Ðd?;iZ|u.׿=⒒^?|L͓'oGradoN㧛W)Χo^WVּݧ )}_.b%27Knֹ]7WioǀۋL/kӢ"߾AO5%- 26Hֹ!66)Kֽޑ[\|"Miy $7T"bh0\#t "J7F+@$>)$9%ۂtε-លȹQsS;R`:]D#@]^əA`J4+p)(4\)A#RbɐX(F VUl‡z^5l4HW 5,5w,*MU %K`GEoMCL闉t)6G ;ޱdȻ ۱:b*8g Rh)z/WG}/&},6#+z$l!)1:w0pjN(%m Sĵ Sh˂zYڪ>&yBE;N8o!'3;="Ia!2'qd3 `B+pl **@t D)75M$[A|`[{&GƺG<, ; 4z'.Ama}>:Bj"hEnTz4ﶤQ ~㖳 ;u lCs u7F]51L%tܜOSE6ܪ8d= )8B>}\_o>Qa:{AL-%eiEL^ӹ7uuXd)l%G&=S1\dVLHL(|d!r=#`ddf(SxM>,_q7Wkr]urO(97i?}=)D=T$ՊW8>Qƪf5&%ҀuD2c[G+bD19K52`1WJVpNBPwG{,B|/ExWLe`߆F7"1j woķ^$ NhК]]b>*E*\aֳ?%-Xˡb" 3𘐖&B ,p"_zF (dmڳ9 `m>h?: $X! 勷V J|@)<_tЪsRm;'^[.(<׺Pb^xXT-4a-gj{ܸ_Kp;ĻXm~a&eiVq^[liF[VFHǞvw"YEV=$!i#ZAOMxm5 %o(JSm'; Ov*OZH(;l}F88djhRJJ%:,:j+HK y mrv$43#$vrĄ#:-R-A-7#>h 2LI%w΄ĥA3HQjEꨠ̉T{|E4IeǨk+T%ng ʂd9\0M KH%2[M7mZ]֒Ky@ {|gy?t*4 ?M݋sIYr~/1&7 3#+/uxJ!T Sj# e}CYz7,ei 6`gUHP$`?b@(8/5P3ܭ<7z9Y4Zy%G~a i0Sl~0k'@jN(*). 'Xs ̡g$-)Z0wJ8K;RF8zDט!4Z~I'Uz%t ь!m%ܱ]0]ΟS+x! .IHq)P tXuEp*GBo~Xg q a~O(= dp # G02V0)/k/qCp=I~3 IԺ}I}CvP- k\ ٳ7EYɞEKNnK^B%-zCՓ҂7fϝv"_%AIW? fw2QrisWؒR VJ|)1,'Q-vVrdPpAae=N.vE w.]4Pi2/rY!2TkA\Deh HB@B P-va>~3n'cZKZ/z$!۝i>ۮ hM7iР קE/!%R7ӕϥ <}O˷in'Od D%L*u5BXn}5^0x?O*zpR@IpZ>~Տ;Gis$76ڤ+YVB;Fm1ai73_|h=ױ =я5;ZUCVyAӪ: UTJTqFn2ʊсA6dgRhqkȅZjBf9~쾚 %lo54'˽{ 9o `gT;ɵffw,HC}z%"GZX^Gt—"u7^fww5%Nr=Nz Ι@[!02ДQaW$7΁d>]sT"T]Kd y+~AepK~)t [>č<~hчG_N|\:9nf˄KY`<]&i%pԇoEQp|S!R@^$+p[[wk#Lӿ^bk=߮Wj, rw)gS-wgiq𫼪5?^Dwˍ;PKn,<~Gb[aϼNn1fjVz} nZ Av^fĉNs0$/.IR'ur .<&,Xq[.#bDk!> >?(y zhA9ܕ\V]a3F)vf\nkO 7rZ"+#[U(sN xܘ-ndN{O=7S2RlK#5Sx]F$U6:8 Te.Sp a!-h erGTRƒt{絏`0Nϫr}yj0u]a\|1zryo yW5 3ϟy~xTZ>>k'^gYN,f3JPN&EzV@y>נY/kglPِeD2ۡJcI-BEbq: egǯp6 vݎ/zbJݑ$ m.RIkԿJz 7 GBMeAemE l]mRUueɃ7OyTo3j{='}ޑ(1pV/ܴ{;R"~Jp:촮}T+0YyQ墇sWԫJe-gjC5Ktwu/u˧W!iߪ2$:ǥ 9VATqEw='~{ðJi(bx* + V<3lz# x\m p}*VwWÕpҔg+<@ZY%u@a^GGmV*DㅚǴqxpy%䵢# IL5ZR%[_nm \ dgi|TĄ@(xl4lȔ7Ȇ(TQ)Un0Ǖ=j'c_ҥɽDi/]k= -]yɾYEFFMyZcTsߖr~py|~ ErQ~m@'e#V~]iB]K*Ϯr*X- J :4@уE1(J-EMo:Bg_h0p_FR:¯=n3yˍ: 5F~T*`P<>dCocL><:et_am)>,gY1ݼ+^6},aS'AaQaZ͏_'iyP1,vk 1mȼ%b? Y"4#1#doxsvqe>v, h >ϴF.!530Ts˲a 55ZI`o󭰈oseX ?qŻ[MnCůe Ih|jK;ؾ+$8/܉T Wl;mK#@Z~\%peք70w w4 u/>3SJdⲮs"3Z*!}$R R:Qw$ )vBhqeYa 2g4S gzqE˝e&6CKT0S#M߽%B3rf吁 MPy'(֛7"LIiK9Mt{N)5CsRX%< DWL{!(85XBUo Q0"(OZIi bGŋ\2AX2ӌ)gc^YJYu|ӑ!֘#ƸV.5@$]uՈf >k9Hu0AsViVp l}zd ?%+T?[R{_qZ-cԉƨGr |3 sN3r68d`AR &5%JJt}(ì6u \lѽYΙSX(pnI ,Mj¤! 
v:C ]tahԯFZEB}j7M'JGPᾯfI8+(|Ig@s0U/ʕXeY_b)O  @,Dⱘkz{bi؈)8TSqdCY|xk [R<y>FD+ʻ >+F\ϜMg{j2u LŘsv,b\Q#->F=".A9$v4 ɨ^,Y7 .^M@ֽtZ ޱ`!r cJ%FF j gGtW +%`IˁC>]WdIWcI&`'};1r 1Rk"DN#"VVaznLյ"FV&"y"L63Xb."0< 5rƒkl\1‚=),:lF6穑kU?$>|_š F0cH$D9A"2DR5#**lGO ਁvF{:nh֬͠h!S#IE ;aZ4p+lNk =ԌPY録6@j#Fx!x%xRqpC@"&̢(/ZQE҂u0FƂĀp.D9ґ rΊюwvnB{6P@,U˙9z:QI;Q QVBXickA:O¬c•`J)<ຯ'KSHěc?R{َ=T.1:Ky4t-|NDJDjgqHZpkw*X_/F2G埯?)M *q")3OՏ#{3TybYbYbYbYtr52 @HI,:K5H'J^=wfw)rC Q0-bKM:A ^MWUo>E'0b7,@0e^s06L|M= _>%E'sFs48H"赣#ƌy BXQ+MIr0G6rhXbjY@CJӤBݱIFȈX()Ý4fW#O~cqSs4Uτ|Z WRw@qi=Ap{])Xcl#e֘ fxEŚGm=X'lbN)3H Ŏ:ԞŚ-ᓧ%gSY5 }H!ޯUy%nHy<R_R>\_' q&GRCpK >9i&{ hgtN|ǙTjr6Y(gRHK-Ǡ%rjFÄ 2JtPn=RG焉)3:nEEw/o-]JA)w=sBʥ,R&T$cOׅagiw#YG(ф+#+hjJnDaN`fTrg1y;Rm4 v aOq_nss Lұ&dm_uI ~ q:IaO s~eUE˿m2dc )˸ IǠ *uDg@CHg7\"Ah5Q2W=nQ!P8vZ0 rqPɓŕPvjfqt11 Rb}eE&XA\Y#[6"kރJ_P)J)wO7;s.3i(NJ!%FDA4Y#P3AhTH픒 "L0Oh *"Ee#@)Yǽj*( -q#j1*-eHTQ`Նxn\qɱfb{׭ |mU6ZYc-7P ǒkrP^ӭ8{n1iPP,֊!J,'" n֬},a{ɓ $-ET" U_;Jn/H5*Kd2MJh LybJ &:u2k&s@7mYObCuQNp)8U!β տ4Kt >NRwg>r܁~s!a`ű3j A ЧtfA-8 '|~:LLlivO,O$epAząNmUr9gIf`_2o5qeM'd,kPo*&H%hT S=F] o5W .6 7IGM^LN5L͋N3xdk&흿n[ŋbWp SNuPCјS(A20[^^<^ YqKH.U~~dVX}vX8>2mYuFL_^}ܞS?t7-sDz%e3O]f<|w_=>y?\~o607d ҔGu /݇Er')& /Cr^0R AS%-]\;wӽv0[t+RZq(q'/7띋djIA-̽+ 8ۂ{#2}z.(;we4$T_@vAhHjNv&! 3}e*]7qn [.Ni5BVK,ݔX \7fDN]d4B@(ɎC ?M%wPWYR[|V :f_s.n-7sw>k\l$+"MMjRN;h)[3[hL)DћbjRN;hCAB{ng3[Lej#fy 䬰,t6h.u,+޶(mW\`_>ѵ^}Ơ2)# =osX#0Ο~v%H|= I֡(| Ԓ/AR&x+ 洞,Vh5ˏ)m[ϼJFtHr䗵 >F2[^=eJϳiY7ބ[ r檼aa"`8S3j)Tlӽo`t2G5=;R IT居̨hL'x>=F<|RN;h5޴[yLօ|"Z$S9+T3! iSFhO/zn>Zh{")cyhA zݶA8Ļp DO=S:~Sfe嬰5Mȧ\ QAaze#,0b9 ȼ[m)j@X-6=91\)m81Q\h#PL@9q>kulX'r Sk5M; )xq:O*8Nޏ.G/ga%(u4䪦d!H.j&Wuÿ%na>>y9F>T/KT- lM*b ŇT_Q’Z+C+NX"!UW',Q,TqOȒQP:W(g[rǔ c¦-~ E=4iڷǁ +[:Od^J~69I}ӫs_{KF@Qmv #fa"I6dEZ6}U6}bD W?l^\>7Y,/]1QGk1@9UwL糺'2YkpDNA{9JUuPXm6Ki#2i9eʦD\PXpLpBP N Raâ^] ;Y&v(%4?I.g&;8PBXydթ)EU4s sÌFB ' wȘ`Y׀O8 !EX8 Honb`"Ωs`{/,E!QDj1Z1;ZcԂOWu.F8xfOvL[I{*?C+j &S caH)4 Jh,a,qg9,]~\工n|˧fa%,|˛ZꔌijET}^W/W EEnwPE/O1BxT}!ld뛪ϤÔ1'*/B62 ϹͿ_]U )cLLk4\-i%۾0*޲D d>]wۢS x&߂M:3ӁSx%P[Eq1SN0G"џk4!9y=7͖=FiΠ$Ф>۵H W=wW}]z}<;c%oGѩ,xxď8㱵鑷D -Q|N%w&`Z)o&):B[ }F?-azp5LhíYGsc>_?];]TTR!^ v08] +V n;}7هycLQm VrX얲9I>{TX5Gy7 Dݠ6W+otη-.-)#TcS p ޺ '%/ߤyzJa_ wtd!iDHL#$-&\))TpY4H!x?{WƑB_{ G%fY5P(,aPwb(}eTҽF)y(e2Y%&N1OP7J!CE~2=~E5yRAP*pk*gM~R5Y\S6X̺iGh 00y"j2kS-7W!q ^ \m,h9=8цr^<ZS;&&e Q"| /xU`La@G9OXyђy)5yJJAH~((lx Y*3U9&9+j{a {&Pْ8)PACI4,!V%Op &@F0ϸNosӍj#!"Gɤڌ:?h1rt^B@0eD.@E x0P"Rq!d-UT1‰IOzg`Rhc dZK:kEI(w%#a\WR 8T;!IR=%Q,Q[X\D zK5d߁ ˴LWI&Jdj*̡~xJN+eRy(Шt@>Tdz|Q'(JzH~gҼո*]Ki^A[Ӈ$F)<*E_ (eҊj䡔;Ǩ>Yj~((<^硴1rRy)QeчF)y(eډ2|'PhJ+`ҽF)y(~1t'PyJ+eF)dz|J!WTSE"Ӗ:pm.^}oڎ<L"2@1 zk4MЉN /@)U8RʹhX'rYR~gzL=3xcƳq鎏wB@ C7Oښ"zo .)s_22θL͘5PjM w%j,XH^ at6ƈhR.qnB׶ W*΁1pe$H G#[#S }; !IeP;P k+H3% |t%5Rz2,Qտap-NwX[[wc!AC`0; A= E!Kɵ XPXB U1R5wق66c`g^/6]'Q(_ݜ n A{c EL.yG4bDy<&"QwEs՛woldߖXX[WIcvmьRj6>LQ M lT@/?5ʽR*wt!AC7Nn÷r[L:3Pzʊ{7bw|Kr V9^OO)<ï|3W>K/s&S8Y!xRƕ0ij5ZlhּcD)C[,hU \)1SR!o}~WF a 4(&MEgY˝O&vFV -qj4AQpJ 5j)8X B(* ՅՔ62?3Jۻsfw; >Ic2 ]%ӳ |cnQ),E?"4!4!4! NykhqJ?ǣ: '[}h/U] yOهlz| G}(i΍hWӑIGn&>Jw9AݣX2MZhNs6hWZ!%sJa%8oފ(\06QlW!$}D ׵vl^|(ԫNRůTByn󒍹&pvvG"^0lZ=UJ!_49OYWhEsWF~zu:ܜgYRL!o_UTiQwhtfoiPb HɅy|ngwa /\f޳0&r32U lZvݟ7o {Y'w: 5Q?)vuur3|I=ǣz$"]T-֩x/(KL9B _rCj?IG6JNY|DN\p2&tF ^\$hͧD&iA-o{tqt ꋙדŋ!S囊ɗlI4StLRpt[v;ؠ'1VT8ߡ3g}D=%M"ҩY5R|A㟢~}CŁLJ⇛Y*Z=h.L7U5oǣ1x\zwћ5mT?Iٞ^j6XbҶ!֩p[0P͵k/zSbx/=뵌3¼j2T53Z 뭺g;O׌َY%6e6oX B)ʷvlѹب4c< Y!d_ a Z PuHӱBT3Ź6j bj bkǽRݩ6ݟ{'FQAOyS~,/GV#S\ξMǟY@j4y[/ztWک;jC2N0vǞNbu;NC"re;]/e.[WYǫfg8k>)!_9D{ak  Xc;w֭ rm. 
٘u i݆@W^ڃuSR}ehcݶW9[%m|]T՞E/EE͖4nLAR*xQMPuY,._Tkϊ´`4u8'h`i/R'.q&Fm*R)مm JiCFbTX庶*FjU2$ yKo)-s.xgp` 4k+{PKU:"W7U}t6z ;ƥK#E/G/p+|_>LYvWpTWe4*Rl&YfB d>Ӏ$2C/R2T"@b, +UA t҃oX\#5-+˶l&h!lXW?eip;|M*ɚu- X>Wy\z9~`n@n^av &M<QP:2r")4z8Q}@٫cjj%f71p=XYxN&:J~=H#|0Z%Na0 k%.2qc W`xoIFTZ}, BQc.Gv5x-Jպ+0mE rx%3WbZojp5=X)-x<4Xak]q"b+}} ¨Nש`lzMeu{6MӛwD~(/f=W_ ni6iڜisڜ6Չ)r;7!I'5S(N`X$ Nvn*P-P8`[aB0B|ЈQPYBaEH%V=~& nؘLc[F=s4R#ձ,"HpƗ3.h 20Aj0KTCyqEHLJmNƧsFqZBy8.pE.{V]`6JRETF"x 'c[Em[?]aHa6:~-֘X+_Խ= hqm-5XJ?{׶FdkpؘhǸ{e'mRST{M)x) Ū(hw*s2),]oIؓi/dQ^PbGxEyarwƔ`|۹;͏$:j'" ݋RJBi W,1ZdT:CS}sbkDIR3TZ{˅Ҽt?>\ܤj1\OUX.q:t~# Gk]TQ5xpoLfL {|CfC-Z0̠"yo+29 9?y¿S\ ̒ MPsғUW:sK- DŜJMԓ4nt>d|uȳV<}ejKz \x'd ^GRJF=%%:JŁ pJ^^LNə1$Nԥ]U>na(.m.7+l>ǻ׽a@S_cb9pJ,EX jgТ.H~bEu$CAp&T5YyuUiBB~ Ps+~;Ҹ`>LRK2~~^@Ԗ$ZuMCLv kr>1Ғ *]~䛋=xq;[k/utӲEJ=pVwqQWR1kh,N+!R6HøSA@Ki8 m&B+ŷ^&}wWcS+/VUO/4@4\83d/g]'s/Jm}Oh63ٓdFt~5CPtV63tlPBvIH84%iist۰|KwQ_O% 0k"$u>F K tJE3xTSNtdxS7)B%H=Dwı!GLs[`mdVK(Ƀά&:YogpS/n4D \)o(iuyIJWXx̉/qa2ż UxUd!ѳՔrK˥5@WsW䴰PS )e|<Һ,&grj}!(ʞ.|'`E m4]-:/\G:H2T?eg^+Z˪>L0YOXܘPS/Kx:c+POIo_#XS)m 5{)5S鹱-#GkPS.K I#3+!u2*-Cylch~넡g#z\XJKP N)IY I#q$Z[E5Kz{mJF'E DyY^:z=6KŷJ[JiLz~9ׁl7>*452ъjCH50FJ>E$PB( _u5UMDsIQ S|f_A m1PVexXujLn9q:."Z1D>a ɵtVaJc[eAjÈ`@8_LI sϞ]^Mg5G41PI t=VW3_EN-‘E_8<1J@튆6ݔM0֖sN;)^ 9Vg>sōa <|Lg1 H.inz,R *s!{-,A`?@0kw LF Fh0TS*9Ρl * 7J `ۇVStZ|br*U (;@ I|rG(1G9K:[1FcnWڨr4VG_1;  Jsd;EN:si]ixT぀oQhWXJJvy;CG),(6x܌~Sh?ϨF6TuL[89Y̠a0 ro%B\'7 ]t07؜n=/ JfdlH9L6<]ATbrJT :)uVy:ӦIݹSIzٗV&kӮFyQ؜2f'p> 2 <~ W_JoIBۯj,?RG%!r*2Mf}`(՞tB/|=[)B@*W,}m.r0OIBnJ"f@'d\Ϳr<ӯ c0vlLQv?\GdhZ!i/{ՏnG立&3{Ph[| 64}iFV}7qĩ_gpyeqR2cNi}Ŵ~+gIFxjYkz6u،J@"<Yt)3νVj|''KׯBussѷ1_tǨy{ UQmcu2#R%^[)ǜ( ^[ H42$Q՜`LPږ;nV82w| ;#1]W\]-,}w7kvd\u`81@JJEOR"4'I X!p3Ig }ӗ^SYKo?^ڙt_ JVXiky> 9KY9#[—Ȍօ*k1Siďr}µ?D\ 77OK'>,]KCBb{k* N{LK: pU) JR7ؽ/ ^ʥdū6bU6ߺ7ـnpu:*V]: YZ4T/ߴ$ee7e*~?i)VC01m\IgƝǰxss\D{$P_4Tqi=o3'N:O%rvE~S.1g@̝in,̻cHYϴtRK-gA7(elUf,3c'}L?0Z8c(Vb];?G:Muȕޥ$Ok2ëhf:śE,Eo n~LcӼX7n ‰pQe5At]*AqQ:WНw;4)Ohӵ|B5k$((۝ꔎPRH^)JRn"$Tչ+J9TPz*:mu-9O}imliTy߾뻁2ٸ9:e㒌L*}¥G4?wY#o٠P\~sҟ@W|FQ?UN(4hΩSDR,3S$O~P*}*EyWh#!:AhJyOPc!GP}Eйx/l&X _z&غ~ݏ8W j#^U?|g_U_>}tw-U9~,nw+Q->QUS{Xwٻn%W rEYE6g0 $7)AgK;6^!dHdRgt죣b}ŭO*D ؛H!hz֮{P4Z~8? 
>>Cu]Z| wmppO(/Qpy- 6&=./{XgIYYЙY9sO殖²t!psr}{{O*}w~u.γw˯~ڱ-{mY~aL4S9nz`8fDUWe?3Tٍmv{02) Cqb8;,}0-f3p0n X}qs7Fѱ~H6aJI\í ~jbQ  .`kA#㽂QVUs- -|$zxUZ,<ᙢ[4\ny[<=)o]R&[ZFC] ~wnN]whHv@ ~d S3ĘƦ8Cظ_zcTkYM/q ,YtuLjRʍ0ΐ$6[/ )hYKv8&b_<=8Ş_Kp(bOTI T1A^Cf8qQ7%-6#J\^ e08Gi5X(R "ӥ#T-^Jm{n#|7m6d9 Ѳ"Z(Y.5qQ;įy/<xήYv]񡅒iV7bv ذoT.hMRD1SPw"5lE6/\`7/{H,9# ߌR >L '5xTOhtMŐn47 1Շ箣z(/Fu@_-\sB -cK%5)B!*sk aɈš]Nw9v\ד_onۋoq.de?x>?\U|7Y]!N%=]`eOِEӫ1vo?`|xҟ?m'I">I"*&-Jph➗}(*ͮ,]\U4ァ*YK@qLhn3{Vsil-^=U"IX.֥QhhqVPMJwI}.,VzVej9 $tgR[ۘ[G+=H+H+Ź?&?;њmcWD<ONe%He9H/<ӆ$2j`2e fDkNu.,Spy+1:o|28n_fG=7DD^lg~+O Wj1nq"nN[6m{~p 20,Qq (;OtS9Mޛ4 +=>Ʃʔ(VcOOEP2x&Wkygzk38@.Dn#ȕ/rG:{-_p[tr_p&Vr_l]pԍ C3L xr&w.¿jt~Բqo{h}kf[ 9Kˀ|]m˚ 96@h4$cZAҸCIKUĞy@f` EPH)a$@Y'voj;2 6cͫn;2̬n;nˍ}V3i#X$Np׳]q .(%3 $] A{Od4B#( 8!_DC7 J=8)Тa(G3Ghth/ ,ݸ4ӽ-c5E/=<0g$71]󏵐_: ћ(N=}rV{WWG]Nv:ͫf"s wn.U9sMLϖ!JeM†]yOr*³gobgAk&/JoLGy{^BC#_/9 ⪲r7}W ;ch0/"rڒRSu lT8(7 pz í^TIqwY 5ͫ9l~ki$6*x\8HizJ4M4Wc6Go6'Wg sܺΒj;nJLYCӜ )G" 5hk ,Uw鲯~7$Aw $s]V**5T2Z!T.HeɎl%I䋼y3*圌 ŒӜ d!˰T+"KΘʜ*/+|WB{!Rvm*rWoA1TE(XTG;QRRKs![|/ .5 |ximhR.o;.fbkWj4МVnbSԹ!;XSIJ.x{=ز 1:)R@,D%"jdr:D(9j]LIȐ yaAzQf‚^5 U9۞{ mtT}Cו[XxkWyn밑\|Wwq^?[[uR[jREo 4 tPcb#U$Ib8F AVW/ UX=zdDFA[!̡T8'1fα)Wffe9l^wN&B T'×Ih' -XFPPm=Ǎ.>58j] q8b`C"=r1vRT `ǨM 'Γ7 siBэUl %A-vc%1!ĕKgRp)%:-D!e pEe4%PsB5:kZXh.2ܢN LHeFJ)s)-F%>kxS)1g CZx?f[E0"hb ԋkbA$bo\hŊ-5 5+ 3o%34X@+BW>=xKPZP^v7Yz4[R5\iC\ⷞ[i*@Pĭ&t%[!NKRk}6KFrVj,'A l0p _ۨ\ j_$:Y T7^\2A,^K@ñ|qRՀVh3+ i|$D(5%D Z|Dc g#W"Q9I?Ƒ8m1iG6ޖGs~:-^8Gjp]FĮ8SFEDjUOs'/GhHxʣ6@5O{-ªQ[h:KƟƧ$`CoJ)0RdfykB +av/!8Č%*ehRl.Klw<,\|!VOB 8ajFjyԺADkcyq'_:nA[%G/pRN׭vJ&HڐVmOz5fc1Dl1.BVLB,!y*[pFZWuZ\6H2\uJ&] GppcCrEƙ&0(Tf2̊3#M.JJ]6-;<0P\كg1ZÌay()#$y.vn7XZcY. ,X}f]!$rYHwj傑,Cvd1 N.֥9 IБݑtX_@OJudwZjđ9sV!{ )Xn֥F޲m:eBmcZ߫vךswn//^A+ !7L1ӱ6TDU8@Gjp spxVl J'3۟ ZMJ-ٺR7p]r RY hߔ6)DHJAK!Npe+B`g [M'ұMh's0M-$.|D?EKǓ H\LfT)=7`\EH^:dsLAn,{fR$Y⊛&, ':욳heȧ`c.q(IR $qEG&-QzMG4DA6Ԍ%tj 6kI\/K2cz:Em815|"7~ 9.9O0ϩ5dBt5ɐ Y˂e@&sԎ &5] QJ6FW )x73O\V7Ms.+smlit*roVTfVsYaLaɊʬʅ}I_8뒿 <՜qˍ=.4"Z۝Hkc'ݶ@TIIBygXUkS?4p`ROuRN WC^HaM׬@Z-4y.BL0\jb6_E?/Soߓf%a%ӣ_%J:" ŧ/ϫYd$+*Y&U%6/Z,Vo1&{˯ ̠˺aƱQ_ T dM]/8Ωs[|(bAA5 ({n89\16S0\vA+n0yz ;HCA 0c$r>3܇0۰ (9VrNr9 aFHT 'aƻ\qH8 a:Gc2G㡧ucF۹-a%gRQAWKO^fU $6" 3 (Ea AR̪uOo5H/|٥٠CB#{cM#Ect+Tx+"AKKZ6P8zQine97ǒ,G,9I '>:dsiDLxvDOy$4$9J2ɵ)Vf_+p>= %IKe$:'Io>ᡱ QV' %5`\Og\G49t 6%:tN6+i$.R >yIKZM|e,*JVH.N?J&Z ;3ISL OdZ?_+0]E rU'-˧.DfIuvW18kig{əb'/h^FbCbhmr1-춴*&x!*)TtY.%EKYldiS.9@Qʘd2+da4rI7(a v~%G)7A{wv=@!$ۉδ1w9ձAW*5Z! w_PӾT22w'[gK'%6jZ-GMig# HHƪ*Q *Ϙȸuysb22qČ8 %Eg@&r=#0UXd;t}`a$ ,hq[r T +UJ 3H-']w7YoO{!QzQjN07t/\Am)p O!F(Sϧ8PJkjvm \̇fܖ!XWI8h XD@tҐJ,"u j<̦پ+>Awv wvj^ ˹,=٩!cNQ>u';;M'!;;ɉ!g%;;M':;I-u=}=ܡ[}O19`դ.hQBs1Lk~Brb./Vd9CZ rB*Ȋ3 Ӫ`XкHPs"qUWfaGz'g*+K`Y))uR:TE3*!~Pv} clkWKO}/&LTQ0rHL~.dEEP([LC)2S BD.~ @۾yVCad!>kSd`yЗ8 J1Ct(DƋ;q:t(gKr,^r:]~bt‹l_[($N(R7jn})ta*0tan[V5#޿nj܈ׯW(_g?]fqmd60ݺBeE~{?8[R+!1 o_=~OXO^݅bLK7? l9څqDٖ Efh=. o%hQ}9HʙY莻sw|w f+"&wGSh`iiXxMyD3Pcgఅ D8_F+պMa\=ݟjʃQ#M(vz˿XKOzO5ng.7n<=.NU4# *V7+/nwB]S]U~_>ޔMVwX]tS?}\v_^޺G=,cO-ᦵg {.wGFk^,~O=hɔ !pS fFoX7`Vn}y:}cwY[b@6UN!ǹa` Du~#ƺfYx@6US&PL0Q&ތAN?ܸkUkZ>xSPwu~'Eϗ,^Ȱ 2ĴSٿy@^KR RC [ƯOM }ЅSHZ3&K"V"y.%FBgb-tDhVf{J' chM2 Y5/s o{콚L7 }RJ9hK6k;W,j|wQM9˟>j쮹*y^_'sr&Z>}fSKZf7end/JzڞpKZ[}4e=>$$s#MQhr dJވd•P%;9BOӆ  2-U]}|N'~-ւd7X#`,L4d-cͤ8ƾ1v$ќhb=| 'q&.=dDI-FRr ~V&dCjVO޸! Րj6/8JW3̊tfʥdiTr%222sEDqYo%*[(P+.7#N"A譙 DsˆIc t;o~чp &#O0X8(c'+3 +уxAJ>8L6U浌m װZi2lXx7%59,ȰMTɶkEC^ޭidBC]݇2$+$ :~߲ӆeO90pf=( G-ϧ mjZdAVSh004d GBy,x*ֻ Љ@jE*J]TސsSwvZUJq :'X9y,KSэޒ'9&&oˌ ;? 
%`_rQJtx)\gC$Nn02ѦJ-0%'2ZWr$+(XZւभ,JwE&rK*og$A2,!@"Ղ)~VCCRBPC}HFVFZ[iS@>8V+-JOj" ȑ`p}ƐFf֗jcHc!|x(O߳2JUA%߿=[y(uoōKWsCMTp.yҟD.Jz1;s_7,Jk3\u}ɮ?{Ǝ`nޫx<9 fv1O]lI O%˲b;+-ꯪXXE<_u0֝=>DY/Su={op<Ճ&Y("32 ydȲ><[@@El ߛ)'oBŲ17D}i[{g-=hέ11X]k.w㭃:x*u۰!,CᅙTs[phg8$,Ǡ⛛3ߏ)LpӁmn$Oߥ0O?]s0S| N '}l[$º;M;!reji5u5^b Qro] /gվF]ע')g}藳u`?T[wJ"&v7_#vO;t~5aFO=O$f9n~ٕ/ћ$_bէiaz[&y9E O3&>*2-ge^z%jh9}ɇCT\ssr/eu.^crw9-Hg B9`TqB8U^->O;rӔ,(dpI: EžBa, ->?%ۂQHZD&Z3 9U/Z7(ɧvߋF*k߅]v*ߋI;|.&~q"8:||ݢBf} ϗ& }*ٚ7>FW\ XѧݴX^K}qwٖiNIdJ)0<4ٛRq_}|OôFs.8~Z<暩ZBzj#)8’_Vkƃ!s/HZ}&MښЧƧU=vxҞ ʞs@ GO Rr[}K2hq'&1ZVbR$ FHv\f9'iR"ӎdL?$/J)NB3T@{sR M0u8K"?Ep/;#ɡtv!46*r)+iUAUc fmx GEw<ղnj)ךĤ &6Rciy5Z }Էybm&͘ Ҳ/,m|BZ$2i/$ő1ԚG|о4-ɠaV*.$SoI2 =OVG%U j-'#{c8lpއ=zw?k&YSSqkH}d/Kto\@nI|Y\Co5c@EژX`ё9%3&fU5~(@40@ 3xzs UK-[sbARHG Aq)anu;o]kseȈɖ+xf^wubRd.smlAwn,.w-$#gwdaQCN# !$;x lU7ٙCnwe+5\wlc(;;Ӗ|ɢEa2DXpb.ӑ9 (pl>{`ً%h 2kt7NÙ͎܎kGA! x֏&Xx?i~4ح(RN!p##] ,k [ou<ޡ1O1 r+1!WI*9NVr^ (y$0=4?FTw0(dCL,T>;oxGꙬ0.n,i\ĚiWStT_\׶'hS9w\9f `,0\s_UR; ß~ xg8qy'[$cKP 2 2 2q ;(I:F7t<%3ݍɍ.Ke jhϖ qK's|)ՁIב'QZUZy'ڨ^N?S $*EH(I{r/yBdNl@eY|]qIЮJ7SL^e}Q>M8Aox:#O >4(5|iŸق-F)GVw]Tf ,sh9^.GukOcPv$,+0PS˛+ժ'>Ңrh=Am[BwӑA)B2iS1ژeNJ썐fp}xUgD *F'U;pXaZ?LW(   :LInLp'!k٬ZUABq>b\umco=Gp90VGHCt:qmr^휴B| y5 ΞWSx6~xv7kO WMzl6[IQw[?.==TSuNBHt?Zwxgx-ιuʁU)kR87``ӻVA3 n'{tf :If($Pj'B8L[/ʘ--ZĠ^$SVF.BPsJTQw53Oɷs亍 )։AѨ% 5+Ff~MӇnކڧ]KQDTy_+=Xٰ r̩gKҔb^PױlY6=MLY o0 h,叼 [VK{QVKmY]:_DYu1JowD‚OQB \z)bO^^{j!ZA)zw)nZڭx`KQ"RBʵβ)?!0(WFbeL<)YDgPۭw)}EX|p&ks|VEt,1 C,Qbt)u)x1\OB[⋛vL6b&UՑaZ?}nV})"4|\iDy7۟ {x]_{ǚ\} 39;bf_n81ͿػF$Wޤ< n,6l_ "O-R O$II%GX([ܐbVwFfԕ/w}-.BQGO65lq8eE pC W4OiWs4cA. #!lRQ)-O_ǓtޫlUVhr m̨F'tSI;ްk [m85.m4jny4"gfL&'׷[fCN(Vԕ!yd% c}50¯lM.ۓP׈&tlEk5 KRe遰h@AFZP΁,eWl/%pj~H-ml̇k:v]XlTä )L'3[Gέ^/l[1jN׻<1yG2:/#9l6[B8W-1L s1Nz4V W%^~idS@iYgKΐ@t\5۝,LȝjJgN!D&ք22+[QRa9?Z\ٶ|^:=Az .ȂBPo=m>$Cc- ڻ3^qa7ܘpW_qցcQh&_Nz fZ1,.hb+F'DkbYr'h*7´-,$j<.sk%Rn43D*rfw{Y<7)ylk huVwXc٘Iؓ&1g fL >Q^ԟe*9g=$HؙAh`Yh-2p~#KV_>tJ35aLdC-8bGAQLLjeugJQ' JA'Tk agNnsê6m _IಁIh(r8>'$vF`n^H?8U$|Wg_,|NGK*Y:%f1Q]3(OL3DP/zP<K e:R 0Wn@\WAk=w)Jr0ǧ`>ñ`TSD\k9TaQ?LR/rdQrkg_ށ jcߙ\̎pt\gn: hOpW*?o:㋫QHPFS#W̕Uˇ}E*>?  yԄYTx,WBV=(흧z P:~ӯ ec\`2šV/F_ SRXЙ^g圪EJQ#\2` kdj:4KP Z|)\i=u\U͹./䋯.)?3z~ (kcVDY<K{{1j} 72% d HLpvUxiUQ`mrnQ%T$*HL >&ʼn Qq-[f盋|MPr^y֗xߺH^tjϞ\q%Ǡ Gu&6PFdŸhAR5L%3XέKr$^?Vk2HwQ9EW ֛vV}PLh/ŽKo 5(wemf+`jRa )Q▃q,CXgqK 7h 93֐%~,"+r~ʕSMy\ۦrH ~VWߓr/S0ϥqpԔF;7;fl3% &H6Z4K|Oޥ+Qʩb_ޥsٞ.ׂ~y?֧BXVy;?'8D@_6f*Byɬ{7brFrݰTtI˕3+>^.Ξ޻.F[F+{]ԼA.ӨyI.C5B !r[f7:KiF~Ƅ 7z1v^76+ 69!ϕض*/Jbzr<Q+T   Ug`bD'4+WYl'2ZS6D7 {歒jnWcE0?fdWzy2_Ev ꛓ è+KҺnG9MWsMGMծZbEbuy UްFz x8: 冊* Pl#/xU+oX-,l+PjT*dA@^yd1Z`pAH;eA 1-Ų4u۴ik4vr'1TggLB*.P)ERh :KS 4Qqg Smu e-)gY8JR ۴,[VF7!#F-֬\yz 6{ic.OæxɘY$3Zij /`wԃGK/\6?g쩰橛pFQkv} # 8c0!`V@sI~ᄼtR ։̴\fJ6h3,&ۂV !|eĴ3T{OHrz1v$>iܳXwg/6K$\n"%(Rbzntd2+#"8L䅳xvL_b"\h <ܚX̬԰X,,qڥ+q] *g< 3e,KZwHURŵǒh/j%Z(3!JwZkV3EP56RQR, ȈRe6k}vmZ=1lmҟ5aq0Ua p8{1G xl舵B*b>5$\HM [] Rͷ,[F'Yp狙 DN_k%=%//Z jx!pBq7 p*T+0lܗ@Z0iT }C` LXnXِ1seq}]$ygR du5(T39[/m@jp[L|4jsZs z!T8Xg2BDǬCwöPW|b~dO|Ҕ͘h*AynjEbȒa S݀R?͍Yo/êBiK;wE)OHy)ݴ ݚ g4}Fv<hj*kּHCtkcηh=EO~!dQ5Ai:G(h% kּVtkcη7fܐh7qvۑJt;}um׏\*VhEKD שb Z|3Wwț|چJߏ7-]~?JiwUyr@[b*״4Le#k? 
pT\N.%@AJHrP%%UXZ=6"jJAA$y iB1r/R=Y^cMe<>iS8FiS*=M6tDiA*La*U;Dӱ1-BslA, #@{`[>21!I-aThs.vwlBY^@gr[x=8v rtBS {Qsg5[A{b> 5Ғsq@J x~, :*9Ob('+Y|Y_F<ƪ?J.KɃyۚ;'K-jIW?d{d2DUC(I6U3 m7|Lji;`vh#)#㎒ڨ-r l&ߡsɤ2Ήm LDŃI4NvݼdOr% vKd@D{~=N;{v7d~o _[LY8:8u6t/U+ǭr2aUqd͵ vCNGx Gg;fpU29an#axQw[Y5J{ j(.U}aG։%QM<}SI,#C%+CSqṁx D?i=aC1#m"| KMID'hȻtc"ue!%U}\V9{-':⠼W4p+]T+/m8 I2hׂ:J"T,5 54ɌI㆔b1;L.K-Ape` SS|a&}~ɫqds美S!MnNW?ٲ*r3zB&>ap_ Ua.|*?,B_'Oa -r~A, !~/o_ewnM~Mwf\ƧZ@XӷӋ?jB+,)u){G<4ݤki<`*#W{`^, >7гMX=ܻ/*(J],4R:X)+k}#3[c ``fyG p/q5"E8nH!A>k65ny&RSN"^تbT/Iī&{үσZW"ub(Ǔ6H-tHG8\J'sy;ELS]]S-{>\L^|h[F4=%"ݓ{Xc6+y&{%kÛ դSW)Hѽq2ZIAGQə4/+/Nk) oꗄlr𞩥yZ[?YXXݱ)i=דga-&Nd&NJ)8ojZ+\5Tj[oNeJQ]IiJS6' G Uh۱-6!M#A0!riMpx{ݞ|5Ɯn*xZoś}X F1 T"WCI1[(a% -zc5GGX U`Qfi0l2|j}a |m 0#Rs%JhF\ӛ1lglP(1]kUg^Xy&i1S&N< 8݈S_EF֋tȬr2n ~d!<4~E;1HH;8eي@M.܅':Ż' {lp<y&K/[g"_~5_]WXvZG#BrѣPܮR{]?)n+7<Q^ށ Se-'EdgE$J'.{]~ (Ѱe)KW4]E\KЌDYĬ1Qh_dyؿjݨ(sih2i KNB Β1~馭B:k߸pCq:EdS!Бv̴kOo ?,@G2%"?#w8!#8(['r5wcqO Eןt>2HQ0y%`+R, EO(Ĵm UQb z~~}B띶#z@]``x!0CHgبG񍢜 _B0a#^T쿱s "сL xh31ښe7&d3݊ YZf`Ku\]15é u19&``LҎވ#ӒS1ǖLQ;4.F/;Nm\ Ibi*#Q5O8GyofQ:LiN*Jcpw,-H(ɐUg@VD)GT-9I`2M5= "T=jB \m4Cć$LU"ۇZgqE^9;Q:I~sbgSBRqkdaC|xhFW{ 'V_7{+%#Z XCŦ N-Kq)~9Aq-%1EPQ!}=EŖ_Cq@~QBg၌v8E .W(V4@22Hث!w&淅 cvBY)0'aZ}P{̂ck/aX.z,r׆"cEj%: _R1./;EEnU成a"Ƥ0u)s:_Wr+UF%uQa[ܭ{tPR!#B *09J0pkQjs\KF9"Zƭ$sV8J0<`Dž`P Nwptl= r2CNj |֔Y'L[܎mnwmw=ڂw`pl*R^4rYrF1/BTQ>(1$\T;1LU w F`׉jy0*.$8+@"MvA,= xuk'^;qnO=dcݞ~Ȗ=+݈w$AYo]LYF˅3p?݂|?ƭ$_(  htQ5Og9Tp9@[&]˼=F;Fs" տ^ɐ~(6 O?eϜ; }=e߭*k=xRXq;_{~eߠ= ^9\up%8Np%>ޱu\lBz#3P9,| ѽ ڎ:/ 4O1$w;O~g7 '\!T%d/JNhгo~fQ5z+yd)+,W/8hDg4E冏'ן}SV,z?@ M?~MWX1o9xҴ=o$ogL[^T7;ԙFrn<$#$O)h$C YbIz0NjMeI/E17nBE}Q*zdU8!qT.B*R%mm^ߐQ$: l6C.$ _$-ikcZY5v=L<[.Ofu7{NOy?ㄒtGrW/3)[;)8UM K{HdcZ^5--Tp~@STоdfd'sԀA[8pI}GB`4&TkBFf&Wq:N]#f7dʚn?;%,֌.뇻GSéɣ_~OJ4$~YUw^53?qoO:=\No9լŝibՇ;{''\=6uyyR ͖Ť\rT{練j&d6RM8El_Aъ؏cIȭ[ȼpkH+k\ q& <{_oƫ+z"Q4OEs<얲-]>_w7_}l yo}rnI˥6&rn_T镣5'ZvkVa饿!7#NyE"@nXpQm_܋-> @'@!iV[sЂ9s! K0&p=Z=i!f  +N4v7kYW-ZܚZGOF#u.e΁XT譔CqUkik({dK-n?D󰣓T VѐH$@$up/u&U.XvĬK!m!`y PQcW{瓋ٿY^u76x߾{<հ(V}ja|DyՕ(IoNHU}jYJ՟yBjKmξ_voU}[JUZͤє{JU-Z0oR 5KRnG߬K)R